test script works with guardiand

spacemandev 2022-06-24 13:22:18 -05:00
parent 9e6b001493
commit b48b7ce32d
10 changed files with 83 additions and 790 deletions

.gitignore vendored
View File

@@ -1,2 +1,3 @@
 .DS_STORE
-book
+book
+projects/wormhole/

View File

@@ -1,2 +1,3 @@
 node_modules/
-wormhole/
+wormhole/
+nohup.out

View File

@@ -1,6 +1,6 @@
 import { exec } from "child_process";
 import fs from "fs";
-import { ethers } from "ethers";
+import { ethers, ContractFactory } from "ethers";
 import {
     getEmitterAddressEth,
     parseSequenceFromLogEth,
@@ -21,6 +21,7 @@ async function main() {
             console.log(
                 `Deploying EVM network: ${process.argv[2]} to ${network.rpc}`
             );
             exec(
                 `cd chains/evm && forge build && forge create --legacy --rpc-url ${network.rpc} --private-key ${network.privateKey} src/Messenger.sol:Messenger && exit`,
                 (err, out, errStr) => {
@@ -43,6 +44,24 @@ async function main() {
                     }
                 }
             );
+            /*
+            exec(`cd chains/evm && forge build`) //Compiles EVM Code
+            const signer = new ethers.Wallet(network.privateKey).connect(
+                new ethers.providers.JsonRpcProvider(network.rpc)
+            );
+            const MessengerJSON = JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString());
+            const MessengerFactory = new ContractFactory(MessengerJSON.abi, Buffer.from(MessengerJSON.deployedBytecode.object, "hex"), signer);
+            const contract = await MessengerFactory.deploy();
+            console.log("Deployed to address: ", contract.address);
+            network.deployedAddress = contract.address;
+            network.emittedVAAs = [];
+            config.networks[process.argv[2]] = network;
+            fs.writeFileSync(
+                "./xdapp.config.json",
+                JSON.stringify(config, null, 4)
+            );
+            */
         } else {
             throw new Error("Invalid Network Type!");
         }
@@ -71,13 +90,7 @@ async function main() {
             const messenger = new ethers.Contract(
                 network.deployedAddress,
-                JSON.parse(
-                    fs
-                        .readFileSync(
-                            "./chains/evm/out/Messenger.sol/Messenger.json"
-                        )
-                        .toString()
-                ).abi,
+                JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
                 signer
             );
             await messenger.registerApplicationContracts(
@@ -86,7 +99,7 @@
             );
         }
         console.log(
-            `Network(${process.argv[2]}) Registered Emitter: ${targetNetwork.deployedAddress} from Chain: ${targetNetwork.wormholeChainId}`
+            `Network(${process.argv[2]}) Registered Emitter: ${targetNetwork.deployedAddress} from Chain: ${process.argv[4]}`
         );
     } else if (process.argv[3] == "send_msg") {
         if (!network.deployedAddress) {
@@ -99,21 +112,17 @@ async function main() {
             );
             const messenger = new ethers.Contract(
                 network.deployedAddress,
-                JSON.parse(
-                    fs
-                        .readFileSync(
-                            "./chains/evm/out/Messenger.sol/Messenger.json"
-                        )
-                        .toString()
-                ).abi,
+                JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
                 signer
             );
             const tx = await (
                 await messenger.sendMsg(Buffer.from(process.argv[4]))
             ).wait();
+            await new Promise((r) => setTimeout(r, 5000));
             const emitterAddr = getEmitterAddressEth(messenger.address);
             const seq = parseSequenceFromLogEth(tx, network.bridgeAddress);
+            console.log("Searching for: ", `${config.wormhole.restAddress}/v1/signed_vaa/${network.wormholeChainId}/${emitterAddr}/${seq}`)
             const vaaBytes = await (
                 await fetch(
                     `${config.wormhole.restAddress}/v1/signed_vaa/${network.wormholeChainId}/${emitterAddr}/${seq}`
@@ -149,19 +158,11 @@ async function main() {
             );
             const messenger = new ethers.Contract(
                 network.deployedAddress,
-                JSON.parse(
-                    fs
-                        .readFileSync(
-                            "./chains/evm/out/Messenger.sol/Messenger.json"
-                        )
-                        .toString()
-                ).abi,
+                JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
                 signer
             );
-            const tx = await messenger.receiveEncodedMsg(
-                Buffer.from(vaaBytes, "base64")
-            );
+            const tx = await messenger.receiveEncodedMsg(Buffer.from(vaaBytes, "base64"));
             console.log(`Submitted VAA: ${vaaBytes}\nTX: ${tx.hash}`);
         }
     } else if (process.argv[3] == "get_current_msg") {
@@ -174,13 +175,7 @@ async function main() {
             );
             const messenger = new ethers.Contract(
                 network.deployedAddress,
-                JSON.parse(
-                    fs
-                        .readFileSync(
-                            "./chains/evm/out/Messenger.sol/Messenger.json"
-                        )
-                        .toString()
-                ).abi,
+                JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
                 signer
             );
             console.log(
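
A minimal sketch of the send/receive round trip the script above drives, under these assumptions: the same xdapp.config.json fields used in the diff (rpc, privateKey, deployedAddress, bridgeAddress, wormholeChainId, wormhole.restAddress), the Messenger ABI emitted by forge build, the @certusone/wormhole-sdk helpers the script already imports, and node-fetch for fetch. The relayOnce helper and its src/dst/message arguments are illustrative names, not part of the repository.

// Hypothetical helper (not in the repo): sends a message on the source chain,
// waits for the local guardian to sign it, fetches the VAA over the guardian's
// REST API, and submits it to the Messenger on the destination chain.
import fs from "fs";
import fetch from "node-fetch";
import { ethers } from "ethers";
import {
    getEmitterAddressEth,
    parseSequenceFromLogEth,
} from "@certusone/wormhole-sdk";

async function relayOnce(config, src, dst, message) {
    // ABI produced by `forge build` for the Messenger contract.
    const abi = JSON.parse(
        fs.readFileSync("./chains/evm/out/Messenger.sol/Messenger.json").toString()
    ).abi;

    const srcSigner = new ethers.Wallet(src.privateKey).connect(
        new ethers.providers.JsonRpcProvider(src.rpc)
    );
    const srcMessenger = new ethers.Contract(src.deployedAddress, abi, srcSigner);

    // Send the message and pull the Wormhole sequence number out of the receipt.
    const receipt = await (await srcMessenger.sendMsg(Buffer.from(message))).wait();
    const seq = parseSequenceFromLogEth(receipt, src.bridgeAddress);
    const emitter = getEmitterAddressEth(srcMessenger.address);

    // Give the local guardian a moment to observe and sign the message,
    // then fetch the signed VAA from its REST endpoint.
    await new Promise((r) => setTimeout(r, 5000));
    const url = `${config.wormhole.restAddress}/v1/signed_vaa/${src.wormholeChainId}/${emitter}/${seq}`;
    const { vaaBytes } = await (await fetch(url)).json();

    // Submit the signed VAA to the Messenger deployed on the destination chain.
    const dstSigner = new ethers.Wallet(dst.privateKey).connect(
        new ethers.providers.JsonRpcProvider(dst.rpc)
    );
    const dstMessenger = new ethers.Contract(dst.deployedAddress, abi, dstSigner);
    const tx = await dstMessenger.receiveEncodedMsg(Buffer.from(vaaBytes, "base64"));
    console.log(`Submitted VAA, tx: ${tx.hash}`);
}

Called with two entries from config.networks, this mirrors running the orchestrator with send_msg on the source network and then submitting the returned VAA via receiveEncodedMsg on the target network.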

View File

@@ -1,677 +0,0 @@
2022-06-23T01:46:09.080Z INFO guardian-0 status server listening on [::]:6060
badger 2022/06/23 01:46:09 INFO: All 0 tables opened in 0s
badger 2022/06/23 01:46:09 INFO: Discard stats nextEmptySlot: 0
badger 2022/06/23 01:46:09 INFO: Set nextTxnTs to 0
2022-06-23T01:46:11.836Z INFO guardian-0 Loaded guardian key {"address": "0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"}
2022-06-23T01:46:11.846Z INFO guardian-0 Telemetry disabled
2022-06-23T01:46:11.848Z INFO guardian-0 publicrpc server listening {"addr": "[::]:7070"}
2022-06-23T01:46:11.853Z INFO guardian-0 admin server listening on {"path": "/tmp/admin.sock"}
2022-06-23T01:46:11.858Z INFO guardian-0.supervisor supervisor processor started
2022-06-23T01:46:11.911Z INFO guardian-0 Starting Terra watcher
2022-06-23T01:46:11.919Z INFO guardian-0 Starting Terra 2 watcher
2022-06-23T01:46:11.939Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.939Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.954Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.956Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:11.980Z INFO guardian-0 Started internal services
2022-06-23T01:46:12.014Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.030Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.031Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.032Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.056Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "381.299405ms"}
2022-06-23T01:46:12.063Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "403.14798ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "718.397523ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "695.131816ms"}
2022-06-23T01:46:12.065Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.066Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "529.532934ms"}
2022-06-23T01:46:12.087Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "346.285743ms"}
2022-06-23T01:46:12.088Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.107Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "288.940962ms"}
2022-06-23T01:46:12.108Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z INFO guardian-0 publicweb server listening {"addr": ""}
2022-06-23T01:46:12.128Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "275.125674ms"}
2022-06-23T01:46:12.129Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "503.988233ms"}
2022-06-23T01:46:12.130Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022/06/23 01:46:12 failed to sufficiently increase receive buffer size (was: 208 kiB, wanted: 2048 kiB, got: 416 kiB). See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.
2022-06-23T01:46:12.149Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "371.860783ms"}
2022-06-23T01:46:12.150Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.165Z INFO guardian-0.root.p2p Connecting to bootstrap peers {"bootstrap_peers": "/dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw"}
2022-06-23T01:46:12.166Z INFO guardian-0.root.p2p Subscribing pubsub topic {"topic": "/wormhole/dev/broadcast"}
2022-06-23T01:46:12.169Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "479.554257ms"}
2022-06-23T01:46:12.170Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.177Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAAIF3YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.190Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "267.325443ms"}
2022-06-23T01:46:12.191Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:133 failed when refreshing routing table2 errors occurred:
* failed to query for self, err=failed to find any peer in table
* failed to refresh cpl=0, err=failed to find any peer in table
2022-06-23T01:46:12.191Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.193Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.196Z INFO guardian-0.root.p2p We're a bootstrap node
2022-06-23T01:46:12.197Z INFO guardian-0.root.p2p Connected to bootstrap peers {"num": 0}
2022-06-23T01:46:12.199Z INFO guardian-0.root.p2p Node has been started {"peer_id": "12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw", "addrs": "[/ip4/172.17.0.2/udp/8999/quic /ip4/127.0.0.1/udp/8999/quic]"}
2022-06-23T01:46:12.205Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAADM5IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.206Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:196 failed when refreshing routing table {"error": "2 errors occurred:\n\t* failed to query for self, err=failed to find any peer in table\n\t* failed to refresh cpl=0, err=failed to find any peer in table\n\n"}
2022-06-23T01:46:12.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "664.065308ms"}
2022-06-23T01:46:12.214Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "643.33179ms"}
2022-06-23T01:46:12.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.043280351s"}
2022-06-23T01:46:12.455Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.474Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "969.238316ms"}
2022-06-23T01:46:12.476Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.494Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.495Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.495Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.514Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "760.246505ms"}
2022-06-23T01:46:12.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.050614025s"}
2022-06-23T01:46:12.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.518Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "526.434771ms"}
2022-06-23T01:46:12.537Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "423.72529ms"}
2022-06-23T01:46:12.562Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "767.02716ms"}
2022-06-23T01:46:12.636Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.654Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "644.110367ms"}
2022-06-23T01:46:12.674Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.694Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "928.777565ms"}
2022-06-23T01:46:12.695Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.714Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "718.027607ms"}
2022-06-23T01:46:12.784Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.803Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "480.413639ms"}
2022-06-23T01:46:12.805Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.824Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "599.94366ms"}
2022-06-23T01:46:12.896Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.897Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.898Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:12.905Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.916Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "985.116951ms"}
2022-06-23T01:46:12.918Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.937Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "490.23796ms"}
2022-06-23T01:46:12.983Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.002Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.221199793s"}
2022-06-23T01:46:13.034Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.053Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "464.436068ms"}
2022-06-23T01:46:13.054Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.073Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "486.414266ms"}
2022-06-23T01:46:13.091Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.097Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.116Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "581.83612ms"}
2022-06-23T01:46:13.296Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.315Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "792.110201ms"}
2022-06-23T01:46:13.352Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.372Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "1.119418761s"}
2022-06-23T01:46:13.373Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.393Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "598.029357ms"}
2022-06-23T01:46:13.394Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.413Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.306859108s"}
2022-06-23T01:46:13.463Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.464Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.465Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:13.471Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.483Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.184855871s"}
2022-06-23T01:46:13.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.504Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:13.524Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.481885894s"}
2022-06-23T01:46:13.525Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "1.653330262s"}
2022-06-23T01:46:13.526Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.545Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "903.304159ms"}
2022-06-23T01:46:13.547Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.566Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.564108082s"}
2022-06-23T01:46:13.603Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.608Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.627Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.55356197s"}
2022-06-23T01:46:13.652Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.671Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.571900114s"}
2022-06-23T01:46:13.701Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.720Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.738Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.217342614s"}
2022-06-23T01:46:13.925Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.944Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "880.85204ms"}
2022-06-23T01:46:14.015Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.034Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "1.403058038s"}
2022-06-23T01:46:14.131Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.150Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "969.315636ms"}
2022-06-23T01:46:14.247Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.266Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.026900344s"}
2022-06-23T01:46:14.474Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.493Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "1.433589929s"}
2022-06-23T01:46:14.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "2.438293112s"}
2022-06-23T01:46:14.575Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.576Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.595Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "676.9799ms"}
2022-06-23T01:46:14.596Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "502.911284ms"}
2022-06-23T01:46:14.689Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.708Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.144392209s"}
2022-06-23T01:46:14.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.760Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.247276925s"}
2022-06-23T01:46:14.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.868Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "920.452437ms"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:15.015Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.046Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.018075883s"}
2022-06-23T01:46:15.141Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.159Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.975496985s"}
2022-06-23T01:46:15.198Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.218Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "848.255763ms"}
2022-06-23T01:46:15.219Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.219Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:15.239Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "2.227299695s"}
2022-06-23T01:46:15.240Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.259Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "2.128781487s"}
2022-06-23T01:46:15.297Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.317Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.846438262s"}
2022-06-23T01:46:15.342Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.360Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.449230867s"}
2022-06-23T01:46:15.458Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.477Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.48610792s"}
2022-06-23T01:46:15.809Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.828Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "1.719378196s"}
2022-06-23T01:46:15.878Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.898Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.425203675s"}
2022-06-23T01:46:15.952Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.970Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.003008048s"}
2022-06-23T01:46:15.990Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.997Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.016Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "1.624274118s"}
2022-06-23T01:46:16.042Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.061Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.350130088s"}
2022-06-23T01:46:16.084Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:16.085Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:16.092Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.104Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.124Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "3.227239733s"}
2022-06-23T01:46:16.125Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.145Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.746036618s"}
2022-06-23T01:46:16.146Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.165Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.27387341s"}
2022-06-23T01:46:16.304Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.323Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "1.270284631s"}
2022-06-23T01:46:16.834Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.854Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "2.141106113s"}
2022-06-23T01:46:17.001Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.020Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "3.377476402s"}
2022-06-23T01:46:17.189Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.208Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.507230961s"}
2022-06-23T01:46:17.344Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.363Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.244030108s"}
2022-06-23T01:46:17.408Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.434Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "5.267882146s"}
2022-06-23T01:46:17.454Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.64480936s"}
2022-06-23T01:46:17.497Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:17.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "3.522991813s"}
2022-06-23T01:46:17.573Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.591Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "4.459919713s"}
2022-06-23T01:46:17.660Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.665Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.684Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.943651069s"}
2022-06-23T01:46:18.155Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:18.174Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "4.815367041s"}
2022-06-23T01:46:18.482Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.501Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.183399705s"}
2022-06-23T01:46:18.623Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.642Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "2.151246866s"}
2022-06-23T01:46:19.046Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.067Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.709365136s"}
2022-06-23T01:46:19.068Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "3.470498357s"}
2022-06-23T01:46:19.087Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.106Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "4.002004299s"}
2022-06-23T01:46:19.395Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:19.396Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:19.405Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.408Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.431Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "2.948900832s"}
2022-06-23T01:46:19.953Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.979Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "4.070249768s"}
2022-06-23T01:46:20.465Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.490Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "4.68099847s"}
2022-06-23T01:46:20.638Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:20.659Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.660Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.679Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "8.484882035s"}
2022-06-23T01:46:20.680Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.726228022s"}
2022-06-23T01:46:20.701Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:20.720Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.927893074s"}
2022-06-23T01:46:20.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.761Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.714938635s"}
2022-06-23T01:46:21.093Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:21.117Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "5.685547874s"}
2022-06-23T01:46:21.136Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:21.142Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:21.161Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.053515001s"}
2022-06-23T01:46:21.847Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:21.884Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "3.626272661s"}
2022-06-23T01:46:22.105Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.127Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "3.657998601s"}
2022-06-23T01:46:22.426Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:22.428Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:22.441Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:22.444Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:22.466Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "4.171386775s"}
2022-06-23T01:46:22.582Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.602Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "9.881617684s"}
2022-06-23T01:46:22.748Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.768Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "6.174998288s"}
2022-06-23T01:46:22.798Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.818Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "8.303916185s"}
2022-06-23T01:46:23.033Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:23.055Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.957415405s"}
2022-06-23T01:46:23.137Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:23.156Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.025381397s"}
2022-06-23T01:46:23.702Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:23.739Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "4.536075788s"}
2022-06-23T01:46:24.117Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.141Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.101068358s"}
2022-06-23T01:46:24.239Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:24.245Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.264Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "8.403382909s"}
2022-06-23T01:46:24.443Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.464Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "10.535908405s"}
2022-06-23T01:46:24.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.523Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "7.258541148s"}
2022-06-23T01:46:25.215Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:25.235Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "5.519431698s"}
2022-06-23T01:46:25.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:25.876Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "6.192406237s"}
2022-06-23T01:46:26.050Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:26.071Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "5.731651447s"}
2022-06-23T01:46:26.209Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:26.229Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "7.895065277s"}
2022-06-23T01:46:26.598Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:26.631Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "3.897063041s"}
2022-06-23T01:46:26.640Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:26.641Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:26.649Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:26.666Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:26.686Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "9.799764648s"}
2022-06-23T01:46:26.836Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:26.857Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "4.688539534s"}
2022-06-23T01:46:27.277Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:27.298Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "12.524406984s"}
2022-06-23T01:46:28.993Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:29.013Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "9.759797387s"}
2022-06-23T01:46:29.195Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:29.201Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:29.220Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "11.023202241s"}
2022-06-23T01:46:29.288Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:29.311Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "7.50489408s"}
2022-06-23T01:46:30.797Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:30.826Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "6.519156792s"}
2022-06-23T01:46:31.188Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "12.590564047s"}
2022-06-23T01:46:31.602Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:31.631Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "4.205351693s"}
2022-06-23T01:46:31.632Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:31.651Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "12.716561437s"}
2022-06-23T01:46:31.813Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.834Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "7.271435966s"}
2022-06-23T01:46:31.839Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.857Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "12.612949664s"}
2022-06-23T01:46:32.121Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.144Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "8.11059061s"}
2022-06-23T01:46:32.536Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.565Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "10.727690014s"}
2022-06-23T01:46:32.701Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:32.708Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.730Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "5.88676377s"}
2022-06-23T01:46:34.187Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:34.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "7.268425411s"}
2022-06-23T01:46:35.070Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:35.094Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "19.09186602s"}
2022-06-23T01:46:36.541Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:36.543Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:36.555Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:36.558Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:36.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "8.553513052s"}
2022-06-23T01:46:36.907Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:36.927Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "8.910416941s"}
2022-06-23T01:46:37.395Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:37.414Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "13.967797812s"}
➜ evm-messenger git:(guardiand) ✗ docker logs guardiand > output.log
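The dump below repeats the same handful of failures many times over, so it is easier to read as a per-watcher summary first. A minimal sketch using standard Unix tools, assuming the output.log written by the command above (the "dn" field and the "dial tcp ..." text are copied straight from the log lines):

# Count "Runnable died" errors per watcher to see which chains guardiand cannot reach.
grep 'Runnable died' output.log | sed -E 's/.*"dn": "([^"]+)".*/\1/' | sort | uniq -c | sort -rn

# List the distinct endpoints being refused (host:port taken from the dial errors).
grep -oE 'dial tcp [0-9.]+:[0-9]+' output.log | sort | uniq -c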
2022-06-23T01:46:09.080Z INFO guardian-0 status server listening on [::]:6060
badger 2022/06/23 01:46:09 INFO: All 0 tables opened in 0s
badger 2022/06/23 01:46:09 INFO: Discard stats nextEmptySlot: 0
badger 2022/06/23 01:46:09 INFO: Set nextTxnTs to 0
2022-06-23T01:46:11.836Z INFO guardian-0 Loaded guardian key {"address": "0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"}
2022-06-23T01:46:11.846Z INFO guardian-0 Telemetry disabled
2022-06-23T01:46:11.848Z INFO guardian-0 publicrpc server listening {"addr": "[::]:7070"}
2022-06-23T01:46:11.853Z INFO guardian-0 admin server listening on {"path": "/tmp/admin.sock"}
2022-06-23T01:46:11.858Z INFO guardian-0.supervisor supervisor processor started
2022-06-23T01:46:11.911Z INFO guardian-0 Starting Terra watcher
2022-06-23T01:46:11.919Z INFO guardian-0 Starting Terra 2 watcher
2022-06-23T01:46:11.939Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.939Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.954Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.956Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:11.980Z INFO guardian-0 Started internal services
2022-06-23T01:46:12.014Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.030Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.031Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.032Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.056Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "381.299405ms"}
2022-06-23T01:46:12.063Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "403.14798ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "718.397523ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "695.131816ms"}
2022-06-23T01:46:12.065Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.066Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "529.532934ms"}
2022-06-23T01:46:12.087Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "346.285743ms"}
2022-06-23T01:46:12.088Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.107Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "288.940962ms"}
2022-06-23T01:46:12.108Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z INFO guardian-0 publicweb server listening {"addr": ""}
2022-06-23T01:46:12.128Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "275.125674ms"}
2022-06-23T01:46:12.129Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "503.988233ms"}
2022-06-23T01:46:12.130Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022/06/23 01:46:12 failed to sufficiently increase receive buffer size (was: 208 kiB, wanted: 2048 kiB, got: 416 kiB). See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.
2022-06-23T01:46:12.149Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "371.860783ms"}
2022-06-23T01:46:12.150Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.165Z INFO guardian-0.root.p2p Connecting to bootstrap peers {"bootstrap_peers": "/dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw"}
2022-06-23T01:46:12.166Z INFO guardian-0.root.p2p Subscribing pubsub topic {"topic": "/wormhole/dev/broadcast"}
2022-06-23T01:46:12.169Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "479.554257ms"}
2022-06-23T01:46:12.170Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.177Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAAIF3YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.190Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "267.325443ms"}
2022-06-23T01:46:12.191Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:133 failed when refreshing routing table2 errors occurred:
* failed to query for self, err=failed to find any peer in table
* failed to refresh cpl=0, err=failed to find any peer in table
2022-06-23T01:46:12.191Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.193Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.196Z INFO guardian-0.root.p2p We're a bootstrap node
2022-06-23T01:46:12.197Z INFO guardian-0.root.p2p Connected to bootstrap peers {"num": 0}
2022-06-23T01:46:12.199Z INFO guardian-0.root.p2p Node has been started {"peer_id": "12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw", "addrs": "[/ip4/172.17.0.2/udp/8999/quic /ip4/127.0.0.1/udp/8999/quic]"}
2022-06-23T01:46:12.205Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAADM5IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.206Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:196 failed when refreshing routing table {"error": "2 errors occurred:\n\t* failed to query for self, err=failed to find any peer in table\n\t* failed to refresh cpl=0, err=failed to find any peer in table\n\n"}
2022-06-23T01:46:12.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "664.065308ms"}
2022-06-23T01:46:12.214Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "643.33179ms"}
2022-06-23T01:46:12.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.043280351s"}
2022-06-23T01:46:12.455Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.474Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "969.238316ms"}
2022-06-23T01:46:12.476Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.494Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.495Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.495Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.514Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "760.246505ms"}
2022-06-23T01:46:12.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.050614025s"}
2022-06-23T01:46:12.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.518Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "526.434771ms"}
2022-06-23T01:46:12.537Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "423.72529ms"}
2022-06-23T01:46:12.562Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "767.02716ms"}
2022-06-23T01:46:12.636Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.654Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "644.110367ms"}
2022-06-23T01:46:12.674Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.694Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "928.777565ms"}
2022-06-23T01:46:12.695Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.714Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "718.027607ms"}
2022-06-23T01:46:12.784Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.803Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "480.413639ms"}
2022-06-23T01:46:12.805Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.824Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "599.94366ms"}
2022-06-23T01:46:12.896Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.897Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.898Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:12.905Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.916Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "985.116951ms"}
2022-06-23T01:46:12.918Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.937Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "490.23796ms"}
2022-06-23T01:46:12.983Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.002Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.221199793s"}
2022-06-23T01:46:13.034Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.053Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "464.436068ms"}
2022-06-23T01:46:13.054Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.073Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "486.414266ms"}
2022-06-23T01:46:13.091Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.097Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.116Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "581.83612ms"}
2022-06-23T01:46:13.296Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.315Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "792.110201ms"}
2022-06-23T01:46:13.352Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.372Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "1.119418761s"}
2022-06-23T01:46:13.373Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.393Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "598.029357ms"}
2022-06-23T01:46:13.394Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.413Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.306859108s"}
2022-06-23T01:46:13.463Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.464Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.465Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:13.471Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.483Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.184855871s"}
2022-06-23T01:46:13.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.504Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:13.524Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.481885894s"}
2022-06-23T01:46:13.525Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "1.653330262s"}
2022-06-23T01:46:13.526Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.545Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "903.304159ms"}
2022-06-23T01:46:13.547Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.566Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.564108082s"}
2022-06-23T01:46:13.603Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.608Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.627Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.55356197s"}
2022-06-23T01:46:13.652Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.671Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.571900114s"}
2022-06-23T01:46:13.701Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.720Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.738Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.217342614s"}
2022-06-23T01:46:13.925Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.944Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "880.85204ms"}
2022-06-23T01:46:14.015Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.034Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "1.403058038s"}
2022-06-23T01:46:14.131Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.150Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "969.315636ms"}
2022-06-23T01:46:14.247Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.266Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.026900344s"}
2022-06-23T01:46:14.474Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.493Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "1.433589929s"}
2022-06-23T01:46:14.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "2.438293112s"}
2022-06-23T01:46:14.575Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.576Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.595Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "676.9799ms"}
2022-06-23T01:46:14.596Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "502.911284ms"}
2022-06-23T01:46:14.689Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.708Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.144392209s"}
2022-06-23T01:46:14.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.760Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.247276925s"}
2022-06-23T01:46:14.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.868Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "920.452437ms"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:15.015Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.046Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.018075883s"}
2022-06-23T01:46:15.141Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.159Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.975496985s"}
2022-06-23T01:46:15.198Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.218Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "848.255763ms"}
2022-06-23T01:46:15.219Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.219Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:15.239Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "2.227299695s"}
2022-06-23T01:46:15.240Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.259Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "2.128781487s"}
2022-06-23T01:46:15.297Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.317Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.846438262s"}
2022-06-23T01:46:15.342Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.360Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.449230867s"}
2022-06-23T01:46:15.458Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.477Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.48610792s"}
2022-06-23T01:46:15.809Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.828Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "1.719378196s"}
2022-06-23T01:46:15.878Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.898Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.425203675s"}
2022-06-23T01:46:15.952Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.970Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.003008048s"}
2022-06-23T01:46:15.990Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.997Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.016Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "1.624274118s"}
2022-06-23T01:46:16.042Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.061Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.350130088s"}
2022-06-23T01:46:16.084Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:16.085Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:16.092Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.104Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.124Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "3.227239733s"}
2022-06-23T01:46:16.125Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.145Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.746036618s"}
2022-06-23T01:46:16.146Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.165Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.27387341s"}
2022-06-23T01:46:16.304Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.323Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "1.270284631s"}
2022-06-23T01:46:16.834Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.854Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "2.141106113s"}
2022-06-23T01:46:17.001Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.020Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "3.377476402s"}
2022-06-23T01:46:17.189Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.208Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.507230961s"}
2022-06-23T01:46:17.344Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.363Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.244030108s"}
2022-06-23T01:46:17.408Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.434Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "5.267882146s"}
2022-06-23T01:46:17.454Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.64480936s"}
2022-06-23T01:46:17.497Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:17.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "3.522991813s"}
2022-06-23T01:46:17.573Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.591Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "4.459919713s"}
2022-06-23T01:46:17.660Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.665Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.684Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.943651069s"}
2022-06-23T01:46:18.155Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:18.174Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "4.815367041s"}
2022-06-23T01:46:18.482Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.501Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.183399705s"}
2022-06-23T01:46:18.623Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.642Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "2.151246866s"}
2022-06-23T01:46:19.046Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.067Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.709365136s"}
2022-06-23T01:46:19.068Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "3.470498357s"}
2022-06-23T01:46:19.087Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.106Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "4.002004299s"}
2022-06-23T01:46:19.395Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:19.396Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:19.405Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.408Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.431Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "2.948900832s"}
2022-06-23T01:46:19.953Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.979Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "4.070249768s"}
2022-06-23T01:46:20.465Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.490Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "4.68099847s"}
2022-06-23T01:46:20.638Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:20.659Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.660Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.679Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "8.484882035s"}
2022-06-23T01:46:20.680Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.726228022s"}
2022-06-23T01:46:20.701Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:20.720Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.927893074s"}
2022-06-23T01:46:20.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.761Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.714938635s"}
2022-06-23T01:46:21.093Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:21.117Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "5.685547874s"}
2022-06-23T01:46:21.136Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:21.142Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:21.161Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.053515001s"}
2022-06-23T01:46:21.847Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:21.884Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "3.626272661s"}
2022-06-23T01:46:22.105Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.127Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "3.657998601s"}
2022-06-23T01:46:22.426Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:22.428Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:22.441Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:22.444Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:22.466Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "4.171386775s"}
2022-06-23T01:46:22.582Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.602Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "9.881617684s"}
2022-06-23T01:46:22.748Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.768Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "6.174998288s"}
2022-06-23T01:46:22.798Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.818Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "8.303916185s"}
2022-06-23T01:46:23.033Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:23.055Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.957415405s"}
2022-06-23T01:46:23.137Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:23.156Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.025381397s"}
2022-06-23T01:46:23.702Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:23.739Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "4.536075788s"}
2022-06-23T01:46:24.117Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.141Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.101068358s"}
2022-06-23T01:46:24.239Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:24.245Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.264Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "8.403382909s"}
2022-06-23T01:46:24.443Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.464Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "10.535908405s"}
2022-06-23T01:46:24.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.523Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "7.258541148s"}
2022-06-23T01:46:25.215Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:25.235Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "5.519431698s"}
2022-06-23T01:46:25.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.602Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:31.631Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "4.205351693s"}
2022-06-23T01:46:31.632Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:31.651Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "12.716561437s"}
2022-06-23T01:46:31.813Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.834Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "7.271435966s"}
2022-06-23T01:46:31.839Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.857Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "12.612949664s"}
2022-06-23T01:46:32.121Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.144Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "8.11059061s"}
2022-06-23T01:46:32.536Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.565Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "10.727690014s"}
2022-06-23T01:46:32.701Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:32.708Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.730Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "5.88676377s"}
2022-06-23T01:46:34.187Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:34.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "7.268425411s"}
2022-06-23T01:46:35.070Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:35.094Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "19.09186602s"}
2022-06-23T01:46:36.541Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:36.543Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:36.555Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:36.558Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:36.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "8.553513052s"}
2022-06-23T01:46:36.907Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:36.927Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "8.910416941s"}
2022-06-23T01:46:37.395Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:37.414Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "13.967797812s"}
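Every one of these failures is the same thing: a watcher inside the guardiand container dialing host.docker.internal (which Docker Desktop maps to 192.168.65.2) and finding nothing listening on that host port, after which the supervisor reschedules it with backoff. Watchers for chains this devnet never starts (terra, algorand, oasis, and so on) will keep logging refusals indefinitely; the ones that matter for this test are the eth and bsc watchers on 8545/8546, which should recover on their own once the two ganache chains are up. A minimal, purely illustrative sketch of waiting for those two RPCs before kicking off a test (the ports come from the config further down; the file name and retry counts are made up):

// wait-for-ganache.js: hypothetical helper, not part of this commit
import { ethers } from "ethers";

const rpcs = ["http://localhost:8545", "http://localhost:8546"]; // evm0 / evm1

async function waitForRpc(url, attempts = 30) {
    const provider = new ethers.providers.JsonRpcProvider(url);
    for (let i = 0; i < attempts; i++) {
        try {
            // any successful call means ganache is accepting connections
            console.log(`${url} is up at block ${await provider.getBlockNumber()}`);
            return;
        } catch (_) {
            await new Promise((r) => setTimeout(r, 1000)); // retry once a second
        }
    }
    throw new Error(`${url} never came up`);
}

async function main() {
    await Promise.all(rpcs.map((url) => waitForRpc(url)));
}

main();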

View File

@ -5,7 +5,7 @@
"main": "starter.js",
"scripts": {
"guardiand": "sh wormhole.sh",
"cleanup": "npx pm2 kill && docker kill guardiand && docker rm guardiand",
"cleanup": "docker kill guardiand && docker rm guardiand && npx pm2 kill",
"test": "sh tests/evm0-evm1.sh"
},
"keywords": [],

View File

@ -9,7 +9,7 @@ fi
# If it doesn't then clone and build guardiand
if [ ! -d "./wormhole" ]
then
#git clone https://github.com/certusone/wormhole
git clone https://github.com/certusone/wormhole
cd wormhole/
DOCKER_BUILDKIT=1 docker build --target go-export -f Dockerfile.proto -o type=local,dest=node .
DOCKER_BUILDKIT=1 docker build --target node-export -f Dockerfile.proto -o type=local,dest=. .
@ -26,21 +26,13 @@ then
fi
# Start EVM Chain 0
npx pm2 start 'ganache -p 8545 -m "myth like bonus scare over problem client lizard pioneer submit female collect" --block-time 2' --name eth0
npx pm2 start 'ganache -p 8545 -m "myth like bonus scare over problem client lizard pioneer submit female collect" --block-time 1' --name evm0
# Start EVM Chain 1
npx pm2 start 'ganache -p 8546 -m "myth like bonus scare over problem client lizard pioneer submit female collect" --block-time 2' --name eth1
npx pm2 start 'ganache -p 8546 -m "myth like bonus scare over problem client lizard pioneer submit female collect" --block-time 1' --name evm1
#Install Wormhole Eth Dependencies
cd wormhole/ethereum
if [ ! -d "./node_modules/" ]
then
npm i
fi
if [! -f "./.env"]
then
cp .env.test .env
fi
npm i
cp .env.test .env
npm run build
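
Both ganache instances are seeded with the same deterministic mnemonic, which is why evm0 and evm1 in the config diff below can share a single funded private key. A quick sketch of the correspondence, assuming ethers v5 and its default derivation path m/44'/60'/0'/0/0; the file name is made up, while the mnemonic and key are the ones in this commit:

// check-key.js: illustrative only
import { ethers } from "ethers";

const mnemonic =
    "myth like bonus scare over problem client lizard pioneer submit female collect";
const wallet = ethers.Wallet.fromMnemonic(mnemonic); // account #0 by default

console.log(wallet.address); // the first (funded) ganache account
console.log(
    wallet.privateKey ===
        "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d"
); // should print true if the key in the config is ganache account #0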

View File

@ -1,25 +1,29 @@
{
"networks": {
"evm0": {
"type": "evm",
"wormholeChainId": 2,
"rpc": "http://locahost:8545",
"privateKey": "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "",
"emittedVAAs": []
},
"evm1": {
"type": "evm",
"wormholeChainId": 4,
"rpc": "http://locahost:8545",
"privateKey": "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "",
"emittedVAAs": []
}
"networks": {
"evm0": {
"type": "evm",
"wormholeChainId": 2,
"rpc": "http://localhost:8545",
"privateKey": "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "0xfcced5e997e7fb1d0594518d3ed57245bb8ed17e",
"emittedVAAs": [
"AQAAAAABAKWa25QetRl/vZ45Y4s1NKOXe01+Q0olM4dKfe6EroILVAjrxOk5OZNplA/0weOFvtirBgy1cybbnd/fHNumnUEBYrYAOgAAAAAAAgAAAAAAAAAAAAAAAPzO1emX5/sdBZRRjT7VckW7jtF+AAAAAAAAAAABRnJvbTogZXZtMFxuTXNnOiBIZWxsbyBXb3JsZCE="
]
},
"wormhole": {
"restAddress": "http://localhost:7071"
"evm1": {
"type": "evm",
"wormholeChainId": 4,
"rpc": "http://localhost:8546",
"privateKey": "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "0xfcced5e997e7fb1d0594518d3ed57245bb8ed17e",
"emittedVAAs": [
"AQAAAAABAE29ghIeU7n00u2DECRWvICEYEJbQpDxoxSwj6JSLlQjBjjbbL0Px+wrkDxpmOPnxdgNACRGeTsVNUntX42f4FcBYrYARQAAAAAABAAAAAAAAAAAAAAAAPzO1emX5/sdBZRRjT7VckW7jtF+AAAAAAAAAAABRnJvbTogZXZtMVxuTXNnOiBIZWxsbyBXb3JsZCE="
]
}
}
},
"wormhole": {
"restAddress": "http://localhost:7071"
}
}
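
The emittedVAAs entries above are the raw signed VAAs exactly as returned by the guardian's /v1/signed_vaa endpoint, base64 encoded. The application payload, i.e. the string passed to send_msg, sits at the very end, after the header, the guardian signatures, and the emitter metadata. A rough sketch for peeking inside one, with offsets that assume the standard VAA v1 layout and a single devnet guardian signature:

// decode-vaa.js: inspection sketch; paste one of the emittedVAAs strings below
const vaaB64 = "<one of the emittedVAAs values from the config above>";
const vaa = Buffer.from(vaaB64, "base64");

const numSigs = vaa[5];               // header: version(1) + guardianSetIndex(4) + numSignatures(1)
const bodyStart = 6 + numSigs * 66;   // each signature entry: 1 index byte + 65 signature bytes
const payloadStart = bodyStart + 51;  // timestamp(4) + nonce(4) + emitterChainId(2) + emitterAddress(32) + sequence(8) + consistencyLevel(1)

console.log(vaa.readUInt16BE(bodyStart + 8));    // emitter chain id (2 for evm0, 4 for evm1)
console.log(vaa.slice(payloadStart).toString()); // e.g. "From: evm0\nMsg: Hello World!"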

View File

@ -1,37 +0,0 @@
{
"networks": {
"eth0": {
"type": "evm",
"wormholeChainId": 2,
"rpc": "",
"privateKey": "",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "",
"emittedVAAs": [
]
},
"eth1": {
"type": "evm",
"wormholeChainId": 4,
"rpc": "",
"privateKey": "",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "",
"emittedVAAs": [
]
},
"algo": {
"type": "algorand",
"rpc": "" ,
"wormholeChainId": 8,
"mnemonic": "",
"deployedAddress": "",
"appId": 0,
"bridgeId": 4,
"emittedVAAs":[]
}
},
"wormhole": {
"restAddress": ""
}
}

View File

@ -105,7 +105,10 @@ async function main() {
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
signer
signer,
{
gasPrice: '2000000000'
}
);
await messenger.registerApplicationContracts(targetNetwork.wormholeChainId, emitterAddr);
} else if (network.type == "algorand"){
@ -145,15 +148,19 @@ async function main() {
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
signer
signer,
{
gasPrice: '2000000000'
}
);
const tx = await (await messenger.sendMsg(Buffer.from(process.argv[4]))).wait();
const tx = await (await messenger.sendMsg(Buffer.from(process.argv[4]), {gasPrice: '2000000000'})).wait();
await new Promise((r) => setTimeout(r, 5000));
const emitterAddr = getEmitterAddressEth(messenger.address);
const seq = parseSequenceFromLogEth(
tx,
network.bridgeAddress
);
console.log(`${config.wormhole.restAddress}/v1/signed_vaa/${network.wormholeChainId}/${emitterAddr}/${seq}`);
const vaaBytes = await (
await fetch(
`${config.wormhole.restAddress}/v1/signed_vaa/${network.wormholeChainId}/${emitterAddr}/${seq}`
@ -239,7 +246,10 @@ async function main() {
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
signer
signer,
{
gasPrice: '2000000000'
}
);
const tx = await messenger.receiveEncodedMsg(Buffer.from(vaaBytes, "base64"));
@ -286,7 +296,10 @@ async function main() {
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(fs.readFileSync('./chains/evm/out/Messenger.sol/Messenger.json').toString()).abi,
signer
signer,
{
gasPrice: '2000000000'
}
);
console.log(`${process.argv[2]} Current Msg: `, await messenger.getCurrentMsg());
}
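
One caveat on the gas price overrides added above: in ethers v5, as far as I can tell, new ethers.Contract() takes only (address, abi, signerOrProvider), so the fourth { gasPrice } argument handed to the constructor is silently ignored; the override that actually takes effect is the per-call one, as already done for sendMsg. A minimal sketch of the per-call form for the other call sites, reusing the variables from messenger.js:

// constructor overrides are dropped by ethers v5; pass them on each call instead
const abi = JSON.parse(fs.readFileSync("./chains/evm/out/Messenger.sol/Messenger.json").toString()).abi;
const messenger = new ethers.Contract(network.deployedAddress, abi, signer);

const overrides = { gasPrice: "2000000000" }; // same 2 gwei value used in the diff
await (await messenger.sendMsg(Buffer.from(process.argv[4]), overrides)).wait();
await messenger.registerApplicationContracts(targetNetwork.wormholeChainId, emitterAddr, overrides);

Contract methods in ethers accept an overrides object as an extra trailing argument, so the same pattern works for receiveEncodedMsg as well.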

View File

@ -1,5 +1,6 @@
node messenger.js eth0 deploy
node messenger.js eth1 deploy
sleep 5
node messenger.js eth0 register_chain eth1
node messenger.js eth1 register_chain eth0
node messenger.js eth0 send_msg "From: eth0\nMsg: Hello World!"