added orchestrator code

This commit is contained in:
Dev Bharel 2022-06-22 21:37:26 -05:00
parent 883f40bbbb
commit 443a90f4fa
15 changed files with 8838 additions and 5 deletions

View File

@@ -1 +1,2 @@
node_modules/
wormhole/

View File

@@ -0,0 +1,3 @@
cache/
out/
!src/Wormhole/

View File

@@ -0,0 +1,7 @@
[default]
src = 'src'
out = 'out'
libs = ['lib']
solc_version = '0.8.10'
# See more config options https://github.com/foundry-rs/foundry/tree/master/config

View File

@@ -0,0 +1,53 @@
//SPDX-License-Identifier: Unlicense
pragma solidity ^0.8.0;

import "./Wormhole/IWormhole.sol";

contract Messenger {
    string private current_msg;
    address private wormhole_core_bridge_address = address(0xC89Ce4735882C9F0f0FE26686c53074E09B0D550);
    IWormhole core_bridge = IWormhole(wormhole_core_bridge_address);

    uint32 nonce = 0;
    mapping(uint16 => bytes32) _applicationContracts;
    address owner;
    mapping(bytes32 => bool) _completedMessages;

    constructor() {
        owner = msg.sender;
    }

    function sendMsg(bytes memory str) public returns (uint64 sequence) {
        sequence = core_bridge.publishMessage(nonce, str, 1);
        nonce = nonce + 1;
    }

    function receiveEncodedMsg(bytes memory encodedMsg) public {
        (IWormhole.VM memory vm, bool valid, string memory reason) = core_bridge.parseAndVerifyVM(encodedMsg);

        //1. Check Wormhole Guardian Signatures
        //   If the VM is NOT valid, this will return the reason it's not valid
        //   If the VM IS valid, reason will be blank
        require(valid, reason);

        //2. Check if the Emitter Chain contract is registered
        require(_applicationContracts[vm.emitterChainId] == vm.emitterAddress, "Invalid Emitter Address!");

        //3. Check that the message hasn't already been processed
        require(!_completedMessages[vm.hash], "Message already processed");
        _completedMessages[vm.hash] = true;

        //Do the thing: store the verified payload as the current message
        current_msg = string(vm.payload);
    }

    function getCurrentMsg() public view returns (string memory) {
        return current_msg;
    }

    /**
        Registers its sibling applications on other chains as the only ones allowed to send messages to this instance
    */
    function registerApplicationContracts(uint16 chainId, bytes32 applicationAddr) public {
        require(msg.sender == owner, "Only owner can register new chains!");
        _applicationContracts[chainId] = applicationAddr;
    }
}
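
The applicationAddr passed to registerApplicationContracts is a 32-byte Wormhole emitter address, not a plain 20-byte EVM address. A minimal sketch of that conversion, assumed here to be equivalent to what the SDK's getEmitterAddressEth (used by the orchestrator below) produces for EVM chains:

// Sketch only: left-pad a 20-byte EVM address with zeros to the 32-byte
// emitter format expected by registerApplicationContracts. Assumed to match
// what getEmitterAddressEth from @certusone/wormhole-sdk returns (hex, no 0x).
function evmEmitterAddress(address) {
  return address.toLowerCase().replace(/^0x/, "").padStart(64, "0");
}

// Buffer.from(evmEmitterAddress(deployedMessengerAddress), "hex") then yields
// the bytes32 value to register on the sibling chain.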

View File

@@ -0,0 +1,42 @@
// contracts/Messages.sol
// SPDX-License-Identifier: Apache 2
pragma solidity ^0.8.0;

import "./Structs.sol";

interface IWormhole is Structs {
    event LogMessagePublished(address indexed sender, uint64 sequence, uint32 nonce, bytes payload, uint8 consistencyLevel);

    function publishMessage(
        uint32 nonce,
        bytes memory payload,
        uint8 consistencyLevel
    ) external payable returns (uint64 sequence);

    function parseAndVerifyVM(bytes calldata encodedVM) external view returns (Structs.VM memory vm, bool valid, string memory reason);

    function verifyVM(Structs.VM memory vm) external view returns (bool valid, string memory reason);

    function verifySignatures(bytes32 hash, Structs.Signature[] memory signatures, Structs.GuardianSet memory guardianSet) external pure returns (bool valid, string memory reason);

    function parseVM(bytes memory encodedVM) external pure returns (Structs.VM memory vm);

    function getGuardianSet(uint32 index) external view returns (Structs.GuardianSet memory);

    function getCurrentGuardianSetIndex() external view returns (uint32);

    function getGuardianSetExpiry() external view returns (uint32);

    function governanceActionIsConsumed(bytes32 hash) external view returns (bool);

    function isInitialized(address impl) external view returns (bool);

    function chainId() external view returns (uint16);

    function governanceChainId() external view returns (uint16);

    function governanceContract() external view returns (bytes32);

    function messageFee() external view returns (uint256);
}
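
The sequence returned by publishMessage is also emitted in the LogMessagePublished event above, which is how an off-chain client recovers it from a transaction receipt. A hedged sketch (assuming ethers v5, the style used by the orchestrator below) of roughly what the SDK's parseSequenceFromLogEth does:

// Sketch only: decode LogMessagePublished from a receipt's logs and return the
// sequence. Assumes ethers v5 and that exactly one core-bridge log is present.
import { ethers } from "ethers";

const CORE_BRIDGE_ABI = [
  "event LogMessagePublished(address indexed sender, uint64 sequence, uint32 nonce, bytes payload, uint8 consistencyLevel)",
];

function sequenceFromReceipt(receipt, coreBridgeAddress) {
  const iface = new ethers.utils.Interface(CORE_BRIDGE_ABI);
  const log = receipt.logs.find(
    (l) => l.address.toLowerCase() === coreBridgeAddress.toLowerCase()
  );
  return iface.parseLog(log).args.sequence.toString(); // uint64 as a string
}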

View File

@@ -0,0 +1,40 @@
// contracts/Structs.sol
// SPDX-License-Identifier: Apache 2
pragma solidity ^0.8.0;

interface Structs {
    struct Provider {
        uint16 chainId;
        uint16 governanceChainId;
        bytes32 governanceContract;
    }

    struct GuardianSet {
        address[] keys;
        uint32 expirationTime;
    }

    struct Signature {
        bytes32 r;
        bytes32 s;
        uint8 v;
        uint8 guardianIndex;
    }

    struct VM {
        uint8 version;
        uint32 timestamp;
        uint32 nonce;
        uint16 emitterChainId;
        bytes32 emitterAddress;
        uint64 sequence;
        uint8 consistencyLevel;
        bytes payload;

        uint32 guardianSetIndex;
        Signature[] signatures;

        bytes32 hash;
    }
}
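
When parseAndVerifyVM is called from an off-chain client through ethers, the returned VM is a tuple whose named fields mirror this struct. A small sketch (assuming ethers v5) of reading the fields Messenger relies on, including turning the raw payload bytes back into the message string:

// Sketch only: map a VM tuple (as returned by parseAndVerifyVM via ethers v5)
// to plain JS values. vm.payload is the raw bytes Messenger stores as
// current_msg, so toUtf8String recovers the original text.
import { ethers } from "ethers";

function describeVm(vm) {
  return {
    emitterChainId: vm.emitterChainId,  // uint16 -> number
    emitterAddress: vm.emitterAddress,  // bytes32 -> hex string
    sequence: vm.sequence.toString(),   // uint64 -> BigNumber -> string
    payloadText: ethers.utils.toUtf8String(vm.payload),
  };
}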

View File

@@ -0,0 +1,195 @@
import { exec } from "child_process";
import fs from "fs";
import { ethers } from "ethers";
import {
getEmitterAddressEth,
parseSequenceFromLogEth,
} from "@certusone/wormhole-sdk";
import fetch from "node-fetch";
async function main() {
let config = JSON.parse(fs.readFileSync("./xdapp.config.json").toString());
let network = config.networks[process.argv[2]];
if (!network) {
throw new Error("Network not defined in config file.");
}
if (process.argv[3] == "deploy") {
if (network.type == "evm") {
console.log(
`Deploying EVM network: ${process.argv[2]} to ${network.rpc}`
);
exec(
`cd chains/evm && forge build && forge create --legacy --rpc-url ${network.rpc} --private-key ${network.privateKey} src/Messenger.sol:Messenger && exit`,
(err, out, errStr) => {
if (err) {
throw new Error(err);
}
if (out) {
console.log(out);
network.deployedAddress = out
.split("Deployed to: ")[1]
.split("\n")[0]
.trim();
network.emittedVAAs = []; //Resets the emittedVAAs
config.networks[process.argv[2]] = network;
fs.writeFileSync(
"./xdapp.config.json",
JSON.stringify(config, null, 4)
);
}
}
);
} else {
throw new Error("Invalid Network Type!");
}
} else if (process.argv[3] == "register_chain") {
if (!network.deployedAddress) {
throw new Error("Deploy to this network first!");
}
const targetNetwork = config.networks[process.argv[4]];
if (!targetNetwork.deployedAddress) {
throw new Error("Target Network not deployed yet!");
}
let emitterAddr;
if (targetNetwork.type == "evm") {
emitterAddr = Buffer.from(
getEmitterAddressEth(targetNetwork.deployedAddress),
"hex"
);
}
if (network.type == "evm") {
const signer = new ethers.Wallet(network.privateKey).connect(
new ethers.providers.JsonRpcProvider(network.rpc)
);
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(
fs
.readFileSync(
"./chains/evm/out/Messenger.sol/Messenger.json"
)
.toString()
).abi,
signer
);
await messenger.registerApplicationContracts(
targetNetwork.wormholeChainId,
emitterAddr
);
}
console.log(
`Network(${process.argv[2]}) Registered Emitter: ${targetNetwork.deployedAddress} from Chain: ${targetNetwork.wormholeChainId}`
);
} else if (process.argv[3] == "send_msg") {
if (!network.deployedAddress) {
throw new Error("Deploy to this network first!");
}
if (network.type == "evm") {
const signer = new ethers.Wallet(network.privateKey).connect(
new ethers.providers.JsonRpcProvider(network.rpc)
);
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(
fs
.readFileSync(
"./chains/evm/out/Messenger.sol/Messenger.json"
)
.toString()
).abi,
signer
);
const tx = await (
await messenger.sendMsg(Buffer.from(process.argv[4]))
).wait();
await new Promise((r) => setTimeout(r, 5000));
const emitterAddr = getEmitterAddressEth(messenger.address);
const seq = parseSequenceFromLogEth(tx, network.bridgeAddress);
const vaaBytes = await (
await fetch(
`${config.wormhole.restAddress}/v1/signed_vaa/${network.wormholeChainId}/${emitterAddr}/${seq}`
)
).json();
if (!network.emittedVAAs) {
network.emittedVAAs = [vaaBytes.vaaBytes];
} else {
network.emittedVAAs.push(vaaBytes.vaaBytes);
}
config.networks[process.argv[2]] = network;
fs.writeFileSync(
"./xdapp.config.json",
JSON.stringify(config, null, 2)
);
console.log(
`Network(${process.argv[2]}) Emitted VAA: `,
vaaBytes.vaaBytes
);
}
} else if (process.argv[3] == "submit_vaa") {
if (!network.deployedAddress) {
throw new Error("Deploy to this network first!");
}
const targetNetwork = config.networks[process.argv[4]];
const vaaBytes = isNaN(parseInt(process.argv[5]))
? targetNetwork.emittedVAAs.pop()
: targetNetwork.emittedVAAs[parseInt(process.argv[5])];
if (network.type == "evm") {
const signer = new ethers.Wallet(network.privateKey).connect(
new ethers.providers.JsonRpcProvider(network.rpc)
);
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(
fs
.readFileSync(
"./chains/evm/out/Messenger.sol/Messenger.json"
)
.toString()
).abi,
signer
);
const tx = await messenger.receiveEncodedMsg(
Buffer.from(vaaBytes, "base64")
);
console.log(`Submitted VAA: ${vaaBytes}\nTX: ${tx.hash}`);
}
} else if (process.argv[3] == "get_current_msg") {
if (!network.deployedAddress) {
throw new Error("Deploy to this network first!");
}
if (network.type == "evm") {
const signer = new ethers.Wallet(network.privateKey).connect(
new ethers.providers.JsonRpcProvider(network.rpc)
);
const messenger = new ethers.Contract(
network.deployedAddress,
JSON.parse(
fs
.readFileSync(
"./chains/evm/out/Messenger.sol/Messenger.json"
)
.toString()
).abi,
signer
);
console.log(
`${process.argv[2]} Current Msg: `,
await messenger.getCurrentMsg()
);
}
} else {
throw new Error("Unkown command!");
}
}
main();
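
The orchestrator expects an xdapp.config.json next to it. A hedged sketch of the shape it reads, with every value (and the network name evm0) a placeholder; only the field names are taken from the code above. deployedAddress and emittedVAAs are written back by the deploy and send_msg commands:

// Sketch only: example xdapp.config.json contents shown as a JS object. All
// values are placeholders; wormholeChainId and bridgeAddress must match the
// devnet core bridge this Messenger talks to.
const exampleConfig = {
  wormhole: {
    restAddress: "http://localhost:7071", // guardian REST endpoint for /v1/signed_vaa (placeholder)
  },
  networks: {
    evm0: {
      type: "evm",
      rpc: "http://localhost:8545",
      privateKey: "0x<funded devnet key>",
      bridgeAddress: "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550", // core bridge, same address as in Messenger.sol
      wormholeChainId: 2, // Wormhole chain id of this network (2 = Ethereum)
    },
  },
};

Commands dispatch on the command-line arguments (process.argv[2] and [3]): <network> deploy, <network> register_chain <targetNetwork>, <network> send_msg <message>, <network> submit_vaa <sourceNetwork> [vaaIndex], and <network> get_current_msg.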

View File

@@ -0,0 +1,677 @@
2022-06-23T01:46:09.080Z INFO guardian-0 status server listening on [::]:6060
badger 2022/06/23 01:46:09 INFO: All 0 tables opened in 0s
badger 2022/06/23 01:46:09 INFO: Discard stats nextEmptySlot: 0
badger 2022/06/23 01:46:09 INFO: Set nextTxnTs to 0
2022-06-23T01:46:11.836Z INFO guardian-0 Loaded guardian key {"address": "0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"}
2022-06-23T01:46:11.846Z INFO guardian-0 Telemetry disabled
2022-06-23T01:46:11.848Z INFO guardian-0 publicrpc server listening {"addr": "[::]:7070"}
2022-06-23T01:46:11.853Z INFO guardian-0 admin server listening on {"path": "/tmp/admin.sock"}
2022-06-23T01:46:11.858Z INFO guardian-0.supervisor supervisor processor started
2022-06-23T01:46:11.911Z INFO guardian-0 Starting Terra watcher
2022-06-23T01:46:11.919Z INFO guardian-0 Starting Terra 2 watcher
2022-06-23T01:46:11.939Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.939Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.954Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.956Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:11.980Z INFO guardian-0 Started internal services
2022-06-23T01:46:12.014Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.030Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.031Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.032Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.056Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "381.299405ms"}
2022-06-23T01:46:12.063Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "403.14798ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "718.397523ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "695.131816ms"}
2022-06-23T01:46:12.065Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.066Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "529.532934ms"}
2022-06-23T01:46:12.087Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "346.285743ms"}
2022-06-23T01:46:12.088Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.107Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "288.940962ms"}
2022-06-23T01:46:12.108Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z INFO guardian-0 publicweb server listening {"addr": ""}
2022-06-23T01:46:12.128Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "275.125674ms"}
2022-06-23T01:46:12.129Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "503.988233ms"}
2022-06-23T01:46:12.130Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022/06/23 01:46:12 failed to sufficiently increase receive buffer size (was: 208 kiB, wanted: 2048 kiB, got: 416 kiB). See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.
2022-06-23T01:46:12.149Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "371.860783ms"}
2022-06-23T01:46:12.150Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.165Z INFO guardian-0.root.p2p Connecting to bootstrap peers {"bootstrap_peers": "/dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw"}
2022-06-23T01:46:12.166Z INFO guardian-0.root.p2p Subscribing pubsub topic {"topic": "/wormhole/dev/broadcast"}
2022-06-23T01:46:12.169Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "479.554257ms"}
2022-06-23T01:46:12.170Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.177Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAAIF3YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.190Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "267.325443ms"}
2022-06-23T01:46:12.191Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:133 failed when refreshing routing table2 errors occurred:
* failed to query for self, err=failed to find any peer in table
* failed to refresh cpl=0, err=failed to find any peer in table
2022-06-23T01:46:12.191Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.193Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.196Z INFO guardian-0.root.p2p We're a bootstrap node
2022-06-23T01:46:12.197Z INFO guardian-0.root.p2p Connected to bootstrap peers {"num": 0}
2022-06-23T01:46:12.199Z INFO guardian-0.root.p2p Node has been started {"peer_id": "12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw", "addrs": "[/ip4/172.17.0.2/udp/8999/quic /ip4/127.0.0.1/udp/8999/quic]"}
2022-06-23T01:46:12.205Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAADM5IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.206Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:196 failed when refreshing routing table {"error": "2 errors occurred:\n\t* failed to query for self, err=failed to find any peer in table\n\t* failed to refresh cpl=0, err=failed to find any peer in table\n\n"}
2022-06-23T01:46:12.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "664.065308ms"}
2022-06-23T01:46:12.214Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "643.33179ms"}
2022-06-23T01:46:12.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.043280351s"}
2022-06-23T01:46:12.455Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.474Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "969.238316ms"}
2022-06-23T01:46:12.476Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.494Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.495Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.495Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.514Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "760.246505ms"}
2022-06-23T01:46:12.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.050614025s"}
2022-06-23T01:46:12.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.518Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "526.434771ms"}
2022-06-23T01:46:12.537Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "423.72529ms"}
2022-06-23T01:46:12.562Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "767.02716ms"}
2022-06-23T01:46:12.636Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.654Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "644.110367ms"}
2022-06-23T01:46:12.674Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.694Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "928.777565ms"}
2022-06-23T01:46:12.695Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.714Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "718.027607ms"}
2022-06-23T01:46:12.784Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.803Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "480.413639ms"}
2022-06-23T01:46:12.805Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.824Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "599.94366ms"}
2022-06-23T01:46:12.896Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.897Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.898Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:12.905Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.916Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "985.116951ms"}
2022-06-23T01:46:12.918Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.937Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "490.23796ms"}
2022-06-23T01:46:12.983Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.002Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.221199793s"}
2022-06-23T01:46:13.034Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.053Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "464.436068ms"}
2022-06-23T01:46:13.054Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.073Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "486.414266ms"}
2022-06-23T01:46:13.091Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.097Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.116Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "581.83612ms"}
2022-06-23T01:46:13.296Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.315Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "792.110201ms"}
2022-06-23T01:46:13.352Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.372Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "1.119418761s"}
2022-06-23T01:46:13.373Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.393Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "598.029357ms"}
2022-06-23T01:46:13.394Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.413Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.306859108s"}
2022-06-23T01:46:13.463Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.464Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.465Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:13.471Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.483Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.184855871s"}
2022-06-23T01:46:13.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.504Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:13.524Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.481885894s"}
2022-06-23T01:46:13.525Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "1.653330262s"}
2022-06-23T01:46:13.526Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.545Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "903.304159ms"}
2022-06-23T01:46:13.547Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.566Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.564108082s"}
2022-06-23T01:46:13.603Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.608Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.627Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.55356197s"}
2022-06-23T01:46:13.652Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.671Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.571900114s"}
2022-06-23T01:46:13.701Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.720Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.738Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.217342614s"}
2022-06-23T01:46:13.925Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.944Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "880.85204ms"}
2022-06-23T01:46:14.015Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.034Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "1.403058038s"}
2022-06-23T01:46:14.131Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.150Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "969.315636ms"}
2022-06-23T01:46:14.247Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.266Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.026900344s"}
2022-06-23T01:46:14.474Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.493Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "1.433589929s"}
2022-06-23T01:46:14.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "2.438293112s"}
2022-06-23T01:46:14.575Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.576Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.595Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "676.9799ms"}
2022-06-23T01:46:14.596Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "502.911284ms"}
2022-06-23T01:46:14.689Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.708Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.144392209s"}
2022-06-23T01:46:14.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.760Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.247276925s"}
2022-06-23T01:46:14.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.868Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "920.452437ms"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:15.015Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.046Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.018075883s"}
2022-06-23T01:46:15.141Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.159Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.975496985s"}
2022-06-23T01:46:15.198Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.218Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "848.255763ms"}
2022-06-23T01:46:15.219Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.219Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:15.239Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "2.227299695s"}
2022-06-23T01:46:15.240Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.259Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "2.128781487s"}
2022-06-23T01:46:15.297Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.317Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.846438262s"}
2022-06-23T01:46:15.342Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.360Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.449230867s"}
2022-06-23T01:46:15.458Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.477Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.48610792s"}
2022-06-23T01:46:15.809Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.828Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "1.719378196s"}
2022-06-23T01:46:15.878Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.898Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.425203675s"}
2022-06-23T01:46:15.952Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.970Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.003008048s"}
2022-06-23T01:46:15.990Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.997Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.016Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "1.624274118s"}
2022-06-23T01:46:16.042Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.061Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.350130088s"}
2022-06-23T01:46:16.084Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:16.085Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:16.092Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.104Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.124Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "3.227239733s"}
2022-06-23T01:46:16.125Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.145Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.746036618s"}
2022-06-23T01:46:16.146Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.165Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.27387341s"}
2022-06-23T01:46:16.304Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.323Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "1.270284631s"}
2022-06-23T01:46:16.834Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.854Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "2.141106113s"}
2022-06-23T01:46:17.001Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.020Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "3.377476402s"}
2022-06-23T01:46:17.189Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.208Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.507230961s"}
2022-06-23T01:46:17.344Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.363Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.244030108s"}
2022-06-23T01:46:17.408Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.434Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "5.267882146s"}
2022-06-23T01:46:17.454Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.64480936s"}
2022-06-23T01:46:17.497Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:17.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "3.522991813s"}
2022-06-23T01:46:17.573Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.591Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "4.459919713s"}
2022-06-23T01:46:17.660Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.665Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.684Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.943651069s"}
2022-06-23T01:46:18.155Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:18.174Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "4.815367041s"}
2022-06-23T01:46:18.482Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.501Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.183399705s"}
2022-06-23T01:46:18.623Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.642Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "2.151246866s"}
2022-06-23T01:46:19.046Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.067Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.709365136s"}
2022-06-23T01:46:19.068Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "3.470498357s"}
2022-06-23T01:46:19.087Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.106Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "4.002004299s"}
2022-06-23T01:46:19.395Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:19.396Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:19.405Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.408Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.431Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "2.948900832s"}
2022-06-23T01:46:19.953Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.979Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "4.070249768s"}
2022-06-23T01:46:20.465Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.490Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "4.68099847s"}
2022-06-23T01:46:20.638Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:20.659Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.660Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.679Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "8.484882035s"}
2022-06-23T01:46:20.680Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.726228022s"}
2022-06-23T01:46:20.701Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:20.720Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.927893074s"}
2022-06-23T01:46:20.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.761Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.714938635s"}
2022-06-23T01:46:21.093Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:21.117Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "5.685547874s"}
2022-06-23T01:46:21.136Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:21.142Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:21.161Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.053515001s"}
2022-06-23T01:46:21.847Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:21.884Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "3.626272661s"}
2022-06-23T01:46:22.105Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.127Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "3.657998601s"}
2022-06-23T01:46:22.426Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:22.428Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
➜ evm-messenger git:(guardiand) ✗ docker logs guardiand > output.log
2022-06-23T01:46:09.080Z INFO guardian-0 status server listening on [::]:6060
badger 2022/06/23 01:46:09 INFO: All 0 tables opened in 0s
badger 2022/06/23 01:46:09 INFO: Discard stats nextEmptySlot: 0
badger 2022/06/23 01:46:09 INFO: Set nextTxnTs to 0
2022-06-23T01:46:11.836Z INFO guardian-0 Loaded guardian key {"address": "0xbeFA429d57cD18b7F8A4d91A2da9AB4AF05d0FBe"}
2022-06-23T01:46:11.846Z INFO guardian-0 Telemetry disabled
2022-06-23T01:46:11.848Z INFO guardian-0 publicrpc server listening {"addr": "[::]:7070"}
2022-06-23T01:46:11.853Z INFO guardian-0 admin server listening on {"path": "/tmp/admin.sock"}
2022-06-23T01:46:11.858Z INFO guardian-0.supervisor supervisor processor started
2022-06-23T01:46:11.911Z INFO guardian-0 Starting Terra watcher
2022-06-23T01:46:11.919Z INFO guardian-0 Starting Terra 2 watcher
2022-06-23T01:46:11.939Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.939Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.954Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:11.956Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:11.980Z INFO guardian-0 Started internal services
2022-06-23T01:46:12.014Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.030Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.031Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.032Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.056Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "381.299405ms"}
2022-06-23T01:46:12.063Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "403.14798ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "718.397523ms"}
2022-06-23T01:46:12.064Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "695.131816ms"}
2022-06-23T01:46:12.065Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.066Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "529.532934ms"}
2022-06-23T01:46:12.087Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "346.285743ms"}
2022-06-23T01:46:12.088Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.107Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "288.940962ms"}
2022-06-23T01:46:12.108Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.109Z INFO guardian-0 publicweb server listening {"addr": ""}
2022-06-23T01:46:12.128Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "275.125674ms"}
2022-06-23T01:46:12.129Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "503.988233ms"}
2022-06-23T01:46:12.130Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022/06/23 01:46:12 failed to sufficiently increase receive buffer size (was: 208 kiB, wanted: 2048 kiB, got: 416 kiB). See https://github.com/lucas-clemente/quic-go/wiki/UDP-Receive-Buffer-Size for details.
2022-06-23T01:46:12.149Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "371.860783ms"}
2022-06-23T01:46:12.150Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.165Z INFO guardian-0.root.p2p Connecting to bootstrap peers {"bootstrap_peers": "/dns4/guardian-0.guardian/udp/8999/quic/p2p/12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw"}
2022-06-23T01:46:12.166Z INFO guardian-0.root.p2p Subscribing pubsub topic {"topic": "/wormhole/dev/broadcast"}
2022-06-23T01:46:12.169Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "479.554257ms"}
2022-06-23T01:46:12.170Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.177Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAAIF3YAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.190Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "267.325443ms"}
2022-06-23T01:46:12.191Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:133 failed when refreshing routing table2 errors occurred:
* failed to query for self, err=failed to find any peer in table
* failed to refresh cpl=0, err=failed to find any peer in table
2022-06-23T01:46:12.191Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.193Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.196Z INFO guardian-0.root.p2p We're a bootstrap node
2022-06-23T01:46:12.197Z INFO guardian-0.root.p2p Connected to bootstrap peers {"num": 0}
2022-06-23T01:46:12.199Z INFO guardian-0.root.p2p Node has been started {"peer_id": "12D3KooWL3XJ9EMCyZvmmGXL2LMiVBtrVa2BuESsJiXkSj7333Jw", "addrs": "[/ip4/172.17.0.2/udp/8999/quic /ip4/127.0.0.1/udp/8999/quic]"}
2022-06-23T01:46:12.205Z INFO dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:276 starting refreshing cpl 0 with key CIQAAADM5IAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA (routing table size was 0)
2022-06-23T01:46:12.206Z WARN dht/RtRefreshManager rtrefresh/rt_refresh_manager.go:196 failed when refreshing routing table {"error": "2 errors occurred:\n\t* failed to query for self, err=failed to find any peer in table\n\t* failed to refresh cpl=0, err=failed to find any peer in table\n\n"}
2022-06-23T01:46:12.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "664.065308ms"}
2022-06-23T01:46:12.214Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "643.33179ms"}
2022-06-23T01:46:12.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.043280351s"}
2022-06-23T01:46:12.455Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.474Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "969.238316ms"}
2022-06-23T01:46:12.476Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.494Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.495Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.495Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.514Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "760.246505ms"}
2022-06-23T01:46:12.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.050614025s"}
2022-06-23T01:46:12.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.518Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "526.434771ms"}
2022-06-23T01:46:12.537Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "423.72529ms"}
2022-06-23T01:46:12.562Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "767.02716ms"}
2022-06-23T01:46:12.636Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.654Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "644.110367ms"}
2022-06-23T01:46:12.674Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.694Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "928.777565ms"}
2022-06-23T01:46:12.695Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:12.714Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "718.027607ms"}
2022-06-23T01:46:12.784Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.803Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "480.413639ms"}
2022-06-23T01:46:12.805Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.824Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "599.94366ms"}
2022-06-23T01:46:12.896Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:12.897Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:12.898Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:12.905Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.916Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "985.116951ms"}
2022-06-23T01:46:12.918Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:12.937Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "490.23796ms"}
2022-06-23T01:46:12.983Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.002Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.221199793s"}
2022-06-23T01:46:13.034Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.053Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "464.436068ms"}
2022-06-23T01:46:13.054Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:13.073Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "486.414266ms"}
2022-06-23T01:46:13.091Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.097Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.116Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "581.83612ms"}
2022-06-23T01:46:13.296Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.315Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "792.110201ms"}
2022-06-23T01:46:13.352Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.372Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "1.119418761s"}
2022-06-23T01:46:13.373Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.393Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "598.029357ms"}
2022-06-23T01:46:13.394Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.413Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.306859108s"}
2022-06-23T01:46:13.463Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.464Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.465Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:13.471Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.483Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.184855871s"}
2022-06-23T01:46:13.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:13.504Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:13.524Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.481885894s"}
2022-06-23T01:46:13.525Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "1.653330262s"}
2022-06-23T01:46:13.526Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.545Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "903.304159ms"}
2022-06-23T01:46:13.547Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.566Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "1.564108082s"}
2022-06-23T01:46:13.603Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.608Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.627Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "1.55356197s"}
2022-06-23T01:46:13.652Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.671Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.571900114s"}
2022-06-23T01:46:13.701Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:13.720Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.738Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.217342614s"}
2022-06-23T01:46:13.925Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:13.944Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "880.85204ms"}
2022-06-23T01:46:14.015Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.034Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "1.403058038s"}
2022-06-23T01:46:14.131Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.150Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "969.315636ms"}
2022-06-23T01:46:14.247Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.266Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.026900344s"}
2022-06-23T01:46:14.474Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.493Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "1.433589929s"}
2022-06-23T01:46:14.517Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.536Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "2.438293112s"}
2022-06-23T01:46:14.575Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.576Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:14.595Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "676.9799ms"}
2022-06-23T01:46:14.596Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "502.911284ms"}
2022-06-23T01:46:14.689Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.708Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.144392209s"}
2022-06-23T01:46:14.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.760Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.247276925s"}
2022-06-23T01:46:14.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:14.868Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "920.452437ms"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.008Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:15.015Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.027Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:15.046Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "1.018075883s"}
2022-06-23T01:46:15.141Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.159Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.975496985s"}
2022-06-23T01:46:15.198Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.218Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "848.255763ms"}
2022-06-23T01:46:15.219Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.219Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:15.239Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "2.227299695s"}
2022-06-23T01:46:15.240Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.259Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "2.128781487s"}
2022-06-23T01:46:15.297Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.317Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "1.846438262s"}
2022-06-23T01:46:15.342Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.360Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "1.449230867s"}
2022-06-23T01:46:15.458Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.477Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.48610792s"}
2022-06-23T01:46:15.809Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.828Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "1.719378196s"}
2022-06-23T01:46:15.878Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.898Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "1.425203675s"}
2022-06-23T01:46:15.952Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:15.970Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.003008048s"}
2022-06-23T01:46:15.990Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:15.997Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.016Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "1.624274118s"}
2022-06-23T01:46:16.042Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.061Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "1.350130088s"}
2022-06-23T01:46:16.084Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:16.085Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:16.092Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.104Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:16.124Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "3.227239733s"}
2022-06-23T01:46:16.125Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.145Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.746036618s"}
2022-06-23T01:46:16.146Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.165Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.27387341s"}
2022-06-23T01:46:16.304Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:16.323Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "1.270284631s"}
2022-06-23T01:46:16.834Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:16.854Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "2.141106113s"}
2022-06-23T01:46:17.001Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.020Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "3.377476402s"}
2022-06-23T01:46:17.189Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.208Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.507230961s"}
2022-06-23T01:46:17.344Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.363Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.244030108s"}
2022-06-23T01:46:17.408Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.433Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.434Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.453Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "5.267882146s"}
2022-06-23T01:46:17.454Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.64480936s"}
2022-06-23T01:46:17.497Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:17.515Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "3.522991813s"}
2022-06-23T01:46:17.573Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.591Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "4.459919713s"}
2022-06-23T01:46:17.660Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:17.665Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:17.684Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "2.943651069s"}
2022-06-23T01:46:18.155Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:18.174Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "4.815367041s"}
2022-06-23T01:46:18.482Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.501Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.183399705s"}
2022-06-23T01:46:18.623Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:18.642Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "2.151246866s"}
2022-06-23T01:46:19.046Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.067Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "3.709365136s"}
2022-06-23T01:46:19.068Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.086Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "3.470498357s"}
2022-06-23T01:46:19.087Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.106Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "4.002004299s"}
2022-06-23T01:46:19.395Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:19.396Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:19.405Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.408Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:19.431Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "2.948900832s"}
2022-06-23T01:46:19.953Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:19.979Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "4.070249768s"}
2022-06-23T01:46:20.465Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.490Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "4.68099847s"}
2022-06-23T01:46:20.638Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:20.659Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.660Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.679Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "8.484882035s"}
2022-06-23T01:46:20.680Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "3.726228022s"}
2022-06-23T01:46:20.701Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:20.720Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "1.927893074s"}
2022-06-23T01:46:20.742Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:20.761Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "3.714938635s"}
2022-06-23T01:46:21.093Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:21.117Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "5.685547874s"}
2022-06-23T01:46:21.136Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:21.142Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:21.161Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "3.053515001s"}
2022-06-23T01:46:21.847Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:21.884Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "3.626272661s"}
2022-06-23T01:46:22.105Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.127Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "3.657998601s"}
2022-06-23T01:46:22.426Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:22.428Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:22.441Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:22.444Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:22.466Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "4.171386775s"}
2022-06-23T01:46:22.582Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.602Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "9.881617684s"}
2022-06-23T01:46:22.748Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.768Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "6.174998288s"}
2022-06-23T01:46:22.798Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:22.818Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "8.303916185s"}
2022-06-23T01:46:23.033Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:23.055Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "2.957415405s"}
2022-06-23T01:46:23.137Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:23.156Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "3.025381397s"}
2022-06-23T01:46:23.702Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:23.739Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "4.536075788s"}
2022-06-23T01:46:24.117Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.141Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "3.101068358s"}
2022-06-23T01:46:24.239Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:24.245Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.264Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "8.403382909s"}
2022-06-23T01:46:24.443Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.464Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "10.535908405s"}
2022-06-23T01:46:24.503Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:24.523Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "7.258541148s"}
2022-06-23T01:46:25.215Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:25.235Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "5.519431698s"}
2022-06-23T01:46:25.850Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:25.876Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "6.192406237s"}
2022-06-23T01:46:26.050Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:26.071Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "5.731651447s"}
2022-06-23T01:46:26.209Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:26.229Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "7.895065277s"}
2022-06-23T01:46:26.598Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:26.631Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "3.897063041s"}
2022-06-23T01:46:26.640Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:26.641Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:26.649Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:26.666Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:26.686Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "9.799764648s"}
2022-06-23T01:46:26.836Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:26.857Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "4.688539534s"}
2022-06-23T01:46:27.277Z ERROR guardian-0.supervisor Runnable died {"dn": "root.celowatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:27.298Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.celowatch", "backoff": "12.524406984s"}
2022-06-23T01:46:28.993Z ERROR guardian-0.supervisor Runnable died {"dn": "root.avalanchewatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:29.013Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.avalanchewatch", "backoff": "9.759797387s"}
2022-06-23T01:46:29.195Z INFO guardian-0.root.terrawatch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:29.201Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terrawatch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:29.220Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terrawatch", "backoff": "11.023202241s"}
2022-06-23T01:46:29.288Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-finalized", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:29.311Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-finalized", "backoff": "7.50489408s"}
2022-06-23T01:46:30.797Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:30.826Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "6.519156792s"}
2022-06-23T01:46:31.188Z ERROR guardian-0.supervisor Runnable died {"dn": "root.klaytnwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.klaytnwatch", "backoff": "12.590564047s"}
2022-06-23T01:46:31.602Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:31.631Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "4.205351693s"}
2022-06-23T01:46:31.632Z ERROR guardian-0.supervisor Runnable died {"dn": "root.bscwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8546: connect: connection refused"}
2022-06-23T01:46:31.651Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.bscwatch", "backoff": "12.716561437s"}
2022-06-23T01:46:31.813Z ERROR guardian-0.supervisor Runnable died {"dn": "root.ethwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.834Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.ethwatch", "backoff": "7.271435966s"}
2022-06-23T01:46:31.839Z ERROR guardian-0.supervisor Runnable died {"dn": "root.fantomwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:31.857Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.fantomwatch", "backoff": "12.612949664s"}
2022-06-23T01:46:32.121Z ERROR guardian-0.supervisor Runnable died {"dn": "root.acalawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.144Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.acalawatch", "backoff": "8.11059061s"}
2022-06-23T01:46:32.536Z ERROR guardian-0.supervisor Runnable died {"dn": "root.aurorawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.565Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.aurorawatch", "backoff": "10.727690014s"}
2022-06-23T01:46:32.701Z INFO guardian-0.root.terra2watch connecting to websocket {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:32.708Z ERROR guardian-0.supervisor Runnable died {"dn": "root.terra2watch", "error": "returned error when NODE_STATE_NEW: websocket dial failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:32.730Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.terra2watch", "backoff": "5.88676377s"}
2022-06-23T01:46:34.187Z ERROR guardian-0.supervisor Runnable died {"dn": "root.karurawatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:34.213Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.karurawatch", "backoff": "7.268425411s"}
2022-06-23T01:46:35.070Z ERROR guardian-0.supervisor Runnable died {"dn": "root.polygonwatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:35.094Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.polygonwatch", "backoff": "19.09186602s"}
2022-06-23T01:46:36.541Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to indexer {"url": "ws://host.docker.internal:8545"}
2022-06-23T01:46:36.543Z INFO guardian-0.root.algorandwatch Algorand watcher connecting to RPC node {"url": "https://host.docker.internal:4001"}
2022-06-23T01:46:36.555Z ERROR guardian-0.root.algorandwatch StatusAfterBlock {"error": "Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:36.558Z ERROR guardian-0.supervisor Runnable died {"dn": "root.algorandwatch", "error": "returned error when NODE_STATE_NEW: Get \"https://host.docker.internal:4001/v2/status/wait-for-block-after/0\": dial tcp 192.168.65.2:4001: connect: connection refused"}
2022-06-23T01:46:36.581Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.algorandwatch", "backoff": "8.553513052s"}
2022-06-23T01:46:36.907Z ERROR guardian-0.supervisor Runnable died {"dn": "root.solwatch-confirmed", "error": "returned error when NODE_STATE_NEW: rpc call getSlot() on http://host.docker.internal:8899: Post \"http://host.docker.internal:8899\": dial tcp 192.168.65.2:8899: connect: connection refused"}
2022-06-23T01:46:36.927Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.solwatch-confirmed", "backoff": "8.910416941s"}
2022-06-23T01:46:37.395Z ERROR guardian-0.supervisor Runnable died {"dn": "root.oasiswatch", "error": "returned error when NODE_STATE_NEW: dialing eth client failed: dial tcp 192.168.65.2:8545: connect: connection refused"}
2022-06-23T01:46:37.414Z INFO guardian-0.supervisor rescheduling supervised node {"dn": "root.oasiswatch", "backoff": "13.967797812s"}

7657
projects/evm-messenger/package-lock.json generated Normal file

File diff suppressed because it is too large

View File

@@ -4,7 +4,8 @@
"description": "A simple template for getting started with xDapps.",
"main": "starter.js",
"scripts": {
"test": "sh test.sh"
"guardiand": "sh wormhole.sh",
"cleanup": "npx pm2 kill && docker kill guardiand && docker rm guardiand"
},
"keywords": [],
"author": "",
@@ -12,7 +13,11 @@
"workspaces": [],
"type": "module",
"dependencies": {
"@certusone/wormhole-sdk": "^0.3.3"
"@certusone/wormhole-sdk": "^0.3.3",
"byteify": "^2.0.10",
"ethers": "^5.6.9",
"ganache": "^7.3.1",
"node-fetch": "^3.2.6",
"pm2": "^5.2.0"
}
}
}

View File

@@ -0,0 +1,11 @@
node orchestrator.js eth0 deploy
node orchestrator.js eth1 deploy
node orchestrator.js eth0 register_chain eth1
node orchestrator.js eth1 register_chain eth0
node orchestrator.js eth0 send_msg "From: eth0\nMsg: Hello World!"
node orchestrator.js eth1 submit_vaa eth0 latest
node orchestrator.js eth1 send_msg "From: eth1\nMsg: Hello World!"
node orchestrator.js eth0 submit_vaa eth1 latest
sleep 10
node orchestrator.js eth0 get_current_msg
node orchestrator.js eth1 get_current_msg
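
For orientation, the sketch below shows roughly what a submit_vaa step can do in this setup: fetch the signed VAA that the source chain's Messenger emitted from the guardian's REST gateway (the restAddress in xdapp.config.json later in this diff), then hand it to receiveEncodedMsg on the destination chain. This is an illustrative sketch only, not the committed orchestrator.js; the submitVaa name, the shape of the target argument, and the way the sequence is passed in are assumptions.

// submit_vaa sketch (illustrative only, not the committed orchestrator.js)
import fetch from "node-fetch";
import { ethers } from "ethers";
import { getEmitterAddressEth } from "@certusone/wormhole-sdk";

const WORMHOLE_REST = "http://localhost:7071";   // wormhole.restAddress from xdapp.config.json
const MESSENGER_ABI = ["function receiveEncodedMsg(bytes encodedMsg)"];

// Fetch the VAA emitted on chain `emitterChainId` by `srcMessenger` at `sequence`
// and relay it to the Messenger deployed on the target chain.
async function submitVaa(emitterChainId, srcMessenger, sequence, target) {
  const emitter = await getEmitterAddressEth(srcMessenger);
  const url = `${WORMHOLE_REST}/v1/signed_vaa/${emitterChainId}/${emitter}/${sequence}`;
  const { vaaBytes } = await (await fetch(url)).json();   // base64-encoded signed VAA

  const provider = new ethers.providers.JsonRpcProvider(target.rpc);
  const signer = new ethers.Wallet(target.privateKey, provider);
  const messenger = new ethers.Contract(target.deployedAddress, MESSENGER_ABI, signer);

  const tx = await messenger.receiveEncodedMsg(Buffer.from(vaaBytes, "base64"));
  await tx.wait();
}

The "latest" argument used in the commands above presumably resolves to the most recent sequence recorded for that network; compare the emittedVAAs array in xdapp.config.json.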

View File

@@ -0,0 +1,117 @@
#!/usr/bin/env bash
if ! docker info > /dev/null 2>&1; then
echo "This script uses docker, and it isn't running - please start docker and try again!"
exit 1
fi
# Check if wormhole/ repo exists.
# If it doesn't, then clone and build guardiand
if [ ! -d "./wormhole" ]
then
git clone https://github.com/certusone/wormhole
cd wormhole/
DOCKER_BUILDKIT=1 docker build --target go-export -f Dockerfile.proto -o type=local,dest=node .
DOCKER_BUILDKIT=1 docker build --target node-export -f Dockerfile.proto -o type=local,dest=. .
cd node/
echo "Have patience, this step takes upwards of 500 seconds!"
if [ $(uname -m) = "arm64" ]; then
echo "Building Guardian for linux/amd64"
DOCKER_BUILDKIT=1 docker build --platform linux/amd64 -f Dockerfile -t guardian .
else
echo "Building Guardian natively"
DOCKER_BUILDKIT=1 docker build -f Dockerfile -t guardian .
fi
cd ../../
fi
# Start EVM Chain 0
npx pm2 start 'ganache -p 8545 -m "myth like bonus scare over problem client lizard pioneer submit female collect" --block-time 2' --name eth0
# Start EVM Chain 1
npx pm2 start 'ganache -p 8546 -m "myth like bonus scare over problem client lizard pioneer submit female collect" --block-time 2' --name eth1
# Install Wormhole Eth Dependencies
cd wormhole/ethereum
if [ ! -d "./node_modules/" ]
then
npm i
fi
if [ ! -f "./.env" ]
then
cp .env.test .env
fi
npm run build
# Deploy Wormhole Contracts to EVM Chain 0
npm run migrate && npx truffle exec scripts/deploy_test_token.js && npx truffle exec scripts/register_solana_chain.js && npx truffle exec scripts/register_terra_chain.js && npx truffle exec scripts/register_bsc_chain.js && npx truffle exec scripts/register_algo_chain.js
# Deploy Wormhole Contracts to EVM Chain 1
perl -pi -e 's/CHAIN_ID=0x2/CHAIN_ID=0x4/g' .env && perl -pi -e 's/8545/8546/g' truffle-config.js
npm run migrate && npx truffle exec scripts/deploy_test_token.js && npx truffle exec scripts/register_solana_chain.js && npx truffle exec scripts/register_terra_chain.js && npx truffle exec scripts/register_eth_chain.js && npx truffle exec scripts/register_algo_chain.js
perl -pi -e 's/CHAIN_ID=0x4/CHAIN_ID=0x2/g' .env && perl -pi -e 's/8546/8545/g' truffle-config.js
cd ../../
# Run Guardiand
if [ $(uname -m) = "arm64" ]; then
docker run -d --name guardiand -p 7073:7073 -p 7071:7071 -p 7070:7070 --hostname guardian-0 --cap-add=IPC_LOCK --platform linux/amd64 --entrypoint /guardiand guardian node \
--unsafeDevMode --guardianKey /tmp/bridge.key --publicRPC "[::]:7070" --publicWeb "[::]:7071" --adminSocket /tmp/admin.sock --dataDir /tmp/data \
--ethRPC ws://host.docker.internal:8545 \
--ethContract "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550" \
--bscRPC ws://host.docker.internal:8546 \
--bscContract "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550" \
--polygonRPC ws://host.docker.internal:8545 \
--avalancheRPC ws://host.docker.internal:8545 \
--auroraRPC ws://host.docker.internal:8545 \
--fantomRPC ws://host.docker.internal:8545 \
--oasisRPC ws://host.docker.internal:8545 \
--karuraRPC ws://host.docker.internal:8545 \
--acalaRPC ws://host.docker.internal:8545 \
--klaytnRPC ws://host.docker.internal:8545 \
--celoRPC ws://host.docker.internal:8545 \
--moonbeamRPC ws://host.docker.internal:8545 \
--neonRPC ws://host.docker.internal:8545 \
--terraWS ws://host.docker.internal:8545 \
--terra2WS ws://host.docker.internal:8545 \
--terraLCD http://host.docker.internal:1317 \
--terra2LCD http://host.docker.internal:1317 \
--terraContract terra18vd8fpwxzck93qlwghaj6arh4p7c5n896xzem5 \
--terra2Contract terra18vd8fpwxzck93qlwghaj6arh4p7c5n896xzem5 \
--solanaContract Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o \
--solanaWS ws://host.docker.internal:8900 \
--solanaRPC http://host.docker.internal:8899 \
--algorandIndexerRPC ws://host.docker.internal:8545 \
--algorandIndexerToken "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \
--algorandAlgodToken "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \
--algorandAlgodRPC http://host.docker.internal:4001 \
--algorandAppID "4"
else
docker run -d --name guardiand -p 7073:7073 -p 7071:7071 -p 7070:7070 --hostname guardian-0 --cap-add=IPC_LOCK --entrypoint /guardiand guardian node \
--unsafeDevMode --guardianKey /tmp/bridge.key --publicRPC "[::]:7070" --publicWeb "[::]:7071" --adminSocket /tmp/admin.sock --dataDir /tmp/data \
--ethRPC ws://host.docker.internal:8545 \
--ethContract "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550" \
--bscRPC ws://host.docker.internal:8546 \
--bscContract "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550" \
--polygonRPC ws://host.docker.internal:8545 \
--avalancheRPC ws://host.docker.internal:8545 \
--auroraRPC ws://host.docker.internal:8545 \
--fantomRPC ws://host.docker.internal:8545 \
--oasisRPC ws://host.docker.internal:8545 \
--karuraRPC ws://host.docker.internal:8545 \
--acalaRPC ws://host.docker.internal:8545 \
--klaytnRPC ws://host.docker.internal:8545 \
--celoRPC ws://host.docker.internal:8545 \
--moonbeamRPC ws://host.docker.internal:8545 \
--neonRPC ws://host.docker.internal:8545 \
--terraWS ws://host.docker.internal:8545 \
--terraLCD http://terra-terrad:1317 \
--terraContract terra18vd8fpwxzck93qlwghaj6arh4p7c5n896xzem5 \
--solanaContract Bridge1p5gheXUvJ6jGWGeCsgPKgnE3YgdGKRVCMY9o \
--solanaWS ws://host.docker.internal:8900 \
--solanaRPC http://host.docker.internal:8899 \
--algorandIndexerRPC ws://host.docker.internal:8545 \
--algorandIndexerToken "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \
--algorandAlgodToken "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" \
--algorandAlgodRPC http://host.docker.internal:4001 \
--algorandAppID "4"
fi
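
The guardian log excerpt earlier in this diff is what the watchers print when they cannot reach the RPC endpoints configured above, so before starting guardiand it can be worth confirming that the two ganache chains answer on the host. Below is a minimal, illustrative check (not part of the committed code) using the ethers dependency from package.json; the endpoint map mirrors the ports used by this script.

// rpc sanity check (illustrative helper, not part of this commit)
import { ethers } from "ethers";

const endpoints = {
  eth0: "http://localhost:8545",
  eth1: "http://localhost:8546",
};

// Ask each ganache chain for its latest block; a connection refused here means the
// corresponding watcher inside guardiand is likely to fail the same way.
for (const [name, url] of Object.entries(endpoints)) {
  try {
    const height = await new ethers.providers.JsonRpcProvider(url).getBlockNumber();
    console.log(`${name} (${url}) is up at block ${height}`);
  } catch (err) {
    console.error(`${name} (${url}) is unreachable: ${err.message}`);
  }
}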

View File

@@ -0,0 +1,25 @@
{
"networks": {
"evm0": {
"type": "evm",
"wormholeChainId": 2,
"rpc": "http://locahost:8545",
"privateKey": "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "",
"emittedVAAs": []
},
"evm1": {
"type": "evm",
"wormholeChainId": 4,
"rpc": "http://locahost:8545",
"privateKey": "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d",
"bridgeAddress": "0xC89Ce4735882C9F0f0FE26686c53074E09B0D550",
"deployedAddress": "",
"emittedVAAs": []
}
},
"wormhole": {
"restAddress": "http://localhost:7071"
}
}
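
For context, below is a rough sketch of how a deploy step could consume this config: read the network entry, connect with ethers, deploy Messenger from the Foundry build artifact, and write the address back into deployedAddress so later steps (register_chain, submit_vaa) can find it. The artifact path and the evm0 default are assumptions rather than the committed orchestrator.js, and test.sh's eth0/eth1 names presumably map onto the evm0/evm1 entries here.

// deploy sketch (illustrative only; assumes `forge build` already wrote out/Messenger.sol/Messenger.json)
import { readFileSync, writeFileSync } from "fs";
import { ethers } from "ethers";

const CONFIG_PATH = "./xdapp.config.json";
const config = JSON.parse(readFileSync(CONFIG_PATH, "utf8"));

async function deploy(networkName) {
  const network = config.networks[networkName];
  const provider = new ethers.providers.JsonRpcProvider(network.rpc);
  const signer = new ethers.Wallet(network.privateKey, provider);

  // Foundry writes the ABI and creation bytecode into the `out` dir configured in foundry.toml.
  const artifact = JSON.parse(readFileSync("./out/Messenger.sol/Messenger.json", "utf8"));
  const factory = new ethers.ContractFactory(artifact.abi, artifact.bytecode.object, signer);

  const messenger = await factory.deploy();
  await messenger.deployed();

  // Record the address so register_chain / submit_vaa steps can target this deployment.
  network.deployedAddress = messenger.address;
  writeFileSync(CONFIG_PATH, JSON.stringify(config, null, 2));
  console.log(`${networkName}: Messenger deployed at ${messenger.address}`);
}

await deploy(process.argv[2] ?? "evm0");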