NTT / Acct / Node: Guardian support (#3815)
* NTT/Acct/Node: Guardian changes
* Add per-emitter enforcement
* complete ntt accountant integration tests and run in parallel
* Minor tweaks
* Increase delay in tests
* fix accountant ci check
* Add CI AR address
* update prefixes
* increase timeout
* update ntt transfer wire format
* Code review rework from PR #3800
* Up tilt timeout
* Allow NTT accountant without base accountant
* Define known automatic relayer emitters
* Code review rework

---------

Co-authored-by: Evan Gray <battledingo@gmail.com>
parent b31bec73e2
commit 3a9dfd968b
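The per-emitter enforcement called out in the commit message is modeled in the changes below as a validEmitters map whose value is the enforcement flag. A minimal, self-contained Go sketch of that idea (illustrative only, using simplified stand-in types rather than the SDK's vaa.ChainID / vaa.Address):

package main

import "fmt"

// Simplified stand-ins for vaa.ChainID and vaa.Address (sketch only).
type chainID uint16
type address [32]byte

// emitterKey identifies an emitter by chain and address.
type emitterKey struct {
	emitterChainId chainID
	emitterAddr    address
}

// validEmitters maps each monitored emitter to its enforcement flag:
// true means accounting is enforced, false means log-only.
type validEmitters map[emitterKey]bool

func main() {
	emitters := make(validEmitters)
	emitters[emitterKey{emitterChainId: 2}] = true  // enforced
	emitters[emitterKey{emitterChainId: 4}] = false // monitored in log-only mode

	enforce, covered := emitters[emitterKey{emitterChainId: 2}]
	fmt.Println(covered, enforce) // true true
}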
@@ -35,7 +35,7 @@ jobs:
kubectl config use-context ci

# temporarily set --guardiand_loglevel=info to debug https://github.com/wormhole-foundation/wormhole/issues/3052
- run: tilt ci -- --ci --namespace=$DEPLOY_NS --num=2 --guardiand_loglevel=info
- run: tilt ci --timeout 45m0s -- --ci --namespace=$DEPLOY_NS --num=2 --guardiand_loglevel=info
timeout-minutes: 60

# Clean up k8s resources
Tiltfile (18 changes)
@@ -274,17 +274,25 @@ def build_node_yaml():
"--wormchainURL",
"wormchain:9090",

"--accountantWS",
"http://wormchain:26657",

"--accountantContract",
"wormhole14hj2tavq8fpesdwxxcu44rty3hh90vhujrvcmstl4zr3txmfvw9srrg465",
"--accountantKeyPath",
"/tmp/mounted-keys/wormchain/accountantKey",
"--accountantKeyPassPhrase",
"test0000",
"--accountantWS",
"http://wormchain:26657",
"--accountantCheckEnabled",
"true",

"--accountantNttContract",
"wormhole17p9rzwnnfxcjp32un9ug7yhhzgtkhvl9jfksztgw5uh69wac2pgshdnj3k",
"--accountantNttKeyPath",
"/tmp/mounted-keys/wormchain/accountantNttKey",
"--accountantNttKeyPassPhrase",
"test0000",

"--ibcContract",
"wormhole1nc5tatafv6eyq7llkr2gv50ff9e22mnf70qgjlv737ktmt4eswrq0kdhcj",
"--ibcWS",
@@ -597,6 +605,12 @@ if ci_tests:
trigger_mode = trigger_mode,
resource_deps = [], # uses devnet-consts.json, but wormchain/contracts/tools/test_accountant.sh handles waiting for guardian, not having deps gets the build earlier
)
k8s_resource(
"ntt-accountant-ci-tests",
labels = ["ci"],
trigger_mode = trigger_mode,
resource_deps = [], # uses devnet-consts.json, but wormchain/contracts/tools/test_ntt_accountant.sh handles waiting for guardian, not having deps gets the build earlier
)
k8s_resource(
"query-ci-tests",
labels = ["ci"],
@@ -41,7 +41,7 @@ spec:
- --wallet.deterministic
- --chain.time="1970-01-01T00:00:00+00:00"
- --host=0.0.0.0
- --wallet.totalAccounts=11
- --wallet.totalAccounts=13
- --chain.chainId=1
- --chain.asyncRequestProcessing=false
ports:
@@ -42,7 +42,7 @@ spec:
- --wallet.deterministic
- --chain.time="1970-01-01T00:00:00+00:00"
- --host=0.0.0.0
- --wallet.totalAccounts=11
- --wallet.totalAccounts=13
- --chain.chainId=1397
- --chain.asyncRequestProcessing=false
ports:
@@ -60,6 +60,10 @@ spec:
path: gwrelayerKey0
- key: gwrelayerKey1
path: gwrelayerKey1
- key: accountantNttKey0
path: accountantNttKey0
- key: accountantNttKey1
path: accountantNttKey1
- name: node-config
configMap:
name: node-config
@@ -224,6 +228,8 @@ data:
accountantKey1: LS0tLS1CRUdJTiBURU5ERVJNSU5UIFBSSVZBVEUgS0VZLS0tLS0Ka2RmOiBiY3J5cHQKc2FsdDogNzc1M0NCQTBBMUQ0NTJCMkE2QzlERDM4ODc3MTg0NEEKdHlwZTogc2VjcDI1NmsxCgpSYnhRVWRnK2ZHcjMzZTAyVWFFQW1YTDFlNFkrTGJUMFdqbnl4RVR3OXBoL2JXOGI0MzdhWmErOWlCc3NBa0UyCnRScUwvb0J1NWFnQXJocHNnWUgxNlhOWjJHMXRwY0R3V0dQZ1VWVT0KPUd6YUwKLS0tLS1FTkQgVEVOREVSTUlOVCBQUklWQVRFIEtFWS0tLS0t
gwrelayerKey0: LS0tLS1CRUdJTiBURU5ERVJNSU5UIFBSSVZBVEUgS0VZLS0tLS0KdHlwZTogc2VjcDI1NmsxCmtkZjogYmNyeXB0CnNhbHQ6IDc4OUYzRTBCMkVGNDcyNjAyQzNFMUE0OUI2OENFQzlBCgpGWHAvSllPS3E4WmZtOWxHZ3ZFNEM3NXFyUXFNZFp2RHNWRjhObTdMQU1oR2dHbXBnZnpoZjUrZ3IwZ1hjYjVWCmtSTXA2c0p0NkxCVzRPYWF2ckk3ay84Vml2NWhMVU1la1dPMHg5bz0KPUxrb1MKLS0tLS1FTkQgVEVOREVSTUlOVCBQUklWQVRFIEtFWS0tLS0t
gwrelayerKey1: LS0tLS1CRUdJTiBURU5ERVJNSU5UIFBSSVZBVEUgS0VZLS0tLS0Ka2RmOiBiY3J5cHQKc2FsdDogNDc5RDk3RDE2OTE0QkQ4QjlFNUUwQzkzMDA0RDA4RUEKdHlwZTogc2VjcDI1NmsxCgpvTEJ0aUkwT2pudXo5bHlzeVlZOFhQeEVkTnpwYUJOVWFkL0UySlJld2pFWFZNVVNTWll2QVZKbERiN3hEQjlSCmEvdm45SFNPM2hKOFc1QTBKOVFqUVZXRzVoZXBNZVpQUEI4M1FCUT0KPVJuTGEKLS0tLS1FTkQgVEVOREVSTUlOVCBQUklWQVRFIEtFWS0tLS0t
accountantNttKey0: LS0tLS1CRUdJTiBURU5ERVJNSU5UIFBSSVZBVEUgS0VZLS0tLS0Ka2RmOiBiY3J5cHQKc2FsdDogNzI4NTBEREJFNDQ4NzZBN0Q1Q0YxNDlBQjBGQjNBQzEKdHlwZTogc2VjcDI1NmsxCgpYN1BGMUJaZFBZMmlvRHdVRm9KcXdVdVg4YlFmcFNGckk4UklPS2g1ZUg5cCtDUzZYMm5lM2hVWGFPTDB3YXhUCnM3QVduTzErU241L1g1V0NicklqNHdDVUcwUWdNb0IyN2VFQnB2ND0KPWJiSEkKLS0tLS1FTkQgVEVOREVSTUlOVCBQUklWQVRFIEtFWS0tLS0t
accountantNttKey1: LS0tLS1CRUdJTiBURU5ERVJNSU5UIFBSSVZBVEUgS0VZLS0tLS0Ka2RmOiBiY3J5cHQKc2FsdDogNEI2NDI1NDY0MDY0RTIzQjJENUUyNkQyNUI1QUIzQTcKdHlwZTogc2VjcDI1NmsxCgp2NDFNNGdqelc2MHVwcUhyb2l3aURYakVJMEE5WjN1R2lZcmdyNVpjUit2c3V5RFdDZWNXZUFqV2NXb2tINmRhCldKQ1cvdjNua1pqa0xhajByeEpxYTNrSThodDBtdjZ4eDB0WHhSUT0KPUpSZS8KLS0tLS1FTkQgVEVOREVSTUlOVCBQUklWQVRFIEtFWS0tLS0t
---
apiVersion: v1
kind: ConfigMap
@@ -69,7 +69,32 @@ spec:
command:
- test
- -e
- "/app/accountant/success"
- "/app/tools/success"
initialDelaySeconds: 5
periodSeconds: 5
---
kind: Job
apiVersion: batch/v1
metadata:
name: ntt-accountant-ci-tests
spec:
backoffLimit: 0
template:
spec:
restartPolicy: Never
containers:
- name: ntt-accountant-ci-tests
image: wormchain-deploy
command:
- /bin/sh
- -c
- "bash /app/tools/test_ntt_accountant.sh && touch /app/tools/success"
readinessProbe:
exec:
command:
- test
- -e
- "/app/tools/success"
initialDelaySeconds: 5
periodSeconds: 5
---
@@ -130,9 +130,7 @@ var (
nearRPC *string
nearContract *string

wormchainURL *string
wormchainKeyPath *string
wormchainKeyPassPhrase *string
wormchainURL *string

ibcWS *string
ibcLCD *string
@@ -145,6 +143,10 @@ var (
accountantKeyPath *string
accountantKeyPassPhrase *string

accountantNttContract *string
accountantNttKeyPath *string
accountantNttKeyPassPhrase *string

aptosRPC *string
aptosAccount *string
aptosHandle *string
@@ -309,9 +311,6 @@ func init() {
nearContract = NodeCmd.Flags().String("nearContract", "", "Near contract")

wormchainURL = node.RegisterFlagWithValidationOrFail(NodeCmd, "wormchainURL", "Wormhole-chain gRPC URL", "wormchain:9090", []string{""})
// TODO: These are deprecated. Get rid of them once the guardians have had a chance to migrate off of them.
wormchainKeyPath = NodeCmd.Flags().String("wormchainKeyPath", "", "path to wormhole-chain private key for signing transactions")
wormchainKeyPassPhrase = NodeCmd.Flags().String("wormchainKeyPassPhrase", "", "pass phrase used to unarmor the wormchain key file")

ibcWS = node.RegisterFlagWithValidationOrFail(NodeCmd, "ibcWS", "Websocket used to listen to the IBC receiver smart contract on wormchain", "ws://wormchain:26657/websocket", []string{"ws", "wss"})
ibcLCD = node.RegisterFlagWithValidationOrFail(NodeCmd, "ibcLCD", "Path to LCD service root for http calls", "http://wormchain:1317", []string{"http", "https"})
@@ -324,6 +323,10 @@ func init() {
accountantKeyPassPhrase = NodeCmd.Flags().String("accountantKeyPassPhrase", "", "pass phrase used to unarmor the accountant key file")
accountantCheckEnabled = NodeCmd.Flags().Bool("accountantCheckEnabled", false, "Should accountant be enforced on transfers")

accountantNttContract = NodeCmd.Flags().String("accountantNttContract", "", "Address of the NTT accountant smart contract on wormchain")
accountantNttKeyPath = NodeCmd.Flags().String("accountantNttKeyPath", "", "path to NTT accountant private key for signing transactions")
accountantNttKeyPassPhrase = NodeCmd.Flags().String("accountantNttKeyPassPhrase", "", "pass phrase used to unarmor the NTT accountant key file")

aptosRPC = node.RegisterFlagWithValidationOrFail(NodeCmd, "aptosRPC", "Aptos RPC URL", "http://aptos:8080", []string{"http", "https"})
aptosAccount = NodeCmd.Flags().String("aptosAccount", "", "aptos account")
aptosHandle = NodeCmd.Flags().String("aptosHandle", "", "aptos handle")
@@ -1027,51 +1030,67 @@ func runNode(cmd *cobra.Command, args []string) {
wormchainId = "wormchain-testnet-0"
}

var accountantWormchainConn *wormconn.ClientConn
var accountantWormchainConn, accountantNttWormchainConn *wormconn.ClientConn
if *accountantContract != "" {
// TODO: wormchainKeyPath and wormchainKeyPassPhrase are being replaced by accountantKeyPath and accountantKeyPassPhrase.
// Give the guardians time to migrate off of the old parameters, but then remove them.
keyPath := *accountantKeyPath
if keyPath == "" {
if *wormchainKeyPath == "" {
logger.Fatal("if accountantContract is specified, accountantKeyPath is required", zap.String("component", "gacct"))
}
logger.Error("the wormchainKeyPath parameter is deprecated, please change to accountantKeyPath", zap.String("component", "gacct"))
keyPath = *wormchainKeyPath
} else if *wormchainKeyPath != "" {
logger.Fatal("the wormchainKeyPath parameter is obsolete, please remove it", zap.String("component", "gacct"))
if *accountantKeyPath == "" {
logger.Fatal("if accountantContract is specified, accountantKeyPath is required", zap.String("component", "gacct"))
}

keyPassPhrase := *accountantKeyPassPhrase
if keyPassPhrase == "" {
if *wormchainKeyPassPhrase == "" {
logger.Fatal("if accountantContract is specified, accountantKeyPassPhrase is required", zap.String("component", "gacct"))
}
logger.Error("the wormchainKeyPassPhrase parameter is deprecated, please change to accountantKeyPassPhrase", zap.String("component", "gacct"))
keyPassPhrase = *wormchainKeyPassPhrase
} else if *wormchainKeyPassPhrase != "" {
logger.Fatal("the wormchainKeyPassPhrase parameter is obsolete, please remove it", zap.String("component", "gacct"))
if *accountantKeyPassPhrase == "" {
logger.Fatal("if accountantContract is specified, accountantKeyPassPhrase is required", zap.String("component", "gacct"))
}

keyPathName := keyPath
keyPathName := *accountantKeyPath
if *unsafeDevMode {
idx, err := devnet.GetDevnetIndex()
if err != nil {
logger.Fatal("failed to get devnet index", zap.Error(err), zap.String("component", "gacct"))
}
keyPathName = fmt.Sprint(keyPath, idx)
keyPathName = fmt.Sprint(*accountantKeyPath, idx)
}

wormchainKey, err := wormconn.LoadWormchainPrivKey(keyPathName, keyPassPhrase)
wormchainKey, err := wormconn.LoadWormchainPrivKey(keyPathName, *accountantKeyPassPhrase)
if err != nil {
logger.Fatal("failed to load wormchain private key", zap.Error(err), zap.String("component", "gacct"))
logger.Fatal("failed to load accountant private key", zap.Error(err), zap.String("component", "gacct"))
}

// Connect to wormchain.
logger.Info("Connecting to wormchain", zap.String("wormchainURL", *wormchainURL), zap.String("keyPath", keyPathName), zap.String("component", "gacct"))
// Connect to wormchain for the accountant.
logger.Info("Connecting to wormchain for accountant", zap.String("wormchainURL", *wormchainURL), zap.String("keyPath", keyPathName), zap.String("component", "gacct"))
accountantWormchainConn, err = wormconn.NewConn(rootCtx, *wormchainURL, wormchainKey, wormchainId)
if err != nil {
logger.Fatal("failed to connect to wormchain", zap.Error(err), zap.String("component", "gacct"))
logger.Fatal("failed to connect to wormchain for accountant", zap.Error(err), zap.String("component", "gacct"))
}
}

// If the NTT accountant is enabled, create a wormchain connection for it.
if *accountantNttContract != "" {
if *accountantNttKeyPath == "" {
logger.Fatal("if accountantNttContract is specified, accountantNttKeyPath is required", zap.String("component", "gacct"))
}

if *accountantNttKeyPassPhrase == "" {
logger.Fatal("if accountantNttContract is specified, accountantNttKeyPassPhrase is required", zap.String("component", "gacct"))
}

keyPathName := *accountantNttKeyPath
if *unsafeDevMode {
idx, err := devnet.GetDevnetIndex()
if err != nil {
logger.Fatal("failed to get devnet index", zap.Error(err), zap.String("component", "gacct"))
}
keyPathName = fmt.Sprint(*accountantNttKeyPath, idx)
}

wormchainKey, err := wormconn.LoadWormchainPrivKey(keyPathName, *accountantNttKeyPassPhrase)
if err != nil {
logger.Fatal("failed to load NTT accountant private key", zap.Error(err), zap.String("component", "gacct"))
}

// Connect to wormchain for the NTT accountant.
logger.Info("Connecting to wormchain for NTT accountant", zap.String("wormchainURL", *wormchainURL), zap.String("keyPath", keyPathName), zap.String("component", "gacct"))
accountantNttWormchainConn, err = wormconn.NewConn(rootCtx, *wormchainURL, wormchainKey, wormchainId)
if err != nil {
logger.Fatal("failed to connect to wormchain for NTT accountant", zap.Error(err), zap.String("component", "gacct"))
}
}
@@ -1553,7 +1572,7 @@ func runNode(cmd *cobra.Command, args []string) {
guardianOptions := []*node.GuardianOption{
node.GuardianOptionDatabase(db),
node.GuardianOptionWatchers(watcherConfigs, ibcWatcherConfig),
node.GuardianOptionAccountant(*accountantContract, *accountantWS, *accountantCheckEnabled, accountantWormchainConn),
node.GuardianOptionAccountant(*accountantWS, *accountantContract, *accountantCheckEnabled, accountantWormchainConn, *accountantNttContract, accountantNttWormchainConn),
node.GuardianOptionGovernor(*chainGovernorEnabled),
node.GuardianOptionGatewayRelayer(*gatewayRelayerContract, gatewayRelayerWormchainConn),
node.GuardianOptionQueryHandler(*ccqEnabled, *ccqAllowedRequesters),
@@ -437,5 +437,5 @@ func submit(
gsIndex := uint32(0)
guardianIndex := uint32(0)

return accountant.SubmitObservationsToContract(ctx, logger, gk, gsIndex, guardianIndex, wormchainConn, contract, msgs)
return accountant.SubmitObservationsToContract(ctx, logger, gk, gsIndex, guardianIndex, wormchainConn, contract, accountant.SubmitObservationPrefix, msgs)
}
@@ -41,26 +41,27 @@ type (
BroadcastTxResponseToString(txResp *sdktx.BroadcastTxResponse) string
}

// tokenBridgeKey is the key to the map of token bridges being monitored
tokenBridgeKey struct {
// emitterKey is the key to a map of emitters to be monitored
emitterKey struct {
emitterChainId vaa.ChainID
emitterAddr vaa.Address
}

// tokenBridgeEntry is the payload of the map of the token bridges being monitored
tokenBridgeEntry struct {
}
// validEmitters is a set of supported emitter chain / address pairs. The payload is the enforcement flag.
validEmitters map[emitterKey]bool

// pendingEntry is the payload for each pending transfer
pendingEntry struct {
msg *common.MessagePublication
msgId string
digest string
msg *common.MessagePublication
msgId string
digest string
isNTT bool
enforceFlag bool

// stateLock is used to protect the contents of the state struct.
stateLock sync.Mutex

// The state struct contains anything that can be modifed. It is protected by the state lock.
// The state struct contains anything that can be modified. It is protected by the state lock.
state struct {
// updTime is the time that the state struct was last updated.
updTime time.Time
@@ -86,16 +87,27 @@ type Accountant struct {
gst *common.GuardianSetState
guardianAddr ethCommon.Address
msgChan chan<- *common.MessagePublication
tokenBridges map[tokenBridgeKey]*tokenBridgeEntry
tokenBridges validEmitters
pendingTransfersLock sync.Mutex
pendingTransfers map[string]*pendingEntry // Key is the message ID (emitterChain/emitterAddr/seqNo)
subChan chan *common.MessagePublication
env common.Environment

nttContract string
nttWormchainConn AccountantWormchainConn
nttDirectEmitters validEmitters
nttArEmitters validEmitters
nttSubChan chan *common.MessagePublication
}

// On startup, there can be a large number of re-submission requests.
const subChanSize = 500

// baseEnabled returns true if the base accountant is enabled, false if not.
func (acct *Accountant) baseEnabled() bool {
return acct.contract != ""
}

// NewAccountant creates a new instance of the Accountant object.
func NewAccountant(
ctx context.Context,
@@ -106,6 +118,8 @@ func NewAccountant(
wsUrl string, // the URL of the wormchain websocket interface
wormchainConn AccountantWormchainConn, // used for communicating with the smart contract
enforceFlag bool, // whether or not accountant should be enforced
nttContract string, // the address of the NTT smart contract on wormchain
nttWormchainConn AccountantWormchainConn, // used for communicating with the NTT smart contract
gk *ecdsa.PrivateKey, // the guardian key used for signing observation requests
gst *common.GuardianSetState, // used to get the current guardian set index when sending observation requests
msgChan chan<- *common.MessagePublication, // the channel where transfers received by the accountant runnable should be published
@@ -124,42 +138,60 @@ func NewAccountant(
gst: gst,
guardianAddr: ethCrypto.PubkeyToAddress(gk.PublicKey),
msgChan: msgChan,
tokenBridges: make(map[tokenBridgeKey]*tokenBridgeEntry),
tokenBridges: make(validEmitters),
pendingTransfers: make(map[string]*pendingEntry),
subChan: make(chan *common.MessagePublication, subChanSize),
env: env,

nttContract: nttContract,
nttWormchainConn: nttWormchainConn,
nttDirectEmitters: make(validEmitters),
nttArEmitters: make(validEmitters),
nttSubChan: make(chan *common.MessagePublication, subChanSize),
}
}

// Run initializes the accountant and starts the watcher runnable.
// Start initializes the accountant and starts the worker and watcher runnables.
func (acct *Accountant) Start(ctx context.Context) error {
acct.logger.Debug("entering Start", zap.Bool("enforceFlag", acct.enforceFlag))
acct.logger.Debug("entering Start", zap.Bool("enforceFlag", acct.enforceFlag), zap.Bool("baseEnabled", acct.baseEnabled()), zap.Bool("nttEnabled", acct.nttEnabled()))
acct.pendingTransfersLock.Lock()
defer acct.pendingTransfersLock.Unlock()

emitterMap := sdk.KnownTokenbridgeEmitters
if acct.env == common.TestNet {
emitterMap = sdk.KnownTestnetTokenbridgeEmitters
} else if acct.env == common.UnsafeDevNet || acct.env == common.GoTest || acct.env == common.AccountantMock {
emitterMap = sdk.KnownDevnetTokenbridgeEmitters
if !acct.baseEnabled() && !acct.nttEnabled() {
return fmt.Errorf("start should not be called when neither base nor NTT accountant are enabled")
}

// Build the map of token bridges to be monitored.
for chainId, emitterAddrBytes := range emitterMap {
emitterAddr, err := vaa.BytesToAddress(emitterAddrBytes)
if err != nil {
return fmt.Errorf("failed to convert emitter address for chain: %v", chainId)
if acct.baseEnabled() {
emitterMap := sdk.KnownTokenbridgeEmitters
if acct.env == common.TestNet {
emitterMap = sdk.KnownTestnetTokenbridgeEmitters
} else if acct.env == common.UnsafeDevNet || acct.env == common.GoTest || acct.env == common.AccountantMock {
emitterMap = sdk.KnownDevnetTokenbridgeEmitters
}

tbk := tokenBridgeKey{emitterChainId: chainId, emitterAddr: emitterAddr}
_, exists := acct.tokenBridges[tbk]
if exists {
return fmt.Errorf("detected duplicate token bridge for chain: %v", chainId)
}
// Build the map of token bridges to be monitored.
for chainId, emitterAddrBytes := range emitterMap {
emitterAddr, err := vaa.BytesToAddress(emitterAddrBytes)
if err != nil {
return fmt.Errorf("failed to convert emitter address for chain: %v", chainId)
}

tbe := &tokenBridgeEntry{}
acct.tokenBridges[tbk] = tbe
acct.logger.Info("will monitor token bridge:", zap.Stringer("emitterChainId", tbk.emitterChainId), zap.Stringer("emitterAddr", tbk.emitterAddr))
tbk := emitterKey{emitterChainId: chainId, emitterAddr: emitterAddr}
_, exists := acct.tokenBridges[tbk]
if exists {
return fmt.Errorf("detected duplicate token bridge for chain: %v", chainId)
}

acct.tokenBridges[tbk] = acct.enforceFlag
acct.logger.Info("will monitor token bridge:", zap.Stringer("emitterChainId", tbk.emitterChainId), zap.Stringer("emitterAddr", tbk.emitterAddr))
}
}

// The NTT data structures should be set up before we reload from the db.
if acct.nttEnabled() {
if err := acct.nttStart(ctx); err != nil {
return fmt.Errorf("failed to start ntt accountant: %w", err)
}
}

// Load any existing pending transfers from the db.
@@ -168,22 +200,24 @@ func (acct *Accountant) Start(ctx context.Context) error {
}

// Start the watcher to listen to transfer events from the smart contract.
if acct.env == common.AccountantMock {
// We're not in a runnable context, so we can't use supervisor.
go func() {
_ = acct.worker(ctx)
}()
} else if acct.env != common.GoTest {
if err := supervisor.Run(ctx, "acctworker", common.WrapWithScissors(acct.worker, "acctworker")); err != nil {
return fmt.Errorf("failed to start submit observation worker: %w", err)
}
if acct.baseEnabled() {
if acct.env == common.AccountantMock {
// We're not in a runnable context, so we can't use supervisor.
go func() {
_ = acct.baseWorker(ctx)
}()
} else if acct.env != common.GoTest {
if err := supervisor.Run(ctx, "acctworker", common.WrapWithScissors(acct.baseWorker, "acctworker")); err != nil {
return fmt.Errorf("failed to start submit observation worker: %w", err)
}

if err := supervisor.Run(ctx, "acctwatcher", common.WrapWithScissors(acct.watcher, "acctwatcher")); err != nil {
return fmt.Errorf("failed to start watcher: %w", err)
}
if err := supervisor.Run(ctx, "acctwatcher", common.WrapWithScissors(acct.baseWatcher, "acctwatcher")); err != nil {
return fmt.Errorf("failed to start watcher: %w", err)
}

if err := supervisor.Run(ctx, "acctaudit", common.WrapWithScissors(acct.audit, "acctaudit")); err != nil {
return fmt.Errorf("failed to start audit worker: %w", err)
if err := supervisor.Run(ctx, "acctaudit", common.WrapWithScissors(acct.audit, "acctaudit")); err != nil {
return fmt.Errorf("failed to start audit worker: %w", err)
}
}
}
@@ -195,46 +229,84 @@ func (acct *Accountant) Close() {
acct.wormchainConn.Close()
acct.wormchainConn = nil
}
if acct.nttWormchainConn != nil {
acct.nttWormchainConn.Close()
acct.nttWormchainConn = nil
}
}

func (acct *Accountant) FeatureString() string {
var ret string
if !acct.enforceFlag {
return "acct:logonly"
ret = "acct-logonly"
} else {
ret = "acct"
}
return "acct:enforced"
if acct.nttEnabled() {
if ret != "" {
ret += ":"
}
ret += "ntt-acct"
}

return ret
}

// IsMessageCoveredByAccountant returns `true` if a message should be processed by the Global Accountant, `false` if not.
func (acct *Accountant) IsMessageCoveredByAccountant(msg *common.MessagePublication) bool {
ret, _, _ := acct.isMessageCoveredByAccountant(msg)
return ret
}

// isMessageCoveredByAccountant returns true if a message should be processed by the Global Accountant, false if not.
// It also returns whether or not it is a Native Token Transfer and whether or not accounting is being enforced for this emitter.
func (acct *Accountant) isMessageCoveredByAccountant(msg *common.MessagePublication) (bool, bool, bool) {
isTBT, enforceFlag := acct.isTokenBridgeTransfer(msg)
if isTBT {
return true, false, enforceFlag
}

isNTT, enforceFlag := nttIsMsgDirectNTT(msg, acct.nttDirectEmitters)
if isNTT {
return true, true, enforceFlag
}

isNTT, enforceFlag = nttIsMsgArNTT(msg, acct.nttArEmitters, acct.nttDirectEmitters)
if isNTT {
return true, true, enforceFlag
}

return false, false, false
}

// isTokenBridgeTransfer returns true if a message is a token bridge transfer and whether or not accounting is being enforced for this emitter.
func (acct *Accountant) isTokenBridgeTransfer(msg *common.MessagePublication) (bool, bool) {
msgId := msg.MessageIDString()

// We only care about token bridges.
tbk := tokenBridgeKey{emitterChainId: msg.EmitterChain, emitterAddr: msg.EmitterAddress}
if _, exists := acct.tokenBridges[tbk]; !exists {
if msg.EmitterChain != vaa.ChainIDPythNet {
acct.logger.Debug("ignoring vaa because it is not a token bridge", zap.String("msgID", msgId))
}

return false
enforceFlag, exists := acct.tokenBridges[emitterKey{emitterChainId: msg.EmitterChain, emitterAddr: msg.EmitterAddress}]
if !exists {
return false, false
}

// We only care about transfers.
if !vaa.IsTransfer(msg.Payload) {
acct.logger.Info("ignoring vaa because it is not a transfer", zap.String("msgID", msgId))
return false
return false, false
}

return true
return true, enforceFlag
}

// SubmitObservation will submit token bridge transfers to the accountant smart contract. This is called from the processor
// loop when a local observation is received from a watcher. It returns true if the observation can be published immediately,
// false if not (because it has been submitted to accountant).
// false if not (because it has been submitted to the accountant).
func (acct *Accountant) SubmitObservation(msg *common.MessagePublication) (bool, error) {
msgId := msg.MessageIDString()
acct.logger.Debug("in SubmitObservation", zap.String("msgID", msgId))

if !acct.IsMessageCoveredByAccountant(msg) {
coveredByAcct, isNTT, enforceFlag := acct.isMessageCoveredByAccountant(msg)
if !coveredByAcct {
return true, nil
}
@@ -251,16 +323,16 @@ func (acct *Accountant) SubmitObservation(msg *common.MessagePublication) (bool,
zap.String("msgID", msgId),
zap.String("oldDigest", oldEntry.digest),
zap.String("newDigest", digest),
zap.Bool("enforcing", acct.enforceFlag),
zap.Bool("enforcing", enforceFlag),
)
} else {
acct.logger.Info("blocking transfer because it is already outstanding", zap.String("msgID", msgId), zap.Bool("enforcing", acct.enforceFlag))
acct.logger.Info("blocking transfer because it is already outstanding", zap.String("msgID", msgId), zap.Bool("enforcing", enforceFlag))
}
return !acct.enforceFlag, nil
return !enforceFlag, nil
}

// Add it to the pending map and the database.
pe := &pendingEntry{msg: msg, msgId: msgId, digest: digest}
pe := &pendingEntry{msg: msg, msgId: msgId, digest: digest, isNTT: isNTT, enforceFlag: enforceFlag}
if err := acct.addPendingTransferAlreadyLocked(pe); err != nil {
acct.logger.Error("failed to persist pending transfer, blocking publishing", zap.String("msgID", msgId), zap.Error(err))
return false, err
@@ -268,17 +340,21 @@ func (acct *Accountant) SubmitObservation(msg *common.MessagePublication) (bool,

// This transaction may take a while. Pass it off to the worker so we don't block the processor.
if acct.env != common.GoTest {
acct.logger.Info("submitting transfer to accountant for approval", zap.String("msgID", msgId), zap.Bool("canPublish", !acct.enforceFlag))
tag := "accountant"
if isNTT {
tag = "ntt-accountant"
}
acct.logger.Info(fmt.Sprintf("submitting transfer to %s for approval", tag), zap.String("msgID", msgId), zap.Bool("canPublish", !enforceFlag))
_ = acct.submitObservation(pe)
}

// If we are not enforcing accountant, the event can be published. Otherwise we have to wait to hear back from the contract.
return !acct.enforceFlag, nil
return !enforceFlag, nil
}

// publishTransferAlreadyLocked publishes a pending transfer to the accountant channel and deletes it from the pending map. It assumes the caller holds the lock.
func (acct *Accountant) publishTransferAlreadyLocked(pe *pendingEntry) {
if acct.enforceFlag {
if pe.enforceFlag {
select {
case acct.msgChan <- pe.msg:
acct.logger.Debug("published transfer to channel", zap.String("msgId", pe.msgId))
@@ -331,7 +407,8 @@ func (acct *Accountant) loadPendingTransfers() error {

for _, msg := range pendingTransfers {
msgId := msg.MessageIDString()
if !acct.IsMessageCoveredByAccountant(msg) {
coveredByAcct, isNTT, enforceFlag := acct.isMessageCoveredByAccountant(msg)
if !coveredByAcct {
acct.logger.Error("dropping reloaded pending transfer because it is not covered by the accountant", zap.String("msgID", msgId))
if err := acct.db.AcctDeletePendingTransfer(msgId); err != nil {
acct.logger.Error("failed to delete pending transfer from the db", zap.String("msgId", msgId), zap.Error(err))
@@ -342,7 +419,7 @@ func (acct *Accountant) loadPendingTransfers() error {
acct.logger.Info("reloaded pending transfer", zap.String("msgID", msgId))

digest := msg.CreateDigest()
pe := &pendingEntry{msg: msg, msgId: msgId, digest: digest}
pe := &pendingEntry{msg: msg, msgId: msgId, digest: digest, isNTT: isNTT, enforceFlag: enforceFlag}
pe.setUpdTime()
acct.pendingTransfers[msgId] = pe
}
@@ -357,7 +434,7 @@ func (acct *Accountant) loadPendingTransfers() error {
return nil
}

// submitObservation sends an observation request to the worker so it can be submited to the contract. If the transfer is already
// submitObservation sends an observation request to the worker so it can be submitted to the contract. If the transfer is already
// marked as "submit pending", this function returns false without doing anything. Otherwise it returns true. The return value can
// be used to avoid unnecessary error logging. If writing to the channel would block, this function returns without doing anything,
// assuming the pending transfer will be handled on the next audit interval. This function grabs the state lock.
@@ -372,17 +449,27 @@ func (acct *Accountant) submitObservation(pe *pendingEntry) bool {
pe.state.submitPending = true
pe.state.updTime = time.Now()

select {
case acct.subChan <- pe.msg:
acct.logger.Debug("submitted observation to channel", zap.String("msgId", pe.msgId))
default:
acct.logger.Error("unable to submit observation because the channel is full, will try next interval", zap.String("msgId", pe.msgId))
pe.state.submitPending = false
if pe.isNTT {
acct.submitToChannel(pe, acct.nttSubChan, "ntt-accountant")
} else {
acct.submitToChannel(pe, acct.subChan, "accountant")
}

return true
}

// submitToChannel submits an observation to the specified channel. If the submission fails because the channel is full,
// it marks the transfer as pending so it will be resubmitted by the audit.
func (acct *Accountant) submitToChannel(pe *pendingEntry, subChan chan *common.MessagePublication, tag string) {
select {
case subChan <- pe.msg:
acct.logger.Debug(fmt.Sprintf("submitted observation to channel for %s", tag), zap.String("msgId", pe.msgId))
default:
acct.logger.Error(fmt.Sprintf("unable to submit observation to %s because the channel is full, will try next interval", tag), zap.String("msgId", pe.msgId))
pe.state.submitPending = false
}
}

// clearSubmitPendingFlags is called after a batch is finished being submitted (success or fail). It clears the submit pending flag for everything in the batch.
// It grabs the pending transfer and state locks.
func (acct *Accountant) clearSubmitPendingFlags(msgs []*common.MessagePublication) {
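The submitToChannel helper above relies on Go's non-blocking send (a select with a default case): if the channel is full, the transfer is left for the next audit interval instead of blocking the processor. A standalone sketch of the same pattern (illustrative only, not the guardian code):

package main

import "fmt"

// trySubmit attempts a non-blocking send. It returns false when the channel
// is full so the caller can retry later, mirroring how the accountant defers
// a full channel to the next audit interval.
func trySubmit(ch chan string, msg string) bool {
	select {
	case ch <- msg:
		return true
	default:
		return false
	}
}

func main() {
	ch := make(chan string, 1)
	fmt.Println(trySubmit(ch, "first"))  // true: buffered slot available
	fmt.Println(trySubmit(ch, "second")) // false: channel full
}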
@@ -121,6 +121,8 @@ func newAccountantForTest(
"none", // accountantWS
wormchainConn,
accountantCheckEnabled,
"",
nil,
gk,
gst,
acctWriteC,
@@ -128,26 +128,33 @@ func (acct *Accountant) audit(ctx context.Context) error {

// runAudit is the entry point for the audit of the pending transfer map. It creates a temporary map of all pending transfers and invokes the main audit function.
func (acct *Accountant) runAudit() {
tmpMap := acct.createAuditMap()
acct.logger.Debug("in AuditPendingTransfers: starting audit", zap.Int("numPending", len(tmpMap)))
acct.performAudit(tmpMap)
acct.logger.Debug("leaving AuditPendingTransfers")
tmpMap := acct.createAuditMap(false)
acct.logger.Debug("in AuditPendingTransfers: starting base audit", zap.Int("numPending", len(tmpMap)))
acct.performAudit(tmpMap, acct.wormchainConn, acct.contract)
acct.logger.Debug("in AuditPendingTransfers: finished base audit")

tmpMap = acct.createAuditMap(true)
acct.logger.Debug("in AuditPendingTransfers: starting ntt audit", zap.Int("numPending", len(tmpMap)))
acct.performAudit(tmpMap, acct.nttWormchainConn, acct.nttContract)
acct.logger.Debug("in AuditPendingTransfers: finished ntt audit")
}

// createAuditMap creates a temporary map of all pending transfers. It grabs the pending transfer lock.
func (acct *Accountant) createAuditMap() map[string]*pendingEntry {
func (acct *Accountant) createAuditMap(isNTT bool) map[string]*pendingEntry {
acct.pendingTransfersLock.Lock()
defer acct.pendingTransfersLock.Unlock()

tmpMap := make(map[string]*pendingEntry)
for _, pe := range acct.pendingTransfers {
if pe.hasBeenPendingForTooLong() {
auditErrors.Inc()
acct.logger.Error("transfer has been in the submit pending state for too long", zap.Stringer("lastUpdateTime", pe.updTime()))
if pe.isNTT == isNTT {
if pe.hasBeenPendingForTooLong() {
auditErrors.Inc()
acct.logger.Error("transfer has been in the submit pending state for too long", zap.Stringer("lastUpdateTime", pe.updTime()))
}
key := pe.makeAuditKey()
acct.logger.Debug("will audit pending transfer", zap.String("msgId", pe.msgId), zap.String("moKey", key), zap.Bool("submitPending", pe.submitPending()), zap.Stringer("lastUpdateTime", pe.updTime()))
tmpMap[key] = pe
}
key := pe.makeAuditKey()
acct.logger.Debug("will audit pending transfer", zap.String("msgId", pe.msgId), zap.String("moKey", key), zap.Bool("submitPending", pe.submitPending()), zap.Stringer("lastUpdateTime", pe.updTime()))
tmpMap[key] = pe
}

return tmpMap
@@ -162,9 +169,9 @@ func (pe *pendingEntry) hasBeenPendingForTooLong() bool {

// performAudit audits the temporary map against the smart contract. It is meant to be run in a go routine. It takes a temporary map of all pending transfers
// and validates that against what is reported by the smart contract. For more details, please see the prologue of this file.
func (acct *Accountant) performAudit(tmpMap map[string]*pendingEntry) {
acct.logger.Debug("entering performAudit")
missingObservations, err := acct.queryMissingObservations()
func (acct *Accountant) performAudit(tmpMap map[string]*pendingEntry, wormchainConn AccountantWormchainConn, contract string) {
acct.logger.Debug("entering performAudit", zap.String("contract", contract))
missingObservations, err := acct.queryMissingObservations(wormchainConn, contract)
if err != nil {
acct.logger.Error("unable to perform audit, failed to query missing observations", zap.Error(err))
for _, pe := range tmpMap {
@@ -198,7 +205,7 @@ func (acct *Accountant) performAudit(tmpMap map[string]*pendingEntry) {
pendingTransfers = append(pendingTransfers, pe)
}

transferDetails, err := acct.queryBatchTransferStatus(keys)
transferDetails, err := acct.queryBatchTransferStatus(keys, wormchainConn, contract)
if err != nil {
acct.logger.Error("unable to finish audit, failed to query for transfer statuses", zap.Error(err))
for _, pe := range tmpMap {
@@ -270,7 +277,7 @@ func (acct *Accountant) handleMissingObservation(mo MissingObservation) {
}

// queryMissingObservations queries the contract for the set of observations it thinks are missing for this guardian.
func (acct *Accountant) queryMissingObservations() ([]MissingObservation, error) {
func (acct *Accountant) queryMissingObservations(wormchainConn AccountantWormchainConn, contract string) ([]MissingObservation, error) {
gs := acct.gst.Get()
if gs == nil {
return nil, fmt.Errorf("failed to get guardian set")
@@ -283,7 +290,7 @@ func (acct *Accountant) queryMissingObservations() ([]MissingObservation, error)

query := fmt.Sprintf(`{"missing_observations":{"guardian_set": %d, "index": %d}}`, gs.Index, guardianIndex)
acct.logger.Debug("submitting missing_observations query", zap.String("query", query))
respBytes, err := acct.wormchainConn.SubmitQuery(acct.ctx, acct.contract, []byte(query))
respBytes, err := wormchainConn.SubmitQuery(acct.ctx, contract, []byte(query))
if err != nil {
return nil, fmt.Errorf("missing_observations query failed: %w, %s", err, query)
}
@@ -303,8 +310,8 @@ type queryConn interface {
}

// queryBatchTransferStatus queries the status of the specified transfers and returns a map keyed by transfer key (as a string) to the status.
func (acct *Accountant) queryBatchTransferStatus(keys []TransferKey) (map[string]*TransferStatus, error) {
return queryBatchTransferStatusWithConn(acct.ctx, acct.logger, acct.wormchainConn, acct.contract, keys)
func (acct *Accountant) queryBatchTransferStatus(keys []TransferKey, wormchainConn AccountantWormchainConn, contract string) (map[string]*TransferStatus, error) {
return queryBatchTransferStatusWithConn(acct.ctx, acct.logger, wormchainConn, contract, keys)
}

// queryBatchTransferStatus is a free function that queries the status of the specified transfers and returns a map keyed by transfer key (as a string)
@@ -0,0 +1,200 @@
package accountant

import (
"bytes"
"context"
"encoding/binary"
"fmt"

"github.com/certusone/wormhole/node/pkg/common"
"github.com/certusone/wormhole/node/pkg/supervisor"
"go.uber.org/zap"
)

// nttEnabled returns true if NTT is enabled, false if not.
func (acct *Accountant) nttEnabled() bool {
return acct.nttContract != ""
}

// nttStart initializes the NTT accountant and starts the NTT specific worker and watcher runnables.
func (acct *Accountant) nttStart(ctx context.Context) error {
acct.logger.Debug("entering nttStart")

var err error
acct.nttDirectEmitters, acct.nttArEmitters, err = nttGetEmitters(acct.env)
if err != nil {
return fmt.Errorf("failed to set up NTT emitters: %w", err)
}

for emitter, enforceFlag := range acct.nttDirectEmitters {
tag := ""
if !enforceFlag {
tag = " in log only mode"
}
acct.logger.Info(fmt.Sprintf("will monitor%s for NTT emitter", tag), zap.Stringer("emitterChainId", emitter.emitterChainId), zap.Stringer("emitterAddr", emitter.emitterAddr))
}

for emitter := range acct.nttArEmitters {
acct.logger.Info("will monitor AR emitter for NTT", zap.Stringer("emitterChainId", emitter.emitterChainId), zap.Stringer("emitterAddr", emitter.emitterAddr))
}

// Start the watcher to listen to transfer events from the smart contract.
if acct.env == common.AccountantMock {
// We're not in a runnable context, so we can't use supervisor.
go func() {
_ = acct.nttWorker(ctx)
}()
} else if acct.env != common.GoTest {
if err := supervisor.Run(ctx, "nttacctworker", common.WrapWithScissors(acct.nttWorker, "nttacctworker")); err != nil {
return fmt.Errorf("failed to start NTT submit observation worker: %w", err)
}

if err := supervisor.Run(ctx, "nttacctwatcher", common.WrapWithScissors(acct.nttWatcher, "nttacctwatcher")); err != nil {
return fmt.Errorf("failed to start NTT watcher: %w", err)
}
}

return nil
}

var WH_PREFIX = []byte{0x99, 0x45, 0xFF, 0x10}
var NTT_PREFIX = []byte{0x99, 0x4E, 0x54, 0x54}

const NTT_PREFIX_OFFSET = 136
const NTT_PREFIX_END = NTT_PREFIX_OFFSET + 4

// isNTT determines if the payload bytes are for a Native Token Transfer, according to the following implementation:
// https://github.com/wormhole-foundation/example-native-token-transfers/blob/22bde0c7d8139675582d861dc8245eb1912324fa/evm/test/TransceiverStructs.t.sol#L42
func nttIsPayloadNTT(payload []byte) bool {
if len(payload) < NTT_PREFIX_END {
return false
}

if !bytes.Equal(payload[0:4], WH_PREFIX) {
return false
}

if !bytes.Equal(payload[NTT_PREFIX_OFFSET:NTT_PREFIX_END], NTT_PREFIX) {
return false
}

return true
}

// isMsgDirectNTT determines if a message publication is for a Native Token Transfer directly from an NTT endpoint.
// It also returns if NTT accounting should be enforced for this emitter.
func nttIsMsgDirectNTT(msg *common.MessagePublication, emitters validEmitters) (bool, bool) {
enforceFlag, exists := emitters[emitterKey{emitterChainId: msg.EmitterChain, emitterAddr: msg.EmitterAddress}]
if !exists {
return false, false
}
if !nttIsPayloadNTT(msg.Payload) {
return false, false
}
return true, enforceFlag
}

// nttIsMsgArNTT determines if a message publication is for a Native Token Transfer forwarded from an automatic relayer.
// It first checks if the emitter is a configured relayer. If so, it parses the AR payload to get the sender address and
// checks to see if the emitter chain / sender address are for a Native Token Transfer emitter.
// It also returns if NTT accounting should be enforced for this emitter.
func nttIsMsgArNTT(msg *common.MessagePublication, arEmitters validEmitters, nttEmitters validEmitters) (bool, bool) {
if _, exists := arEmitters[emitterKey{emitterChainId: msg.EmitterChain, emitterAddr: msg.EmitterAddress}]; !exists {
return false, false
}

if success, senderAddress, nttPayload := nttParseArPayload(msg.Payload); success {
// If msg.EmitterChain / ar.Sender is in nttEmitters then this is a Native Token Transfer.
if enforceFlag, exists := nttEmitters[emitterKey{emitterChainId: msg.EmitterChain, emitterAddr: senderAddress}]; exists {
if nttIsPayloadNTT(nttPayload) {
return true, enforceFlag
}
}
}

return false, false
}

const PAYLOAD_ID_DELIVERY_INSTRUCTION = uint8(1)

// nttParseArPayload extracts the sender address from an AR payload. This is based on the following implementation:
// https://github.com/wormhole-foundation/wormhole/blob/main/ethereum/contracts/relayer/wormholeRelayer/WormholeRelayerSerde.sol#L70-L97
func nttParseArPayload(msgPayload []byte) (bool, [32]byte, []byte) {
var senderAddress [32]byte
reader := bytes.NewReader(msgPayload[:])

var deliveryInstruction uint8
if err := binary.Read(reader, binary.BigEndian, &deliveryInstruction); err != nil {
return false, senderAddress, nil
}

if deliveryInstruction != PAYLOAD_ID_DELIVERY_INSTRUCTION {
return false, senderAddress, nil
}

var targetChain uint16
if err := binary.Read(reader, binary.BigEndian, &targetChain); err != nil {
return false, senderAddress, nil
}

var targetAddress [32]byte
if n, err := reader.Read(targetAddress[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

var payloadLen uint32
if err := binary.Read(reader, binary.BigEndian, &payloadLen); err != nil {
return false, senderAddress, nil
}

payload := make([]byte, payloadLen)
if n, err := reader.Read(payload[:]); err != nil || n != int(payloadLen) {
return false, senderAddress, nil
}

var requestedReceiverValue [32]byte
if n, err := reader.Read(requestedReceiverValue[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

var extraReceiverValue [32]byte
if n, err := reader.Read(extraReceiverValue[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

var encodedExecutionInfoLen uint32
if err := binary.Read(reader, binary.BigEndian, &encodedExecutionInfoLen); err != nil {
return false, senderAddress, nil
}

encodedExecutionInfo := make([]byte, encodedExecutionInfoLen)
if n, err := reader.Read(encodedExecutionInfo[:]); err != nil || n != int(encodedExecutionInfoLen) {
return false, senderAddress, nil
}

var refundChain uint16
if err := binary.Read(reader, binary.BigEndian, &refundChain); err != nil {
return false, senderAddress, nil
}

var refundAddress [32]byte
if n, err := reader.Read(refundAddress[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

var refundDeliveryProvider [32]byte
if n, err := reader.Read(refundDeliveryProvider[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

var sourceDeliveryProvider [32]byte
if n, err := reader.Read(sourceDeliveryProvider[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

if n, err := reader.Read(senderAddress[:]); err != nil || n != 32 {
return false, senderAddress, nil
}

return true, senderAddress, payload
}
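To illustrate the prefix checks in nttIsPayloadNTT above: the payload must start with the 4-byte WH prefix and carry the 4-byte NTT prefix at offset 136. A small self-contained sketch (the payload bytes here are fabricated filler, not a real transfer):

package main

import (
	"bytes"
	"fmt"
)

var whPrefix = []byte{0x99, 0x45, 0xFF, 0x10}  // prefix expected at the start of the payload
var nttPrefix = []byte{0x99, 0x4E, 0x54, 0x54} // prefix expected at offset 136

const nttPrefixOffset = 136

func looksLikeNTT(payload []byte) bool {
	if len(payload) < nttPrefixOffset+4 {
		return false
	}
	return bytes.Equal(payload[0:4], whPrefix) &&
		bytes.Equal(payload[nttPrefixOffset:nttPrefixOffset+4], nttPrefix)
}

func main() {
	// Fabricated payload: correct prefixes, zero filler everywhere else.
	p := make([]byte, nttPrefixOffset+8)
	copy(p[0:4], whPrefix)
	copy(p[nttPrefixOffset:], nttPrefix)
	fmt.Println(looksLikeNTT(p)) // true
}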
@@ -0,0 +1,68 @@
package accountant

import (
"fmt"

"github.com/certusone/wormhole/node/pkg/common"
"github.com/wormhole-foundation/wormhole/sdk"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
)

type emitterConfigEntry struct {
chainId vaa.ChainID
addr string
logOnly bool
}

type emitterConfig []emitterConfigEntry

// nttGetEmitters returns the set of direct NTT and AR emitters based on the environment passed in.
func nttGetEmitters(env common.Environment) (validEmitters, validEmitters, error) {
var directEmitterConfig emitterConfig
arEmitterConfig := sdk.KnownAutomaticRelayerEmitters
if env == common.MainNet {
directEmitterConfig = emitterConfig{}
} else if env == common.TestNet {
directEmitterConfig = emitterConfig{}
arEmitterConfig = sdk.KnownTestnetAutomaticRelayerEmitters
} else {
// Every other environment uses the devnet ones.
directEmitterConfig = emitterConfig{
{chainId: vaa.ChainIDEthereum, addr: "000000000000000000000000855FA758c77D68a04990E992aA4dcdeF899F654A"},
{chainId: vaa.ChainIDEthereum, addr: "000000000000000000000000fA2435Eacf10Ca62ae6787ba2fB044f8733Ee843"},
{chainId: vaa.ChainIDBSC, addr: "000000000000000000000000fA2435Eacf10Ca62ae6787ba2fB044f8733Ee843"},
{chainId: vaa.ChainIDBSC, addr: "000000000000000000000000855FA758c77D68a04990E992aA4dcdeF899F654A"},
}
arEmitterConfig = sdk.KnownDevnetAutomaticRelayerEmitters
}

// Build the direct emitter map, setting the payload based on whether or not the config says it should be log only.
directEmitters := make(validEmitters)
for _, emitter := range directEmitterConfig {
addr, err := vaa.StringToAddress(emitter.addr)
if err != nil {
return nil, nil, fmt.Errorf(`failed to parse direct emitter address "%s": %w`, emitter.addr, err)
}
ek := emitterKey{emitterChainId: emitter.chainId, emitterAddr: addr}
if _, exists := directEmitters[ek]; exists {
return nil, nil, fmt.Errorf(`duplicate direct emitter "%s:%s"`, emitter.chainId.String(), emitter.addr)
}
directEmitters[ek] = !emitter.logOnly
}

// Build the automatic relayer emitter map based on the standard config in the SDK.
arEmitters := make(validEmitters)
for _, emitter := range arEmitterConfig {
addr, err := vaa.StringToAddress(emitter.Addr)
if err != nil {
return nil, nil, fmt.Errorf(`failed to parse AR emitter address "%s": %w`, emitter.Addr, err)
}
ek := emitterKey{emitterChainId: emitter.ChainId, emitterAddr: addr}
if _, exists := directEmitters[ek]; exists {
return nil, nil, fmt.Errorf(`duplicate AR emitter "%s:%s"`, emitter.ChainId.String(), emitter.Addr)
}
arEmitters[ek] = true
}

return directEmitters, arEmitters, nil
}
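As a usage illustration of nttGetEmitters above: each configured direct emitter becomes a map entry whose value is the inverse of its logOnly setting, so log-only emitters are still monitored but not enforced. A simplified sketch with stand-in types (the chains and addresses here are placeholders, not real emitters):

package main

import "fmt"

type cfgEntry struct {
	chain   uint16
	addr    string
	logOnly bool
}

func main() {
	cfg := []cfgEntry{
		{chain: 2, addr: "emitterA", logOnly: false}, // enforced
		{chain: 4, addr: "emitterB", logOnly: true},  // monitored in log-only mode
	}

	// The map value is the enforcement flag, i.e. the inverse of logOnly.
	emitters := make(map[string]bool)
	for _, e := range cfg {
		key := fmt.Sprintf("%d:%s", e.chain, e.addr)
		emitters[key] = !e.logOnly
	}

	fmt.Println(emitters["2:emitterA"]) // true  (enforced)
	fmt.Println(emitters["4:emitterB"]) // false (log-only)
}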
@@ -0,0 +1,236 @@
package accountant

import (
"bytes"
"encoding/hex"
"testing"
"time"

"github.com/certusone/wormhole/node/pkg/common"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/wormhole-foundation/wormhole/sdk/vaa"
)

const goodPayload = "9945ff10042942fafabe0000000000000000000000000000000000000000000000000000042942fababe00000000000000000000000000000000000000000000000000000091128434bafe23430000000000000000000000000000000000ce00aa00000000004667921341234300000000000000000000000000000000000000000000000000004f994e545407000000000012d687beefface00000000000000000000000000000000000000000000000000000000feebcafe0000000000000000000000000000000000000000000000000000000000110000"

func TestNttParsePayloadSuccess(t *testing.T) {
payload, err := hex.DecodeString(goodPayload)
require.NoError(t, err)
assert.True(t, nttIsPayloadNTT(payload))
}

func TestNttParsePayloadTooShort(t *testing.T) {
payload, err := hex.DecodeString("9945ff10042942fafabe00000000000000000000000000000000000000000000000000000079000000367999a1014667921341234300000000000000000000000000000000000000000000000000004f994e54")
require.NoError(t, err)
assert.False(t, nttIsPayloadNTT(payload))
}

func TestNttParsePayloadNoWhPrefix(t *testing.T) {
payload, err := hex.DecodeString("9845ff10042942fafabe00000000000000000000000000000000000000000000000000000079000000367999a1014667921341234300000000000000000000000000000000000000000000000000004f994e545407000000000012d687beefface00000000000000000000000000000000000000000000000000000000feebcafe0000000000000000000000000000000000000000000000000000000000110000")
require.NoError(t, err)
assert.False(t, nttIsPayloadNTT(payload))
}

func TestNttParsePayloadNoTransferPrefix(t *testing.T) {
payload, err := hex.DecodeString("9945ff10042942fafabe00000000000000000000000000000000000000000000000000000079000000367999a1014667921341234300000000000000000000000000000000000000000000000000004f994e545307000000000012d687beefface00000000000000000000000000000000000000000000000000000000feebcafe0000000000000000000000000000000000000000000000000000000000110000")
require.NoError(t, err)
assert.False(t, nttIsPayloadNTT(payload))
}

func TestNttParseMsgSuccess(t *testing.T) {
emitterAddr, err := vaa.StringToAddress("000000000000000000000000000000000000000000000000656e64706f696e74")
require.NoError(t, err)

payload, err := hex.DecodeString(goodPayload)
require.NoError(t, err)

emitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDEthereum, emitterAddr: emitterAddr}: true,
}

msg := &common.MessagePublication{
TxHash: hashFromString("0x06f541f5ecfc43407c31587aa6ac3a689e8960f36dc23c332db5510dfc6a4063"),
Timestamp: time.Unix(int64(1654543099), 0),
Nonce: uint32(42),
Sequence: uint64(123456),
EmitterChain: vaa.ChainIDEthereum,
EmitterAddress: emitterAddr,
ConsistencyLevel: uint8(0),
Payload: payload,
}

isNTT, enforceFlag := nttIsMsgDirectNTT(msg, emitters)
assert.True(t, isNTT)
assert.True(t, enforceFlag)
}

func TestNttParseMsgWrongEmitterChain(t *testing.T) {
emitterAddr, err := vaa.StringToAddress("000000000000000000000000000000000000000000000000656e64706f696e74")
require.NoError(t, err)

payload, err := hex.DecodeString(goodPayload)
require.NoError(t, err)

emitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDEthereum, emitterAddr: emitterAddr}: true,
}

msg := &common.MessagePublication{
TxHash: hashFromString("0x06f541f5ecfc43407c31587aa6ac3a689e8960f36dc23c332db5510dfc6a4063"),
Timestamp: time.Unix(int64(1654543099), 0),
Nonce: uint32(42),
Sequence: uint64(123456),
EmitterChain: vaa.ChainIDSolana,
EmitterAddress: emitterAddr,
ConsistencyLevel: uint8(0),
Payload: payload,
}

isNTT, _ := nttIsMsgDirectNTT(msg, emitters)
assert.False(t, isNTT)
}

func TestNttParseMsgWrongEmitterAddress(t *testing.T) {
goodEmitterAddr, err := vaa.StringToAddress("000000000000000000000000000000000000000000000000656e64706f696e74")
require.NoError(t, err)

badEmitterAddr, err := vaa.StringToAddress("000000000000000000000000000000000000000000000000656e64706f696e75")
require.NoError(t, err)

payload, err := hex.DecodeString(goodPayload)
require.NoError(t, err)

emitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDEthereum, emitterAddr: goodEmitterAddr}: true,
}

msg := &common.MessagePublication{
TxHash: hashFromString("0x06f541f5ecfc43407c31587aa6ac3a689e8960f36dc23c332db5510dfc6a4063"),
Timestamp: time.Unix(int64(1654543099), 0),
Nonce: uint32(42),
Sequence: uint64(123456),
EmitterChain: vaa.ChainIDEthereum,
EmitterAddress: badEmitterAddr,
ConsistencyLevel: uint8(0),
Payload: payload,
}

isNTT, _ := nttIsMsgDirectNTT(msg, emitters)
assert.False(t, isNTT)
}

const goodArPayload = "0127150000000000000000000000005a76440b725909000697e0f72646adf1a492df8b000000d99945ff1000000000000000000000000024c7e23e3a97cd2f04c9eb9f354bb7f3b31d2d1a000000000000000000000000605de5e0880cfd6ffc61af9585cbab3946594a3d009100000000000000000000000000000000000000000000000000000000000000040000000000000000000000008f26a0025dccc6cfc07a7d38756280a10e295ad7004f994e5454080000000077359400000000000000000000000000169d91c797edf56100f1b765268145660503a4230000000000000000000000008f26a0025dccc6cfc07a7d38756280a10e295ad7271500000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000493e0000000000000000000000000000000000000000000000000000000000983146f271500000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a0a53847776f7e94cc35742971acb2217b0db810000000000000000000000007a0a53847776f7e94cc35742971acb2217b0db81000000000000000000000000c5bf11ab6ae525ffca02e2af7f6704cdcecec2ea00"
const nttPayloadInAr = "9945ff1000000000000000000000000024c7e23e3a97cd2f04c9eb9f354bb7f3b31d2d1a000000000000000000000000605de5e0880cfd6ffc61af9585cbab3946594a3d009100000000000000000000000000000000000000000000000000000000000000040000000000000000000000008f26a0025dccc6cfc07a7d38756280a10e295ad7004f994e5454080000000077359400000000000000000000000000169d91c797edf56100f1b765268145660503a4230000000000000000000000008f26a0025dccc6cfc07a7d38756280a10e295ad727150000"

func TestNttParseArPayloadSuccess(t *testing.T) {
nttEmitterAddr, err := vaa.StringToAddress("000000000000000000000000c5bf11ab6ae525ffca02e2af7f6704cdcecec2ea")
require.NoError(t, err)

payload, err := hex.DecodeString(goodArPayload)
require.NoError(t, err)

success, senderAddress, payload := nttParseArPayload(payload)
require.True(t, success)
assert.True(t, bytes.Equal(nttEmitterAddr[:], senderAddress[:]))

require.NoError(t, err)
assert.Equal(t, nttPayloadInAr, hex.EncodeToString(payload[:]))
}

func TestNttParseArPayloadWrongDeliveryInstruction(t *testing.T) {
badArPayload := "02271200000000000000000000000079689ce600d3fd3524ec2b4bedcc70131eda67b60000009f9945ff10000000000000000000000000e493cc4f069821404d272b994bb80b1ba1631914007900000000000000070000000000000000000000008f26a0025dccc6cfc07a7d38756280a10e295ad7004f994e54540800000000000003e8000000000000000000000000a88085e6370a551cc046fb6b1e3fb9be23ac3a210000000000000000000000008f26a0025dccc6cfc07a7d38756280a10e295ad7271200000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000600000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a120000000000000000000000000000000000000000000000000000000046f5399e7271200000000000000000000000000000000000000000000000000000000000000000000000000000000000000007a0a53847776f7e94cc35742971acb2217b0db810000000000000000000000007a0a53847776f7e94cc35742971acb2217b0db81000000000000000000000000e493cc4f069821404d272b994bb80b1ba163191400"
payload, err := hex.DecodeString(badArPayload)
require.NoError(t, err)

success, _, _ := nttParseArPayload(payload)
require.False(t, success)
}

func TestNttParseArPayloadTooShort(t *testing.T) {
badArPayload := "01271200000000000000000000000079689ce600d3fd3524ec2b4bedcc70131eda67b60000009f9945ff10000000000000000000000000e4"
payload, err := hex.DecodeString(badArPayload)
require.NoError(t, err)

success, _, _ := nttParseArPayload(payload)
require.False(t, success)
}

func TestNttParseArPayloadReallyTooShort(t *testing.T) {
badArPayload := "01"
payload, err := hex.DecodeString(badArPayload)
require.NoError(t, err)

success, _, _ := nttParseArPayload(payload)
require.False(t, success)
}

func TestNttParseArMsgSuccess(t *testing.T) {
arEmitterAddr, err := vaa.StringToAddress("0000000000000000000000007b1bd7a6b4e61c2a123ac6bc2cbfc614437d0470")
require.NoError(t, err)

arEmitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDArbitrumSepolia, emitterAddr: arEmitterAddr}: true,
}

nttEmitterAddr, err := vaa.StringToAddress("000000000000000000000000c5bf11ab6ae525ffca02e2af7f6704cdcecec2ea")
require.NoError(t, err)

nttEmitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDArbitrumSepolia, emitterAddr: nttEmitterAddr}: true,
}

payload, err := hex.DecodeString(goodArPayload)
require.NoError(t, err)

msg := &common.MessagePublication{
TxHash: hashFromString("0x06f541f5ecfc43407c31587aa6ac3a689e8960f36dc23c332db5510dfc6a4063"),
Timestamp: time.Unix(int64(1708575745), 0),
Nonce: uint32(0),
Sequence: uint64(259),
EmitterChain: vaa.ChainIDArbitrumSepolia,
EmitterAddress: arEmitterAddr,
ConsistencyLevel: uint8(15),
Payload: payload,
}

isNTT, enforceFlag := nttIsMsgArNTT(msg, arEmitters, nttEmitters)
assert.True(t, isNTT)
assert.True(t, enforceFlag)
}

func TestNttParseArMsgUnknownArEmitter(t *testing.T) {
arEmitterAddr, err := vaa.StringToAddress("0000000000000000000000007b1bd7a6b4e61c2a123ac6bc2cbfc614437d0470")
require.NoError(t, err)

arEmitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDArbitrumSepolia, emitterAddr: arEmitterAddr}: true,
}

nttEmitterAddr, err := vaa.StringToAddress("000000000000000000000000c5bf11ab6ae525ffca02e2af7f6704cdcecec2ea")
require.NoError(t, err)

nttEmitters := map[emitterKey]bool{
{emitterChainId: vaa.ChainIDArbitrumSepolia, emitterAddr: nttEmitterAddr}: true,
}

differentEmitterAddr, err := vaa.StringToAddress("0000000000000000000000007b1bd7a6b4e61c2a123ac6bc2cbfc614437d0471") // This is different.
require.NoError(t, err)

payload, err := hex.DecodeString(goodArPayload)
require.NoError(t, err)

msg := &common.MessagePublication{
TxHash: hashFromString("0x06f541f5ecfc43407c31587aa6ac3a689e8960f36dc23c332db5510dfc6a4063"),
Timestamp: time.Unix(int64(1708575745), 0),
Nonce: uint32(0),
Sequence: uint64(259),
EmitterChain: vaa.ChainIDArbitrumSepolia,
EmitterAddress: differentEmitterAddr,
ConsistencyLevel: uint8(15),
Payload: payload,
}

isNTT, _ := nttIsMsgArNTT(msg, arEmitters, nttEmitters)
assert.False(t, isNTT)
}
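The emitterKey type that these tests key their maps on is not part of this diff; judging from the composite literals above it is presumably a small comparable struct along the following lines (an assumed reconstruction, field names taken from the literals):

// Assumed shape of emitterKey, inferred from the map literals in the tests above;
// the real definition lives elsewhere in the package.
type emitterKey struct {
	emitterChainId vaa.ChainID
	emitterAddr    vaa.Address
}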
@ -22,18 +22,39 @@ import (
"go.uber.org/zap"
)

// TODO: Arbitrary values. What makes sense?
const batchSize = 10
const delayInMS = 100 * time.Millisecond

// baseWorker is the entry point for the base accountant worker.
func (acct *Accountant) baseWorker(ctx context.Context) error {
return acct.worker(ctx, false)
}

// nttWorker is the entry point for the NTT accountant worker.
func (acct *Accountant) nttWorker(ctx context.Context) error {
return acct.worker(ctx, true)
}

// worker listens for observation requests from the accountant and submits them to the smart contract.
func (acct *Accountant) worker(ctx context.Context) error {
func (acct *Accountant) worker(ctx context.Context, isNTT bool) error {
subChan := acct.subChan
wormchainConn := acct.wormchainConn
contract := acct.contract
prefix := SubmitObservationPrefix
tag := "accountant"
if isNTT {
subChan = acct.nttSubChan
wormchainConn = acct.nttWormchainConn
contract = acct.nttContract
prefix = NttSubmitObservationPrefix
tag = "ntt-accountant"
}
for {
select {
case <-ctx.Done():
return nil
default:
if err := acct.handleBatch(ctx); err != nil {
if err := acct.handleBatch(ctx, subChan, wormchainConn, contract, prefix, tag); err != nil {
return err
}
}
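Both entry points are plain context-to-error functions, so running the base and NTT accountant workers side by side is just a matter of starting one goroutine per configured instance. A minimal sketch of that pattern using errgroup (illustrative only; the guardian's own runnable supervision is not shown in this diff, and the two function arguments stand in for acct.baseWorker and acct.nttWorker):

import (
	"context"

	"golang.org/x/sync/errgroup"
)

// runAccountantWorkers starts the base worker and, when an NTT accountant is
// configured, the NTT worker, then waits for the first error or for ctx to end.
func runAccountantWorkers(ctx context.Context, baseWorker, nttWorker func(context.Context) error) error {
	eg, ctx := errgroup.WithContext(ctx)
	eg.Go(func() error { return baseWorker(ctx) })
	if nttWorker != nil {
		eg.Go(func() error { return nttWorker(ctx) })
	}
	return eg.Wait()
}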
@ -42,13 +63,13 @@ func (acct *Accountant) worker(ctx context.Context) error {

// handleBatch reads a batch of events from the channel, either until a timeout occurs or the batch is full,
// and submits them to the smart contract.
func (acct *Accountant) handleBatch(ctx context.Context) error {
func (acct *Accountant) handleBatch(ctx context.Context, subChan chan *common.MessagePublication, wormchainConn AccountantWormchainConn, contract string, prefix []byte, tag string) error {
ctx, cancel := context.WithTimeout(ctx, delayInMS)
defer cancel()

msgs, err := readFromChannel[*common.MessagePublication](ctx, acct.subChan, batchSize)
msgs, err := readFromChannel[*common.MessagePublication](ctx, subChan, batchSize)
if err != nil && !errors.Is(err, context.DeadlineExceeded) {
return fmt.Errorf("failed to read messages from `acct.subChan`: %w", err)
return fmt.Errorf("failed to read messages from channel for %s: %w", tag, err)
}

if len(msgs) != 0 {
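readFromChannel itself is outside this hunk; based on how it is called here (a context carrying the 100ms batch window, the per-instance channel, and batchSize), an equivalent helper would look roughly like the following assumed reconstruction, where context.DeadlineExceeded simply means the batch window closed:

// readFromChannel drains up to maxCount items from ch, returning early with
// ctx.Err() when the caller's deadline expires or the context is cancelled.
func readFromChannel[T any](ctx context.Context, ch <-chan T, maxCount int) ([]T, error) {
	out := make([]T, 0, maxCount)
	for len(out) < maxCount {
		select {
		case <-ctx.Done():
			return out, ctx.Err()
		case item := <-ch:
			out = append(out, item)
		}
	}
	return out, nil
}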
@ -61,15 +82,15 @@ func (acct *Accountant) handleBatch(ctx context.Context) error {

gs := acct.gst.Get()
if gs == nil {
return fmt.Errorf("failed to get guardian set")
return fmt.Errorf("failed to get guardian set for %s", tag)
}

guardianIndex, found := gs.KeyIndex(acct.guardianAddr)
if !found {
return fmt.Errorf("failed to get guardian index")
return fmt.Errorf("failed to get guardian index for %s", tag)
}

acct.submitObservationsToContract(msgs, gs.Index, uint32(guardianIndex))
acct.submitObservationsToContract(msgs, gs.Index, uint32(guardianIndex), wormchainConn, contract, prefix, tag)
transfersSubmitted.Add(float64(len(msgs)))
return nil
}

@ -174,7 +195,8 @@ type (
}
)

var submitObservationPrefix = []byte("acct_sub_obsfig_000000000000000000|")
var SubmitObservationPrefix = []byte("acct_sub_obsfig_000000000000000000|")
var NttSubmitObservationPrefix = []byte("ntt_acct_sub_obsfig_00000000000000|")

func (k TransferKey) String() string {
return fmt.Sprintf("%v/%v/%v", k.EmitterChain, hex.EncodeToString(k.EmitterAddress[:]), k.Sequence)
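Giving the NTT accountant its own signing prefix ("ntt_acct_sub_obsfig_00000000000000|" versus "acct_sub_obsfig_000000000000000000|") provides domain separation: the digest the guardian key signs is computed over the prefix together with the observation bytes, so a signature produced for one accountant contract cannot be replayed against the other. A standalone illustration of that property (not the SDK's MessageSigningDigest implementation; uses golang.org/x/crypto/sha3):

// domainDigest hashes prefix||observation; the same observation hashed under the
// base prefix and the NTT prefix yields unrelated digests, so signatures cannot
// be replayed across the two contracts.
func domainDigest(prefix, observation []byte) []byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(prefix)
	h.Write(observation)
	return h.Sum(nil)
}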
@ -192,13 +214,13 @@ func (sb SignatureBytes) MarshalJSON() ([]byte, error) {

// submitObservationsToContract makes a call to the smart contract to submit a batch of observation requests.
// It should be called from a go routine because it can block.
func (acct *Accountant) submitObservationsToContract(msgs []*common.MessagePublication, gsIndex uint32, guardianIndex uint32) {
txResp, err := SubmitObservationsToContract(acct.ctx, acct.logger, acct.gk, gsIndex, guardianIndex, acct.wormchainConn, acct.contract, msgs)
func (acct *Accountant) submitObservationsToContract(msgs []*common.MessagePublication, gsIndex uint32, guardianIndex uint32, wormchainConn AccountantWormchainConn, contract string, prefix []byte, tag string) {
txResp, err := SubmitObservationsToContract(acct.ctx, acct.logger, acct.gk, gsIndex, guardianIndex, wormchainConn, contract, prefix, msgs)
if err != nil {
// This means the whole batch failed. They will all get retried the next audit cycle.
acct.logger.Error("failed to submit any observations in batch", zap.Int("numMsgs", len(msgs)), zap.Error(err))
acct.logger.Error(fmt.Sprintf("failed to submit any observations in batch to %s", tag), zap.Int("numMsgs", len(msgs)), zap.Error(err))
for idx, msg := range msgs {
acct.logger.Error("failed to submit observation", zap.Int("idx", idx), zap.String("msgId", msg.MessageIDString()))
acct.logger.Error(fmt.Sprintf("failed to submit observation to %s", tag), zap.Int("idx", idx), zap.String("msgId", msg.MessageIDString()))
}

submitFailures.Add(float64(len(msgs)))

@ -209,9 +231,9 @@ func (acct *Accountant) submitObservationsToContract(msgs []*common.MessagePubli
responses, err := GetObservationResponses(txResp)
if err != nil {
// This means the whole batch failed. They will all get retried the next audit cycle.
acct.logger.Error("failed to get responses from batch", zap.Error(err), zap.String("txResp", acct.wormchainConn.BroadcastTxResponseToString(txResp)))
acct.logger.Error(fmt.Sprintf("failed to get responses from batch from %s", tag), zap.Error(err), zap.String("txResp", wormchainConn.BroadcastTxResponseToString(txResp)))
for idx, msg := range msgs {
acct.logger.Error("need to retry observation", zap.Int("idx", idx), zap.String("msgId", msg.MessageIDString()))
acct.logger.Error(fmt.Sprintf("need to retry observation to %s", tag), zap.Int("idx", idx), zap.String("msgId", msg.MessageIDString()))
}

submitFailures.Add(float64(len(msgs)))

@ -221,9 +243,9 @@ func (acct *Accountant) submitObservationsToContract(msgs []*common.MessagePubli

if len(responses) != len(msgs) {
// This means the whole batch failed. They will all get retried the next audit cycle.
acct.logger.Error("number of responses does not match number of messages", zap.Int("numMsgs", len(msgs)), zap.Int("numResp", len(responses)), zap.Error(err))
acct.logger.Error(fmt.Sprintf("number of responses from %s does not match number of messages", tag), zap.Int("numMsgs", len(msgs)), zap.Int("numResp", len(responses)), zap.Error(err))
for idx, msg := range msgs {
acct.logger.Error("need to retry observation", zap.Int("idx", idx), zap.String("msgId", msg.MessageIDString()))
acct.logger.Error(fmt.Sprintf("need to retry observation to %s", tag), zap.Int("idx", idx), zap.String("msgId", msg.MessageIDString()))
}

submitFailures.Add(float64(len(msgs)))

@ -237,14 +259,14 @@ func (acct *Accountant) submitObservationsToContract(msgs []*common.MessagePubli
status, exists := responses[msgId]
if !exists {
// This will get retried next audit interval.
acct.logger.Error("did not receive an observation response for message", zap.String("msgId", msgId))
acct.logger.Error(fmt.Sprintf("did not receive an observation response from %s for message", tag), zap.String("msgId", msgId))
submitFailures.Inc()
continue
}

switch status.Type {
case "pending":
acct.logger.Info("transfer is pending", zap.String("msgId", msgId))
acct.logger.Info(fmt.Sprintf("transfer is pending on %s", tag), zap.String("msgId", msgId))
case "committed":
acct.handleCommittedTransfer(msgId)
case "error":

@ -252,7 +274,7 @@ func (acct *Accountant) submitObservationsToContract(msgs []*common.MessagePubli
acct.handleTransferError(msgId, status.Data, "transfer failed")
default:
// This will get retried next audit interval.
acct.logger.Error("unexpected status response on observation", zap.String("msgId", msgId), zap.String("status", status.Type), zap.String("text", status.Data))
acct.logger.Error(fmt.Sprintf("unexpected status response from %s on observation", tag), zap.String("msgId", msgId), zap.String("status", status.Type), zap.String("text", status.Data))
submitFailures.Inc()
}
}

@ -297,6 +319,7 @@ func SubmitObservationsToContract(
guardianIndex uint32,
wormchainConn AccountantWormchainConn,
contract string,
prefix []byte,
msgs []*common.MessagePublication,
) (*sdktx.BroadcastTxResponse, error) {
obs := make([]Observation, len(msgs))

@ -313,6 +336,7 @@ func SubmitObservationsToContract(
}

logger.Debug("in SubmitObservationsToContract, encoding observation",
zap.String("contract", contract),
zap.Int("idx", idx),
zap.String("txHash", msg.TxHash.String()), zap.String("encTxHash", hex.EncodeToString(obs[idx].TxHash[:])),
zap.Stringer("timeStamp", msg.Timestamp), zap.Uint32("encTimestamp", obs[idx].Timestamp),

@ -330,7 +354,7 @@ func SubmitObservationsToContract(
return nil, fmt.Errorf("failed to marshal accountant observation request: %w", err)
}

digest, err := vaa.MessageSigningDigest(submitObservationPrefix, bytes)
digest, err := vaa.MessageSigningDigest(prefix, bytes)
if err != nil {
return nil, fmt.Errorf("failed to sign accountant Observation request: %w", err)
}

@ -363,6 +387,8 @@ func SubmitObservationsToContract(
}

logger.Debug("in SubmitObservationsToContract, sending broadcast",
zap.String("contract", contract),
zap.String("sender", wormchainConn.SenderAddress()),
zap.Int("numObs", len(obs)),
zap.String("observations", string(bytes)),
zap.Uint32("gsIndex", gsIndex), zap.Uint32("guardianIndex", guardianIndex),

@ -394,8 +420,17 @@ func SubmitObservationsToContract(
return txResp, fmt.Errorf("failed to submit observations: %s", txResp.TxResponse.RawLog)
}

logger.Info("done sending broadcast", zap.Int("numObs", len(obs)), zap.Int64("gasUsed", txResp.TxResponse.GasUsed), zap.Stringer("elapsedTime", time.Since(start)))
logger.Debug("in SubmitObservationsToContract, done sending broadcast", zap.String("resp", wormchainConn.BroadcastTxResponseToString(txResp)))
logger.Info("done sending broadcast",
zap.String("contract", contract),
zap.Int("numObs", len(obs)),
zap.Int64("gasUsed", txResp.TxResponse.GasUsed),
zap.Stringer("elapsedTime", time.Since(start)),
)

logger.Debug("in SubmitObservationsToContract, done sending broadcast",
zap.String("contract", contract),
zap.String("resp", wormchainConn.BroadcastTxResponseToString(txResp)),
)
return txResp, nil
}
@ -21,29 +21,45 @@ import (
"go.uber.org/zap"
)

// watcher reads transaction events from the smart contract and publishes them.
func (acct *Accountant) watcher(ctx context.Context) error {
// baseWatcher is the entry point for the base accountant watcher.
func (acct *Accountant) baseWatcher(ctx context.Context) error {
return acct.watcher(ctx, false)
}

// nttWatcher is the entry point for the NTT accountant watcher.
func (acct *Accountant) nttWatcher(ctx context.Context) error {
return acct.watcher(ctx, true)
}

// watcher reads transaction events from an accountant smart contract and publishes them.
func (acct *Accountant) watcher(ctx context.Context, isNTT bool) error {
tag := "accountant"
contract := acct.contract
if isNTT {
tag = "ntt-accountant"
contract = acct.nttContract
}
errC := make(chan error)

acct.logger.Info("acctwatch: creating watcher", zap.String("url", acct.wsUrl), zap.String("contract", acct.contract))
acct.logger.Info(fmt.Sprintf("acctwatch: creating %s watcher", tag), zap.String("url", acct.wsUrl), zap.String("contract", contract))
tmConn, err := tmHttp.New(acct.wsUrl, "/websocket")
if err != nil {
connectionErrors.Inc()
return fmt.Errorf("failed to establish tendermint connection: %w", err)
return fmt.Errorf("failed to establish %s tendermint connection: %w", tag, err)
}

if err := tmConn.Start(); err != nil {
connectionErrors.Inc()
return fmt.Errorf("failed to start tendermint connection: %w", err)
return fmt.Errorf("failed to start %s tendermint connection: %w", tag, err)
}
defer func() {
if err := tmConn.Stop(); err != nil {
connectionErrors.Inc()
acct.logger.Error("acctwatch: failed to stop tendermint connection", zap.Error(err))
acct.logger.Error(fmt.Sprintf("acctwatch: failed to stop %s tendermint connection", tag), zap.Error(err))
}
}()

query := fmt.Sprintf("execute._contract_address='%s'", acct.contract)
query := fmt.Sprintf("execute._contract_address='%s'", contract)
events, err := tmConn.Subscribe(
ctx,
"guardiand",

@ -51,15 +67,15 @@ func (acct *Accountant) watcher(ctx context.Context) error {
64, // channel capacity
)
if err != nil {
return fmt.Errorf("failed to subscribe to accountant events: %w", err)
return fmt.Errorf("failed to subscribe to %s events: %w", tag, err)
}
defer func() {
if err := tmConn.UnsubscribeAll(ctx, "guardiand"); err != nil {
acct.logger.Error("acctwatch: failed to unsubscribe from events", zap.Error(err))
acct.logger.Error(fmt.Sprintf("acctwatch: failed to unsubscribe from %s events", tag), zap.Error(err))
}
}()

go acct.handleEvents(ctx, events, errC)
go acct.handleEvents(ctx, events, errC, contract, tag)

select {
case <-ctx.Done():

@ -70,7 +86,7 @@ func (acct *Accountant) watcher(ctx context.Context) error {
}

// handleEvents handles events from the tendermint client library.
func (acct *Accountant) handleEvents(ctx context.Context, evts <-chan tmCoreTypes.ResultEvent, errC chan error) {
func (acct *Accountant) handleEvents(ctx context.Context, evts <-chan tmCoreTypes.ResultEvent, errC chan error, contract string, tag string) {
defer close(errC)

for {

@ -80,31 +96,31 @@ func (acct *Accountant) handleEvents(ctx context.Context, evts <-chan tmCoreType
case e := <-evts:
tx, ok := e.Data.(tmTypes.EventDataTx)
if !ok {
acct.logger.Error("unknown data from event subscription", zap.Stringer("e.Data", reflect.TypeOf(e.Data)), zap.Any("event", e))
acct.logger.Error(fmt.Sprintf("unknown data from %s event subscription", tag), zap.Stringer("e.Data", reflect.TypeOf(e.Data)), zap.Any("event", e))
continue
}

for _, event := range tx.Result.Events {
if event.Type == "wasm-Observation" {
evt, err := parseEvent[WasmObservation](acct.logger, event, "wasm-Observation", acct.contract)
evt, err := parseEvent[WasmObservation](acct.logger, event, "wasm-Observation", contract)
if err != nil {
acct.logger.Error("failed to parse wasm transfer event", zap.Error(err), zap.Stringer("e.Data", reflect.TypeOf(e.Data)), zap.Any("event", event))
acct.logger.Error(fmt.Sprintf("failed to parse wasm transfer event from %s", tag), zap.Error(err), zap.Stringer("e.Data", reflect.TypeOf(e.Data)), zap.Any("event", event))
continue
}

eventsReceived.Inc()
acct.processPendingTransfer(evt)
acct.processPendingTransfer(evt, tag)
} else if event.Type == "wasm-ObservationError" {
evt, err := parseEvent[WasmObservationError](acct.logger, event, "wasm-ObservationError", acct.contract)
evt, err := parseEvent[WasmObservationError](acct.logger, event, "wasm-ObservationError", contract)
if err != nil {
acct.logger.Error("failed to parse wasm observation error event", zap.Error(err), zap.Stringer("e.Data", reflect.TypeOf(e.Data)), zap.Any("event", event))
acct.logger.Error(fmt.Sprintf("failed to parse wasm observation error event from %s", tag), zap.Error(err), zap.Stringer("e.Data", reflect.TypeOf(e.Data)), zap.Any("event", event))
continue
}

errorEventsReceived.Inc()
acct.handleTransferError(evt.Key.String(), evt.Error, "transfer error event received")
acct.handleTransferError(evt.Key.String(), evt.Error, fmt.Sprintf("transfer error event received from %s", tag))
} else {
acct.logger.Debug("ignoring uninteresting event", zap.String("eventType", event.Type))
acct.logger.Debug(fmt.Sprintf("ignoring uninteresting event from %s", tag), zap.String("eventType", event.Type))
}
}
}

@ -149,11 +165,11 @@ func parseEvent[T any](logger *zap.Logger, event tmAbci.Event, name string, cont
}

// processPendingTransfer takes a WasmObservation event, determines if we are expecting it, and if so, publishes it.
func (acct *Accountant) processPendingTransfer(xfer *WasmObservation) {
func (acct *Accountant) processPendingTransfer(xfer *WasmObservation, tag string) {
acct.pendingTransfersLock.Lock()
defer acct.pendingTransfersLock.Unlock()

acct.logger.Info("acctwatch: transfer event detected",
acct.logger.Info(fmt.Sprintf("acctwatch: transfer event detected from %s", tag),
zap.String("tx_hash", hex.EncodeToString(xfer.TxHash)),
zap.Uint32("timestamp", xfer.Timestamp),
zap.Uint32("nonce", xfer.Nonce),
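The WasmObservation and WasmObservationError types are defined outside this hunk; from the fields the watcher touches above (TxHash, Timestamp, Nonce, Key, Error), their shapes are presumably close to the following assumed reconstruction, given only for orientation:

// Assumed field shapes, inferred from the accesses in the watcher code above;
// the real definitions (and their JSON tags) live elsewhere in the package.
type WasmObservation struct {
	TxHash    []byte
	Timestamp uint32
	Nonce     uint32
	// ...plus emitter, sequence and payload fields not referenced in this hunk.
}

type WasmObservationError struct {
	Key   TransferKey
	Error string
}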
@ -906,7 +906,14 @@ func TestGuardianConfigs(t *testing.T) {
{
name: "unfulfilled-dependency",
opts: []*GuardianOption{
GuardianOptionAccountant("", "", false, nil),
GuardianOptionAccountant(
"", // websocket
"", // contract
false, // enforcing
nil, // wormchainConn
"", // nttContract
nil, // nttWormchainConn
),
},
err: "Check the order of your options.",
},
@ -115,14 +115,21 @@ func GuardianOptionNoAccountant() *GuardianOption {
return &GuardianOption{
name: "accountant",
f: func(ctx context.Context, logger *zap.Logger, g *G) error {
logger.Info("acct: accountant is disabled", zap.String("component", "gacct"))
logger.Info("accountant is disabled", zap.String("component", "gacct"))
return nil
}}
}

// GuardianOptionAccountant configures the Accountant module.
// Dependencies: db
func GuardianOptionAccountant(contract string, websocket string, enforcing bool, wormchainConn *wormconn.ClientConn) *GuardianOption {
func GuardianOptionAccountant(
websocket string,
contract string,
enforcing bool,
wormchainConn *wormconn.ClientConn,
nttContract string,
nttWormchainConn *wormconn.ClientConn,
) *GuardianOption {
return &GuardianOption{
name: "accountant",
dependencies: []string{"db"},

@ -131,21 +138,29 @@ func GuardianOptionAccountant(contract string, websocket string, enforcing bool,
// will be passed to it for processing. It will forward all token bridge transfers to the accountant contract.
// If accountantCheckEnabled is set to true, token bridge transfers will not be signed and published until they
// are approved by the accountant smart contract.
if contract == "" {
logger.Info("acct: accountant is disabled", zap.String("component", "gacct"))
if contract == "" && nttContract == "" {
logger.Info("accountant is disabled", zap.String("component", "gacct"))
return nil
}

if websocket == "" {
return errors.New("acct: if accountantContract is specified, accountantWS is required")
return errors.New("if accountantContract is specified, accountantWS is required")
}
if wormchainConn == nil {
return errors.New("acct: if accountantContract is specified, the wormchain sending connection must be enabled before.")
if contract != "" {
if wormchainConn == nil {
return errors.New("if accountantContract is specified, the wormchain sending connection must be enabled before")
}
if enforcing {
logger.Info("accountant is enabled and will be enforced", zap.String("component", "gacct"))
} else {
logger.Info("accountant is enabled but will not be enforced", zap.String("component", "gacct"))
}
}
if enforcing {
logger.Info("acct: accountant is enabled and will be enforced", zap.String("component", "gacct"))
} else {
logger.Info("acct: accountant is enabled but will not be enforced", zap.String("component", "gacct"))
if nttContract != "" {
if nttWormchainConn == nil {
return errors.New("if accountantNttContract is specified, the NTT wormchain sending connection must be enabled")
}
logger.Info("NTT accountant is enabled", zap.String("component", "gacct"))
}

g.acct = accountant.NewAccountant(

@ -157,6 +172,8 @@ func GuardianOptionAccountant(contract string, websocket string, enforcing bool,
websocket,
wormchainConn,
enforcing,
nttContract,
nttWormchainConn,
g.gk,
g.gst,
g.acctC.writeC,
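For reference, a call against the new signature mirrors the parameter order exercised by the unfulfilled-dependency test case above: websocket first, then the base contract/connection pair, then the NTT pair. The values below are hypothetical placeholders, not addresses from this change:

opt := GuardianOptionAccountant(
	"ws://example-wormchain:26657", // accountantWS
	baseContract,                   // accountantContract ("" disables the base accountant)
	true,                           // accountantCheckEnabled (enforcing)
	baseWormchainConn,              // wormchain connection for the base accountant
	nttContract,                    // accountantNttContract ("" disables the NTT accountant)
	nttWormchainConn,               // wormchain connection for the NTT accountant
)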
@ -305,6 +305,16 @@
"name": "10",
"public": "0x610bb1573d1046fcb8a70bbbd395754cd57c2b60",
"private": "0x77c5495fbb039eed474fc940f29955ed0531693cc9212911efd35dff0373153f"
},
{
"name": "11",
"public": "0x855FA758c77D68a04990E992aA4dcdeF899F654A",
"private": "0xd99b5b29e6da2528bf458b26237a6cf8655a3e3276c1cdc0de1f98cefee81c01"
},
{
"name": "12",
"public": "0xfA2435Eacf10Ca62ae6787ba2fB044f8733Ee843",
"private": "0x9b9c613a36396172eab2d34d72331c8ca83a358781883a535d2941f66db07b24"
}
],
"devnetGuardians": [
@ -26,3 +26,18 @@ var knownDevnetNFTBridgeEmitters = map[vaa.ChainID]string{
vaa.ChainIDEthereum: "00000000000000000000000026b4afb60d6c903165150c6f0aa14f8016be4aec",
vaa.ChainIDBSC: "00000000000000000000000026b4afb60d6c903165150c6f0aa14f8016be4aec",
}

// KnownDevnetAutomaticRelayerEmitters is a list of well-known devnet emitters for the Automatic Relayers.
// It is based on this: https://github.com/wormhole-foundation/wormhole/blob/2c9703670eadc48a7dc8967e81ed2823affcc679/sdk/js/src/relayer/consts.ts#L82
// Note that the format of this is different from the other maps because we don't want to limit it to one per chain.
var KnownDevnetAutomaticRelayerEmitters = []struct {
ChainId vaa.ChainID
Addr string
}{
{ChainId: vaa.ChainIDEthereum, Addr: "000000000000000000000000E66C1Bc1b369EF4F376b84373E3Aa004E8F4C083"},
{ChainId: vaa.ChainIDBSC, Addr: "000000000000000000000000E66C1Bc1b369EF4F376b84373E3Aa004E8F4C083"},

// NTT end to end testing uses special emitters in local dev and CI.
{ChainId: vaa.ChainIDEthereum, Addr: "00000000000000000000000053855d4b64e9a3cf59a84bc768ada716b5536bc5"},
{ChainId: vaa.ChainIDBSC, Addr: "00000000000000000000000053855d4b64e9a3cf59a84bc768ada716b5536bc5"},
}
@ -21,7 +21,7 @@ import {
SolanaAccountQueryResponse,
} from "..";

jest.setTimeout(60000);
jest.setTimeout(120000);

const SOLANA_NODE_URL = "http://localhost:8899";
const POLYGON_NODE_URL = "https://polygon-mumbai-bor.publicnode.com";

@ -27,7 +27,7 @@ import { PopulateData, TmplSig } from "../TmplSig";
const CORE_ID = BigInt(1004);
const TOKEN_BRIDGE_ID = BigInt(1006);

jest.setTimeout(60000);
jest.setTimeout(120000);

describe("Unit Tests", () => {
describe("Algorand unit tests", () => {

@ -55,7 +55,7 @@ import {
getSignedVaaSolana,
} from "./utils/getSignedVaa";

jest.setTimeout(60000);
jest.setTimeout(120000);

const APTOS_NFT_BRIDGE_ADDRESS = CONTRACTS.DEVNET.aptos.nft_bridge;
const ETH_NFT_BRIDGE_ADDRESS = CONTRACTS.DEVNET.ethereum.nft_bridge;

@ -34,7 +34,7 @@ import {
} from "./utils/consts";
import { getSignedVaaEthereum, getSignedVaaSolana } from "./utils/getSignedVaa";

jest.setTimeout(60000);
jest.setTimeout(120000);

// ethereum setup
const web3 = new Web3(ETH_NODE_URL);

@ -61,7 +61,7 @@ import {
const CORE_ID = BigInt(1004);
const TOKEN_BRIDGE_ID = BigInt(1006);

jest.setTimeout(60000);
jest.setTimeout(120000);

describe("Algorand tests", () => {
test("Algorand transfer native ALGO to Eth and back again", (done) => {

@ -43,7 +43,7 @@ import {
WORMHOLE_RPC_HOSTS,
} from "./utils/consts";

jest.setTimeout(60000);
jest.setTimeout(120000);

async function transferFromEthToSolana(): Promise<string> {
// create a keypair for Solana

@ -36,7 +36,7 @@ import {
} from "near-api-js/lib/providers";
import { parseNearAmount } from "near-api-js/lib/utils/format";

jest.setTimeout(60000);
jest.setTimeout(120000);

let near: Near;
let nearProvider: Provider;

@ -43,7 +43,7 @@ import {
WORMHOLE_RPC_HOSTS,
} from "./utils/consts";

jest.setTimeout(60000);
jest.setTimeout(120000);

describe("Solana to Ethereum", () => {
test("Attest Solana SPL to Ethereum", (done) => {

@ -78,7 +78,7 @@ import {
mintAndTransferCoinSui,
} from "./utils/helpers";

jest.setTimeout(60000);
jest.setTimeout(120000);

// Sui constants
const SUI_CORE_BRIDGE_STATE_OBJECT_ID = CONTRACTS.DEVNET.sui.core;

@ -53,7 +53,7 @@ import {
waitForTerraExecution,
} from "./utils/helpers";

jest.setTimeout(60000);
jest.setTimeout(120000);

function sleep(ms: number) {
return new Promise((resolve) => setTimeout(resolve, ms));

@ -24,6 +24,10 @@ export const ETH_PRIVATE_KEY9 =
"0xb0057716d5917badaf911b193b12b910811c1497b5bada8d7711f758981c3773"; // account 9 - accountant tests
export const ETH_PRIVATE_KEY10 =
"0x77c5495fbb039eed474fc940f29955ed0531693cc9212911efd35dff0373153f"; // account 10 - sui tests
export const ETH_PRIVATE_KEY11 =
"0xd99b5b29e6da2528bf458b26237a6cf8655a3e3276c1cdc0de1f98cefee81c01"; // account 11 - ntt-accountant tests
export const ETH_PRIVATE_KEY12 =
"0x9b9c613a36396172eab2d34d72331c8ca83a358781883a535d2941f66db07b24"; // account 12 - ntt-accountant tests
export const SOLANA_HOST = ci
? "http://solana-devnet:8899"
: "http://localhost:8899";
@ -160,3 +160,25 @@ func GetEmitterAddressForChain(chainID vaa.ChainID, emitterType EmitterType) (va

return vaa.Address{}, fmt.Errorf("lookup failed")
}

// KnownAutomaticRelayerEmitters is a list of well-known mainnet emitters for the Automatic Relayers.
// It is based on this: https://github.com/wormhole-foundation/wormhole/blob/2c9703670eadc48a7dc8967e81ed2823affcc679/sdk/js/src/relayer/consts.ts#L95
// Note that the format of this is different from the other maps because we don't want to limit it to one per chain.
var KnownAutomaticRelayerEmitters = []struct {
ChainId vaa.ChainID
Addr string
}{
{ChainId: vaa.ChainIDEthereum, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDBSC, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDPolygon, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDAvalanche, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDFantom, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDKlaytn, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDCelo, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDAcala, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDKarura, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDMoonbeam, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDArbitrum, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDOptimism, Addr: "00000000000000000000000027428DD2d3DD32A4D7f7C497eAaa23130d894911"},
{ChainId: vaa.ChainIDBase, Addr: "000000000000000000000000706f82e9bb5b0813501714ab5974216704980e31"},
}
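Because this is a slice rather than a per-chain map, a caller that wants to know whether a given (chain, emitter) pair belongs to a known Automatic Relayer has to scan it. A hypothetical helper (not part of this change) could look like this; addresses are compared case-insensitively since the entries mix upper- and lower-case hex:

// isKnownAutomaticRelayerEmitter reports whether the chain/address pair appears
// in the mainnet list above. Requires the standard library "strings" package.
func isKnownAutomaticRelayerEmitter(chainID vaa.ChainID, addr string) bool {
	for _, e := range KnownAutomaticRelayerEmitters {
		if e.ChainId == chainID && strings.EqualFold(e.Addr, addr) {
			return true
		}
	}
	return false
}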
@ -70,3 +70,25 @@ var knownTestnetNFTBridgeEmitters = map[vaa.ChainID]string{
vaa.ChainIDBaseSepolia: "000000000000000000000000268557122Ffd64c85750d630b716471118F323c8",
vaa.ChainIDOptimismSepolia: "00000000000000000000000027812285fbe85BA1DF242929B906B31EE3dd1b9f",
}

// KnownTestnetAutomaticRelayerEmitters is a list of well-known testnet emitters for the Automatic Relayers.
// It is based on this: https://github.com/wormhole-foundation/wormhole/blob/2c9703670eadc48a7dc8967e81ed2823affcc679/sdk/js/src/relayer/consts.ts#L14
// Note that the format of this is different from the other maps because we don't want to limit it to one per chain.
var KnownTestnetAutomaticRelayerEmitters = []struct {
ChainId vaa.ChainID
Addr string
}{
{ChainId: vaa.ChainIDEthereum, Addr: "00000000000000000000000028D8F1Be96f97C1387e94A53e00eCcFb4E75175a"},
{ChainId: vaa.ChainIDBSC, Addr: "00000000000000000000000080aC94316391752A193C1c47E27D382b507c93F3"},
{ChainId: vaa.ChainIDPolygon, Addr: "0000000000000000000000000591C25ebd0580E0d4F27A82Fc2e24E7489CB5e0"},
{ChainId: vaa.ChainIDAvalanche, Addr: "000000000000000000000000A3cF45939bD6260bcFe3D66bc73d60f19e49a8BB"},
{ChainId: vaa.ChainIDCelo, Addr: "000000000000000000000000306B68267Deb7c5DfCDa3619E22E9Ca39C374f84"},
{ChainId: vaa.ChainIDMoonbeam, Addr: "0000000000000000000000000591C25ebd0580E0d4F27A82Fc2e24E7489CB5e0"},
{ChainId: vaa.ChainIDArbitrum, Addr: "000000000000000000000000Ad753479354283eEE1b86c9470c84D42f229FF43"},
{ChainId: vaa.ChainIDOptimism, Addr: "00000000000000000000000001A957A525a5b7A72808bA9D10c389674E459891"},
{ChainId: vaa.ChainIDBase, Addr: "000000000000000000000000ea8029CD7FCAEFFcD1F53686430Db0Fc8ed384E1"},
{ChainId: vaa.ChainIDSepolia, Addr: "0000000000000000000000007B1bD7a6b4E61c2a123AC6BC2cbfC614437D0470"},
{ChainId: vaa.ChainIDArbitrumSepolia, Addr: "0000000000000000000000007B1bD7a6b4E61c2a123AC6BC2cbfC614437D0470"},
{ChainId: vaa.ChainIDOptimismSepolia, Addr: "00000000000000000000000093BAD53DDfB6132b0aC8E37f6029163E63372cEE"},
{ChainId: vaa.ChainIDBaseSepolia, Addr: "00000000000000000000000093BAD53DDfB6132b0aC8E37f6029163E63372cEE"},
}
@ -6,7 +6,7 @@ import { createSpyRPCServiceClient, subscribeSignedVAA } from "..";

setDefaultWasm("node");

jest.setTimeout(60000);
jest.setTimeout(120000);
const ci = !!process.env.CI;
export const SOLANA_HOST = ci
? "http://solana-devnet:8899"

@ -30,7 +30,7 @@ import * as devnetConsts from "../devnet-consts.json";
import { parseUnits } from "ethers/lib/utils";
import { CosmWasmClient } from "@cosmjs/cosmwasm-stargate";

jest.setTimeout(60000);
jest.setTimeout(120000);

if (process.env.INIT_SIGNERS_KEYS_CSV === "undefined") {
let msg = `.env is missing. run "make contracts-tools-deps" to fetch.`;

@ -511,13 +511,23 @@ describe("Global Accountant Tests", () => {
throw new Error("Expected metrics change did not occur");
}
// the transfer should fail, because there's an insufficient source balance
await expect(
fetchGlobalAccountantTransferStatus(
if (VAA_SIGNERS.length > 1) {
const transferStatus = await fetchGlobalAccountantTransferStatus(
CHAIN_ID_BSC,
getEmitterAddressEth(CONTRACTS.DEVNET.bsc.core),
getEmitterAddressEth(CONTRACTS.DEVNET.bsc.token_bridge),
sequence
)
).rejects.toThrow();
);
expect(Object.keys(transferStatus)).toContain("pending");
expect(Object.keys(transferStatus)).not.toContain("committed");
} else {
await expect(
fetchGlobalAccountantTransferStatus(
CHAIN_ID_BSC,
getEmitterAddressEth(CONTRACTS.DEVNET.bsc.token_bridge),
sequence
)
).rejects.toThrow();
}
}
console.log("success!");
});
File diff suppressed because it is too large
@ -346,26 +346,50 @@ async function main() {
addresses["ntt_global_accountant.wasm"]
);

// const accountingRegistrations = Object.values(registrations).map((r) =>
// Buffer.from(r, "hex").toString("base64")
// );
// const msg = client.wasm.msgExecuteContract({
// sender: signer,
// contract: addresses["global_accountant.wasm"],
// msg: toUtf8(
// JSON.stringify({
// submit_vaas: {
// vaas: accountingRegistrations,
// },
// })
// ),
// funds: [],
// });
// const res = await client.signAndBroadcast(signer, [msg], {
// ...ZERO_FEE,
// gas: "10000000",
// });
// console.log(`sent accounting chain registrations, tx: `, res.transactionHash);
const allowListResponse = await client.signAndBroadcast(
signer,
[
client.core.msgCreateAllowlistEntryRequest({
signer: signer,
address: "wormhole14vtqhv6550uh6gycxxum8qmx3kmy7ak2qwzecx",
name: "ibcRelayer",
}),
client.core.msgCreateAllowlistEntryRequest({
signer: signer,
address: "wormhole1s5a6dg9p902z5rhjgkk0ts8lulvtmhmpftasxe",
name: "guardianGatewayRelayer0",
}),
client.core.msgCreateAllowlistEntryRequest({
signer: signer,
address: "wormhole1dtwappgz4zfmlhay44x5r787u6ap0zhrk2m09m",
name: "guardianGatewayRelayer1",
}),
client.core.msgCreateAllowlistEntryRequest({
signer: signer,
address: "wormhole1karc53cm5zyyaeqsw9stmjvu0vwzky7k07lhwm",
name: "guardianNttAccountant0",
}),
client.core.msgCreateAllowlistEntryRequest({
signer: signer,
address: "wormhole1cdvy8ae9xgmfjj4pztz77dwqm4wa04glz68r5w",
name: "guardianNttAccountant1",
}),
client.core.msgCreateAllowlistEntryRequest({
signer: signer,
address: "wormhole18s5lynnmx37hq4wlrw9gdn68sg2uxp5rwf5k3u",
name: "nttAccountantTest",
}),
],
{
...ZERO_FEE,
gas: "10000000",
}
);
console.log(
"created allowlist entries: ",
allowListResponse.transactionHash,
allowListResponse.code
);
}

try {
File diff suppressed because it is too large
@ -13,7 +13,7 @@
"keywords": [],
"author": "",
"dependencies": {
"@certusone/wormhole-sdk": "0.9.9",
"@certusone/wormhole-sdk": "0.10.10",
"@cosmjs/cosmwasm-stargate": "0.29.5",
"@improbable-eng/grpc-web-node-http-transport": "0.15.0",
"@wormhole-foundation/wormchain-sdk": "file:../../ts-sdk",
@ -5,4 +5,3 @@ for ((i=0; i<num; i++)); do
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' guardian-$i.guardian:6060/readyz)" != "200" ]]; do sleep 5; done
done
CI=true npm run test-accountant
# CI=true npm run test-ntt-accountant
@ -0,0 +1,7 @@
#!/bin/sh
set -e
num=${NUM_GUARDIANS:-1} # default value for NUM_GUARDIANS = 1
for ((i=0; i<num; i++)); do
while [[ "$(curl -s -o /dev/null -w ''%{http_code}'' guardian-$i.guardian:6060/readyz)" != "200" ]]; do sleep 5; done
done
CI=true npm run test-ntt-accountant
@ -63,6 +63,27 @@
"pub_key": null,
"account_number": "0",
"sequence": "0"
},
{
"@type": "/cosmos.auth.v1beta1.BaseAccount",
"address": "wormhole1karc53cm5zyyaeqsw9stmjvu0vwzky7k07lhwm",
"pub_key": null,
"account_number": "0",
"sequence": "0"
},
{
"@type": "/cosmos.auth.v1beta1.BaseAccount",
"address": "wormhole1cdvy8ae9xgmfjj4pztz77dwqm4wa04glz68r5w",
"pub_key": null,
"account_number": "0",
"sequence": "0"
},
{
"@type": "/cosmos.auth.v1beta1.BaseAccount",
"address": "wormhole18s5lynnmx37hq4wlrw9gdn68sg2uxp5rwf5k3u",
"pub_key": null,
"account_number": "0",
"sequence": "0"
}
]
},

@ -136,6 +157,45 @@
"amount": "200000000"
}
]
},
{
"address": "wormhole1karc53cm5zyyaeqsw9stmjvu0vwzky7k07lhwm",
"coins": [
{
"denom": "utest",
"amount": "100000000000"
},
{
"denom": "uworm",
"amount": "200000000"
}
]
},
{
"address": "wormhole1cdvy8ae9xgmfjj4pztz77dwqm4wa04glz68r5w",
"coins": [
{
"denom": "utest",
"amount": "100000000000"
},
{
"denom": "uworm",
"amount": "200000000"
}
]
},
{
"address": "wormhole18s5lynnmx37hq4wlrw9gdn68sg2uxp5rwf5k3u",
"coins": [
{
"denom": "utest",
"amount": "100000000000"
},
{
"denom": "uworm",
"amount": "200000000"
}
]
}
],
"supply": [],