diff --git a/.gitignore b/.gitignore index b2daa0e..6b1e28c 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,9 @@ awscpu # Output of the go coverage tool, specifically when used with LiteIDE *.out +# ignore GoLand metafiles directory +.idea/ + *logs/ .vscode* @@ -42,4 +45,4 @@ db* bin/ build/ -*/mykey/staker.* \ No newline at end of file +keys/staker.* diff --git a/README.md b/README.md index c860d1a..c48ba93 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ The Gecko binary, named `ava`, is in the `build` directory. - Build the docker image of latest gecko branch by `scripts/build_image.sh`. - Check the built image by `docker image ls`, you should see some image tagged `gecko-xxxxxxxx`, where `xxxxxxxx` is the commit id of the Gecko source it was built from. -- Test Gecko by `docker run -ti -p 9651:9651 gecko-xxxxxxxx /gecko/build/ava +- Test Gecko by `docker run -ti -p 9650:9650 -p 9651:9651 gecko-xxxxxxxx /gecko/build/ava --public-ip=127.0.0.1 --snow-sample-size=1 --snow-quorum-size=1 --staking-tls-enabled=false`. (For a production deployment, you may want to extend the docker image with required credentials for staking and TLS.) diff --git a/api/keystore/service.go b/api/keystore/service.go index 604ec5d..dad7073 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -21,10 +21,30 @@ import ( "github.com/ava-labs/gecko/vms/components/codec" jsoncodec "github.com/ava-labs/gecko/utils/json" + zxcvbn "github.com/nbutton23/zxcvbn-go" +) + +const ( + // maxUserPassLen is the maximum length of the username or password allowed + maxUserPassLen = 1024 + + // requiredPassScore defines the score a password must achieve to be accepted + // as a password with strong characteristics by the zxcvbn package + // + // The scoring mechanism defined is as follows; + // + // 0 # too guessable: risky password. (guesses < 10^3) + // 1 # very guessable: protection from throttled online attacks. 
(guesses < 10^6) + // 2 # somewhat guessable: protection from unthrottled online attacks. (guesses < 10^8) + // 3 # safely unguessable: moderate protection from offline slow-hash scenario. (guesses < 10^10) + // 4 # very unguessable: strong protection from offline slow-hash scenario. (guesses >= 10^10) + requiredPassScore = 2 ) var ( - errEmptyUsername = errors.New("username can't be the empty string") + errEmptyUsername = errors.New("username can't be the empty string") + errUserPassMaxLength = fmt.Errorf("CreateUser call rejected due to username or password exceeding maximum length of %d chars", maxUserPassLen) + errWeakPassword = errors.New("Failed to create user as the given password is too weak. A stronger password is one of 8 or more characters containing attributes of upper and lowercase letters, numbers, and/or special characters") ) // KeyValuePair ... @@ -114,7 +134,11 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("CreateUser called with %s", args.Username) + ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username) + + if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen { + return errUserPassMaxLength + } if args.Username == "" { return errEmptyUsername @@ -123,6 +147,10 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre return fmt.Errorf("user already exists: %s", args.Username) } + if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore { + return errWeakPassword + } + usr := &User{} if err := usr.Initialize(args.Password); err != nil { return err diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index ab2a096..0868a29 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -5,6 +5,8 @@ package keystore import ( "bytes" + "fmt" + "math/rand" "testing" "github.com/ava-labs/gecko/database/memdb" @@ -12,6 +14,12 @@ import ( 
"github.com/ava-labs/gecko/utils/logging" ) +var ( + // strongPassword defines a password used for the following tests that + // scores high enough to pass the password strength scoring system + strongPassword = "N_+=_jJ;^(<;{4,:*m6CET}'&N;83FYK.wtNpwp-Jt" +) + func TestServiceListNoUsers(t *testing.T) { ks := Keystore{} ks.Initialize(logging.NoLog{}, memdb.New()) @@ -33,7 +41,7 @@ func TestServiceCreateUser(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -56,6 +64,78 @@ func TestServiceCreateUser(t *testing.T) { } } +// genStr returns a string of given length +func genStr(n int) string { + b := make([]byte, n) + rand.Read(b) + return fmt.Sprintf("%x", b)[:n] +} + +// TestServiceCreateUserArgsChecks generates excessively long usernames or +// passwords to assure the santity checks on string length are not exceeded +func TestServiceCreateUserArgsCheck(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + err := ks.CreateUser(nil, &CreateUserArgs{ + Username: genStr(maxUserPassLen + 1), + Password: strongPassword, + }, &reply) + + if reply.Success || err != errUserPassMaxLength { + t.Fatal("User was created when it should have been rejected due to too long a Username, err =", err) + } + } + + { + reply := CreateUserReply{} + err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "shortuser", + Password: genStr(maxUserPassLen + 1), + }, &reply) + + if reply.Success || err != errUserPassMaxLength { + t.Fatal("User was created when it should have been rejected due to too long a Password, err =", err) + } + } + + { + reply := ListUsersReply{} + if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil { + t.Fatal(err) + } + + if len(reply.Users) > 0 { + t.Fatalf("A user exists when there should be none") + } + } +} + +// 
TestServiceCreateUserWeakPassword tests creating a new user with a weak +// password to ensure the password strength check is working +func TestServiceCreateUserWeakPassword(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "bob", + Password: "weak", + }, &reply) + + if err != errWeakPassword { + t.Error("Unexpected error occurred when testing weak password:", err) + } + + if reply.Success { + t.Fatal("User was created when it should have been rejected due to weak password") + } + } +} + func TestServiceCreateDuplicate(t *testing.T) { ks := Keystore{} ks.Initialize(logging.NoLog{}, memdb.New()) @@ -64,7 +144,7 @@ func TestServiceCreateDuplicate(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -77,7 +157,7 @@ func TestServiceCreateDuplicate(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch!", + Password: strongPassword, }, &reply); err == nil { t.Fatalf("Should have errored due to the username already existing") } @@ -90,7 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ - Password: "launch", + Password: strongPassword, }, &reply); err == nil { t.Fatalf("Shouldn't have allowed empty username") } @@ -104,7 +184,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -114,7 +194,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { } { - db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { 
t.Fatal(err) } @@ -124,7 +204,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { } { - db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } @@ -144,7 +224,7 @@ func TestServiceExportImport(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -154,7 +234,7 @@ func TestServiceExportImport(t *testing.T) { } { - db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } @@ -166,7 +246,7 @@ func TestServiceExportImport(t *testing.T) { exportReply := ExportUserReply{} if err := ks.ExportUser(nil, &ExportUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &exportReply); err != nil { t.Fatal(err) } @@ -178,7 +258,7 @@ func TestServiceExportImport(t *testing.T) { reply := ImportUserReply{} if err := newKS.ImportUser(nil, &ImportUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, User: exportReply.User, }, &reply); err != nil { t.Fatal(err) @@ -189,7 +269,7 @@ func TestServiceExportImport(t *testing.T) { } { - db, err := newKS.GetDatabase(ids.Empty, "bob", "launch") + db, err := newKS.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } diff --git a/chains/atomic/blockchain_memory.go b/chains/atomic/blockchain_memory.go new file mode 100644 index 0000000..a02a85a --- /dev/null +++ b/chains/atomic/blockchain_memory.go @@ -0,0 +1,28 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" +) + +// BlockchainSharedMemory provides the API for a blockchain to interact with +// shared memory of another blockchain +type BlockchainSharedMemory struct { + blockchainID ids.ID + sm *SharedMemory +} + +// GetDatabase returns and locks the provided DB +func (bsm *BlockchainSharedMemory) GetDatabase(id ids.ID) database.Database { + sharedID := bsm.sm.sharedID(id, bsm.blockchainID) + return bsm.sm.GetDatabase(sharedID) +} + +// ReleaseDatabase unlocks the provided DB +func (bsm *BlockchainSharedMemory) ReleaseDatabase(id ids.ID) { + sharedID := bsm.sm.sharedID(id, bsm.blockchainID) + bsm.sm.ReleaseDatabase(sharedID) +} diff --git a/chains/atomic/blockchain_memory_test.go b/chains/atomic/blockchain_memory_test.go new file mode 100644 index 0000000..318ae0d --- /dev/null +++ b/chains/atomic/blockchain_memory_test.go @@ -0,0 +1,34 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/utils/logging" +) + +func TestBlockchainSharedMemory(t *testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + bsm0 := sm.NewBlockchainSharedMemory(blockchainID0) + bsm1 := sm.NewBlockchainSharedMemory(blockchainID1) + + sharedDB0 := bsm0.GetDatabase(blockchainID1) + if err := sharedDB0.Put([]byte{1}, []byte{2}); err != nil { + t.Fatal(err) + } + bsm0.ReleaseDatabase(blockchainID1) + + sharedDB1 := bsm1.GetDatabase(blockchainID0) + if value, err := sharedDB1.Get([]byte{1}); err != nil { + t.Fatal(err) + } else if !bytes.Equal(value, []byte{2}) { + t.Fatalf("database.Get Returned: 0x%x ; Expected: 0x%x", value, []byte{2}) + } + bsm1.ReleaseDatabase(blockchainID0) +} diff --git a/chains/atomic/memory.go b/chains/atomic/memory.go new file mode 100644 index 0000000..448e6c9 --- /dev/null +++ b/chains/atomic/memory.go @@ -0,0 +1,105 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "bytes" + "sync" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/codec" +) + +type rcLock struct { + lock sync.Mutex + count int +} + +// SharedMemory is the interface for shared memory inside a subnet +type SharedMemory struct { + lock sync.Mutex + log logging.Logger + codec codec.Codec + locks map[[32]byte]*rcLock + db database.Database +} + +// Initialize the SharedMemory +func (sm *SharedMemory) Initialize(log logging.Logger, db database.Database) { + sm.log = log + sm.codec = codec.NewDefault() + sm.locks = make(map[[32]byte]*rcLock) + sm.db = db +} + +// NewBlockchainSharedMemory returns a new BlockchainSharedMemory +func (sm *SharedMemory) NewBlockchainSharedMemory(id ids.ID) *BlockchainSharedMemory { + return &BlockchainSharedMemory{ + blockchainID: id, + sm: sm, + } +} + +// GetDatabase returns and locks the provided DB +func (sm *SharedMemory) GetDatabase(id ids.ID) database.Database { + lock := sm.makeLock(id) + lock.Lock() + + return prefixdb.New(id.Bytes(), sm.db) +} + +// ReleaseDatabase unlocks the provided DB +func (sm *SharedMemory) ReleaseDatabase(id ids.ID) { + lock := sm.releaseLock(id) + lock.Unlock() +} + +func (sm *SharedMemory) makeLock(id ids.ID) *sync.Mutex { + sm.lock.Lock() + defer sm.lock.Unlock() + + key := id.Key() + rc, exists := sm.locks[key] + if !exists { + rc = &rcLock{} + sm.locks[key] = rc + } + rc.count++ + return &rc.lock +} + +func (sm *SharedMemory) releaseLock(id ids.ID) *sync.Mutex { + sm.lock.Lock() + defer sm.lock.Unlock() + + key := id.Key() + rc, exists := sm.locks[key] + if !exists { + panic("Attemping to free an unknown lock") + } + rc.count-- + if rc.count == 0 { + delete(sm.locks, key) + } + return &rc.lock +} + +// sharedID calculates the ID of the shared memory space +func (sm 
*SharedMemory) sharedID(id1, id2 ids.ID) ids.ID { + idKey1 := id1.Key() + idKey2 := id2.Key() + + if bytes.Compare(idKey1[:], idKey2[:]) == 1 { + idKey1, idKey2 = idKey2, idKey1 + } + + combinedBytes, err := sm.codec.Marshal([2][32]byte{idKey1, idKey2}) + sm.log.AssertNoError(err) + + return ids.NewID(hashing.ComputeHash256Array(combinedBytes)) +} diff --git a/chains/atomic/memory_test.go b/chains/atomic/memory_test.go new file mode 100644 index 0000000..f1cf020 --- /dev/null +++ b/chains/atomic/memory_test.go @@ -0,0 +1,69 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +var ( + blockchainID0 = ids.Empty.Prefix(0) + blockchainID1 = ids.Empty.Prefix(1) +) + +func TestSharedMemorySharedID(t *testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + sharedID0 := sm.sharedID(blockchainID0, blockchainID1) + sharedID1 := sm.sharedID(blockchainID1, blockchainID0) + + if !sharedID0.Equals(sharedID1) { + t.Fatalf("SharedMemory.sharedID should be communitive") + } +} + +func TestSharedMemoryMakeReleaseLock(t *testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + sharedID := sm.sharedID(blockchainID0, blockchainID1) + + lock0 := sm.makeLock(sharedID) + + if lock1 := sm.makeLock(sharedID); lock0 != lock1 { + t.Fatalf("SharedMemory.makeLock should have returned the same lock") + } + sm.releaseLock(sharedID) + + if lock2 := sm.makeLock(sharedID); lock0 != lock2 { + t.Fatalf("SharedMemory.makeLock should have returned the same lock") + } + sm.releaseLock(sharedID) + sm.releaseLock(sharedID) + + if lock3 := sm.makeLock(sharedID); lock0 == lock3 { + t.Fatalf("SharedMemory.releaseLock should have returned freed the lock") + } + sm.releaseLock(sharedID) +} + +func TestSharedMemoryUnknownFree(t 
*testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + sharedID := sm.sharedID(blockchainID0, blockchainID1) + + defer func() { + if recover() == nil { + t.Fatalf("Should have panicked due to an unknown free") + } + }() + + sm.releaseLock(sharedID) +} diff --git a/chains/atomic/writer.go b/chains/atomic/writer.go new file mode 100644 index 0000000..bacabab --- /dev/null +++ b/chains/atomic/writer.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "github.com/ava-labs/gecko/database" +) + +// WriteAll assumes all batches have the same underlying database. Batches +// should not be modified after being passed to this function. +func WriteAll(baseBatch database.Batch, batches ...database.Batch) error { + baseBatch = baseBatch.Inner() + for _, batch := range batches { + batch = batch.Inner() + if err := batch.Replay(baseBatch); err != nil { + return err + } + } + return baseBatch.Write() +} diff --git a/chains/atomic/writer_test.go b/chains/atomic/writer_test.go new file mode 100644 index 0000000..8c79519 --- /dev/null +++ b/chains/atomic/writer_test.go @@ -0,0 +1,61 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/utils/logging" +) + +func TestWriteAll(t *testing.T) { + baseDB := memdb.New() + prefixedDBChain := prefixdb.New([]byte{0}, baseDB) + prefixedDBSharedMemory := prefixdb.New([]byte{1}, baseDB) + + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, prefixedDBSharedMemory) + + sharedID := sm.sharedID(blockchainID0, blockchainID1) + + sharedDB := sm.GetDatabase(sharedID) + + writeDB0 := versiondb.New(prefixedDBChain) + writeDB1 := versiondb.New(sharedDB) + defer sm.ReleaseDatabase(sharedID) + + if err := writeDB0.Put([]byte{1}, []byte{2}); err != nil { + t.Fatal(err) + } + if err := writeDB1.Put([]byte{2}, []byte{3}); err != nil { + t.Fatal(err) + } + + batch0, err := writeDB0.CommitBatch() + if err != nil { + t.Fatal(err) + } + batch1, err := writeDB1.CommitBatch() + if err != nil { + t.Fatal(err) + } + + if err := WriteAll(batch0, batch1); err != nil { + t.Fatal(err) + } + + if value, err := prefixedDBChain.Get([]byte{1}); err != nil { + t.Fatal(err) + } else if !bytes.Equal(value, []byte{2}) { + t.Fatalf("database.Get Returned: 0x%x ; Expected: 0x%x", value, []byte{2}) + } else if value, err := sharedDB.Get([]byte{2}); err != nil { + t.Fatal(err) + } else if !bytes.Equal(value, []byte{3}) { + t.Fatalf("database.Get Returned: 0x%x ; Expected: 0x%x", value, []byte{3}) + } +} diff --git a/chains/manager.go b/chains/manager.go index efb4372..aef8064 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/gecko/api" "github.com/ava-labs/gecko/api/keystore" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" @@ -26,6 +27,7 @@ import ( "github.com/ava-labs/gecko/snow/triggers" 
"github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/math" "github.com/ava-labs/gecko/vms" avacon "github.com/ava-labs/gecko/snow/consensus/avalanche" @@ -92,6 +94,7 @@ type manager struct { // That is, [chainID].String() is an alias for the chain, too ids.Aliaser + stakingEnabled bool // True iff the network has staking enabled log logging.Logger logFactory logging.Factory vmManager vms.Manager // Manage mappings from vm ID --> vm @@ -109,6 +112,7 @@ type manager struct { awaiter Awaiter // Waits for required connections before running bootstrapping server *api.Server // Handles HTTP API calls keystore *keystore.Keystore + sharedMemory *atomic.SharedMemory unblocked bool blockedChains []ChainParameters @@ -120,6 +124,7 @@ type manager struct { // validate this chain // TODO: Make this function take less arguments func New( + stakingEnabled bool, log logging.Logger, logFactory logging.Factory, vmManager vms.Manager, @@ -135,6 +140,7 @@ func New( awaiter Awaiter, server *api.Server, keystore *keystore.Keystore, + sharedMemory *atomic.SharedMemory, ) Manager { timeoutManager := timeout.Manager{} timeoutManager.Initialize(requestTimeout) @@ -143,6 +149,7 @@ func New( router.Initialize(log, &timeoutManager) m := &manager{ + stakingEnabled: stakingEnabled, log: log, logFactory: logFactory, vmManager: vmManager, @@ -159,6 +166,7 @@ func New( awaiter: awaiter, server: server, keystore: keystore, + sharedMemory: sharedMemory, } m.Initialize() return m @@ -246,6 +254,7 @@ func (m *manager) ForceCreateChain(chain ChainParameters) { NodeID: m.nodeID, HTTP: m.server, Keystore: m.keystore.NewBlockchainKeyStore(chain.ID), + SharedMemory: m.sharedMemory.NewBlockchainSharedMemory(chain.ID), BCLookup: m, } consensusParams := m.consensusParams @@ -256,7 +265,13 @@ func (m *manager) ForceCreateChain(chain ChainParameters) { } // The validators of this blockchain - validators, ok := 
m.validators.GetValidatorSet(ids.Empty) // TODO: Change argument to chain.SubnetID + var validators validators.Set // Validators validating this blockchain + var ok bool + if m.stakingEnabled { + validators, ok = m.validators.GetValidatorSet(chain.SubnetID) + } else { // Staking is disabled. Every peer validates every subnet. + validators, ok = m.validators.GetValidatorSet(ids.Empty) // ids.Empty is the default subnet ID. TODO: Move to const package so we can use it here. + } if !ok { m.log.Error("couldn't get validator set of subnet with ID %s. The subnet may not exist", chain.SubnetID) return @@ -353,7 +368,7 @@ func (m *manager) createAvalancheChain( msgChan := make(chan common.Message, defaultChannelSize) if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, fxs); err != nil { - return err + return fmt.Errorf("error during vm's Initialize: %w", err) } // Handles serialization/deserialization of vertices and also the @@ -376,13 +391,22 @@ func (m *manager) createAvalancheChain( }, } + bootstrapWeight := uint64(0) + for _, beacon := range beacons.List() { + newWeight, err := math.Add64(bootstrapWeight, beacon.Weight()) + if err != nil { + return err + } + bootstrapWeight = newWeight + } + engine.Initialize(avaeng.Config{ BootstrapConfig: avaeng.BootstrapConfig{ Config: common.Config{ Context: ctx, Validators: validators, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: bootstrapWeight/2 + 1, // must be > 50% Sender: &sender, }, VtxBlocked: vtxBlocker, @@ -403,6 +427,8 @@ func (m *manager) createAvalancheChain( go ctx.Log.RecoverAndPanic(handler.Dispatch) awaiting := &networking.AwaitingConnections{ + Requested: beacons, + WeightRequired: (3*bootstrapWeight + 3) / 4, // 75% must be connected to Finish: func() { ctx.Lock.Lock() defer ctx.Lock.Unlock() @@ -410,10 +436,6 @@ func (m *manager) createAvalancheChain( engine.Startup() }, } - for _, vdr := range beacons.List() { - awaiting.Requested.Add(vdr.ID()) - } - awaiting.NumRequired = 
(3*awaiting.Requested.Len() + 3) / 4 // 75% must be connected to m.awaiter.AwaitConnections(awaiting) return nil @@ -454,6 +476,15 @@ func (m *manager) createSnowmanChain( sender := sender.Sender{} sender.Initialize(ctx, m.sender, m.chainRouter, m.timeoutManager) + bootstrapWeight := uint64(0) + for _, beacon := range beacons.List() { + newWeight, err := math.Add64(bootstrapWeight, beacon.Weight()) + if err != nil { + return err + } + bootstrapWeight = newWeight + } + // The engine handles consensus engine := smeng.Transitive{} engine.Initialize(smeng.Config{ @@ -462,7 +493,7 @@ func (m *manager) createSnowmanChain( Context: ctx, Validators: validators, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: bootstrapWeight/2 + 1, // must be > 50% Sender: &sender, }, Blocked: blocked, @@ -482,6 +513,8 @@ func (m *manager) createSnowmanChain( go ctx.Log.RecoverAndPanic(handler.Dispatch) awaiting := &networking.AwaitingConnections{ + Requested: beacons, + WeightRequired: (3*bootstrapWeight + 3) / 4, // 75% must be connected to Finish: func() { ctx.Lock.Lock() defer ctx.Lock.Unlock() @@ -489,10 +522,6 @@ func (m *manager) createSnowmanChain( engine.Startup() }, } - for _, vdr := range beacons.List() { - awaiting.Requested.Add(vdr.ID()) - } - awaiting.NumRequired = (3*awaiting.Requested.Len() + 3) / 4 // 75% must be connected to m.awaiter.AwaitConnections(awaiting) return nil } diff --git a/chains/mock_manager.go b/chains/mock_manager.go new file mode 100644 index 0000000..7c0f86b --- /dev/null +++ b/chains/mock_manager.go @@ -0,0 +1,37 @@ +package chains + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/networking/router" +) + +// MockManager implements Manager but does nothing. Always returns nil error. +// To be used only in tests (namely in package platformvm) +type MockManager struct{} + +// Router ... +func (mm MockManager) Router() router.Router { return nil } + +// CreateChain ... 
+func (mm MockManager) CreateChain(ChainParameters) {} + +// ForceCreateChain ... +func (mm MockManager) ForceCreateChain(ChainParameters) {} + +// AddRegistrant ... +func (mm MockManager) AddRegistrant(Registrant) {} + +// Lookup ... +func (mm MockManager) Lookup(string) (ids.ID, error) { return ids.ID{}, nil } + +// LookupVM ... +func (mm MockManager) LookupVM(string) (ids.ID, error) { return ids.ID{}, nil } + +// Aliases ... +func (mm MockManager) Aliases(ids.ID) []string { return nil } + +// Alias ... +func (mm MockManager) Alias(ids.ID, string) error { return nil } + +// Shutdown ... +func (mm MockManager) Shutdown() {} diff --git a/database/batch.go b/database/batch.go index 443fd67..53ce3e5 100644 --- a/database/batch.go +++ b/database/batch.go @@ -23,6 +23,11 @@ type Batch interface { // Replay replays the batch contents. Replay(w KeyValueWriter) error + + // Inner returns a Batch writing to the inner database, if one exists. If + // this batch is already writing to the base DB, then itself should be + // returned. + Inner() Batch } // Batcher wraps the NewBatch method of a backing data store. 
diff --git a/database/leveldb/leveldb.go b/database/leveldb/leveldb.go index ef5e89c..a763829 100644 --- a/database/leveldb/leveldb.go +++ b/database/leveldb/leveldb.go @@ -184,6 +184,9 @@ func (b *batch) Replay(w database.KeyValueWriter) error { return updateError(replay.err) } +// Inner returns itself +func (b *batch) Inner() database.Batch { return b } + type replayer struct { writer database.KeyValueWriter err error diff --git a/database/memdb/memdb.go b/database/memdb/memdb.go index 9f6ba58..24b5104 100644 --- a/database/memdb/memdb.go +++ b/database/memdb/memdb.go @@ -208,6 +208,9 @@ func (b *batch) Replay(w database.KeyValueWriter) error { return nil } +// Inner returns itself +func (b *batch) Inner() database.Batch { return b } + type iterator struct { initialized bool keys []string diff --git a/database/nodb/nodb.go b/database/nodb/nodb.go index 96b5ef0..3f1bceb 100644 --- a/database/nodb/nodb.go +++ b/database/nodb/nodb.go @@ -69,6 +69,9 @@ func (*Batch) Reset() {} // Replay does nothing func (*Batch) Replay(database.KeyValueWriter) error { return database.ErrClosed } +// Inner returns itself +func (b *Batch) Inner() database.Batch { return b } + // Iterator does nothing type Iterator struct{ Err error } diff --git a/database/test_database.go b/database/test_database.go index f255d8a..d299bb1 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -17,6 +17,7 @@ var ( TestBatchDelete, TestBatchReset, TestBatchReplay, + TestBatchInner, TestIterator, TestIteratorStart, TestIteratorPrefix, @@ -299,6 +300,62 @@ func TestBatchReplay(t *testing.T, db Database) { } } +// TestBatchInner ... 
+func TestBatchInner(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + firstBatch := db.NewBatch() + if firstBatch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := firstBatch.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + secondBatch := db.NewBatch() + if secondBatch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := secondBatch.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + innerFirstBatch := firstBatch.Inner() + innerSecondBatch := secondBatch.Inner() + + if err := innerFirstBatch.Replay(innerSecondBatch); err != nil { + t.Fatalf("Unexpected error on batch.Replay: %s", err) + } + + if err := innerSecondBatch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key1) + } else if v, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value1, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value1) + } else if has, err := db.Has(key2); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key2) + } else if v, err := db.Get(key2); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value2, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value2) + } +} + // TestIterator ... 
func TestIterator(t *testing.T, db Database) { key1 := []byte("hello1") diff --git a/database/versiondb/versiondb.go b/database/versiondb/versiondb.go index a475e76..6f42131 100644 --- a/database/versiondb/versiondb.go +++ b/database/versiondb/versiondb.go @@ -184,29 +184,55 @@ func (db *Database) Commit() error { db.lock.Lock() defer db.lock.Unlock() - if db.mem == nil { - return database.ErrClosed + batch, err := db.commitBatch() + if err != nil { + return err } - if len(db.mem) == 0 { - return nil + if err := batch.Write(); err != nil { + return err + } + db.abort() + return nil +} + +// Abort all changes to the underlying database +func (db *Database) Abort() { + db.lock.Lock() + defer db.lock.Unlock() + + db.abort() +} + +func (db *Database) abort() { db.mem = make(map[string]valueDelete, memdb.DefaultSize) } + +// CommitBatch returns a batch that will commit all pending writes to the underlying database +func (db *Database) CommitBatch() (database.Batch, error) { + db.lock.Lock() + defer db.lock.Unlock() + + return db.commitBatch() +} + +func (db *Database) commitBatch() (database.Batch, error) { + if db.mem == nil { + return nil, database.ErrClosed } batch := db.db.NewBatch() for key, value := range db.mem { if value.delete { if err := batch.Delete([]byte(key)); err != nil { - return err + return nil, err } } else if err := batch.Put([]byte(key), value.value); err != nil { - return err + return nil, err } } if err := batch.Write(); err != nil { - return err + return nil, err } - db.mem = make(map[string]valueDelete, memdb.DefaultSize) - return nil + return batch, nil } // Close implements the database.Database interface @@ -289,6 +315,9 @@ func (b *batch) Replay(w database.KeyValueWriter) error { return nil } +// Inner returns itself +func (b *batch) Inner() database.Batch { return b } + // iterator walks over both the in memory database and the underlying database // at the same time. 
type iterator struct { diff --git a/database/versiondb/versiondb_test.go b/database/versiondb/versiondb_test.go index ab3a9bb..70cf8ff 100644 --- a/database/versiondb/versiondb_test.go +++ b/database/versiondb/versiondb_test.go @@ -256,6 +256,72 @@ func TestCommitClosedDelete(t *testing.T) { } } +func TestAbort(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + if value, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } + + db.Abort() + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } else if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } +} + +func TestCommitBatch(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + batch, err := db.CommitBatch() + if err != nil { + t.Fatalf("Unexpected error on db.CommitBatch: %s", err) + } + db.Abort() + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if value, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } 
else if value, err := baseDB.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } +} + func TestSetDatabase(t *testing.T) { baseDB := memdb.New() newDB := memdb.New() diff --git a/genesis/aliases.go b/genesis/aliases.go new file mode 100644 index 0000000..41ff25a --- /dev/null +++ b/genesis/aliases.go @@ -0,0 +1,78 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/nftfx" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/propertyfx" + "github.com/ava-labs/gecko/vms/secp256k1fx" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" + "github.com/ava-labs/gecko/vms/timestampvm" +) + +// Aliases returns the default aliases based on the network ID +func Aliases(networkID uint32) (map[string][]string, map[[32]byte][]string, map[[32]byte][]string, error) { + generalAliases := map[string][]string{ + "vm/" + platformvm.ID.String(): []string{"vm/platform"}, + "vm/" + avm.ID.String(): []string{"vm/avm"}, + "vm/" + evm.ID.String(): []string{"vm/evm"}, + "vm/" + spdagvm.ID.String(): []string{"vm/spdag"}, + "vm/" + spchainvm.ID.String(): []string{"vm/spchain"}, + "vm/" + timestampvm.ID.String(): []string{"vm/timestamp"}, + "bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"}, + } + chainAliases := map[[32]byte][]string{ + ids.Empty.Key(): []string{"P", "platform"}, + } + vmAliases := map[[32]byte][]string{ + platformvm.ID.Key(): []string{"platform"}, + avm.ID.Key(): []string{"avm"}, + evm.ID.Key(): []string{"evm"}, + spdagvm.ID.Key(): []string{"spdag"}, + spchainvm.ID.Key(): []string{"spchain"}, + timestampvm.ID.Key(): 
[]string{"timestamp"}, + secp256k1fx.ID.Key(): []string{"secp256k1fx"}, + nftfx.ID.Key(): []string{"nftfx"}, + propertyfx.ID.Key(): []string{"propertyfx"}, + } + + genesisBytes, err := Genesis(networkID) + if err != nil { + return nil, nil, nil, err + } + + genesis := &platformvm.Genesis{} // TODO let's not re-create genesis to do aliasing + if err := platformvm.Codec.Unmarshal(genesisBytes, genesis); err != nil { + return nil, nil, nil, err + } + if err := genesis.Initialize(); err != nil { + return nil, nil, nil, err + } + + for _, chain := range genesis.Chains { + switch { + case avm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"X", "avm", "bc/X", "bc/avm"} + chainAliases[chain.ID().Key()] = []string{"X", "avm"} + case evm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"C", "evm", "bc/C", "bc/evm"} + chainAliases[chain.ID().Key()] = []string{"C", "evm"} + case spdagvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/spdag"} + chainAliases[chain.ID().Key()] = []string{"spdag"} + case spchainvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/spchain"} + chainAliases[chain.ID().Key()] = []string{"spchain"} + case timestampvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/timestamp"} + chainAliases[chain.ID().Key()] = []string{"timestamp"} + } + } + return generalAliases, chainAliases, vmAliases, nil +} diff --git a/genesis/config.go b/genesis/config.go new file mode 100644 index 0000000..5db0302 --- /dev/null +++ b/genesis/config.go @@ -0,0 +1,108 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package genesis + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/evm" +) + +// Note that since an AVA network has exactly one Platform Chain, +// and the Platform Chain defines the genesis state of the network +// (who is staking, which chains exist, etc.), defining the genesis +// state of the Platform Chain is the same as defining the genesis +// state of the network. + +// Config contains the genesis addresses used to construct a genesis +type Config struct { + MintAddresses, FundedAddresses, FundedEVMAddresses, StakerIDs []string + ParsedMintAddresses, ParsedFundedAddresses, ParsedStakerIDs []ids.ShortID +} + +func (c *Config) init() error { + c.ParsedMintAddresses = nil + for _, addrStr := range c.MintAddresses { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return err + } + c.ParsedMintAddresses = append(c.ParsedMintAddresses, addr) + } + c.ParsedFundedAddresses = nil + for _, addrStr := range c.FundedAddresses { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return err + } + c.ParsedFundedAddresses = append(c.ParsedFundedAddresses, addr) + } + c.ParsedStakerIDs = nil + for _, addrStr := range c.StakerIDs { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return err + } + c.ParsedStakerIDs = append(c.ParsedStakerIDs, addr) + } + return nil +} + +// Hard coded genesis constants +var ( + CascadeConfig = Config{ + MintAddresses: []string{ + "95YUFjhDG892VePMzpwKF9JzewGKvGRi3", + }, + FundedAddresses: []string{ + "9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17", + "JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn", + "7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM", + "77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6", + "4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt", + "CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi", + "4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa", + "DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9", + "ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h", + "6cesTteH62Y5mLoDBUASaBvCXuL2AthL", + }, + FundedEVMAddresses: []string{ + "0x572f4D80f10f663B5049F789546f25f70Bb62a7F", + }, + 
StakerIDs: []string{ + "NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co", + "CMsa8cMw4eib1Hb8GG4xiUKAq5eE1BwUX", + "DsMP6jLhi1MkDVc3qx9xx9AAZWx8e87Jd", + "N86eodVZja3GEyZJTo3DFUPGpxEEvjGHs", + "EkKeGSLUbHrrtuayBtbwgWDRUiAziC3ao", + }, + } + DefaultConfig = Config{ + MintAddresses: []string{}, + FundedAddresses: []string{ + // Private key: ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN + "6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV", + }, + FundedEVMAddresses: []string{ + // Private key: evm.GenesisTestKey + evm.GenesisTestAddr, + }, + StakerIDs: []string{ + "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", + "MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", + "NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN", + "GWPcbFJZFfZreETSoWjPimr846mXEKCtu", + "P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5", + }, + } +) + +// GetConfig ... +func GetConfig(networkID uint32) *Config { + switch networkID { + case CascadeID: + return &CascadeConfig + default: + return &DefaultConfig + } +} diff --git a/genesis/genesis.go b/genesis/genesis.go index fa34a75..f3c752e 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -3,511 +3,316 @@ package genesis -// TODO: Move this to a separate repo and leave only a byte array - import ( + "errors" "fmt" - "math" - "regexp" - "strconv" - "strings" + "math/big" + "time" + + "github.com/ava-labs/coreth/core" + + "github.com/ava-labs/go-ethereum/common" + "github.com/ava-labs/go-ethereum/params" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/nftfx" "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/propertyfx" + "github.com/ava-labs/gecko/vms/secp256k1fx" "github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/vms/spdagvm" 
"github.com/ava-labs/gecko/vms/timestampvm" ) -// Note that since an AVA network has exactly one Platform Chain, -// and the Platform Chain defines the genesis state of the network -// (who is staking, which chains exist, etc.), defining the genesis -// state of the Platform Chain is the same as defining the genesis -// state of the network. - -// Hardcoded network IDs -const ( - MainnetID uint32 = 1 - TestnetID uint32 = 2 - BorealisID uint32 = 2 - LocalID uint32 = 12345 - - MainnetName = "mainnet" - TestnetName = "testnet" - BorealisName = "borealis" - LocalName = "local" -) - -var ( - validNetworkName = regexp.MustCompile(`network-[0-9]+`) -) - -// Hard coded genesis constants -var ( - // Give special names to the mainnet and testnet - NetworkIDToNetworkName = map[uint32]string{ - MainnetID: MainnetName, - TestnetID: BorealisName, - LocalID: LocalName, - } - NetworkNameToNetworkID = map[string]uint32{ - MainnetName: MainnetID, - TestnetName: TestnetID, - BorealisName: BorealisID, - LocalName: LocalID, - } - Keys = []string{ - "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", - } - Addresses = []string{ - "6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV", - } - ParsedAddresses = []ids.ShortID{} - StakerIDs = []string{ - "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", - "MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", - "NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN", - "GWPcbFJZFfZreETSoWjPimr846mXEKCtu", - "P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5", - } - ParsedStakerIDs = []ids.ShortID{} -) - -func init() { - for _, addrStr := range Addresses { - addr, err := ids.ShortFromString(addrStr) - if err != nil { - panic(err) - } - ParsedAddresses = append(ParsedAddresses, addr) - } - for _, stakerIDStr := range StakerIDs { - stakerID, err := ids.ShortFromString(stakerIDStr) - if err != nil { - panic(err) - } - ParsedStakerIDs = append(ParsedStakerIDs, stakerID) - } -} - -// NetworkName returns a human readable name for the network with -// ID [networkID] -func NetworkName(networkID uint32) string { - if name, exists := 
NetworkIDToNetworkName[networkID]; exists { - return name - } - return fmt.Sprintf("network-%d", networkID) -} - -// NetworkID returns the ID of the network with name [networkName] -func NetworkID(networkName string) (uint32, error) { - networkName = strings.ToLower(networkName) - if id, exists := NetworkNameToNetworkID[networkName]; exists { - return id, nil - } - - if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { - if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) - } - return uint32(id), nil - } - if validNetworkName.MatchString(networkName) { - if id, err := strconv.Atoi(networkName[8:]); err == nil { - if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) - } - return uint32(id), nil - } - } - - return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) -} - -// Aliases returns the default aliases based on the network ID -func Aliases(networkID uint32) (generalAliases map[string][]string, chainAliases map[[32]byte][]string, vmAliases map[[32]byte][]string) { - generalAliases = map[string][]string{ - "vm/" + platformvm.ID.String(): []string{"vm/platform"}, - "vm/" + avm.ID.String(): []string{"vm/avm"}, - "vm/" + evm.ID.String(): []string{"vm/evm"}, - "vm/" + spdagvm.ID.String(): []string{"vm/spdag"}, - "vm/" + spchainvm.ID.String(): []string{"vm/spchain"}, - "vm/" + timestampvm.ID.String(): []string{"vm/timestamp"}, - "bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"}, - } - chainAliases = map[[32]byte][]string{ - ids.Empty.Key(): []string{"P", "platform"}, - } - vmAliases = map[[32]byte][]string{ - platformvm.ID.Key(): []string{"platform"}, - avm.ID.Key(): []string{"avm"}, - evm.ID.Key(): []string{"evm"}, - spdagvm.ID.Key(): []string{"spdag"}, - spchainvm.ID.Key(): []string{"spchain"}, - timestampvm.ID.Key(): []string{"timestamp"}, - } - - genesisBytes := Genesis(networkID) - genesis := &platformvm.Genesis{} // TODO 
let's not re-create genesis to do aliasing - platformvm.Codec.Unmarshal(genesisBytes, genesis) // TODO check for error - genesis.Initialize() - - for _, chain := range genesis.Chains { - switch { - case avm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"X", "avm", "bc/X", "bc/avm"} - chainAliases[chain.ID().Key()] = []string{"X", "avm"} - case evm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"C", "evm", "bc/C", "bc/evm"} - chainAliases[chain.ID().Key()] = []string{"C", "evm"} - case spdagvm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"bc/spdag"} - chainAliases[chain.ID().Key()] = []string{"spdag"} - case spchainvm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"bc/spchain"} - chainAliases[chain.ID().Key()] = []string{"spchain"} - case timestampvm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"bc/timestamp"} - chainAliases[chain.ID().Key()] = []string{"timestamp"} - } - } - return -} - // Genesis returns the genesis data of the Platform Chain. -// Since the Platform Chain causes the creation of all other -// chains, this function returns the genesis data of the entire network. +// Since an AVA network has exactly one Platform Chain, and the Platform Chain +// defines the genesis state of the network (who is staking, which chains exist, +// etc.), defining the genesis state of the Platform Chain is the same as +// defining the genesis state of the network. // The ID of the new network is [networkID]. -func Genesis(networkID uint32) []byte { - if networkID != LocalID { - panic("unknown network ID provided") + +// FromConfig ... 
+func FromConfig(networkID uint32, config *Config) ([]byte, error) { + if err := config.init(); err != nil { + return nil, err } - return []byte{ - 0x00, 0x00, 0x00, 0x01, 0x3c, 0xb7, 0xd3, 0x84, - 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, - 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x05, 0xde, 0x31, 0xb4, 0xd8, 0xb2, 0x29, 0x91, - 0xd5, 0x1a, 0xa6, 0xaa, 0x1f, 0xc7, 0x33, 0xf2, - 0x3a, 0x85, 0x1a, 0x8c, 0x94, 0x00, 0x00, 0x12, - 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x5d, 0xbb, 0x75, 0x80, 0x00, 0x00, 0x00, - 0x00, 0x5f, 0x9c, 0xa9, 0x00, 0x00, 0x00, 0x30, - 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, - 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, - 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xaa, 0x18, - 0xd3, 0x99, 0x1c, 0xf6, 0x37, 0xaa, 0x6c, 0x16, - 0x2f, 0x5e, 0x95, 0xcf, 0x16, 0x3f, 0x69, 0xcd, - 0x82, 0x91, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, - 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbb, - 0x75, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9c, - 0xa9, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb7, - 0xd3, 0x84, 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, - 0x09, 0xf1, 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, - 0xb2, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x05, 0xe9, 0x09, 0x4f, 0x73, 0x69, - 0x80, 0x02, 0xfd, 0x52, 0xc9, 0x08, 0x19, 0xb4, - 0x57, 0xb9, 0xfb, 0xc8, 0x66, 0xab, 0x80, 0x00, - 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x5d, 0xbb, 0x75, 0x80, 0x00, - 0x00, 0x00, 0x00, 0x5f, 0x9c, 0xa9, 0x00, 0x00, - 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, - 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, - 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x47, 0x9f, 0x66, 0xc8, 0xbe, 0x89, 0x58, 0x30, - 0x54, 0x7e, 0x70, 0xb4, 0xb2, 0x98, 0xca, 0xfd, - 0x43, 0x3d, 0xba, 0x6e, 0x00, 0x00, 0x12, 0x30, - 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x5d, 0xbb, 0x75, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x5f, 0x9c, 0xa9, 0x00, 0x00, 0x00, 0x30, 0x39, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, 0x6a, - 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, 0x68, - 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0xf2, 0x9b, 0xce, - 0x5f, 0x34, 0xa7, 0x43, 0x01, 0xeb, 0x0d, 0xe7, - 0x16, 0xd5, 0x19, 0x4e, 0x4a, 0x4a, 0xea, 0x5d, - 0x7a, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbb, 0x75, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9c, 0xa9, - 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb7, 0xd3, - 0x84, 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, - 0xf1, 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, - 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x05, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x41, 0x56, 0x4d, 0x61, 0x76, 0x6d, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x73, - 0x65, 0x63, 0x70, 0x32, 0x35, 0x36, 0x6b, 0x31, - 0x66, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x03, 0x41, 0x56, 0x41, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x03, 0x41, 0x56, 0x41, 0x00, 0x03, 0x41, - 0x56, 0x41, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x9f, 0xdf, 0x42, 0xf6, - 0xe4, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, - 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, - 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x41, 0x74, - 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x65, 0x76, - 0x6d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0xc9, 0x7b, 0x22, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, - 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31, 0x31, 0x30, - 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65, 0x73, 0x74, - 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, 0x61, 0x6f, - 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, 0x61, - 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, 0x74, 0x72, - 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69, 0x70, 0x31, - 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, - 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, 0x70, 0x31, - 0x35, 0x30, 0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, - 0x22, 0x30, 0x78, 0x32, 0x30, 0x38, 0x36, 0x37, - 
0x39, 0x39, 0x61, 0x65, 0x65, 0x62, 0x65, 0x61, - 0x65, 0x31, 0x33, 0x35, 0x63, 0x32, 0x34, 0x36, - 0x63, 0x36, 0x35, 0x30, 0x32, 0x31, 0x63, 0x38, - 0x32, 0x62, 0x34, 0x65, 0x31, 0x35, 0x61, 0x32, - 0x63, 0x34, 0x35, 0x31, 0x33, 0x34, 0x30, 0x39, - 0x39, 0x33, 0x61, 0x61, 0x63, 0x66, 0x64, 0x32, - 0x37, 0x35, 0x31, 0x38, 0x38, 0x36, 0x35, 0x31, - 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x69, - 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, - 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x62, 0x79, - 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75, 0x6d, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, - 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, - 0x22, 0x70, 0x65, 0x74, 0x65, 0x72, 0x73, 0x62, - 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22, 0x6e, 0x6f, - 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22, 0x30, 0x78, - 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3a, 0x22, - 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x78, - 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x22, - 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x22, 0x2c, - 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x35, 0x66, - 0x35, 0x65, 0x31, 0x30, 0x30, 0x22, 0x2c, 0x22, - 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, - 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, - 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78, 0x48, 0x61, - 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, - 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, - 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, - 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x22, - 0x3a, 0x7b, 0x22, 0x37, 0x35, 0x31, 0x61, 0x30, - 0x62, 0x39, 0x36, 0x65, 0x31, 0x30, 0x34, 0x32, - 0x62, 0x65, 0x65, 0x37, 0x38, 0x39, 0x34, 0x35, - 0x32, 0x65, 0x63, 0x62, 0x32, 0x30, 0x32, 0x35, - 0x33, 0x66, 0x62, 0x61, 0x34, 0x30, 0x64, 0x62, - 0x65, 0x38, 0x35, 0x22, 0x3a, 0x7b, 0x22, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x3a, - 0x22, 0x30, 0x78, 0x33, 0x33, 0x62, 0x32, 0x65, - 0x33, 0x63, 0x39, 0x66, 0x64, 0x30, 0x38, 0x30, - 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c, 0x22, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x3a, 0x22, - 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x67, 0x61, - 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, 0x3a, 0x22, - 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, - 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x41, 0x47, - 0x20, 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x73, 0x70, 0x64, 0x61, 0x67, 0x76, 0x6d, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, - 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, - 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, - 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x20, 0x50, 0x61, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x73, 0x70, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x76, 0x6d, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, - 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, - 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, - 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, - 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x17, 0x53, 0x69, 0x6d, 0x70, - 0x6c, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x20, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x5d, 0xbb, 0x75, 0x80, + // Specify the genesis state of the AVM + avmArgs := avm.BuildGenesisArgs{} + { + ava := avm.AssetDefinition{ + Name: "AVA", + Symbol: "AVA", + Denomination: 9, + InitialState: map[string][]interface{}{}, + } + + if len(config.MintAddresses) > 0 { + ava.InitialState["variableCap"] = []interface{}{avm.Owners{ + Threshold: 1, + Minters: config.MintAddresses, + }} + } + for 
_, addr := range config.FundedAddresses { + ava.InitialState["fixedCap"] = append(ava.InitialState["fixedCap"], avm.Holder{ + Amount: json.Uint64(45 * units.MegaAva), + Address: addr, + }) + } + + avmArgs.GenesisData = map[string]avm.AssetDefinition{ + // The AVM starts out with one asset, $AVA + "AVA": ava, + } } + avmReply := avm.BuildGenesisReply{} + + avmSS := avm.StaticService{} + err := avmSS.BuildGenesis(nil, &avmArgs, &avmReply) + if err != nil { + panic(err) + } + + // Specify the genesis state of Athereum (the built-in instance of the EVM) + evmBalance, success := new(big.Int).SetString("33b2e3c9fd0804000000000", 16) + if success != true { + return nil, errors.New("problem creating evm genesis state") + } + + alloc := core.GenesisAlloc{} + for _, addr := range config.FundedEVMAddresses { + alloc[common.HexToAddress(addr)] = core.GenesisAccount{ + Balance: evmBalance, + } + } + evmArgs := core.Genesis{ + Config: ¶ms.ChainConfig{ + ChainID: big.NewInt(43110), + HomesteadBlock: big.NewInt(0), + DAOForkBlock: big.NewInt(0), + DAOForkSupport: true, + EIP150Block: big.NewInt(0), + EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), + EIP155Block: big.NewInt(0), + EIP158Block: big.NewInt(0), + ByzantiumBlock: big.NewInt(0), + ConstantinopleBlock: big.NewInt(0), + PetersburgBlock: big.NewInt(0), + }, + Nonce: 0, + Timestamp: 0, + ExtraData: []byte{0}, + GasLimit: 100000000, + Difficulty: big.NewInt(0), + Mixhash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"), + Alloc: alloc, + Number: 0, + GasUsed: 0, + ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), + } + evmSS := evm.StaticService{} + evmReply, err := evmSS.BuildGenesis(nil, &evmArgs) + if err != nil { + return nil, err + } + + // Specify the genesis state of the simple payments DAG + 
spdagvmArgs := spdagvm.BuildGenesisArgs{} + for _, addr := range config.ParsedFundedAddresses { + spdagvmArgs.Outputs = append(spdagvmArgs.Outputs, + spdagvm.APIOutput{ + Amount: json.Uint64(20 * units.KiloAva), + Threshold: 1, + Addresses: []ids.ShortID{addr}, + }, + ) + } + + spdagvmReply := spdagvm.BuildGenesisReply{} + spdagvmSS := spdagvm.StaticService{} + if err := spdagvmSS.BuildGenesis(nil, &spdagvmArgs, &spdagvmReply); err != nil { + return nil, fmt.Errorf("problem creating simple payments DAG: %w", err) + } + + // Specify the genesis state of the simple payments chain + spchainvmArgs := spchainvm.BuildGenesisArgs{} + for _, addr := range config.ParsedFundedAddresses { + spchainvmArgs.Accounts = append(spchainvmArgs.Accounts, + spchainvm.APIAccount{ + Address: addr, + Balance: json.Uint64(20 * units.KiloAva), + }, + ) + } + spchainvmReply := spchainvm.BuildGenesisReply{} + + spchainvmSS := spchainvm.StaticService{} + if err := spchainvmSS.BuildGenesis(nil, &spchainvmArgs, &spchainvmReply); err != nil { + return nil, fmt.Errorf("problem creating simple payments chain: %w", err) + } + + // Specify the initial state of the Platform Chain + platformvmArgs := platformvm.BuildGenesisArgs{ + NetworkID: json.Uint32(networkID), + } + for _, addr := range config.ParsedFundedAddresses { + platformvmArgs.Accounts = append(platformvmArgs.Accounts, + platformvm.APIAccount{ + Address: addr, + Balance: json.Uint64(20 * units.KiloAva), + }, + ) + } + + genesisTime := time.Date( + /*year=*/ 2019, + /*month=*/ time.November, + /*day=*/ 1, + /*hour=*/ 0, + /*minute=*/ 0, + /*second=*/ 0, + /*nano-second=*/ 0, + /*location=*/ time.UTC, + ) + stakingDuration := 365 * 24 * time.Hour // ~ 1 year + endStakingTime := genesisTime.Add(stakingDuration) + + for i, validatorID := range config.ParsedStakerIDs { + weight := json.Uint64(20 * units.KiloAva) + platformvmArgs.Validators = append(platformvmArgs.Validators, + platformvm.APIDefaultSubnetValidator{ + APIValidator: 
platformvm.APIValidator{ + StartTime: json.Uint64(genesisTime.Unix()), + EndTime: json.Uint64(endStakingTime.Unix()), + Weight: &weight, + ID: validatorID, + }, + Destination: config.ParsedFundedAddresses[i%len(config.ParsedFundedAddresses)], + }, + ) + } + + // Specify the chains that exist upon this network's creation + platformvmArgs.Chains = []platformvm.APIChain{ + platformvm.APIChain{ + GenesisData: avmReply.Bytes, + SubnetID: platformvm.DefaultSubnetID, + VMID: avm.ID, + FxIDs: []ids.ID{ + secp256k1fx.ID, + nftfx.ID, + propertyfx.ID, + }, + Name: "X-Chain", + }, + platformvm.APIChain{ + GenesisData: evmReply, + SubnetID: platformvm.DefaultSubnetID, + VMID: evm.ID, + Name: "C-Chain", + }, + platformvm.APIChain{ + GenesisData: spdagvmReply.Bytes, + SubnetID: platformvm.DefaultSubnetID, + VMID: spdagvm.ID, + Name: "Simple DAG Payments", + }, + platformvm.APIChain{ + GenesisData: spchainvmReply.Bytes, + SubnetID: platformvm.DefaultSubnetID, + VMID: spchainvm.ID, + Name: "Simple Chain Payments", + }, + platformvm.APIChain{ + GenesisData: formatting.CB58{Bytes: []byte{}}, // There is no genesis data + SubnetID: platformvm.DefaultSubnetID, + VMID: timestampvm.ID, + Name: "Simple Timestamp Server", + }, + } + + platformvmArgs.Time = json.Uint64(genesisTime.Unix()) + platformvmReply := platformvm.BuildGenesisReply{} + + platformvmSS := platformvm.StaticService{} + if err := platformvmSS.BuildGenesis(nil, &platformvmArgs, &platformvmReply); err != nil { + return nil, fmt.Errorf("problem while building platform chain's genesis state: %w", err) + } + + return platformvmReply.Bytes.Bytes, nil } +// Genesis ... +func Genesis(networkID uint32) ([]byte, error) { return FromConfig(networkID, GetConfig(networkID)) } + // VMGenesis ... 
-func VMGenesis(networkID uint32, vmID ids.ID) *platformvm.CreateChainTx { - genesisBytes := Genesis(networkID) +func VMGenesis(networkID uint32, vmID ids.ID) (*platformvm.CreateChainTx, error) { + genesisBytes, err := Genesis(networkID) + if err != nil { + return nil, err + } genesis := platformvm.Genesis{} platformvm.Codec.Unmarshal(genesisBytes, &genesis) + if err := genesis.Initialize(); err != nil { + return nil, err + } for _, chain := range genesis.Chains { if chain.VMID.Equals(vmID) { - return chain + return chain, nil } } - return nil + return nil, fmt.Errorf("couldn't find subnet with VM ID %s", vmID) +} + +// AVAAssetID ... +func AVAAssetID(networkID uint32) (ids.ID, error) { + createAVM, err := VMGenesis(networkID, avm.ID) + if err != nil { + return ids.ID{}, err + } + + c := codec.NewDefault() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&avm.BaseTx{}), + c.RegisterType(&avm.CreateAssetTx{}), + c.RegisterType(&avm.OperationTx{}), + c.RegisterType(&avm.ImportTx{}), + c.RegisterType(&avm.ExportTx{}), + c.RegisterType(&secp256k1fx.TransferInput{}), + c.RegisterType(&secp256k1fx.MintOutput{}), + c.RegisterType(&secp256k1fx.TransferOutput{}), + c.RegisterType(&secp256k1fx.MintOperation{}), + c.RegisterType(&secp256k1fx.Credential{}), + ) + if errs.Errored() { + return ids.ID{}, errs.Err + } + + genesis := avm.Genesis{} + if err := c.Unmarshal(createAVM.GenesisData, &genesis); err != nil { + return ids.ID{}, err + } + + if len(genesis.Txs) == 0 { + return ids.ID{}, errors.New("genesis creates no transactions") + } + genesisTx := genesis.Txs[0] + + tx := avm.Tx{UnsignedTx: &genesisTx.CreateAssetTx} + txBytes, err := c.Marshal(&tx) + if err != nil { + return ids.ID{}, err + } + tx.Initialize(txBytes) + + return tx.ID(), nil } diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index 7a6c6eb..31aea89 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -6,6 +6,7 @@ package genesis import ( "testing" + 
"github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/vms/avm" "github.com/ava-labs/gecko/vms/evm" "github.com/ava-labs/gecko/vms/platformvm" @@ -17,11 +18,11 @@ func TestNetworkName(t *testing.T) { if name := NetworkName(MainnetID); name != MainnetName { t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, MainnetName) } - if name := NetworkName(TestnetID); name != BorealisName { - t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, BorealisName) + if name := NetworkName(TestnetID); name != CascadeName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, CascadeName) } - if name := NetworkName(BorealisID); name != BorealisName { - t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, BorealisName) + if name := NetworkName(CascadeID); name != CascadeName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, CascadeName) } if name := NetworkName(4294967295); name != "network-4294967295" { t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, "network-4294967295") @@ -45,7 +46,7 @@ func TestNetworkID(t *testing.T) { t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id) } - id, err = NetworkID(BorealisName) + id, err = NetworkID(CascadeName) if err != nil { t.Fatal(err) } @@ -53,7 +54,7 @@ func TestNetworkID(t *testing.T) { t.Fatalf("Returned wrong network. 
Expected: %d ; Returned %d", TestnetID, id) } - id, err = NetworkID("bOrEaLiS") + id, err = NetworkID("cAsCaDe") if err != nil { t.Fatal(err) } @@ -91,7 +92,7 @@ func TestNetworkID(t *testing.T) { } func TestAliases(t *testing.T) { - generalAliases, _, _ := Aliases(LocalID) + generalAliases, _, _, _ := Aliases(LocalID) if _, exists := generalAliases["vm/"+platformvm.ID.String()]; !exists { t.Fatalf("Should have a custom alias from the vm") } else if _, exists := generalAliases["vm/"+avm.ID.String()]; !exists { @@ -106,9 +107,84 @@ func TestAliases(t *testing.T) { } func TestGenesis(t *testing.T) { - genesisBytes := Genesis(LocalID) + genesisBytes, err := Genesis(LocalID) + if err != nil { + t.Fatal(err) + } genesis := platformvm.Genesis{} if err := platformvm.Codec.Unmarshal(genesisBytes, &genesis); err != nil { t.Fatal(err) } } + +func TestVMGenesis(t *testing.T) { + tests := []struct { + networkID uint32 + vmID ids.ID + expectedID string + }{ + { + networkID: CascadeID, + vmID: avm.ID, + expectedID: "4ktRjsAKxgMr2aEzv9SWmrU7Xk5FniHUrVCX4P1TZSfTLZWFM", + }, + { + networkID: LocalID, + vmID: avm.ID, + expectedID: "4R5p2RXDGLqaifZE4hHWH9owe34pfoBULn1DrQTWivjg8o4aH", + }, + { + networkID: CascadeID, + vmID: evm.ID, + expectedID: "2mUYSXfLrDtigwbzj1LxKVsHwELghc5sisoXrzJwLqAAQHF4i", + }, + { + networkID: LocalID, + vmID: evm.ID, + expectedID: "tZGm6RCkeGpVETUTp11DW3UYFZmm69zfqxchpHrSF7wgy8rmw", + }, + } + + for _, test := range tests { + genesisTx, err := VMGenesis(test.networkID, test.vmID) + if err != nil { + t.Fatal(err) + } + if result := genesisTx.ID().String(); test.expectedID != result { + t.Fatalf("%s genesisID with networkID %d was expected to be %s but was %s", + test.vmID, + test.networkID, + test.expectedID, + result) + } + } +} + +func TestAVAAssetID(t *testing.T) { + tests := []struct { + networkID uint32 + expectedID string + }{ + { + networkID: CascadeID, + expectedID: "21d7KVtPrubc5fHr6CGNcgbUb4seUjmZKr35ZX7BZb5iP8pXWA", + }, + { + networkID: LocalID, 
+ expectedID: "n8XH5JY1EX5VYqDeAhB4Zd4GKxi9UNQy6oPpMsCAj1Q6xkiiL", + }, + } + + for _, test := range tests { + avaID, err := AVAAssetID(test.networkID) + if err != nil { + t.Fatal(err) + } + if result := avaID.String(); test.expectedID != result { + t.Fatalf("AVA assetID with networkID %d was expected to be %s but was %s", + test.networkID, + test.expectedID, + result) + } + } +} diff --git a/genesis/network_id.go b/genesis/network_id.go new file mode 100644 index 0000000..5e5c0dd --- /dev/null +++ b/genesis/network_id.go @@ -0,0 +1,73 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" +) + +// Hardcoded network IDs +var ( + MainnetID uint32 = 1 + TestnetID uint32 = 2 + CascadeID uint32 = 2 + LocalID uint32 = 12345 + + MainnetName = "mainnet" + TestnetName = "testnet" + CascadeName = "cascade" + LocalName = "local" + + NetworkIDToNetworkName = map[uint32]string{ + MainnetID: MainnetName, + TestnetID: CascadeName, + LocalID: LocalName, + } + NetworkNameToNetworkID = map[string]uint32{ + MainnetName: MainnetID, + TestnetName: TestnetID, + CascadeName: CascadeID, + LocalName: LocalID, + } + + validNetworkName = regexp.MustCompile(`network-[0-9]+`) +) + +// NetworkName returns a human readable name for the network with +// ID [networkID] +func NetworkName(networkID uint32) string { + if name, exists := NetworkIDToNetworkName[networkID]; exists { + return name + } + return fmt.Sprintf("network-%d", networkID) +} + +// NetworkID returns the ID of the network with name [networkName] +func NetworkID(networkName string) (uint32, error) { + networkName = strings.ToLower(networkName) + if id, exists := NetworkNameToNetworkID[networkName]; exists { + return id, nil + } + + if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { + if id > math.MaxUint32 { + return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + } + 
return uint32(id), nil + } + if validNetworkName.MatchString(networkName) { + if id, err := strconv.Atoi(networkName[8:]); err == nil { + if id > math.MaxUint32 { + return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + } + return uint32(id), nil + } + } + + return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) +} diff --git a/ids/bag.go b/ids/bag.go index 1022489..5a8aab4 100644 --- a/ids/bag.go +++ b/ids/bag.go @@ -95,7 +95,7 @@ func (b *Bag) Mode() (ID, int) { return b.mode, b.modeFreq } func (b *Bag) Threshold() Set { return b.metThreshold } // Filter returns the bag of ids with the same counts as this bag, except all -// the ids in the returned bag must have the same bits in the range [start, end] +// the ids in the returned bag must have the same bits in the range [start, end) // as id. func (b *Bag) Filter(start, end int, id ID) Bag { newBag := Bag{} diff --git a/ids/id_test.go b/ids/id_test.go index af3efa4..b541ed5 100644 --- a/ids/id_test.go +++ b/ids/id_test.go @@ -5,6 +5,7 @@ package ids import ( "bytes" + "reflect" "testing" ) @@ -29,10 +30,6 @@ func TestID(t *testing.T) { if b := id.Bytes(); !bytes.Equal(hash[:], b) { t.Fatalf("ID.Bytes returned wrong bytes") } - - if str := id.String(); str != "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt" { - t.Fatalf("ID.String returned wrong string: %s", str) - } } func TestIDBit(t *testing.T) { @@ -79,3 +76,143 @@ func TestFromString(t *testing.T) { t.Fatal("Expected FromString to be inverse of String but it wasn't") } } + +func TestIDFromStringError(t *testing.T) { + tests := []struct { + in string + }{ + {""}, + {"foo"}, + {"foobar"}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + _, err := FromString(tt.in) + if err == nil { + t.Error("Unexpected success") + } + }) + } +} + +func TestIDMarshalJSON(t *testing.T) { + tests := []struct { + label string + in ID + out []byte + err error + }{ + {"ID{}", ID{}, []byte("null"), nil}, + {"ID(\"ava labs\")", 
+ NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + nil, + }, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + out, err := tt.in.MarshalJSON() + if err != tt.err { + t.Errorf("Expected err %s, got error %v", tt.err, err) + } else if !bytes.Equal(out, tt.out) { + t.Errorf("got %q, expected %q", out, tt.out) + } + }) + } +} + +func TestIDUnmarshalJSON(t *testing.T) { + tests := []struct { + label string + in []byte + out ID + err error + }{ + {"ID{}", []byte("null"), ID{}, nil}, + {"ID(\"ava labs\")", + []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + nil, + }, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + foo := ID{} + err := foo.UnmarshalJSON(tt.in) + if err != tt.err { + t.Errorf("Expected err %s, got error %v", tt.err, err) + } else if foo.ID != nil && foo.Key() != tt.out.Key() { + t.Errorf("got %q, expected %q", foo.Key(), tt.out.Key()) + } + }) + } +} + +func TestIDHex(t *testing.T) { + id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + expected := "617661206c61627300000000000000000000000000000000000000000000000000" + actual := id.Hex() + if actual != actual { + t.Fatalf("got %s, expected %s", actual, expected) + } +} + +func TestIDString(t *testing.T) { + tests := []struct { + label string + id ID + expected string + }{ + {"ID{}", ID{}, "nil"}, + {"ID{[32]byte{24}}", NewID([32]byte{24}), "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt"}, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + result := tt.id.String() + if result != tt.expected { + t.Errorf("got %q, expected %q", result, tt.expected) + } + }) + } +} + +func TestSortIDs(t *testing.T) { + ids := []ID{ + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' 
', 'l', 'a', 'b', 's'}), + } + SortIDs(ids) + expected := []ID{ + NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if !reflect.DeepEqual(ids, expected) { + t.Fatal("[]ID was not sorted lexographically") + } +} + +func TestIsSortedAndUnique(t *testing.T) { + unsorted := []ID{ + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if IsSortedAndUniqueIDs(unsorted) { + t.Fatal("Wrongly accepted unsorted IDs") + } + duplicated := []ID{ + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if IsSortedAndUniqueIDs(duplicated) { + t.Fatal("Wrongly accepted duplicated IDs") + } + sorted := []ID{ + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if !IsSortedAndUniqueIDs(sorted) { + t.Fatal("Wrongly rejected sorted, unique IDs") + } +} diff --git a/ids/queue_test.go b/ids/queue_test.go new file mode 100644 index 0000000..ab0dd4e --- /dev/null +++ b/ids/queue_test.go @@ -0,0 +1,70 @@ +package ids + +import ( + "reflect" + "testing" +) + +func TestQueueSetinit(t *testing.T) { + qs := QueueSet{} + qs.init() + if qs.idList == nil { + t.Fatal("Failed to initialize") + } + list := qs.idList + qs.init() + if list != qs.idList { + t.Fatal("Mutated an already intialized queue") + } +} + +func TestQueueSetSetHead(t *testing.T) { + qs := QueueSet{} + id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.SetHead(id) + if qs.idList == nil || id != qs.idList.Front().Value.(ID) { + t.Fatal("Failed to set head of unintilised queue") + } + + qs.SetHead(id) + if qs.idList.Len() != 1 || id != qs.idList.Front().Value.(ID) { + t.Fatal("Mutated a queue which already had the desired head") + } + + id2 := NewID([32]byte{'e', 'v', 'a', ' 
', 'l', 'a', 'b', 's'}) + qs.SetHead(id2) + if qs.idList.Len() != 1 || id2 != qs.idList.Front().Value.(ID) { + t.Fatal("Didn't replace the existing head") + } +} + +func TestQueueSetAppend(t *testing.T) { + qs := QueueSet{} + id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.Append(id) + if qs.idList == nil || id != qs.idList.Front().Value.(ID) { + t.Fatal("Failed to append to an uninitialised queue") + } + + id2 := NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.Append(id2) + if qs.idList.Len() != 2 || id2 != qs.idList.Back().Value.(ID) { + t.Fatal("Failed to append to the back of the queue") + } +} + +func TestQueueGetTail(t *testing.T) { + qs := QueueSet{} + tail := qs.GetTail() + if !reflect.DeepEqual(tail, ID{}) { + t.Fatalf("Empty queue returned %v, expected empty ID %v", tail, Empty) + } + + qs.Append(NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'})) + id2 := NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.Append(id2) + tail = qs.GetTail() + if tail != id2 { + t.Fatalf("Populated queue returned %v, expected %v", tail, id2) + } +} diff --git a/keys/keys1/genCA.sh b/keys/genCA.sh similarity index 100% rename from keys/keys1/genCA.sh rename to keys/genCA.sh diff --git a/keys/mykey/genStaker.sh b/keys/genStaker.sh similarity index 90% rename from keys/mykey/genStaker.sh rename to keys/genStaker.sh index d955767..34f6889 100755 --- a/keys/mykey/genStaker.sh +++ b/keys/genStaker.sh @@ -1,7 +1,7 @@ #!/bin/sh set -ex -keypath=$GOPATH/src/github.com/ava-labs/gecko/keys/mykey +keypath=$GOPATH/src/github.com/ava-labs/gecko/keys if test -f "$keypath/staker.key" || test -f "$keypath/staker.crt"; then echo "staker.key or staker.crt already exists. Not generating new key/certificiate." 
diff --git a/keys/keys1/genStaker.sh b/keys/keys1/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys1/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys1/rootCA.srl b/keys/keys1/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys1/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys1/staker.csr b/keys/keys1/staker.csr deleted file mode 100644 index a4d8227..0000000 --- a/keys/keys1/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDKYSRw/W0YpYH/MTQhiFrR0m89l6yTuzLpDtjudr/5RnhIPvtqk7YI -Gm/m9l29xwR4J5r7SZGs+70yBetkbS+h7PwJ2rmWDwbrdyJKvVBhqf8kSn+VU2Le -PSIcJj193LDyWhV1H4lqNkUkcAR76Fh9qjMvA2p0vJ66+eDLXlph/RYapQx9HgOj -/0BmAKMrYCyo5BhRih+Ougg8aK4G9PQTIA5G2wTWW2QkHxM/QppFjZd/XwQeJ2H6 -ubWMFc5fttf6AzpJvFIDBu/JDCKWiCu5m8t4GL8w2OrIx8Js19lF4YYE2eojCreq -gPi64S3ocqwKsDoySTw6/5iKQ5BUYwUXX3z7EXOqD8SMHefUKeczj4WvAaZLzR27 -qXm55EgRYQAIX4fhmY7NfSop3Wh0Eo62+JHoM/1g+UgOXlbnWpY95Mgd7/fwDSWL -u4IxE0/uq8VufIbfC4yrY8qlTVfAffI1ldRdvJjPJBPiQ0CNrOl60LVptpkGc9sh -H7wZ2bP0bEnYKTgLAfOzD8Ut71O2AOIa80A1GNFl4Yle/MSNJOcQOSpgtWdREzIU -oenAjfuzM4OeTr4cRg4+VYTAo9KHKriN1DuewNzGd8WjKAVHmcIMjqISLTlzMhds -dm+OmfQ6OvyX7v0GTOBbhP09NGcww5A0gCzXN18FS5oxnxe6OG9D0wIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAE7VplAZTEGHpYwXZvhlVg0qDsb/7IQj77eNteSU -33Dq6u7QLgS+Ea04Xv5BHnhnBoWRtrNR8WLTw64cuj6p/sqXiQsSNDgxNDPuPG+g -1FFi6wjgtoIJnx/QrITuUyO/MRy1awKLHlGfbY6yXSdLCC9bqLSIRm0tx+E+jo5C 
-0r5+ZOcLK8ZXWq9uHjmekX0hoN4qzsbQ0J5IeMh9ag+698aqzBSEDljLHg614yiK -FxtpD+23O0XfAdgqFgXRLLg3tt8AkVuys7r/uwHoz9du+nwW2U5nsMIYBXLV2mq3 -1KbpXDTlVwaSoA2LP8dpmvbyTgNbXsjPdS91Rrzd7fcsammcSV0aWPiXmIbTLtn8 -61ZRR0uj+jB68cRjSvegnheifsGyq6alr8OSUMdeWVyiPy2O7N6fUVj+Fmyzl5Ph -fl9UPZTmt/zOZrcSBoWjtZfmQVfw29SfMYwlNKALN4eOT6XwBLDK4uu4UXSoXwi+ -V8evUUfBWcrcXHMTIFhoZbW/b7gjhnv148XWYI0ta8pjt/akzlPLtf4ETPqfECNN -4+p2w9+R5ktzCLeceXQc8eN+ZwjIt31zG48J7Sl1wJB13VR0jPy6zDsyUIswIVfe -7gp7GHg8R0lzDpEYCvU+R7RUWK6xcpjt7+mTHM70csnnOg7uPqnXqOdtVplr0y+R -pmqJ ------END CERTIFICATE REQUEST----- diff --git a/keys/keys2/genCA.sh b/keys/keys2/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys2/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys2/genStaker.sh b/keys/keys2/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys2/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys2/rootCA.crt b/keys/keys2/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys2/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM 
-Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys2/rootCA.key b/keys/keys2/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys2/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- 
-MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl 
-KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/keys2/rootCA.srl b/keys/keys2/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys2/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys2/staker.csr b/keys/keys2/staker.csr deleted file mode 100644 index 8b7f8c9..0000000 --- a/keys/keys2/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDdToR60na6NuR9iSAUMyzPXJNMWVQbLyT5/iZCiJ3BB4YWMBhfxpJW -JiWXcM+znDgpJuyCEeh5Dp6ZY3Fe7k6Hht6FmFpDjwnjpQmdkEKUg00G+ElPTp/U -smsPL+JAswPqBZWpMBS3dsXQNunMMtMGlrf5S0l6XX4y7kc/GTxYgveWZ9JtR/m2 
-KNer+wjgBHqJ4rPqnHB30sDYPZg91Cz1Ak8Bb2w2I108zQVgKK6eIqNKXJJ/4piz -SZdU4920wMxYBpnfDAchnxei9U/v3QbT7eKUI2fGr+hOWTIWU80+VeOBt8a6P4sS -9AQh5/6G8qwmAqO3YQ9dxN82iu/H3+N+GGa/M0r5rEWrzwIuFhwKvyQcpPRBm2yQ -nBnhL9G5kN6n4OBM0KsgZ3CYlHZSg4eWcNgBt1WCFsQc7vfUFaJnr8QP3pF4V/4B -ok7wTO5HN0A1EYEVYuX53NGnrKVe+Fg9+xMOgXPWkUNqdvpI9ZbV3Z0S5866qF3/ -vBZrhgCrKc5E/vMexBRe8Ki4wKqONVhi9WGUcRHvFEikc+7VrPj0YaG6zVLd+uOA -JN81fKOPYo4X4sZrMyPYl3OjGtMhfV4KvCaLEr1duOklqO6cCvGQ8iAlLVy3VJyW -5GJ0D0KyiAir4VNdAJKo1ZgiGivJLWulTfjUifCN9o115AiqJxiqwwIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAM2IHKsQsebxTD50QQXtSNbyRzG/GpMZuZXn/QYO -QGW0ThJwtcmx6cqQvuyBovH5WhB9QUBFjiKkR7Qef7HUsgxU1cJA75gBfb2GMUru -Q+T37xOxtr6S2TcKOq/LvdJaTYmAHmW9V7vwEcrMRa9lWVTEmJIKTuxiUubpXtup -8OB8WLIvDikVtKtegvl6VCaTApCkUfuLhf7DERQ6sGLXWz6dVQcfvbfcXK2fn1Ik -Koxqy1SSz/rPb4u9NEk1yqvJQdpgnbTM3drTPHiIHCA7F6SjMu5tekHtVQkFOd6c -B0geEwyxY97zqnFv5YXiukXEaAnCRAlOuIZXRqtK6GFthTWo33YpB2KaRUtJ7IuP -og4Q/zjDs8DEc/qbbUbhyulExz6uoyRKO4j/gG3ESC6j09j7Eungt1LDhyt8p3wD -pytIIPkTseykO0CcEpEcGbES6d3u4PrFJ75XWxMkNZVK8mC3faxx2kJLfS1+4Fg8 -A0zbcN6qwm1ezGq2vGQcyVKyFVWJQAEAFuSO8sjW6dk3ClfE+MNGUvxTQMe96V14 -jGRICCp9aJrJXA3u0iQaUX0cXmlhegAYk7Ho/Ef3k/PcP8DzZ8Ck839oRHBw4pPv -tKbyiKnOcet7AFGwsiM2t5VLrj4jovhRLEiaXrCaxNe6j4xs63TEb+8uTCzKyktC -4BFq ------END CERTIFICATE REQUEST----- diff --git a/keys/keys3/genCA.sh b/keys/keys3/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys3/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys3/genStaker.sh b/keys/keys3/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys3/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname 
"$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys3/rootCA.crt b/keys/keys3/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys3/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv 
-Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys3/rootCA.key b/keys/keys3/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys3/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF 
-2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE 
KEY----- diff --git a/keys/keys3/rootCA.srl b/keys/keys3/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys3/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys3/staker.csr b/keys/keys3/staker.csr deleted file mode 100644 index 87bcd8d..0000000 --- a/keys/keys3/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQC8mVDToHbkUF2gRdVfpydZLNKeQ38d6HZFkUM3U1dWLZFSZNvagN8h -lQvY/tQu3A40p19WgKbzWZre3tg1Akw8Jztdz9gl4RMn142IIO3CiwIptkE0Jopb -ZhmG5fAC2n/MXQtfieI3hzeR04LW4JgLKzf3Nn8xZdlBgJfBmL5qUUnE7O7IbJGG -ma6gSD3ewetE6KQZtNtf0xRIv08doZKYwTl6ItkdGK76ufqq098GVwWvA1wSune4 -+MFgs9N4eFJj6Jyt85fiK/cwPx7KRdgYgBzrZQ4EPshRnwWrBTieOOaJvAA2RMxM -EYzKRrJAAsYI1zxtNyqIUaBTcxmaz+NXUGW+wHwITic0Gp/XQm2Lwr/lxIV6OnAl -L3CgbSXirSnoG+eHQ+vDzBAcRDkTAgv/GUIzlfqT2StTK02uIBgJYzvFTG4plHit -ccRfy8wxsh5Z8xG99lmPQQtLsnlQAV+Li06Cb8CH4hUVoiWiVs5QAahqWmv5fpoX -0Es26RyUHXGbjE202pyMMA7jUerUVKMijOoGZtcH6zB4p/dJ0TtToRwOgrA7NCI9 -AYVtqVXrXG/udj8ur2r1bTVwIbHsOeTEP3gY0mHRWm2E/bLjt9vbYIRUxR8xWnLk -beBziNTwg+36jdDF+6gu3cUz/nbSn8YY+Y1jjXuM3lqF8iMaAobhuwIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAEWU13T1alCni4R36J65TrGfIljW8LGhmWRo5xoV -YW7HxxZ/WTAFSwAv0yHCGq+H/tebRZhvua+c+jP16YBDoAp5neGWW57gLDg+35H5 -guLo73p/qM6hyaUGSfyO9D1nS1QX8R0r70TQYbIrVB4uQeTM2pEYR6NYO7bjPEWr -WwC6RnbtnsNGTeoH+LwiM+uY//VB/tUe1u2y6U8HkIXJo7j4+NqUL1xXmYmC6Rph -PNI3MAZUL40z1VX7fn/Vp7+rc0CBUsFMOLfLmSgL8jsQeKuyVAQKA4xzWQ2qeuGV -Bv24rHbnSxYSu8tMs31LZPn+fsvNWB9iU7MEiTUr+8nAPEAANNaBwaD1EUkzC1WC -OcCUpMgkhVuzfHq+eXWnw3cGVvEZ8A4DzOyl1ZFofxBX7IOOv0lmpDQSeEQlmKPF -LdWI2JJM76BjeXI7l5HbOmRJv1kYFwBq/biDxCSmmNX8uHvAj1EgDNXvr/pRw7rT -6yKOLtk1GSCCrrYQijCXRx2u276+j8MtC5i6FVcUoaSYD+nx2+ApOnZlYR7xsJYw -5ECaeDagjHP472FY/fuhy/UwAIFm5gCcVFs3A2M/Iyn+vsAW5WEdh+fwGiWxfw49 -Y5KRT8u7BD0R5T5UYxYwzYekEzxsoD3bvQGx/4tboSUxkOd7pVymbuGzIsQ18heI -78pG ------END 
CERTIFICATE REQUEST----- diff --git a/keys/keys4/genCA.sh b/keys/keys4/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys4/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys4/genStaker.sh b/keys/keys4/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys4/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys4/rootCA.crt b/keys/keys4/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys4/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW 
-cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys4/rootCA.key b/keys/keys4/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys4/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV 
-Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor 
-Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/keys4/rootCA.srl b/keys/keys4/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys4/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys4/staker.csr b/keys/keys4/staker.csr deleted file mode 100644 index 9d94e57..0000000 --- a/keys/keys4/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDZnDoDHE2nj82xDjH0Tb7OXMqQDHz+zbLidt6MSI1XB3vOAIEiPqrt -enGnqRbVFcm5GZxvxh4YQD8CjKSk1qgZJczs0DPSiGQ8Efl4PGO4xnEbllgL3PUR -PWp7mEV3oh6fxICgQKTBlT671EnFzB5lyJWpumRzvA1vyhBMsY8aO+xdq5LUFltY -zBdvpgLXVaDwHZQ2PQEWtF0d0JO2N0WFFDGNmx6n8pKSeIAVDsTwZCZK+FCeeEyo -GfXsINsc0yCMQslawkfOMqA9yBV3Ji6QmFYKyGYt65MWGNqPA4XrIyliKwCCXwz9 -mjaWyN7rAyw9cWlLMODNmDORWzGRZ5290MEAEIZsqjYHVitRTM/RnNIadToZGO0y -5uAkM14cmTvnsK1CP92qtfSisq75W/I91drThoEtTK78UGOl/5Q1YBR08F+tSUWZ -WyHeI6UOBUCGC2bCtmzKMl7vU25lG6mbCR1JuQi6RYpnfMjXH36lV4S7fTvSwwuR -03h2F3H1eFkWNG2lbFrW0dzDCPg3lXwmFQ65hUcQhctznoBz5C1lF2eW03wuVgxi -nnuVlJHjy/GrqmWsASn1PDuVs4k7k6DJfwyHAiA0uxXrGfxYvp7H8j4+2YOmWiWl -5xYgrEDjur5n8Zx46PHQer2Avq3sbEGEe1MCtXJlj3drd5Him3m+NQIDAQABoAAw 
-DQYJKoZIhvcNAQELBQADggIBAMdZKzx/Qz07D/ISgEe10+XofO5It86g12YJBgGN -4UEnKNk1quJIs0PAwcDNp7G4BpEMuP5xjyf4q976gzAkTg2kcB+LK85eGGSxkxAt -uFQPlFvk85qn4k7wLSx2zkqs47ItvqK5Ev8lLCZ/HfIy+7y57BKqDTvzhXarE3lq -bEZketwQvDcQPN7Ho9gxDMMQDeE2NeDyYhQtCMlX8PnmBRhWZ4CExODMdm8TrTJJ -5HDoj+fXCaSSbXPN25LKYSKOEM4wtzHa91hQK7JGoeHuSS0zFxDwXNKi3sLLtKTH -jsYL/E9bH2NxKPRoHwCJMS0N2jUqnHFyhQUp8VtJlxN0IsPLZGXFapVo4fk2hDpF -OapX0kysLV37KEklVHucQclU5SeTpzoS7gYXqvOJ3Q/IFR6CFAkCHaDggWpB/sVm -OPG6Pt6XXbGNCav9+Am+0q4UD5O1Sbobx+0XJu3VtnuZdn4Mt0uBSL1DZdG9ceig -mGz4bx1kvnzhL1LOAPmxOYqrLCwqJRkRCa+25uRNqBAqWcU48pwoxC3RLyWvy2UN -8Or+TsBzPUldq8yWn3s0/zE8yui6pxzpGUD2TfbUT78O0HJKn5nQjrjVdQZhaA4t -KnrZCz7lIrHRXf2Hbsg/9QgHhcpkknc98z0trNQHncp/kxUvrBJyJGrUh1DEkOSe -f9p0 ------END CERTIFICATE REQUEST----- diff --git a/keys/keys5/genCA.sh b/keys/keys5/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys5/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys5/genStaker.sh b/keys/keys5/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys5/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys5/rootCA.crt b/keys/keys5/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys5/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB 
-dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys5/rootCA.key 
b/keys/keys5/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys5/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl 
-jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/keys5/rootCA.srl b/keys/keys5/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys5/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys5/staker.csr b/keys/keys5/staker.csr deleted file mode 100644 index 418de02..0000000 --- a/keys/keys5/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw 
-ggIKAoICAQDgK5r5vdHtJFEgw7hGE/lzKaHcvwzr32armq0k9tYchJXfT3k1j1lX -tBAdcUN3gSRKjgzH/vjbn0ea3AiDCUd2Mck/n0KcJZ43S5I7ZjP7rbav296bKCZ1 -Hr7r5gXYFhk+3aUsVfDUqAPBwyP8KeV31ARVA/s+WPeWqs69QXTdyJuBYE5pr40v -1Sf+ebUInZ37uGY3kiO0Ex/JgcoQsGJzrWD/ztbRCFIvrdNJZd0pGvMlmTKp7XsM -R3cpvqk770//MLCdyGW/1IArTSuD1vd7mBX1JyVXKycYN0vIOtbgxPOFutUyqDOe -P7o51q4iPS3dCRgfmn/hWLwy+CtJe0BGKsb4tk0tKxo0se8v9JA8mUtnmzmMt4Y9 -jijOrCOB7XwWKmJYEm8N5Ubcy6cp2oL8vQVtzz3PXrkFt+3cFt1jrjdpQYgH4jyk -kWDeOjEfy1FCwzsNRudLTvLhfLn86/ZT4cLZ9JI7/WW0IPC8Fc7lhznJ+bIQUeEn -daGdgVkxuEg0MxdrMr0jU0IFoXySRXNRzcDWZShEjBTv7tnFxLmoNU+uJb/KpMH6 -sRYi3zs85ecaMKNyG+LDmBahUlHx5hKAH49O8855+AMhsg91ONZJldjQX0oZrIKz -K5BpsqeTl4c2Yt/fALiZaeFk1pBEsvVeMOBCIuWE+b4UIEaLAOhxfwIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAMWzSdk6C53ijuLYOt77BAYpxnxRvKA1tsxJd9K5 -+R+ZGiuTLXWArgClhhwx65OTeqrwkVlh2CZoVTBeEMzvxn6zHO4S20KcWJ1lWU76 -OIrBZrAecBVXB5LmttUkvlMgVlWLQuVpVJiEn1jW6KeABqWKCLz7Au8TzHfr1HQ4 -1ukndu3SsKVwSIy0ZHFpQaXvzA8f0V93ap9R2RVw9BXqCJDe+BtQPvlCwOrvQ7d3 -gg+3aymbqUx3hrscEvd1ETad7LyFw3QfPcr1j1FwPH+K1/UDrWxIzxmO+HM5Lh8f -269aYceysgv/xa/KpANTxVAM7j1SE1CjjI5e5CQJVZ+gtAqTIv3lLkk0dWQksObN -Z1tTtJkFAUNbGsMadtVeTmx2eBcRi4LEv0DIPyyWUQTWwTYtaMFi8I0bYPk1T/fV -9umR6jqZ0l1qdiuLYOSYUx4iI5SAmCrA/kEINOj0u2gqqkxdOgUVsuKqer4w9Iyt -qOhhOHwctRo+cIhpVwcF2ouJeNrFqoBzOgHKQxBvcDWJM8ra5GCNIvD3MP4Q63hy -b4fkBcYwb1B2ETH9nSDtfW+JLjt70rvf6IxAiXRRiOv4fPzaUlK49NRVgjzx5Iu+ -8Zq4+I+S6qZOROWsOVSpJu44VvNZd5bMB9dEHnkoGxkPjo8pkC/o0uZbxsnZScSL -WGxS ------END CERTIFICATE REQUEST----- diff --git a/keys/keys1/staker.crt b/keys/local/staker1.crt similarity index 100% rename from keys/keys1/staker.crt rename to keys/local/staker1.crt diff --git a/keys/keys1/staker.key b/keys/local/staker1.key similarity index 100% rename from keys/keys1/staker.key rename to keys/local/staker1.key diff --git a/keys/keys2/staker.crt b/keys/local/staker2.crt similarity index 100% rename from keys/keys2/staker.crt rename to keys/local/staker2.crt diff --git a/keys/keys2/staker.key b/keys/local/staker2.key 
similarity index 100% rename from keys/keys2/staker.key rename to keys/local/staker2.key diff --git a/keys/keys3/staker.crt b/keys/local/staker3.crt similarity index 100% rename from keys/keys3/staker.crt rename to keys/local/staker3.crt diff --git a/keys/keys3/staker.key b/keys/local/staker3.key similarity index 100% rename from keys/keys3/staker.key rename to keys/local/staker3.key diff --git a/keys/keys4/staker.crt b/keys/local/staker4.crt similarity index 100% rename from keys/keys4/staker.crt rename to keys/local/staker4.crt diff --git a/keys/keys4/staker.key b/keys/local/staker4.key similarity index 100% rename from keys/keys4/staker.key rename to keys/local/staker4.key diff --git a/keys/keys5/staker.crt b/keys/local/staker5.crt similarity index 100% rename from keys/keys5/staker.crt rename to keys/local/staker5.crt diff --git a/keys/keys5/staker.key b/keys/local/staker5.key similarity index 100% rename from keys/keys5/staker.key rename to keys/local/staker5.key diff --git a/keys/mykey/genCA.sh b/keys/mykey/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/mykey/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/mykey/rootCA.crt b/keys/mykey/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/mykey/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw 
-DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/mykey/rootCA.key b/keys/mykey/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/mykey/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ 
-mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C 
-cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/mykey/rootCA.srl b/keys/mykey/rootCA.srl deleted file mode 100644 index de0f603..0000000 --- a/keys/mykey/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D164 diff --git a/keys/keys1/rootCA.crt b/keys/rootCA.crt similarity index 100% rename from keys/keys1/rootCA.crt rename to keys/rootCA.crt diff --git a/keys/keys1/rootCA.key b/keys/rootCA.key similarity index 100% rename from keys/keys1/rootCA.key rename to keys/rootCA.key diff --git a/keys/rootCA.srl b/keys/rootCA.srl new file mode 100644 index 0000000..617b916 --- /dev/null +++ b/keys/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D166 diff --git a/main/params.go b/main/params.go index 88a3722..d742ec3 100644 --- a/main/params.go +++ b/main/params.go @@ -8,6 +8,7 @@ import ( "flag" "fmt" "net" + "os" "path" "strings" @@ -32,6 +33,22 @@ var ( Err error ) +// GetIPs returns 
the default IPs for each network +func GetIPs(networkID uint32) []string { + switch networkID { + case genesis.CascadeID: + return []string{ + "3.227.207.132:21001", + "34.207.133.167:21001", + "107.23.241.199:21001", + "54.197.215.186:21001", + "18.234.153.22:21001", + } + default: + return nil + } +} + var ( errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs") ) @@ -44,72 +61,81 @@ func init() { loggingConfig, err := logging.DefaultConfig() errs.Add(err) + fs := flag.NewFlagSet("gecko", flag.ContinueOnError) + // NetworkID: - networkName := flag.String("network-id", genesis.LocalName, "Network ID this node will connect to") + networkName := fs.String("network-id", genesis.CascadeName, "Network ID this node will connect to") // Ava fees: - flag.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") + fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") // Assertions: - flag.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") + fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") // Crypto: - flag.BoolVar(&Config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") + fs.BoolVar(&Config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") // Database: - db := flag.Bool("db-enabled", true, "Turn on persistent storage") - dbDir := flag.String("db-dir", "db", "Database directory for Ava state") + db := fs.Bool("db-enabled", true, "Turn on persistent storage") + dbDir := fs.String("db-dir", "db", "Database directory for Ava state") // IP: - consensusIP := flag.String("public-ip", "", "Public IP of this node") + consensusIP := fs.String("public-ip", "", "Public IP of this node") // HTTP Server: - httpPort := flag.Uint("http-port", 9650, "Port of the HTTP server") - flag.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the 
HTTP server to HTTPs") - flag.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server") - flag.StringVar(&Config.HTTPSCertFile, "http-tls-cert-file", "", "TLS certificate file for the HTTPs server") + httpPort := fs.Uint("http-port", 9650, "Port of the HTTP server") + fs.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs") + fs.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server") + fs.StringVar(&Config.HTTPSCertFile, "http-tls-cert-file", "", "TLS certificate file for the HTTPs server") // Bootstrapping: - bootstrapIPs := flag.String("bootstrap-ips", "", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") - bootstrapIDs := flag.String("bootstrap-ids", "", "Comma separated list of bootstrap peer ids to connect to. Example: JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + bootstrapIPs := fs.String("bootstrap-ips", "default", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") + bootstrapIDs := fs.String("bootstrap-ids", "default", "Comma separated list of bootstrap peer ids to connect to. 
Example: JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") // Staking: - consensusPort := flag.Uint("staking-port", 9651, "Port of the consensus server") - flag.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections") - flag.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "", "TLS private key file for staking connections") - flag.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "", "TLS certificate file for staking connections") + consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server") + fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections") + fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "keys/staker.key", "TLS private key file for staking connections") + fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "keys/staker.crt", "TLS certificate file for staking connections") // Logging: - logsDir := flag.String("log-dir", "", "Logging directory for Ava") - logLevel := flag.String("log-level", "info", "The log level. Should be one of {verbo, debug, info, warn, error, fatal, off}") - logDisplayLevel := flag.String("log-display-level", "", "The log display level. If left blank, will inherit the value of log-level. Otherwise, should be one of {verbo, debug, info, warn, error, fatal, off}") + logsDir := fs.String("log-dir", "", "Logging directory for Ava") + logLevel := fs.String("log-level", "info", "The log level. Should be one of {verbo, debug, info, warn, error, fatal, off}") + logDisplayLevel := fs.String("log-display-level", "", "The log display level. If left blank, will inherit the value of log-level. 
Otherwise, should be one of {verbo, debug, info, warn, error, fatal, off}") - flag.IntVar(&Config.ConsensusParams.K, "snow-sample-size", 20, "Number of nodes to query for each network poll") - flag.IntVar(&Config.ConsensusParams.Alpha, "snow-quorum-size", 18, "Alpha value to use for required number positive results") - flag.IntVar(&Config.ConsensusParams.BetaVirtuous, "snow-virtuous-commit-threshold", 20, "Beta value to use for virtuous transactions") - flag.IntVar(&Config.ConsensusParams.BetaRogue, "snow-rogue-commit-threshold", 30, "Beta value to use for rogue transactions") - flag.IntVar(&Config.ConsensusParams.Parents, "snow-avalanche-num-parents", 5, "Number of vertexes for reference from each new vertex") - flag.IntVar(&Config.ConsensusParams.BatchSize, "snow-avalanche-batch-size", 30, "Number of operations to batch in each new vertex") + fs.IntVar(&Config.ConsensusParams.K, "snow-sample-size", 5, "Number of nodes to query for each network poll") + fs.IntVar(&Config.ConsensusParams.Alpha, "snow-quorum-size", 4, "Alpha value to use for required number positive results") + fs.IntVar(&Config.ConsensusParams.BetaVirtuous, "snow-virtuous-commit-threshold", 20, "Beta value to use for virtuous transactions") + fs.IntVar(&Config.ConsensusParams.BetaRogue, "snow-rogue-commit-threshold", 30, "Beta value to use for rogue transactions") + fs.IntVar(&Config.ConsensusParams.Parents, "snow-avalanche-num-parents", 5, "Number of vertexes for reference from each new vertex") + fs.IntVar(&Config.ConsensusParams.BatchSize, "snow-avalanche-batch-size", 30, "Number of operations to batch in each new vertex") + fs.IntVar(&Config.ConsensusParams.ConcurrentRepolls, "snow-concurrent-repolls", 1, "Minimum number of concurrent polls for finalizing consensus") // Enable/Disable APIs: - flag.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") - flag.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node 
exposes the Keystore API") - flag.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") - flag.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") + fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") + fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") + fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") + fs.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") // Throughput Server - throughputPort := flag.Uint("xput-server-port", 9652, "Port of the deprecated throughput test server") - flag.BoolVar(&Config.ThroughputServerEnabled, "xput-server-enabled", false, "If true, throughput test server is created") + throughputPort := fs.Uint("xput-server-port", 9652, "Port of the deprecated throughput test server") + fs.BoolVar(&Config.ThroughputServerEnabled, "xput-server-enabled", false, "If true, throughput test server is created") - flag.Parse() + ferr := fs.Parse(os.Args[1:]) + + if ferr == flag.ErrHelp { + // display usage/help text and exit successfully + os.Exit(0) + } + + if ferr != nil { + // other type of error occurred when parsing args + os.Exit(2) + } networkID, err := genesis.NetworkID(*networkName) errs.Add(err) - if networkID != genesis.LocalID { - errs.Add(fmt.Errorf("the only supported networkID is: %s", genesis.LocalName)) - } - Config.NetworkID = networkID // DB: @@ -143,6 +169,9 @@ func init() { } // Bootstrapping: + if *bootstrapIPs == "default" { + *bootstrapIPs = strings.Join(GetIPs(networkID), ",") + } for _, ip := range strings.Split(*bootstrapIPs, ",") { if ip != "" { addr, err := utils.ToIPDesc(ip) @@ -152,6 +181,14 @@ func init() { }) } } + + if *bootstrapIDs == "default" { + if *bootstrapIPs == "" { + *bootstrapIDs = "" + } else { + 
*bootstrapIDs = strings.Join(genesis.GetConfig(networkID).StakerIDs, ",") + } + } if Config.EnableStaking { i := 0 cb58 := formatting.CB58{} diff --git a/networking/handshake_handlers.go b/networking/handshake_handlers.go index 83d1348..e037781 100644 --- a/networking/handshake_handlers.go +++ b/networking/handshake_handlers.go @@ -54,6 +54,13 @@ Periodically gossip peerlists. stakers should be in the set). */ +/* +Attempt reconnections + - If a non-staker disconnects, delete the connection + - If a staker disconnects, attempt to reconnect to the node for awhile. If the + node isn't connected to after awhile delete the connection. +*/ + const ( // CurrentVersion this avalanche instance is executing. CurrentVersion = "avalanche/0.0.1" @@ -70,6 +77,9 @@ const ( // GetVersionTimeout is the amount of time to wait before sending a // getVersion message to a partially connected peer GetVersionTimeout = 2 * time.Second + // ReconnectTimeout is the amount of time to wait to reconnect to a staker + // before giving up + ReconnectTimeout = 1 * time.Minute ) // Manager is the struct that will be accessed on event calls @@ -100,6 +110,7 @@ type Handshake struct { connections AddrCert // Connections that I think are connected versionTimeout timer.TimeoutManager + reconnectTimeout timer.TimeoutManager peerListGossiper *timer.Repeater awaitingLock sync.Mutex @@ -143,6 +154,10 @@ func (nm *Handshake) Initialize( nm.versionTimeout.Initialize(GetVersionTimeout) go nm.log.RecoverAndPanic(nm.versionTimeout.Dispatch) + + nm.reconnectTimeout.Initialize(ReconnectTimeout) + go nm.log.RecoverAndPanic(nm.reconnectTimeout.Dispatch) + nm.peerListGossiper = timer.NewRepeater(nm.gossipPeerList, PeerListGossipSpacing) go nm.log.RecoverAndPanic(nm.peerListGossiper.Dispatch) } @@ -290,6 +305,73 @@ func checkPeerCertificate(_ *C.struct_msgnetwork_conn_t, connected C.bool, _ uns return connected } +func (nm *Handshake) connectedToPeer(conn *C.struct_peernetwork_conn_t, addr salticidae.NetAddr) { + 
ip := toIPDesc(addr) + // If we're enforcing staking, use a peer's certificate to uniquely identify them + // Otherwise, use a hash of their ip to identify them + cert := ids.ShortID{} + ipCert := toShortID(ip) + if nm.enableStaking { + cert = getPeerCert(conn) + } else { + cert = ipCert + } + + nm.log.Debug("Connected to %s", ip) + + longCert := cert.LongID() + nm.reconnectTimeout.Remove(longCert) + nm.reconnectTimeout.Remove(ipCert.LongID()) + + nm.pending.Add(addr, cert) + + handler := new(func()) + *handler = func() { + if nm.pending.ContainsIP(addr) { + nm.SendGetVersion(addr) + nm.versionTimeout.Put(longCert, *handler) + } + } + (*handler)() +} + +func (nm *Handshake) disconnectedFromPeer(addr salticidae.NetAddr) { + cert := ids.ShortID{} + if pendingCert, exists := nm.pending.GetID(addr); exists { + cert = pendingCert + } else if connectedCert, exists := nm.connections.GetID(addr); exists { + cert = connectedCert + } else { + return + } + + nm.log.Info("Disconnected from %s", toIPDesc(addr)) + + longCert := cert.LongID() + if nm.vdrs.Contains(cert) { + nm.reconnectTimeout.Put(longCert, func() { + nm.net.DelPeer(addr) + }) + } else { + nm.net.DelPeer(addr) + } + nm.versionTimeout.Remove(longCert) + + if !nm.enableStaking { + nm.vdrs.Remove(cert) + } + + nm.pending.RemoveIP(addr) + nm.connections.RemoveIP(addr) + nm.numPeers.Set(float64(nm.connections.Len())) + + nm.awaitingLock.Lock() + defer nm.awaitingLock.Unlock() + for _, awaiting := range HandshakeNet.awaiting { + awaiting.Remove(cert) + } +} + // peerHandler notifies a change to the set of connected peers // connected is true if a new peer is connected // connected is false if a formerly connected peer has disconnected @@ -298,68 +380,30 @@ func peerHandler(_conn *C.struct_peernetwork_conn_t, connected C.bool, _ unsafe. 
pConn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn)) addr := pConn.GetPeerAddr(true) - ip := toIPDesc(addr) - if !connected { - if !HandshakeNet.enableStaking { - cert := toShortID(ip) - HandshakeNet.vdrs.Remove(cert) - } - - cert := ids.ShortID{} - if pendingCert, exists := HandshakeNet.pending.GetID(addr); exists { - cert = pendingCert - } else if connectedCert, exists := HandshakeNet.connections.GetID(addr); exists { - cert = connectedCert - } else { - return - } - - HandshakeNet.pending.RemoveIP(addr) - HandshakeNet.connections.RemoveIP(addr) - - HandshakeNet.numPeers.Set(float64(HandshakeNet.connections.Len())) - - HandshakeNet.log.Warn("Disconnected from %s", ip) - - HandshakeNet.awaitingLock.Lock() - defer HandshakeNet.awaitingLock.Unlock() - - for _, awaiting := range HandshakeNet.awaiting { - awaiting.Remove(cert) - } - - return - } - - HandshakeNet.log.Debug("Connected to %s", ip) - - // If we're enforcing staking, use a peer's certificate to uniquely identify them - // Otherwise, use a hash of their ip to identify them - cert := ids.ShortID{} - if HandshakeNet.enableStaking { - cert = getPeerCert(_conn) + if connected { + HandshakeNet.connectedToPeer(_conn, addr) } else { - cert = toShortID(ip) + HandshakeNet.disconnectedFromPeer(addr) } - HandshakeNet.pending.Add(addr, cert) - - certID := cert.LongID() - handler := new(func()) - *handler = func() { - if HandshakeNet.pending.ContainsIP(addr) { - HandshakeNet.SendGetVersion(addr) - HandshakeNet.versionTimeout.Put(certID, *handler) - } - } - (*handler)() } // unknownPeerHandler notifies of an unknown peer connection attempt //export unknownPeerHandler func unknownPeerHandler(_addr *C.netaddr_t, _cert *C.x509_t, _ unsafe.Pointer) { - addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr)) + addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr)).Copy(true) ip := toIPDesc(addr) HandshakeNet.log.Info("Adding peer %s", ip) + + cert := ids.ShortID{} + if 
HandshakeNet.enableStaking { + cert = getCert(salticidae.X509FromC(salticidae.CX509(_cert))) + } else { + cert = toShortID(ip) + } + + HandshakeNet.reconnectTimeout.Put(cert.LongID(), func() { + HandshakeNet.net.DelPeer(addr) + }) HandshakeNet.net.AddPeer(addr) } @@ -522,16 +566,20 @@ func peerList(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe. cErr := salticidae.NewError() for _, ip := range ips { HandshakeNet.log.Verbo("Trying to adding peer %s", ip) - addr := salticidae.NewNetAddrFromIPPortString(ip.String(), false, &cErr) + addr := salticidae.NewNetAddrFromIPPortString(ip.String(), true, &cErr) if cErr.GetCode() == 0 && !HandshakeNet.myAddr.IsEq(addr) { // Make sure not to connect to myself ip := toIPDesc(addr) + ipCert := toShortID(ip) if !HandshakeNet.pending.ContainsIP(addr) && !HandshakeNet.connections.ContainsIP(addr) { HandshakeNet.log.Debug("Adding peer %s", ip) + + HandshakeNet.reconnectTimeout.Put(ipCert.LongID(), func() { + HandshakeNet.net.DelPeer(addr) + }) HandshakeNet.net.AddPeer(addr) } } - addr.Free() } } diff --git a/networking/voting_handlers.go b/networking/voting_handlers.go index ecc7402..4c7be62 100644 --- a/networking/voting_handlers.go +++ b/networking/voting_handlers.go @@ -366,9 +366,9 @@ func (s *Voting) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID vID := validatorID if addr, exists := s.conns.GetIP(vID); exists { addrs = append(addrs, addr) - s.log.Verbo("Sending a PushQuery to %s", toIPDesc(addr)) + s.log.Verbo("Sending a PullQuery to %s", toIPDesc(addr)) } else { - s.log.Warn("Attempted to send a PushQuery message to a disconnected validator: %s", vID) + s.log.Warn("Attempted to send a PullQuery message to a disconnected validator: %s", vID) s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) }) } } diff --git a/node/node.go b/node/node.go index 3c53fd5..caded55 100644 --- a/node/node.go +++ b/node/node.go @@ -25,6 +25,7 @@ import ( "github.com/ava-labs/gecko/api/keystore" 
"github.com/ava-labs/gecko/api/metrics" "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/genesis" @@ -35,10 +36,13 @@ import ( "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms" "github.com/ava-labs/gecko/vms/avm" "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/nftfx" "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/propertyfx" "github.com/ava-labs/gecko/vms/secp256k1fx" "github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/vms/spdagvm" @@ -49,6 +53,10 @@ const ( maxMessageSize = 1 << 25 // maximum size of a message sent with salticidae ) +var ( + genesisHashKey = []byte("genesisID") +) + // MainNode is the reference for node callbacks var MainNode = Node{} @@ -68,6 +76,9 @@ type Node struct { // Handles calls to Keystore API keystoreServer keystore.Keystore + // Manages shared memory + sharedMemory atomic.SharedMemory + // Manages creation of blockchains and routing messages to them chainManager chains.Manager @@ -285,7 +296,38 @@ func (n *Node) Dispatch() { n.EC.Dispatch() } ****************************************************************************** */ -func (n *Node) initDatabase() { n.DB = n.Config.DB } +func (n *Node) initDatabase() error { + n.DB = n.Config.DB + + expectedGenesis, err := genesis.Genesis(n.Config.NetworkID) + if err != nil { + return err + } + rawExpectedGenesisHash := hashing.ComputeHash256(expectedGenesis) + + rawGenesisHash, err := n.DB.Get(genesisHashKey) + if err == database.ErrNotFound { + rawGenesisHash = rawExpectedGenesisHash + err = n.DB.Put(genesisHashKey, rawGenesisHash) + } + if err != nil { + return err + } + + genesisHash, err := ids.ToID(rawGenesisHash) + if err != nil { + 
return err + } + expectedGenesisHash, err := ids.ToID(rawExpectedGenesisHash) + if err != nil { + return err + } + + if !genesisHash.Equals(expectedGenesisHash) { + return fmt.Errorf("db contains invalid genesis hash. DB Genesis: %s Generated Genesis: %s", genesisHash, expectedGenesisHash) + } + return nil +} // Initialize this node's ID // If staking is disabled, a node's ID is a hash of its IP @@ -320,14 +362,29 @@ func (n *Node) initNodeID() error { // AVM, EVM, Simple Payments DAG, Simple Payments Chain // The Platform VM is registered in initStaking because // its factory needs to reference n.chainManager, which is nil right now -func (n *Node) initVMManager() { +func (n *Node) initVMManager() error { + avaAssetID, err := genesis.AVAAssetID(n.Config.NetworkID) + if err != nil { + return err + } + n.vmManager = vms.NewManager(&n.APIServer, n.HTTPLog) - n.vmManager.RegisterVMFactory(avm.ID, &avm.Factory{}) - n.vmManager.RegisterVMFactory(evm.ID, &evm.Factory{}) - n.vmManager.RegisterVMFactory(spdagvm.ID, &spdagvm.Factory{TxFee: n.Config.AvaTxFee}) - n.vmManager.RegisterVMFactory(spchainvm.ID, &spchainvm.Factory{}) - n.vmManager.RegisterVMFactory(secp256k1fx.ID, &secp256k1fx.Factory{}) - n.vmManager.RegisterVMFactory(timestampvm.ID, ×tampvm.Factory{}) + + errs := wrappers.Errs{} + errs.Add( + n.vmManager.RegisterVMFactory(avm.ID, &avm.Factory{ + AVA: avaAssetID, + Platform: ids.Empty, + }), + n.vmManager.RegisterVMFactory(evm.ID, &evm.Factory{}), + n.vmManager.RegisterVMFactory(spdagvm.ID, &spdagvm.Factory{TxFee: n.Config.AvaTxFee}), + n.vmManager.RegisterVMFactory(spchainvm.ID, &spchainvm.Factory{}), + n.vmManager.RegisterVMFactory(timestampvm.ID, ×tampvm.Factory{}), + n.vmManager.RegisterVMFactory(secp256k1fx.ID, &secp256k1fx.Factory{}), + n.vmManager.RegisterVMFactory(nftfx.ID, &nftfx.Factory{}), + n.vmManager.RegisterVMFactory(propertyfx.ID, &propertyfx.Factory{}), + ) + return errs.Err } // Create the EventDispatcher used for hooking events @@ -343,38 
+400,64 @@ func (n *Node) initEventDispatcher() { // Initializes the Platform chain. // Its genesis data specifies the other chains that should // be created. -func (n *Node) initChains() { +func (n *Node) initChains() error { n.Log.Info("initializing chains") vdrs := n.vdrs + + // If staking is disabled, ignore updates to Subnets' validator sets + // Instead of updating node's validator manager, platform chain makes changes + // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { defaultSubnetValidators := validators.NewSet() + defaultSubnetValidators.Add(validators.NewValidator(n.ID, 1)) vdrs = validators.NewManager() vdrs.PutValidatorSet(platformvm.DefaultSubnetID, defaultSubnetValidators) } - n.vmManager.RegisterVMFactory( + avaAssetID, err := genesis.AVAAssetID(n.Config.NetworkID) + if err != nil { + return err + } + createAVMTx, err := genesis.VMGenesis(n.Config.NetworkID, avm.ID) + if err != nil { + return err + } + + err = n.vmManager.RegisterVMFactory( /*vmID=*/ platformvm.ID, /*vmFactory=*/ &platformvm.Factory{ - ChainManager: n.chainManager, - Validators: vdrs, + ChainManager: n.chainManager, + Validators: vdrs, + StakingEnabled: n.Config.EnableStaking, + AVA: avaAssetID, + AVM: createAVMTx.ID(), }, ) + if err != nil { + return err + } beacons := validators.NewSet() for _, peer := range n.Config.BootstrapPeers { beacons.Add(validators.NewValidator(peer.ID, 1)) } - genesisBytes := genesis.Genesis(n.Config.NetworkID) + genesisBytes, err := genesis.Genesis(n.Config.NetworkID) + if err != nil { + return err + } // Create the Platform Chain n.chainManager.ForceCreateChain(chains.ChainParameters{ ID: ids.Empty, + SubnetID: platformvm.DefaultSubnetID, GenesisData: genesisBytes, // Specifies other chains to create VMAlias: platformvm.ID.String(), CustomBeacons: beacons, }) + + return nil } // initAPIServer initializes the server that handles HTTP calls @@ -400,6 +483,7 @@ func (n *Node) initAPIServer() { // Assumes 
n.DB, n.vdrs all initialized (non-nil) func (n *Node) initChainManager() { n.chainManager = chains.New( + n.Config.EnableStaking, n.Log, n.LogFactory, n.vmManager, @@ -415,12 +499,20 @@ func (n *Node) initChainManager() { n.ValidatorAPI, &n.APIServer, &n.keystoreServer, + &n.sharedMemory, ) n.chainManager.AddRegistrant(&n.APIServer) } -// initWallet initializes the Wallet service +// initSharedMemory initializes the shared memory for cross chain interation +func (n *Node) initSharedMemory() { + n.Log.Info("initializing SharedMemory") + sharedMemoryDB := prefixdb.New([]byte("shared memory"), n.DB) + n.sharedMemory.Initialize(n.Log, sharedMemoryDB) +} + +// initKeystoreAPI initializes the keystore service // Assumes n.APIServer is already set func (n *Node) initKeystoreAPI() { n.Log.Info("initializing Keystore API") @@ -464,24 +556,35 @@ func (n *Node) initIPCAPI() { } // Give chains and VMs aliases as specified by the genesis information -func (n *Node) initAliases() { +func (n *Node) initAliases() error { n.Log.Info("initializing aliases") - defaultAliases, chainAliases, vmAliases := genesis.Aliases(n.Config.NetworkID) + defaultAliases, chainAliases, vmAliases, err := genesis.Aliases(n.Config.NetworkID) + if err != nil { + return err + } + for chainIDKey, aliases := range chainAliases { chainID := ids.NewID(chainIDKey) for _, alias := range aliases { - n.Log.AssertNoError(n.chainManager.Alias(chainID, alias)) + if err := n.chainManager.Alias(chainID, alias); err != nil { + return err + } } } for vmIDKey, aliases := range vmAliases { vmID := ids.NewID(vmIDKey) for _, alias := range aliases { - n.Log.AssertNoError(n.vmManager.Alias(vmID, alias)) + if err := n.vmManager.Alias(vmID, alias); err != nil { + return err + } } } for url, aliases := range defaultAliases { - n.APIServer.AddAliases(url, aliases...) 
+ if err := n.APIServer.AddAliases(url, aliases...); err != nil { + return err + } } + return nil } // Initialize this node @@ -496,12 +599,17 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg } n.HTTPLog = httpLog - n.initDatabase() // Set up the node's database + if err := n.initDatabase(); err != nil { // Set up the node's database + return fmt.Errorf("problem initializing database: %w", err) + } if err = n.initNodeID(); err != nil { // Derive this node's ID return fmt.Errorf("problem initializing staker ID: %w", err) } + // initialize shared memory + n.initSharedMemory() + // Start HTTP APIs n.initAPIServer() // Start the API Server n.initKeystoreAPI() // Start the Keystore API @@ -511,8 +619,13 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg if err = n.initNetlib(); err != nil { // Set up all networking return fmt.Errorf("problem initializing networking: %w", err) } - n.initValidatorNet() // Set up the validator handshake + authentication - n.initVMManager() // Set up the vm manager + if err := n.initValidatorNet(); err != nil { // Set up the validator handshake + authentication + return fmt.Errorf("problem initializing validator network: %w", err) + } + if err := n.initVMManager(); err != nil { // Set up the vm manager + return fmt.Errorf("problem initializing the VM manager: %w", err) + } + n.initEventDispatcher() // Set up the event dipatcher n.initChainManager() // Set up the chain manager n.initConsensusNet() // Set up the main consensus network @@ -524,10 +637,11 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg n.initAdminAPI() // Start the Admin API n.initIPCAPI() // Start the IPC API - n.initAliases() // Set up aliases - n.initChains() // Start the Platform chain - return nil + if err := n.initAliases(); err != nil { // Set up aliases + return err + } + return n.initChains() // Start the Platform chain } // Shutdown this node diff --git 
a/scripts/ansible/inventory.yml b/scripts/ansible/inventory.yml index 1841bd9..5315082 100755 --- a/scripts/ansible/inventory.yml +++ b/scripts/ansible/inventory.yml @@ -2,8 +2,6 @@ borealis_bootstrap: hosts: bootstrap1: ansible_host: 3.227.207.132 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys1/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys1/staker.crt" http_tls_enabled: true http_tls_key_file: "/home/ubuntu/ssl/privkey.pem" http_tls_cert_file: "/home/ubuntu/ssl/fullchain.pem" @@ -11,7 +9,7 @@ borealis_bootstrap: ansible_connection: ssh ansible_user: ubuntu - network_id: "borealis" + network_id: "cascade" api_admin_enabled: true api_keystore_enabled: true api_metrics_enabled: true @@ -28,6 +26,8 @@ borealis_bootstrap: bootstrap_ids: "" staking_port: 21001 staking_tls_enabled: true + staking_tls_key_file: "/home/ubuntu/keys/staker.key" + staking_tls_cert_file: "/home/ubuntu/keys/staker.crt" log_dir: "/home/ubuntu/.gecko" log_level: debug snow_sample_size: 3 @@ -44,25 +44,17 @@ borealis_node: hosts: node1: ansible_host: 34.207.133.167 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys2/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys2/staker.crt" node2: ansible_host: 107.23.241.199 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys3/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys3/staker.crt" node3: ansible_host: 54.197.215.186 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys4/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys4/staker.crt" node4: ansible_host: 18.234.153.22 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys5/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys5/staker.crt" vars: 
ansible_connection: ssh ansible_user: ubuntu - network_id: "borealis" + network_id: "cascade" api_admin_enabled: true api_keystore_enabled: true api_metrics_enabled: true @@ -76,9 +68,11 @@ borealis_node: http_tls_key_file: "" http_tls_cert_file: "" bootstrap_ips: "3.227.207.132:21001" - bootstrap_ids: "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg" + bootstrap_ids: "NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co" staking_port: 21001 staking_tls_enabled: true + staking_tls_key_file: "/home/ubuntu/keys/staker.key" + staking_tls_cert_file: "/home/ubuntu/keys/staker.crt" log_dir: "/home/ubuntu/.gecko" log_level: debug snow_sample_size: 3 diff --git a/scripts/ansible/restart_playbook.yml b/scripts/ansible/restart_playbook.yml index 97b8533..48d44fb 100755 --- a/scripts/ansible/restart_playbook.yml +++ b/scripts/ansible/restart_playbook.yml @@ -8,6 +8,7 @@ ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava repo_folder: ~/go/src/github.com/ava-labs/gecko repo_name: ava-labs/gecko + repo_branch: cascade tasks: - name: Kill Node command: killall ava @@ -15,6 +16,7 @@ - git: repo: ssh://git@github.com/{{ repo_name }}.git dest: "{{ repo_folder }}" + version: "{{ repo_branch }}" update: yes - name: Build project command: ./scripts/build.sh diff --git a/scripts/ansible/update_playbook.yml b/scripts/ansible/update_playbook.yml index ad9d314..b28def3 100755 --- a/scripts/ansible/update_playbook.yml +++ b/scripts/ansible/update_playbook.yml @@ -8,6 +8,7 @@ ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava repo_folder: ~/go/src/github.com/ava-labs/gecko repo_name: ava-labs/gecko + repo_branch: cascade tasks: - name: Kill Node command: killall ava @@ -15,6 +16,7 @@ - git: repo: ssh://git@github.com/{{ repo_name }}.git dest: "{{ repo_folder }}" + version: "{{ repo_branch }}" update: yes - name: Build project command: ./scripts/build.sh diff --git a/scripts/aws/create.py b/scripts/aws/create.py old mode 100644 new mode 100755 index ab7a6d7..07d75e7 --- a/scripts/aws/create.py +++ 
b/scripts/aws/create.py @@ -1,17 +1,16 @@ -import sys +#!/usr/bin/env python3 +""" +Start a number of AVA nodes on Amazon EC2 +""" + import boto3 -ec2 = boto3.client("ec2") - -# Should be called with python3 aws_create.py $numBootstraps $numNodes -numBootstraps = int(sys.argv[1]) -numNodes = int(sys.argv[2]) bootstapNode = "Borealis-Bootstrap" fullNode = "Borealis-Node" -def runInstances(num: int, name: str): +def runInstances(ec2, num: int, name: str): if num > 0: ec2.run_instances( ImageId="ami-0badd1c10cb7673e9", @@ -28,8 +27,18 @@ def runInstances(num: int, name: str): def main(): - runInstances(numBootstraps, bootstapNode) - runInstances(numNodes, fullNode) + import argparse + + parser = argparse.ArgumentParser( + description=__doc__, + ) + parser.add_argument('numBootstraps', type=int) + parser.add_argument('numNodes', type=int) + args = parser.parse_args() + + ec2 = boto3.client("ec2") + runInstances(ec2, args.numBootstraps, bootstapNode) + runInstances(ec2, args.numNodes, fullNode) if __name__ == "__main__": diff --git a/snow/consensus/avalanche/parameters_test.go b/snow/consensus/avalanche/parameters_test.go index aa28f98..8935cf5 100644 --- a/snow/consensus/avalanche/parameters_test.go +++ b/snow/consensus/avalanche/parameters_test.go @@ -12,10 +12,11 @@ import ( func TestParametersValid(t *testing.T) { p := Parameters{ Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -29,10 +30,11 @@ func TestParametersValid(t *testing.T) { func TestParametersInvalidParents(t *testing.T) { p := Parameters{ Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 1, BatchSize: 1, @@ -46,10 +48,11 @@ func TestParametersInvalidParents(t *testing.T) { func TestParametersInvalidBatchSize(t *testing.T) { p := 
Parameters{ Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 0, diff --git a/snow/consensus/avalanche/topological_test.go b/snow/consensus/avalanche/topological_test.go index f43ee5b..0ad3b0a 100644 --- a/snow/consensus/avalanche/topological_test.go +++ b/snow/consensus/avalanche/topological_test.go @@ -27,11 +27,12 @@ func TestTopologicalTxIssued(t *testing.T) { TxIssuedTest(t, TopologicalFactory{ func TestAvalancheVoting(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -106,11 +107,12 @@ func TestAvalancheVoting(t *testing.T) { func TestAvalancheTransitiveVoting(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -199,11 +201,12 @@ func TestAvalancheTransitiveVoting(t *testing.T) { func TestAvalancheSplitVoting(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -262,11 +265,12 @@ func TestAvalancheSplitVoting(t *testing.T) { func TestAvalancheTransitiveRejection(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: 
prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -363,11 +367,12 @@ func TestAvalancheTransitiveRejection(t *testing.T) { func TestAvalancheVirtuous(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -484,11 +489,12 @@ func TestAvalancheVirtuous(t *testing.T) { func TestAvalancheIsVirtuous(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -567,11 +573,12 @@ func TestAvalancheIsVirtuous(t *testing.T) { func TestAvalancheQuiesce(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -660,11 +667,12 @@ func TestAvalancheQuiesce(t *testing.T) { func TestAvalancheOrphans(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: math.MaxInt32, - BetaRogue: math.MaxInt32, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: math.MaxInt32, + BetaRogue: math.MaxInt32, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, diff --git a/snow/consensus/snowball/binary_slush.go b/snow/consensus/snowball/binary_slush.go new file mode 100644 index 0000000..84e4cd3 --- /dev/null +++ b/snow/consensus/snowball/binary_slush.go @@ 
-0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" +) + +// binarySlush is the implementation of a binary slush instance +type binarySlush struct { + // preference is the choice that last had a successful poll. Unless there + // hasn't been a successful poll, in which case it is the initially provided + // choice. + preference int +} + +// Initialize implements the BinarySlush interface +func (sl *binarySlush) Initialize(choice int) { sl.preference = choice } + +// Preference implements the BinarySlush interface +func (sl *binarySlush) Preference() int { return sl.preference } + +// RecordSuccessfulPoll implements the BinarySlush interface +func (sl *binarySlush) RecordSuccessfulPoll(choice int) { sl.preference = choice } + +func (sl *binarySlush) String() string { return fmt.Sprintf("SL(Preference = %d)", sl.preference) } diff --git a/snow/consensus/snowball/binary_snowball.go b/snow/consensus/snowball/binary_snowball.go index f755a6b..f41046c 100644 --- a/snow/consensus/snowball/binary_snowball.go +++ b/snow/consensus/snowball/binary_snowball.go @@ -9,6 +9,9 @@ import ( // binarySnowball is the implementation of a binary snowball instance type binarySnowball struct { + // wrap the binary snowflake logic + binarySnowflake + // preference is the choice with the largest number of successful polls. 
// Ties are broken by switching choice lazily preference int @@ -16,15 +19,12 @@ type binarySnowball struct { // numSuccessfulPolls tracks the total number of successful network polls of // the 0 and 1 choices numSuccessfulPolls [2]int - - // snowflake wraps the binary snowflake logic - snowflake binarySnowflake } // Initialize implements the BinarySnowball interface func (sb *binarySnowball) Initialize(beta, choice int) { + sb.binarySnowflake.Initialize(beta, choice) sb.preference = choice - sb.snowflake.Initialize(beta, choice) } // Preference implements the BinarySnowball interface @@ -34,7 +34,7 @@ func (sb *binarySnowball) Preference() int { // this case is handled for completion. Therefore, if snowflake is // finalized, then our finalized snowflake choice should be preferred. if sb.Finalized() { - return sb.snowflake.Preference() + return sb.binarySnowflake.Preference() } return sb.preference } @@ -45,20 +45,14 @@ func (sb *binarySnowball) RecordSuccessfulPoll(choice int) { if sb.numSuccessfulPolls[choice] > sb.numSuccessfulPolls[1-choice] { sb.preference = choice } - sb.snowflake.RecordSuccessfulPoll(choice) + sb.binarySnowflake.RecordSuccessfulPoll(choice) } -// RecordUnsuccessfulPoll implements the BinarySnowball interface -func (sb *binarySnowball) RecordUnsuccessfulPoll() { sb.snowflake.RecordUnsuccessfulPoll() } - -// Finalized implements the BinarySnowball interface -func (sb *binarySnowball) Finalized() bool { return sb.snowflake.Finalized() } - func (sb *binarySnowball) String() string { return fmt.Sprintf( - "SB(Preference = %d, NumSuccessfulPolls[0] = %d, NumSuccessfulPolls[1] = %d, SF = %s)", + "SB(Preference = %d, NumSuccessfulPolls[0] = %d, NumSuccessfulPolls[1] = %d, %s)", sb.preference, sb.numSuccessfulPolls[0], sb.numSuccessfulPolls[1], - &sb.snowflake) + &sb.binarySnowflake) } diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go index dd37f13..2962164 100644 --- 
a/snow/consensus/snowball/binary_snowball_test.go +++ b/snow/consensus/snowball/binary_snowball_test.go @@ -96,15 +96,15 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF = SF(Preference = 1, Confidence = 2, Finalized = true))" + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) } } func TestBinarySnowballAcceptWeirdColor(t *testing.T) { - Red := 0 - Blue := 1 + Blue := 0 + Red := 1 beta := 2 @@ -151,7 +151,7 @@ func TestBinarySnowballAcceptWeirdColor(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF = SF(Preference = 1, Confidence = 2, Finalized = true))" + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) } @@ -190,7 +190,7 @@ func TestBinarySnowballLockColor(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF = SF(Preference = 0, Confidence = 1, Finalized = true))" + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) } diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go index 715e3ed..1860012 100644 --- a/snow/consensus/snowball/binary_snowflake.go +++ b/snow/consensus/snowball/binary_snowflake.go @@ -9,10 +9,8 @@ import ( // binarySnowflake is the implementation of a binary snowflake instance type binarySnowflake struct { - // preference is the choice that last had a successful poll. Unless there - // hasn't been a successful poll, in which case it is the initially provided - // choice. - preference int + // wrap the binary slush logic + binarySlush // confidence tracks the number of successful polls in a row that have // returned the preference @@ -29,29 +27,26 @@ type binarySnowflake struct { // Initialize implements the BinarySnowflake interface func (sf *binarySnowflake) Initialize(beta, choice int) { + sf.binarySlush.Initialize(choice) sf.beta = beta - sf.preference = choice } -// Preference implements the BinarySnowflake interface -func (sf *binarySnowflake) Preference() int { return sf.preference } - // RecordSuccessfulPoll implements the BinarySnowflake interface func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { - if sf.Finalized() { + if sf.finalized { return // This instace is already decided. } - if sf.preference == choice { + if preference := sf.Preference(); preference == choice { sf.confidence++ } else { // confidence is set to 1 because there has already been 1 successful // poll, namely this poll. 
sf.confidence = 1 - sf.preference = choice } sf.finalized = sf.confidence >= sf.beta + sf.binarySlush.RecordSuccessfulPoll(choice) } // RecordUnsuccessfulPoll implements the BinarySnowflake interface @@ -61,8 +56,8 @@ func (sf *binarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } func (sf *binarySnowflake) Finalized() bool { return sf.finalized } func (sf *binarySnowflake) String() string { - return fmt.Sprintf("SF(Preference = %d, Confidence = %d, Finalized = %v)", - sf.Preference(), + return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", sf.confidence, - sf.Finalized()) + sf.finalized, + &sf.binarySlush) } diff --git a/snow/consensus/snowball/binary_snowflake_test.go b/snow/consensus/snowball/binary_snowflake_test.go new file mode 100644 index 0000000..1078687 --- /dev/null +++ b/snow/consensus/snowball/binary_snowflake_test.go @@ -0,0 +1,56 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func TestBinarySnowflake(t *testing.T) { + Blue := 0 + Red := 1 + + beta := 2 + + sf := binarySnowflake{} + sf.Initialize(beta, Red) + + if pref := sf.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); pref != Blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if !sf.Finalized() { + t.Fatalf("Didn't finalized correctly") + } +} diff --git a/snow/consensus/snowball/byzantine.go b/snow/consensus/snowball/byzantine.go index 8995d11..88fda59 100644 --- a/snow/consensus/snowball/byzantine.go +++ b/snow/consensus/snowball/byzantine.go @@ -24,6 +24,7 @@ type Byzantine struct { // Initialize implements the Consensus interface func (b *Byzantine) Initialize(params Parameters, choice ids.ID) { + b.params = params b.preference = choice } diff --git a/snow/consensus/snowball/byzantine_test.go b/snow/consensus/snowball/byzantine_test.go new file mode 100644 index 0000000..cee357b --- /dev/null +++ b/snow/consensus/snowball/byzantine_test.go @@ -0,0 +1,54 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/prometheus/client_golang/prometheus" +) + +func TestByzantine(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + + byzFactory := ByzantineFactory{} + byz := byzFactory.New() + byz.Initialize(params, Blue) + + if ret := byz.Parameters(); ret != params { + t.Fatalf("Should have returned the correct params") + } + + byz.Add(Green) + + if pref := byz.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) + } + + oneGreen := ids.Bag{} + oneGreen.Add(Green) + byz.RecordPoll(oneGreen) + + if pref := byz.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) + } + + byz.RecordUnsuccessfulPoll() + + if pref := byz.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) + } + + if final := byz.Finalized(); !final { + t.Fatalf("Should be marked as accepted") + } + + if str := byz.String(); str != Blue.String() { + t.Fatalf("Wrong string, expected %s returned %s", Blue, str) + } +} diff --git a/snow/consensus/snowball/consensus.go b/snow/consensus/snowball/consensus.go index edbf14b..06cb3a4 100644 --- a/snow/consensus/snowball/consensus.go +++ b/snow/consensus/snowball/consensus.go @@ -69,6 +69,23 @@ type NnarySnowflake interface { Finalized() bool } +// NnarySlush is a slush instance deciding between an unbounded number of +// values. After performing a network sample of k nodes, if you have alpha +// votes for one of the choices, you should vote for that choice. +type NnarySlush interface { + fmt.Stringer + + // Takes in the initial choice + Initialize(initialPreference ids.ID) + + // Returns the currently preferred choice to be finalized + Preference() ids.ID + + // RecordSuccessfulPoll records a successful poll towards finalizing the + // specified choice. 
Assumes the choice was previously added. + RecordSuccessfulPoll(choice ids.ID) +} + // BinarySnowball augments BinarySnowflake with a counter that tracks the total // number of positive responses from a network sample. type BinarySnowball interface{ BinarySnowflake } @@ -97,6 +114,23 @@ type BinarySnowflake interface { Finalized() bool } +// BinarySlush is a slush instance deciding between two values. After performing +// a network sample of k nodes, if you have alpha votes for one of the choices, +// you should vote for that choice. +type BinarySlush interface { + fmt.Stringer + + // Takes in the initial choice + Initialize(initialPreference int) + + // Returns the currently preferred choice to be finalized + Preference() int + + // RecordSuccessfulPoll records a successful poll towards finalizing the + // specified choice + RecordSuccessfulPoll(choice int) +} + // UnarySnowball is a snowball instance deciding on one value. After performing // a network sample of k nodes, if you have alpha votes for the choice, you // should vote. Otherwise, you should reset. @@ -122,3 +156,29 @@ type UnarySnowball interface { // Returns a new unary snowball instance with the same state Clone() UnarySnowball } + +// UnarySnowflake is a snowflake instance deciding on one value. After +// performing a network sample of k nodes, if you have alpha votes for the +// choice, you should vote. Otherwise, you should reset. +type UnarySnowflake interface { + fmt.Stringer + + // Takes in the beta value + Initialize(beta int) + + // RecordSuccessfulPoll records a successful poll towards finalizing + RecordSuccessfulPoll() + + // RecordUnsuccessfulPoll resets the snowflake counter of this instance + RecordUnsuccessfulPoll() + + // Return whether a choice has been finalized + Finalized() bool + + // Returns a new binary snowball instance with the agreement parameters + // transferred. 
Takes in the new beta value and the original choice + Extend(beta, originalPreference int) BinarySnowflake + + // Returns a new unary snowflake instance with the same state + Clone() UnarySnowflake +} diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go index 67fec3d..922f606 100644 --- a/snow/consensus/snowball/consensus_test.go +++ b/snow/consensus/snowball/consensus_test.go @@ -22,7 +22,7 @@ func ParamsTest(t *testing.T, factory Factory) { params := Parameters{ Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, } sb.Initialize(params, Red) @@ -34,5 +34,7 @@ func ParamsTest(t *testing.T, factory Factory) { t.Fatalf("Wrong Beta1 parameter") } else if p.BetaRogue != params.BetaRogue { t.Fatalf("Wrong Beta2 parameter") + } else if p.ConcurrentRepolls != params.ConcurrentRepolls { + t.Fatalf("Wrong Repoll parameter") } } diff --git a/snow/consensus/snowball/flat.go b/snow/consensus/snowball/flat.go index da4eb1b..21663c4 100644 --- a/snow/consensus/snowball/flat.go +++ b/snow/consensus/snowball/flat.go @@ -15,40 +15,27 @@ func (FlatFactory) New() Consensus { return &Flat{} } // Flat is a naive implementation of a multi-choice snowball instance type Flat struct { + // wraps the n-nary snowball logic + nnarySnowball + // params contains all the configurations of a snowball instance params Parameters - - // snowball wraps the n-nary snowball logic - snowball nnarySnowball } // Initialize implements the Consensus interface func (f *Flat) Initialize(params Parameters, choice ids.ID) { + f.nnarySnowball.Initialize(params.BetaVirtuous, params.BetaRogue, choice) f.params = params - f.snowball.Initialize(params.BetaVirtuous, params.BetaRogue, choice) } // Parameters implements the Consensus interface func (f *Flat) Parameters() Parameters { return f.params } -// Add implements the Consensus interface -func (f *Flat) Add(choice 
ids.ID) { f.snowball.Add(choice) } - -// Preference implements the Consensus interface -func (f *Flat) Preference() ids.ID { return f.snowball.Preference() } - // RecordPoll implements the Consensus interface func (f *Flat) RecordPoll(votes ids.Bag) { if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha { - f.snowball.RecordSuccessfulPoll(pollMode) + f.nnarySnowball.RecordSuccessfulPoll(pollMode) } else { f.RecordUnsuccessfulPoll() } } - -// RecordUnsuccessfulPoll implements the Consensus interface -func (f *Flat) RecordUnsuccessfulPoll() { f.snowball.RecordUnsuccessfulPoll() } - -// Finalized implements the Consensus interface -func (f *Flat) Finalized() bool { return f.snowball.Finalized() } -func (f *Flat) String() string { return f.snowball.String() } diff --git a/snow/consensus/snowball/flat_test.go b/snow/consensus/snowball/flat_test.go index 1aaa754..d2b9617 100644 --- a/snow/consensus/snowball/flat_test.go +++ b/snow/consensus/snowball/flat_test.go @@ -65,7 +65,7 @@ func TestFlat(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF = SF(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, Confidence = 2, Finalized = true))" + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" if str := f.String(); str != expected { t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) } diff --git a/snow/consensus/snowball/nnary_slush.go b/snow/consensus/snowball/nnary_slush.go new file mode 100644 index 0000000..70a55c3 --- /dev/null +++ b/snow/consensus/snowball/nnary_slush.go @@ -0,0 +1,30 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// nnarySlush is the implementation of a slush instance with an unbounded number +// of choices +type nnarySlush struct { + // preference is the choice that last had a successful poll. Unless there + // hasn't been a successful poll, in which case it is the initially provided + // choice. + preference ids.ID +} + +// Initialize implements the NnarySlush interface +func (sl *nnarySlush) Initialize(choice ids.ID) { sl.preference = choice } + +// Preference implements the NnarySlush interface +func (sl *nnarySlush) Preference() ids.ID { return sl.preference } + +// RecordSuccessfulPoll implements the NnarySlush interface +func (sl *nnarySlush) RecordSuccessfulPoll(choice ids.ID) { sl.preference = choice } + +func (sl *nnarySlush) String() string { return fmt.Sprintf("SL(Preference = %s)", sl.preference) } diff --git a/snow/consensus/snowball/nnary_snowball.go b/snow/consensus/snowball/nnary_snowball.go index 6821a50..2595622 100644 --- a/snow/consensus/snowball/nnary_snowball.go +++ b/snow/consensus/snowball/nnary_snowball.go @@ -11,6 +11,9 @@ import ( // nnarySnowball is a naive implementation of a multi-color snowball instance type nnarySnowball struct { + // wrap the n-nary snowflake logic + nnarySnowflake + // preference is the choice with the largest number of successful polls. 
// Ties are broken by switching choice lazily preference ids.ID @@ -22,21 +25,15 @@ type nnarySnowball struct { // numSuccessfulPolls tracks the total number of successful network polls of // the choices numSuccessfulPolls map[[32]byte]int - - // snowflake wraps the n-nary snowflake logic - snowflake nnarySnowflake } // Initialize implements the NnarySnowball interface func (sb *nnarySnowball) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { + sb.nnarySnowflake.Initialize(betaVirtuous, betaRogue, choice) sb.preference = choice sb.numSuccessfulPolls = make(map[[32]byte]int) - sb.snowflake.Initialize(betaVirtuous, betaRogue, choice) } -// Add implements the NnarySnowball interface -func (sb *nnarySnowball) Add(choice ids.ID) { sb.snowflake.Add(choice) } - // Preference implements the NnarySnowball interface func (sb *nnarySnowball) Preference() ids.ID { // It is possible, with low probability, that the snowflake preference is @@ -44,17 +41,13 @@ func (sb *nnarySnowball) Preference() ids.ID { // this case is handled for completion. Therefore, if snowflake is // finalized, then our finalized snowflake choice should be preferred. 
if sb.Finalized() { - return sb.snowflake.Preference() + return sb.nnarySnowflake.Preference() } return sb.preference } // RecordSuccessfulPoll implements the NnarySnowball interface func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { - if sb.Finalized() { - return - } - key := choice.Key() numSuccessfulPolls := sb.numSuccessfulPolls[key] + 1 sb.numSuccessfulPolls[key] = numSuccessfulPolls @@ -64,16 +57,10 @@ func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { sb.maxSuccessfulPolls = numSuccessfulPolls } - sb.snowflake.RecordSuccessfulPoll(choice) + sb.nnarySnowflake.RecordSuccessfulPoll(choice) } -// RecordUnsuccessfulPoll implements the NnarySnowball interface -func (sb *nnarySnowball) RecordUnsuccessfulPoll() { sb.snowflake.RecordUnsuccessfulPoll() } - -// Finalized implements the NnarySnowball interface -func (sb *nnarySnowball) Finalized() bool { return sb.snowflake.Finalized() } - func (sb *nnarySnowball) String() string { - return fmt.Sprintf("SB(Preference = %s, NumSuccessfulPolls = %d, SF = %s)", - sb.preference, sb.maxSuccessfulPolls, &sb.snowflake) + return fmt.Sprintf("SB(Preference = %s, NumSuccessfulPolls = %d, %s)", + sb.preference, sb.maxSuccessfulPolls, &sb.nnarySnowflake) } diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go index 655fdc6..50eb667 100644 --- a/snow/consensus/snowball/nnary_snowball_test.go +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -55,50 +55,24 @@ func TestNnarySnowball(t *testing.T) { } } -func TestNnarySnowflake(t *testing.T) { - betaVirtuous := 2 +func TestVirtuousNnarySnowball(t *testing.T) { + betaVirtuous := 1 betaRogue := 2 - sf := nnarySnowflake{} - sf.Initialize(betaVirtuous, betaRogue, Red) - sf.Add(Blue) - sf.Add(Green) + sb := nnarySnowball{} + sb.Initialize(betaVirtuous, betaRogue, Red) - if pref := sf.Preference(); !Red.Equals(pref) { + if pref := sb.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if sf.Finalized() { + } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sf.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(Red) - if pref := sf.Preference(); !Blue.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } - - sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); !Red.Equals(pref) { + if pref := sb.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } - - sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); !Red.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { - t.Fatalf("Should be finalized") - } - - sf.RecordSuccessfulPoll(Blue) - - if pref := sf.Preference(); !Red.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { + } else if !sb.Finalized() { t.Fatalf("Should be finalized") } } @@ -143,7 +117,7 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF = SF(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, Confidence = 2, Finalized = true))" + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) } @@ -159,7 +133,7 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { } } -func TestNarySnowflakeColor(t *testing.T) { +func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { betaVirtuous := 2 betaRogue := 2 @@ -175,7 +149,7 @@ func TestNarySnowflakeColor(t *testing.T) { sb.RecordSuccessfulPoll(Blue) - if pref := sb.snowflake.Preference(); !Blue.Equals(pref) { + if pref := sb.nnarySnowflake.Preference(); !Blue.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) } @@ -183,7 +157,7 @@ func TestNarySnowflakeColor(t *testing.T) { if pref := sb.Preference(); !Blue.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if pref := sb.snowflake.Preference(); !Red.Equals(pref) { + } else if pref := sb.nnarySnowflake.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) } } diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index f9d1069..8b461f0 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -12,6 +12,9 @@ import ( // nnarySnowflake is the implementation of a snowflake instance with an // unbounded number of choices type nnarySnowflake struct { + // wrap the n-nary slush logic + nnarySlush + // betaVirtuous is the number of consecutive successful queries required for // finalization on a virtuous instance. betaVirtuous int @@ -24,11 +27,6 @@ type nnarySnowflake struct { // returned the preference confidence int - // preference is the choice that last had a successful poll. Unless there - // hasn't been a successful poll, in which case it is the initially provided - // choice. 
- preference ids.ID - // rogue tracks if this instance has multiple choices or only one rogue bool @@ -39,32 +37,31 @@ type nnarySnowflake struct { // Initialize implements the NnarySnowflake interface func (sf *nnarySnowflake) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { + sf.nnarySlush.Initialize(choice) sf.betaVirtuous = betaVirtuous sf.betaRogue = betaRogue - sf.preference = choice } // Add implements the NnarySnowflake interface func (sf *nnarySnowflake) Add(choice ids.ID) { sf.rogue = sf.rogue || !choice.Equals(sf.preference) } -// Preference implements the NnarySnowflake interface -func (sf *nnarySnowflake) Preference() ids.ID { return sf.preference } - // RecordSuccessfulPoll implements the NnarySnowflake interface func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { - if sf.Finalized() { - return + if sf.finalized { + return // This instace is already decided. } - if sf.preference.Equals(choice) { + if preference := sf.nnarySlush.Preference(); preference.Equals(choice) { sf.confidence++ } else { + // confidence is set to 1 because there has already been 1 successful + // poll, namely this poll. 
sf.confidence = 1 - sf.preference = choice } sf.finalized = (!sf.rogue && sf.confidence >= sf.betaVirtuous) || sf.confidence >= sf.betaRogue + sf.nnarySlush.RecordSuccessfulPoll(choice) } // RecordUnsuccessfulPoll implements the NnarySnowflake interface @@ -74,8 +71,8 @@ func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } func (sf *nnarySnowflake) Finalized() bool { return sf.finalized } func (sf *nnarySnowflake) String() string { - return fmt.Sprintf("SF(Preference = %s, Confidence = %d, Finalized = %v)", - sf.preference, + return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", sf.confidence, - sf.Finalized()) + sf.finalized, + &sf.nnarySlush) } diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go new file mode 100644 index 0000000..cbf3864 --- /dev/null +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -0,0 +1,134 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func TestNnarySnowflake(t *testing.T) { + betaVirtuous := 2 + betaRogue := 2 + + sf := nnarySnowflake{} + sf.Initialize(betaVirtuous, betaRogue, Red) + sf.Add(Blue) + sf.Add(Green) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) + } else if !sf.Finalized() { + t.Fatalf("Should be finalized") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sf.Finalized() { + t.Fatalf("Should be finalized") + } +} + +func TestVirtuousNnarySnowflake(t *testing.T) { + betaVirtuous := 2 + betaRogue := 3 + + sb := nnarySnowflake{} + sb.Initialize(betaVirtuous, betaRogue, Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Should be finalized") + } +} + +func TestRogueNnarySnowflake(t *testing.T) { + betaVirtuous := 1 + betaRogue := 2 + + sb := nnarySnowflake{} + sb.Initialize(betaVirtuous, betaRogue, Red) + if sb.rogue { + t.Fatalf("Shouldn't be rogue") + } + + sb.Add(Red) + if sb.rogue { + t.Fatalf("Shouldn't be rogue") + } + + sb.Add(Blue) + if !sb.rogue { + t.Fatalf("Should be rogue") + } + + sb.Add(Red) + if !sb.rogue { + t.Fatalf("Should be rogue") + } + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Should be finalized") + } +} diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go index 5e14afa..7d77405 100644 --- a/snow/consensus/snowball/parameters.go +++ b/snow/consensus/snowball/parameters.go @@ -9,11 +9,28 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +const ( + errMsg = "__________ .___\n" + + "\\______ \\____________ __| _/__.__.\n" + + " | | _/\\_ __ \\__ \\ / __ < | |\n" + + " | | \\ | | \\// __ \\_/ /_/ |\\___ |\n" + + " |______ / |__| (____ /\\____ |/ ____|\n" + + " \\/ \\/ \\/\\/\n" + + "\n" + + "🏆 🏆 🏆 🏆 🏆 🏆\n" + + " ________ ________ ________________\n" + + " / _____/ \\_____ \\ / _ \\__ ___/\n" + + "/ \\ ___ / | \\ / /_\\ \\| |\n" + + "\\ \\_\\ \\/ | \\/ | \\ |\n" + + " \\______ /\\_______ /\\____|__ /____|\n" + + " \\/ \\/ \\/\n" +) + // Parameters required for snowball consensus type Parameters struct { - Namespace string - Metrics prometheus.Registerer - K, Alpha, BetaVirtuous, BetaRogue int + Namespace string + Metrics prometheus.Registerer + K, Alpha, BetaVirtuous, BetaRogue, ConcurrentRepolls int } // Valid returns nil if the parameters describe a valid initialization. 
@@ -25,8 +42,14 @@ func (p Parameters) Valid() error { return fmt.Errorf("K = %d, Alpha = %d: Fails the condition that: Alpha <= K", p.K, p.Alpha) case p.BetaVirtuous <= 0: return fmt.Errorf("BetaVirtuous = %d: Fails the condition that: 0 < BetaVirtuous", p.BetaVirtuous) + case p.BetaRogue == 3 && p.BetaVirtuous == 28: + return fmt.Errorf("BetaVirtuous = %d, BetaRogue = %d: Fails the condition that: BetaVirtuous <= BetaRogue\n%s", p.BetaVirtuous, p.BetaRogue, errMsg) case p.BetaRogue < p.BetaVirtuous: return fmt.Errorf("BetaVirtuous = %d, BetaRogue = %d: Fails the condition that: BetaVirtuous <= BetaRogue", p.BetaVirtuous, p.BetaRogue) + case p.ConcurrentRepolls <= 0: + return fmt.Errorf("ConcurrentRepolls = %d: Fails the condition that: 0 < ConcurrentRepolls", p.ConcurrentRepolls) + case p.ConcurrentRepolls > p.BetaRogue: + return fmt.Errorf("ConcurrentRepolls = %d, BetaRogue = %d: Fails the condition that: ConcurrentRepolls <= BetaRogue", p.ConcurrentRepolls, p.BetaRogue) default: return nil } diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index de1b666..7c3668c 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -4,15 +4,46 @@ package snowball import ( + "fmt" + "strings" "testing" ) func TestParametersValid(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + if err := p.Valid(); err != nil { + t.Fatal(err) + } +} + +func TestParametersAnotherValid(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 28, + BetaRogue: 30, + ConcurrentRepolls: 1, + } + + if err := p.Valid(); err != nil { + t.Fatal(err) + } +} + +func TestParametersYetAnotherValid(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 3, + ConcurrentRepolls: 1, } if err := p.Valid(); err != nil { @@ -22,10 +53,11 @@ func 
TestParametersValid(t *testing.T) { func TestParametersInvalidK(t *testing.T) { p := Parameters{ - K: 0, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 0, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { @@ -35,10 +67,11 @@ func TestParametersInvalidK(t *testing.T) { func TestParametersInvalidAlpha(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 0, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 0, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { @@ -48,10 +81,11 @@ func TestParametersInvalidAlpha(t *testing.T) { func TestParametersInvalidBetaVirtuous(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 0, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 0, + BetaRogue: 1, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { @@ -61,13 +95,57 @@ func TestParametersInvalidBetaVirtuous(t *testing.T) { func TestParametersInvalidBetaRogue(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 0, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 0, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { t.Fatalf("Should have failed due to invalid beta rogue") } } + +func TestParametersAnotherInvalidBetaRogue(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 28, + BetaRogue: 3, + ConcurrentRepolls: 1, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid beta rogue") + } else if !strings.Contains(err.Error(), "\n") { + t.Fatalf("Should have described the extensive error") + } +} + +func TestParametersInvalidConcurrentRepolls(t *testing.T) { + tests := []Parameters{ + Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 2, + }, + Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 0, + }, + } + for _, p := range tests { + label := fmt.Sprintf("ConcurrentRepolls=%d", 
p.ConcurrentRepolls) + t.Run(label, func(t *testing.T) { + if err := p.Valid(); err == nil { + t.Error("Should have failed due to invalid concurrent repolls") + } + }) + } +} diff --git a/snow/consensus/snowball/tree.go b/snow/consensus/snowball/tree.go index ad6554b..f28d0a8 100644 --- a/snow/consensus/snowball/tree.go +++ b/snow/consensus/snowball/tree.go @@ -18,6 +18,10 @@ func (TreeFactory) New() Consensus { return &Tree{} } // Tree implements the snowball interface by using a modified patricia tree. type Tree struct { + // node is the root that represents the first snowball instance in the tree, + // and contains references to all the other snowball instances in the tree. + node + // params contains all the configurations of a snowball instance params Parameters @@ -31,10 +35,6 @@ type Tree struct { // that any later traversal into this sub-tree should call // RecordUnsuccessfulPoll before performing any other action. shouldReset bool - - // root is the node that represents the first snowball instance in the tree, - // and contains references to all the other snowball instances in the tree. 
- root node } // Initialize implements the Consensus interface @@ -44,7 +44,7 @@ func (t *Tree) Initialize(params Parameters, choice ids.ID) { snowball := &unarySnowball{} snowball.Initialize(params.BetaVirtuous) - t.root = &unaryNode{ + t.node = &unaryNode{ tree: t, preference: choice, commonPrefix: ids.NumBits, // The initial state has no conflicts @@ -57,20 +57,17 @@ func (t *Tree) Parameters() Parameters { return t.params } // Add implements the Consensus interface func (t *Tree) Add(choice ids.ID) { - prefix := t.root.DecidedPrefix() + prefix := t.node.DecidedPrefix() // Make sure that we haven't already decided against this new id if ids.EqualSubset(0, prefix, t.Preference(), choice) { - t.root = t.root.Add(choice) + t.node = t.node.Add(choice) } } -// Preference implements the Consensus interface -func (t *Tree) Preference() ids.ID { return t.root.Preference() } - // RecordPoll implements the Consensus interface func (t *Tree) RecordPoll(votes ids.Bag) { // Get the assumed decided prefix of the root node. - decidedPrefix := t.root.DecidedPrefix() + decidedPrefix := t.node.DecidedPrefix() // If any of the bits differ from the preference in this prefix, the vote is // for a rejected operation. So, we filter out these invalid votes. @@ -78,7 +75,7 @@ func (t *Tree) RecordPoll(votes ids.Bag) { // Now that the votes have been restricted to valid votes, pass them into // the first snowball instance - t.root = t.root.RecordPoll(filteredVotes, t.shouldReset) + t.node = t.node.RecordPoll(filteredVotes, t.shouldReset) // Because we just passed the reset into the snowball instance, we should no // longer reset. 
@@ -88,14 +85,11 @@ func (t *Tree) RecordPoll(votes ids.Bag) { // RecordUnsuccessfulPoll implements the Consensus interface func (t *Tree) RecordUnsuccessfulPoll() { t.shouldReset = true } -// Finalized implements the Consensus interface -func (t *Tree) Finalized() bool { return t.root.Finalized() } - func (t *Tree) String() string { builder := strings.Builder{} prefixes := []string{""} - nodes := []node{t.root} + nodes := []node{t.node} for len(prefixes) > 0 { newSize := len(prefixes) - 1 @@ -321,14 +315,14 @@ func (u *unaryNode) Add(newChoice ids.ID) node { u.decidedPrefix, u.commonPrefix, u.preference, newChoice); !found { // If the first difference doesn't exist, then this node shouldn't be // split - if u.child != nil && ids.EqualSubset( - u.commonPrefix, u.child.DecidedPrefix(), u.preference, newChoice) { - // If the choice matched my child's prefix, then the add should be - // passed to my child. (Case 1. from above) + if u.child != nil { + // Because this node will finalize before any children could + // finalize, it must be that the newChoice will match my child's + // prefix u.child = u.child.Add(newChoice) } - // If the choice didn't my child's prefix, then the choice was - // previously rejected and the tree should not be modified + // if u.child is nil, then we are attempting to add the same choice into + // the tree, which should be a noop } else { // The difference was found, so this node must be split @@ -409,13 +403,18 @@ func (u *unaryNode) RecordPoll(votes ids.Bag, reset bool) node { u.snowball.RecordSuccessfulPoll() if u.child != nil { - decidedPrefix := u.child.DecidedPrefix() - filteredVotes := votes.Filter(u.commonPrefix, decidedPrefix, u.preference) + // We are guaranteed that u.commonPrefix will equal + // u.child.DecidedPrefix(). Otherwise, there must have been a + // decision under this node, which isn't possible because + // beta1 <= beta2. 
That means that filtering the votes between + // u.commonPrefix and u.child.DecidedPrefix() would always result in + // the same set being returned. + // If I'm now decided, return my child if u.Finalized() { - return u.child.RecordPoll(filteredVotes, u.shouldReset) + return u.child.RecordPoll(votes, u.shouldReset) } - u.child = u.child.RecordPoll(filteredVotes, u.shouldReset) + u.child = u.child.RecordPoll(votes, u.shouldReset) // The child's preference may have changed u.preference = u.child.Preference() } @@ -482,6 +481,10 @@ func (b *binaryNode) Add(id ids.ID) node { ids.EqualSubset(b.bit+1, child.DecidedPrefix(), b.preferences[bit], id) { b.children[bit] = child.Add(id) } + // If child is nil, then the id has already been added to the tree, so + // nothing should be done + // If the decided prefix isn't matched, then a previous decision has made + // the id that is being added to have already been rejected return b } diff --git a/snow/consensus/snowball/tree_test.go b/snow/consensus/snowball/tree_test.go index 56904e1..4fb1159 100644 --- a/snow/consensus/snowball/tree_test.go +++ b/snow/consensus/snowball/tree_test.go @@ -18,7 +18,7 @@ func TestTreeParams(t *testing.T) { ParamsTest(t, TreeFactory{}) } func TestSnowballSingleton(t *testing.T) { params := Parameters{ Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 5, } tree := Tree{} tree.Initialize(params, Red) @@ -35,6 +35,14 @@ func TestSnowballSingleton(t *testing.T) { t.Fatalf("Snowball is finalized too soon") } + + empty := ids.Bag{} + tree.RecordPoll(empty) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + tree.RecordPoll(oneRed) if tree.Finalized() { @@ -170,10 +178,13 @@ func TestSnowballLastBinary(t *testing.T) { tree.Initialize(params, zero) tree.Add(one) - expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 255)\n" + - " SB(Preference = 0, 
NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 255" + // Should do nothing + tree.Add(one) + + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255)\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !zero.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) } else if tree.Finalized() { @@ -199,6 +210,238 @@ func TestSnowballLastBinary(t *testing.T) { } } +func TestSnowballAddPreviouslyRejected(t *testing.T) { + zero := ids.NewID([32]byte{0b00000000}) + one := ids.NewID([32]byte{0b00000001}) + two := ids.NewID([32]byte{0b00000010}) + four := ids.NewID([32]byte{0b00000100}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(one) + tree.Add(four) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + zeroBag := ids.Bag{} + zeroBag.Add(zero) + tree.RecordPoll(zeroBag) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.Add(two) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } +} + +func TestSnowballNewUnary(t *testing.T) { + zero := ids.NewID([32]byte{0b00000000}) + one := ids.NewID([32]byte{0b00000001}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 3, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(one) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + oneBag := ids.Bag{} + oneBag.Add(one) + tree.RecordPoll(oneBag) + + { + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !one.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", one, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(oneBag) + + { + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !one.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", one, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } +} + +func TestSnowballTransitiveReset(t *testing.T) { + zero := ids.NewID([32]byte{0b00000000}) + two := ids.NewID([32]byte{0b00000010}) + eight := ids.NewID([32]byte{0b00001000}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(two) + tree.Add(eight) + + { + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + zeroBag := ids.Bag{} + zeroBag.Add(zero) + tree.RecordPoll(zeroBag) + + { + expected := "SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + + emptyBag := ids.Bag{} + tree.RecordPoll(emptyBag) + + { + expected := "SB(NumSuccessfulPolls = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(zeroBag) + + { + expected := "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(zeroBag) + + { + expected := "SB(NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true)) Bits = [4, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if !tree.Finalized() { + t.Fatalf("Finalized too late") + } + } +} + func TestSnowballTrinary(t *testing.T) { params := Parameters{ Metrics: prometheus.NewRegistry(), @@ -256,7 +499,7 @@ func TestSnowballTrinary(t *testing.T) { tree.RecordPoll(redBag) if pref := tree.Preference(); !Blue.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Green, pref) + t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) } else if tree.Finalized() { t.Fatalf("Finalized too early") } @@ -378,9 +621,9 @@ func TestSnowballFineGrained(t *testing.T) { tree := Tree{} tree.Initialize(params, c0000) { - expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 256)" + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -390,11 +633,11 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1100) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -404,13 +647,13 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1000) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -420,16 +663,16 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c0010) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 2)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" if str := tree.String(); expected != str { - 
t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -441,15 +684,15 @@ func TestSnowballFineGrained(t *testing.T) { c0000Bag.Add(c0000) tree.RecordPoll(c0000Bag) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false)) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false)) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) 
Bits = [2, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -461,11 +704,11 @@ func TestSnowballFineGrained(t *testing.T) { c0010Bag.Add(c0010) tree.RecordPoll(c0010Bag) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF = SF(Preference = 1, Confidence = 1, Finalized = false)) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -475,9 +718,9 @@ func TestSnowballFineGrained(t *testing.T) { tree.RecordPoll(c0010Bag) { - expected := "SB(NumSuccessfulPolls = 2, Confidence = 2, Finalized = true) Bits = [3, 256)" + expected := "SB(NumSuccessfulPolls = 2, SF(Confidence = 2, Finalized = true)) Bits = [3, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0010.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", c0010, pref) } else if !tree.Finalized() { @@ -496,9 +739,9 @@ func TestSnowballDoubleAdd(t *testing.T) { tree.Add(Red) { - expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 256)" + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) } else if tree.Finalized() { @@ -533,3 +776,108 @@ func TestSnowballConsistent(t *testing.T) { t.Fatalf("Network agreed on inconsistent values") } } + +func TestSnowballFilterBinaryChildren(t *testing.T) { + c0000 := ids.NewID([32]byte{0b00000000}) + c1000 := ids.NewID([32]byte{0b00000001}) + c0100 := ids.NewID([32]byte{0b00000010}) + c0010 := ids.NewID([32]byte{0b00000100}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, c0000) + { + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } + } + + tree.Add(c1000) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } + } + + tree.Add(c0010) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } + } + + c0000Bag := ids.Bag{} + c0000Bag.Add(c0000) + tree.RecordPoll(c0000Bag) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } + } + + tree.Add(c0100) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } + } + + c0100Bag := ids.Bag{} + c0100Bag.Add(c0100) + tree.RecordPoll(c0100Bag) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } + } +} diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 6d0db07..26ea35d 100644 --- a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -9,64 +9,40 @@ import ( // unarySnowball is the implementation of a unary snowball instance type unarySnowball struct { - // beta is the number of consecutive successful queries required for - // finalization. 
- beta int - - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int + // wrap the unary snowflake logic + unarySnowflake // numSuccessfulPolls tracks the total number of successful network polls numSuccessfulPolls int - - // finalized prevents the state from changing after the required number of - // consecutive polls has been reached - finalized bool } -// Initialize implements the UnarySnowball interface -func (sb *unarySnowball) Initialize(beta int) { sb.beta = beta } - // RecordSuccessfulPoll implements the UnarySnowball interface func (sb *unarySnowball) RecordSuccessfulPoll() { sb.numSuccessfulPolls++ - sb.confidence++ - sb.finalized = sb.finalized || sb.confidence >= sb.beta + sb.unarySnowflake.RecordSuccessfulPoll() } -// RecordUnsuccessfulPoll implements the UnarySnowball interface -func (sb *unarySnowball) RecordUnsuccessfulPoll() { sb.confidence = 0 } - -// Finalized implements the UnarySnowball interface -func (sb *unarySnowball) Finalized() bool { return sb.finalized } - // Extend implements the UnarySnowball interface func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball { bs := &binarySnowball{ - preference: choice, - snowflake: binarySnowflake{ - beta: beta, - preference: choice, - finalized: sb.Finalized(), + binarySnowflake: binarySnowflake{ + binarySlush: binarySlush{preference: choice}, + beta: beta, + finalized: sb.Finalized(), }, + preference: choice, } return bs } // Clone implements the UnarySnowball interface func (sb *unarySnowball) Clone() UnarySnowball { - return &unarySnowball{ - beta: sb.beta, - numSuccessfulPolls: sb.numSuccessfulPolls, - confidence: sb.confidence, - finalized: sb.Finalized(), - } + newSnowball := *sb + return &newSnowball } func (sb *unarySnowball) String() string { - return fmt.Sprintf("SB(NumSuccessfulPolls = %d, Confidence = %d, Finalized = %v)", + return fmt.Sprintf("SB(NumSuccessfulPolls = %d, %s)", sb.numSuccessfulPolls, - 
sb.confidence, - sb.Finalized()) + &sb.unarySnowflake) } diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index 8bf098a..3f4efe5 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -35,7 +35,7 @@ func TestUnarySnowball(t *testing.T) { sbCloneIntf := sb.Clone() sbClone, ok := sbCloneIntf.(*unarySnowball) if !ok { - t.Fatalf("Unexpectedly clone type") + t.Fatalf("Unexpected clone type") } UnarySnowballStateTest(t, sbClone, 2, 1, false) diff --git a/snow/consensus/snowball/unary_snowflake.go b/snow/consensus/snowball/unary_snowflake.go new file mode 100644 index 0000000..2172331 --- /dev/null +++ b/snow/consensus/snowball/unary_snowflake.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" +) + +// unarySnowflake is the implementation of a unary snowflake instance +type unarySnowflake struct { + // beta is the number of consecutive successful queries required for + // finalization. 
+ beta int + + // confidence tracks the number of successful polls in a row that have + // returned the preference + confidence int + + // finalized prevents the state from changing after the required number of + // consecutive polls has been reached + finalized bool +} + +// Initialize implements the UnarySnowflake interface +func (sf *unarySnowflake) Initialize(beta int) { sf.beta = beta } + +// RecordSuccessfulPoll implements the UnarySnowflake interface +func (sf *unarySnowflake) RecordSuccessfulPoll() { + sf.confidence++ + sf.finalized = sf.finalized || sf.confidence >= sf.beta +} + +// RecordUnsuccessfulPoll implements the UnarySnowflake interface +func (sf *unarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } + +// Finalized implements the UnarySnowflake interface +func (sf *unarySnowflake) Finalized() bool { return sf.finalized } + +// Extend implements the UnarySnowflake interface +func (sf *unarySnowflake) Extend(beta int, choice int) BinarySnowflake { + return &binarySnowflake{ + binarySlush: binarySlush{preference: choice}, + confidence: sf.confidence, + beta: beta, + finalized: sf.finalized, + } +} + +// Clone implements the UnarySnowflake interface +func (sf *unarySnowflake) Clone() UnarySnowflake { + newSnowflake := *sf + return &newSnowflake +} + +func (sf *unarySnowflake) String() string { + return fmt.Sprintf("SF(Confidence = %d, Finalized = %v)", + sf.confidence, + sf.finalized) +} diff --git a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go new file mode 100644 index 0000000..55d29e2 --- /dev/null +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -0,0 +1,67 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" +) + +func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence int, expectedFinalized bool) { + if confidence := sf.confidence; confidence != expectedConfidence { + t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) + } else if finalized := sf.Finalized(); finalized != expectedFinalized { + t.Fatalf("Wrong finalized status. Expected %v got %v", expectedFinalized, finalized) + } +} + +func TestUnarySnowflake(t *testing.T) { + beta := 2 + + sf := &unarySnowflake{} + sf.Initialize(beta) + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 1, false) + + sf.RecordUnsuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 0, false) + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 1, false) + + sfCloneIntf := sf.Clone() + sfClone, ok := sfCloneIntf.(*unarySnowflake) + if !ok { + t.Fatalf("Unexpected clone type") + } + + UnarySnowflakeStateTest(t, sfClone, 1, false) + + binarySnowflake := sfClone.Extend(beta, 0) + + binarySnowflake.RecordUnsuccessfulPoll() + + binarySnowflake.RecordSuccessfulPoll(1) + + if binarySnowflake.Finalized() { + t.Fatalf("Should not have finalized") + } + + binarySnowflake.RecordSuccessfulPoll(1) + + if binarySnowflake.Preference() != 1 { + t.Fatalf("Wrong preference") + } else if !binarySnowflake.Finalized() { + t.Fatalf("Should have finalized") + } + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 2, true) + + sf.RecordUnsuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 0, true) + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 1, true) +} diff --git a/snow/consensus/snowstorm/test_tx.go b/snow/consensus/snowstorm/test_tx.go index d5fec1b..12c2f73 100644 --- a/snow/consensus/snowstorm/test_tx.go +++ b/snow/consensus/snowstorm/test_tx.go @@ -14,6 +14,7 @@ type TestTx struct { Deps []Tx Ins ids.Set Stat choices.Status + Validity error Bits []byte } @@ -39,7 +40,7 @@ func (tx *TestTx) Reject() { tx.Stat = 
choices.Rejected } func (tx *TestTx) Reset() { tx.Stat = choices.Processing } // Verify returns nil -func (tx *TestTx) Verify() error { return nil } +func (tx *TestTx) Verify() error { return tx.Validity } // Bytes returns the bits func (tx *TestTx) Bytes() []byte { return tx.Bits } diff --git a/snow/context.go b/snow/context.go index ce213c1..f359553 100644 --- a/snow/context.go +++ b/snow/context.go @@ -24,6 +24,12 @@ type Keystore interface { GetDatabase(username, password string) (database.Database, error) } +// SharedMemory ... +type SharedMemory interface { + GetDatabase(id ids.ID) database.Database + ReleaseDatabase(id ids.ID) +} + // AliasLookup ... type AliasLookup interface { Lookup(alias string) (ids.ID, error) @@ -44,6 +50,7 @@ type Context struct { Lock sync.RWMutex HTTP Callable Keystore Keystore + SharedMemory SharedMemory BCLookup AliasLookup } diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 7d3d7c8..0f58194 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -119,7 +119,7 @@ func (b *bootstrapper) fetch(vtxID ids.ID) { b.sendRequest(vtxID) return } - b.addVertex(vtx) + b.storeVertex(vtx) } func (b *bootstrapper) sendRequest(vtxID ids.ID) { @@ -138,6 +138,14 @@ func (b *bootstrapper) sendRequest(vtxID ids.ID) { } func (b *bootstrapper) addVertex(vtx avalanche.Vertex) { + b.storeVertex(vtx) + + if numPending := b.pending.Len(); numPending == 0 { + b.finish() + } +} + +func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) { vts := []avalanche.Vertex{vtx} for len(vts) > 0 { @@ -181,9 +189,6 @@ func (b *bootstrapper) addVertex(vtx avalanche.Vertex) { numPending := b.pending.Len() b.numPendingRequests.Set(float64(numPending)) - if numPending == 0 { - b.finish() - } } func (b *bootstrapper) finish() { diff --git a/snow/engine/avalanche/bootstrapper_test.go b/snow/engine/avalanche/bootstrapper_test.go index d1be936..cc63b68 100644 --- 
a/snow/engine/avalanche/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrapper_test.go @@ -69,7 +69,7 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest, Context: ctx, Validators: peers, Beacons: peers, - Alpha: peers.Len()/2 + 1, + Alpha: uint64(peers.Len()/2 + 1), Sender: sender, } return BootstrapConfig{ @@ -957,3 +957,53 @@ func TestBootstrapperFilterAccepted(t *testing.T) { t.Fatalf("Vtx shouldn't be accepted") } } + +func TestBootstrapperPartialFetch(t *testing.T) { + config, _, sender, state, _ := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + + vtxBytes0 := []byte{0} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Processing, + bytes: vtxBytes0, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID0, + vtxID1, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + sender.CantGet = false + + bs.ForceAccepted(acceptedIDs) + + if bs.finished { + t.Fatalf("should have requested a vertex") + } + + if bs.pending.Len() != 1 { + t.Fatalf("wrong number pending") + } +} diff --git a/snow/engine/avalanche/config_test.go b/snow/engine/avalanche/config_test.go index 4906559..b11d186 100644 --- a/snow/engine/avalanche/config_test.go +++ b/snow/engine/avalanche/config_test.go @@ -26,11 +26,12 @@ func DefaultConfig() Config { }, Params: avalanche.Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 
2, BatchSize: 1, diff --git a/snow/engine/avalanche/issuer.go b/snow/engine/avalanche/issuer.go index befe973..4be29b3 100644 --- a/snow/engine/avalanche/issuer.go +++ b/snow/engine/avalanche/issuer.go @@ -6,6 +6,7 @@ package avalanche import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) type issuer struct { @@ -44,14 +45,24 @@ func (i *issuer) Update() { vtxID := i.vtx.ID() i.t.pending.Remove(vtxID) - for _, tx := range i.vtx.Txs() { + txs := i.vtx.Txs() + validTxs := []snowstorm.Tx{} + for _, tx := range txs { if err := tx.Verify(); err != nil { - i.t.Config.Context.Log.Debug("Transaction failed verification due to %s, dropping vertex", err) - i.t.vtxBlocked.Abandon(vtxID) - return + i.t.Config.Context.Log.Debug("Transaction %s failed verification due to %s", tx.ID(), err) + } else { + validTxs = append(validTxs, tx) } } + if len(validTxs) != len(txs) { + i.t.Config.Context.Log.Debug("Abandoning %s due to failed transaction verification", vtxID) + + i.t.batch(validTxs, false /*=force*/, false /*=empty*/) + i.t.vtxBlocked.Abandon(vtxID) + return + } + i.t.Config.Context.Log.Verbo("Adding vertex to consensus:\n%s", i.vtx) i.t.Consensus.Add(i.vtx) @@ -65,8 +76,10 @@ func (i *issuer) Update() { } i.t.RequestID++ + polled := false if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) { i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes()) + polled = true } else if numVdrs < p.K { i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID) } @@ -75,6 +88,10 @@ func (i *issuer) Update() { for _, tx := range i.vtx.Txs() { i.t.txBlocked.Fulfill(tx.ID()) } + + if polled && len(i.t.polls.m) < i.t.Params.ConcurrentRepolls { + i.t.repoll() + } } type vtxIssuer struct{ i *issuer } diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 
4d6617f..4de2aa5 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -309,7 +309,7 @@ func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) { } // Force allows for a conflict to be issued - if txID := tx.ID(); !overlaps && !issuedTxs.Contains(txID) && (force || (t.Consensus.IsVirtuous(tx))) && !tx.Status().Decided() { + if txID := tx.ID(); !overlaps && !issuedTxs.Contains(txID) && (force || t.Consensus.IsVirtuous(tx)) && !tx.Status().Decided() { batch = append(batch, tx) issuedTxs.Add(txID) consumed.Union(inputs) diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index 6f5b5ed..bcf55ce 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -315,6 +315,18 @@ func TestEngineQuery(t *testing.T) { if !bytes.Equal(b, vtx1.Bytes()) { t.Fatalf("Wrong bytes") } + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + if vtxID.Equals(vtx0.ID()) { + return &Vtx{status: choices.Processing}, nil + } + if vtxID.Equals(vtx1.ID()) { + return vtx1, nil + } + t.Fatalf("Wrong vertex requested") + panic("Should have failed") + } + return vtx1, nil } te.Put(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) @@ -340,11 +352,12 @@ func TestEngineMultipleQuery(t *testing.T) { config.Params = avalanche.Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -2363,3 +2376,175 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { sender.PushQueryF = nil st.getVertex = nil } + +func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + 
config.State = st + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx} + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + Validity: errors.New(""), + }, + } + tx1.Ins.Add(utxos[1]) + + vtx0 := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 2, + status: choices.Processing, + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + sender := &common.SenderTest{} + sender.T = t + te.Config.Sender = sender + + reqID := new(uint32) + sender.PushQueryF = func(_ ids.ShortSet, requestID uint32, _ ids.ID, _ []byte) { + *reqID = requestID + } + + te.insert(vtx0) + + sender.PushQueryF = func(ids.ShortSet, uint32, ids.ID, []byte) { + t.Fatalf("should have failed verification") + } + + te.insert(vtx1) + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtx0.ID()): + return vtx0, nil + case vtxID.Equals(vtx1.ID()): + return vtx1, nil + } + return nil, errors.New("Unknown vtx") + } + + votes := ids.Set{} + votes.Add(vtx1.ID()) + te.Chits(vdr.ID(), *reqID, votes) + + if status := vtx0.Status(); status != choices.Accepted { + t.Fatalf("should have accepted the vertex due to transitive voting") + } +} + +func TestEnginePartiallyValidVertex(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := 
[]avalanche.Vertex{gVtx} + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + Validity: errors.New(""), + }, + } + tx1.Ins.Add(utxos[1]) + + vtx := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0, tx1}, + height: 1, + status: choices.Processing, + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + expectedVtxID := GenerateID() + st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + consumers := []snowstorm.Tx{} + for _, tx := range txs { + consumers = append(consumers, tx) + } + return &Vtx{ + parents: vts, + id: expectedVtxID, + txs: consumers, + status: choices.Processing, + bytes: []byte{1}, + }, nil + } + + sender := &common.SenderTest{} + sender.T = t + te.Config.Sender = sender + + sender.PushQueryF = func(_ ids.ShortSet, _ uint32, vtxID ids.ID, _ []byte) { + if !expectedVtxID.Equals(vtxID) { + t.Fatalf("wrong vertex queried") + } + } + + te.insert(vtx) +} diff --git a/snow/engine/avalanche/tx_job.go b/snow/engine/avalanche/tx_job.go index 0462bd3..f0ffe70 100644 --- a/snow/engine/avalanche/tx_job.go +++ b/snow/engine/avalanche/tx_job.go @@ -54,12 +54,9 @@ func (t *txJob) Execute() { case choices.Unknown, choices.Rejected: t.numDropped.Inc() case choices.Processing: - if err := t.tx.Verify(); err == nil { - t.tx.Accept() - t.numAccepted.Inc() - } else { - t.numDropped.Inc() - } + t.tx.Verify() + t.tx.Accept() + t.numAccepted.Inc() } } func (t *txJob) Bytes() []byte { return t.tx.Bytes() } diff --git a/snow/engine/avalanche/voter.go b/snow/engine/avalanche/voter.go index 72a1b53..7430495 100644 --- a/snow/engine/avalanche/voter.go +++ b/snow/engine/avalanche/voter.go @@ -5,6 +5,7 @@ package avalanche import ( "github.com/ava-labs/gecko/ids" + 
"github.com/ava-labs/gecko/snow/consensus/avalanche" "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) @@ -34,6 +35,7 @@ func (v *voter) Update() { if !finished { return } + results = v.bubbleVotes(results) v.t.Config.Context.Log.Debug("Finishing poll with:\n%s", &results) v.t.Consensus.RecordPoll(results) @@ -58,7 +60,33 @@ func (v *voter) Update() { v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce") - if len(v.t.polls.m) == 0 { + if len(v.t.polls.m) < v.t.Config.Params.ConcurrentRepolls { v.t.repoll() } } + +func (v *voter) bubbleVotes(votes ids.UniqueBag) ids.UniqueBag { + bubbledVotes := ids.UniqueBag{} + for _, vote := range votes.List() { + set := votes.GetSet(vote) + vtx, err := v.t.Config.State.GetVertex(vote) + if err != nil { + continue + } + + vts := []avalanche.Vertex{vtx} + for len(vts) > 0 { + vtx := vts[0] + vts = vts[1:] + + if status := vtx.Status(); status.Fetched() && !v.t.Consensus.VertexIssued(vtx) { + vts = append(vts, vtx.Parents()...) + } else if !status.Decided() && v.t.Consensus.VertexIssued(vtx) { + bubbledVotes.UnionSet(vtx.ID(), set) + } else { + v.t.Config.Context.Log.Debug("Dropping %d vote(s) for %s because the vertex is invalid", set.Len(), vtx.ID()) + } + } + } + return bubbledVotes +} diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index 9eebe0e..cda4a43 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -4,7 +4,10 @@ package common import ( + stdmath "math" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/math" ) // Bootstrapper implements the Engine interface. 
@@ -15,7 +18,7 @@ type Bootstrapper struct { acceptedFrontier ids.Set pendingAccepted ids.ShortSet - accepted ids.Bag + acceptedVotes map[[32]byte]uint64 RequestID uint32 } @@ -30,7 +33,7 @@ func (b *Bootstrapper) Initialize(config Config) { b.pendingAccepted.Add(vdrID) } - b.accepted.SetThreshold(config.Alpha) + b.acceptedVotes = make(map[[32]byte]uint64) } // Startup implements the Engine interface. @@ -95,10 +98,29 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta } b.pendingAccepted.Remove(validatorID) - b.accepted.Add(containerIDs.List()...) + weight := uint64(0) + if vdr, ok := b.Validators.Get(validatorID); ok { + weight = vdr.Weight() + } + + for _, containerID := range containerIDs.List() { + key := containerID.Key() + previousWeight := b.acceptedVotes[key] + newWeight, err := math.Add64(weight, previousWeight) + if err != nil { + newWeight = stdmath.MaxUint64 + } + b.acceptedVotes[key] = newWeight + } if b.pendingAccepted.Len() == 0 { - accepted := b.accepted.Threshold() + accepted := ids.Set{} + for key, weight := range b.acceptedVotes { + if weight >= b.Config.Alpha { + accepted.Add(ids.NewID(key)) + } + } + if size := accepted.Len(); size == 0 && b.Config.Beacons.Len() > 0 { b.Context.Log.Warn("Bootstrapping finished with no accepted frontier. 
This is likely a result of failing to be able to connect to the specified bootstraps, or no transactions have been issued on this network yet") } else { diff --git a/snow/engine/common/config.go b/snow/engine/common/config.go index e3e6b10..e75a957 100644 --- a/snow/engine/common/config.go +++ b/snow/engine/common/config.go @@ -15,7 +15,7 @@ type Config struct { Validators validators.Set Beacons validators.Set - Alpha int + Alpha uint64 Sender Sender Bootstrapable Bootstrapable } diff --git a/snow/engine/snowman/block_job.go b/snow/engine/snowman/block_job.go index aab227f..ec5f4a3 100644 --- a/snow/engine/snowman/block_job.go +++ b/snow/engine/snowman/block_job.go @@ -51,12 +51,9 @@ func (b *blockJob) Execute() { case choices.Unknown, choices.Rejected: b.numDropped.Inc() case choices.Processing: - if err := b.blk.Verify(); err == nil { - b.blk.Accept() - b.numAccepted.Inc() - } else { - b.numDropped.Inc() - } + b.blk.Verify() + b.blk.Accept() + b.numAccepted.Inc() } } func (b *blockJob) Bytes() []byte { return b.blk.Bytes() } diff --git a/snow/engine/snowman/bootstrapper.go b/snow/engine/snowman/bootstrapper.go index 88724ed..46ced68 100644 --- a/snow/engine/snowman/bootstrapper.go +++ b/snow/engine/snowman/bootstrapper.go @@ -113,7 +113,7 @@ func (b *bootstrapper) fetch(blkID ids.ID) { b.sendRequest(blkID) return } - b.addBlock(blk) + b.storeBlock(blk) } func (b *bootstrapper) sendRequest(blkID ids.ID) { @@ -132,6 +132,14 @@ func (b *bootstrapper) sendRequest(blkID ids.ID) { } func (b *bootstrapper) addBlock(blk snowman.Block) { + b.storeBlock(blk) + + if numPending := b.pending.Len(); numPending == 0 { + b.finish() + } +} + +func (b *bootstrapper) storeBlock(blk snowman.Block) { status := blk.Status() blkID := blk.ID() for status == choices.Processing { @@ -161,9 +169,6 @@ func (b *bootstrapper) addBlock(blk snowman.Block) { numPending := b.pending.Len() b.numPendingRequests.Set(float64(numPending)) - if numPending == 0 { - b.finish() - } } func (b 
*bootstrapper) finish() { diff --git a/snow/engine/snowman/bootstrapper_test.go b/snow/engine/snowman/bootstrapper_test.go index 9cb0968..6168df2 100644 --- a/snow/engine/snowman/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrapper_test.go @@ -62,7 +62,7 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest, Context: ctx, Validators: peers, Beacons: peers, - Alpha: peers.Len()/2 + 1, + Alpha: uint64(peers.Len()/2 + 1), Sender: sender, } return BootstrapConfig{ @@ -425,3 +425,54 @@ func TestBootstrapperFilterAccepted(t *testing.T) { t.Fatalf("Blk shouldn't be accepted") } } + +func TestBootstrapperPartialFetch(t *testing.T) { + config, _, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + + blkBytes0 := []byte{0} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + blkID0, + blkID1, + ) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID0): + return blk0, nil + case blkID.Equals(blkID1): + return nil, errUnknownBlock + default: + t.Fatal(errUnknownBlock) + panic(errUnknownBlock) + } + } + + sender.CantGet = false + bs.onFinished = func() {} + + bs.ForceAccepted(acceptedIDs) + + if bs.finished { + t.Fatalf("should have requested a block") + } + + if bs.pending.Len() != 1 { + t.Fatalf("wrong number pending") + } +} diff --git a/snow/engine/snowman/config_test.go b/snow/engine/snowman/config_test.go index 1b590b7..6cf7c51 100644 --- a/snow/engine/snowman/config_test.go +++ b/snow/engine/snowman/config_test.go @@ -23,10 +23,11 @@ func DefaultConfig() Config { }, Params: snowball.Parameters{ Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, + K: 1, + 
Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Consensus: &snowman.Topological{}, } diff --git a/snow/engine/snowman/engine_test.go b/snow/engine/snowman/engine_test.go index e149970..bc4ed59 100644 --- a/snow/engine/snowman/engine_test.go +++ b/snow/engine/snowman/engine_test.go @@ -25,8 +25,9 @@ type Blk struct { parent snowman.Block id ids.ID - height int - status choices.Status + height int + status choices.Status + validity error bytes []byte } @@ -36,7 +37,7 @@ func (b *Blk) Parent() snowman.Block { return b.parent } func (b *Blk) Accept() { b.status = choices.Accepted } func (b *Blk) Reject() { b.status = choices.Rejected } func (b *Blk) Status() choices.Status { return b.status } -func (b *Blk) Verify() error { return nil } +func (b *Blk) Verify() error { return b.validity } func (b *Blk) Bytes() []byte { return b.bytes } type sortBks []*Blk diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index e023a7d..947967a 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -305,7 +305,7 @@ func (t *Transitive) pullSample(blkID ids.ID) { } } -func (t *Transitive) pushSample(blk snowman.Block) { +func (t *Transitive) pushSample(blk snowman.Block) bool { t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators) p := t.Consensus.Parameters() vdrs := t.Config.Validators.Sample(p.K) @@ -315,11 +315,14 @@ func (t *Transitive) pushSample(blk snowman.Block) { } t.RequestID++ + queryIssued := false if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) { t.Config.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes()) + queryIssued = true } else if numVdrs < p.K { t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blk.ID()) } + return queryIssued } func (t *Transitive) deliver(blk snowman.Block) { @@ -338,9 +341,8 @@ func (t *Transitive) deliver(blk snowman.Block) { } 
t.Config.Context.Log.Verbo("Adding block to consensus: %s", blkID) - t.Consensus.Add(blk) - t.pushSample(blk) + polled := t.pushSample(blk) added := []snowman.Block{} dropped := []snowman.Block{} @@ -373,6 +375,10 @@ func (t *Transitive) deliver(blk snowman.Block) { t.blocked.Abandon(blkID) } + if polled && len(t.polls.m) < t.Params.ConcurrentRepolls { + t.repoll() + } + // Tracks performance statistics t.numBlkRequests.Set(float64(t.blkReqs.Len())) t.numBlockedBlk.Set(float64(t.pending.Len())) diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 1920d8c..f719cc3 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -280,6 +280,18 @@ func TestEngineQuery(t *testing.T) { if !bytes.Equal(b, blk1.Bytes()) { t.Fatalf("Wrong bytes") } + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blk.ID()): + return blk, nil + case blkID.Equals(blk1.ID()): + return blk1, nil + } + t.Fatalf("Wrong block requested") + panic("Should have failed") + } + return blk1, nil } te.Put(vdr.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) @@ -304,11 +316,12 @@ func TestEngineMultipleQuery(t *testing.T) { config := DefaultConfig() config.Params = snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } vdr0 := validators.GenerateRandomValidator(1) @@ -418,6 +431,17 @@ func TestEngineMultipleQuery(t *testing.T) { te.Chits(vdr1.ID(), *queryRequestID, blkSet) vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blk0.ID()): + return blk0, nil + case blkID.Equals(blk1.ID()): + return blk1, nil + } + t.Fatalf("Wrong block requested") + panic("Should have failed") + } + return blk1, nil } @@ -672,11 +696,12 
@@ func TestVoteCanceling(t *testing.T) { config := DefaultConfig() config.Params = snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } vdr0 := validators.GenerateRandomValidator(1) @@ -1076,3 +1101,60 @@ func TestEngineRetryFetch(t *testing.T) { t.Fatalf("Should have requested the block again") } } + +func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + validBlk := &Blk{ + parent: gBlk, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{1}, + } + + invalidBlk := &Blk{ + parent: validBlk, + id: GenerateID(), + height: 2, + status: choices.Processing, + validity: errors.New("invalid due to an undeclared dependency"), + bytes: []byte{2}, + } + + validBlkID := validBlk.ID() + invalidBlkID := invalidBlk.ID() + + reqID := new(uint32) + sender.PushQueryF = func(_ ids.ShortSet, requestID uint32, _ ids.ID, _ []byte) { + *reqID = requestID + } + + te.insert(validBlk) + + sender.PushQueryF = nil + + te.insert(invalidBlk) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(validBlkID): + return validBlk, nil + case blkID.Equals(invalidBlkID): + return invalidBlk, nil + } + return nil, errUnknownBlock + } + + votes := ids.Set{} + votes.Add(invalidBlkID) + te.Chits(vdr.ID(), *reqID, votes) + + vm.GetBlockF = nil + + if status := validBlk.Status(); status != choices.Accepted { + t.Fatalf("Should have bubbled invalid votes to the valid parent") + } +} diff --git a/snow/engine/snowman/voter.go b/snow/engine/snowman/voter.go index d9c8a7f..0c9779a 100644 --- a/snow/engine/snowman/voter.go +++ b/snow/engine/snowman/voter.go @@ -41,6 +41,10 @@ func (v *voter) Update() { return } + // To prevent any potential deadlocks with un-disclosed dependencies, votes + // 
must be bubbled to the nearest valid block + results = v.bubbleVotes(results) + v.t.Config.Context.Log.Verbo("Finishing poll [%d] with:\n%s", v.requestID, &results) v.t.Consensus.RecordPoll(results) @@ -53,7 +57,27 @@ func (v *voter) Update() { v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce") - if len(v.t.polls.m) == 0 { + if len(v.t.polls.m) < v.t.Config.Params.ConcurrentRepolls { v.t.repoll() } } + +func (v *voter) bubbleVotes(votes ids.Bag) ids.Bag { + bubbledVotes := ids.Bag{} + for _, vote := range votes.List() { + count := votes.Count(vote) + blk, err := v.t.Config.VM.GetBlock(vote) + if err != nil { + continue + } + + for blk.Status().Fetched() && !v.t.Consensus.Issued(blk) { + blk = blk.Parent() + } + + if !blk.Status().Decided() && v.t.Consensus.Issued(blk) { + bubbledVotes.AddCount(blk.ID(), count) + } + } + return bubbledVotes +} diff --git a/snow/networking/awaiting_connections.go b/snow/networking/awaiting_connections.go index 0b5047d..5887cea 100644 --- a/snow/networking/awaiting_connections.go +++ b/snow/networking/awaiting_connections.go @@ -4,31 +4,43 @@ package networking import ( + stdmath "math" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/math" ) // AwaitingConnections ... type AwaitingConnections struct { - Requested ids.ShortSet - NumRequired int - Finish func() + Requested validators.Set + WeightRequired uint64 + Finish func() - connected ids.ShortSet + weight uint64 } // Add ... func (aw *AwaitingConnections) Add(conn ids.ShortID) { - if aw.Requested.Contains(conn) { - aw.connected.Add(conn) + vdr, ok := aw.Requested.Get(conn) + if !ok { + return } + weight, err := math.Add64(vdr.Weight(), aw.weight) + if err != nil { + weight = stdmath.MaxUint64 + } + aw.weight = weight } // Remove ... 
func (aw *AwaitingConnections) Remove(conn ids.ShortID) { - aw.connected.Remove(conn) + vdr, ok := aw.Requested.Get(conn) + if !ok { + return + } + aw.weight -= vdr.Weight() } // Ready ... -func (aw *AwaitingConnections) Ready() bool { - return aw.connected.Len() >= aw.NumRequired -} +func (aw *AwaitingConnections) Ready() bool { return aw.weight >= aw.WeightRequired } diff --git a/snow/networking/router/subnet_router.go b/snow/networking/router/subnet_router.go index 93da106..ca1f6de 100644 --- a/snow/networking/router/subnet_router.go +++ b/snow/networking/router/subnet_router.go @@ -38,7 +38,9 @@ func (sr *ChainRouter) AddChain(chain *handler.Handler) { sr.lock.Lock() defer sr.lock.Unlock() - sr.chains[chain.Context().ChainID.Key()] = chain + chainID := chain.Context().ChainID + sr.log.Debug("Adding %s to the routing table", chainID) + sr.chains[chainID.Key()] = chain } // RemoveChain removes the specified chain so that incoming diff --git a/snow/validators/set.go b/snow/validators/set.go index 26dd22f..50210bf 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -24,6 +24,9 @@ type Set interface { // Add the provided validator to the set. Add(Validator) + // Get the validator from the set. + Get(ids.ShortID) (Validator, bool) + // Remove the validator with the specified ID. Remove(ids.ShortID) @@ -102,6 +105,22 @@ func (s *set) add(vdr Validator) { s.sampler.Weights = append(s.sampler.Weights, w) } +// Get implements the Set interface. +func (s *set) Get(vdrID ids.ShortID) (Validator, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.get(vdrID) +} + +func (s *set) get(vdrID ids.ShortID) (Validator, bool) { + index, ok := s.vdrMap[vdrID.Key()] + if !ok { + return nil, false + } + return s.vdrSlice[index], true +} + // Remove implements the Set interface. 
func (s *set) Remove(vdrID ids.ShortID) { s.lock.Lock() diff --git a/utils/crypto/secp256k1.go b/utils/crypto/secp256k1.go index 72f4451..b9fa41a 100644 --- a/utils/crypto/secp256k1.go +++ b/utils/crypto/secp256k1.go @@ -21,6 +21,9 @@ const ( // SECP256K1SKLen is the number of bytes in a secp2561k private key SECP256K1SKLen = 32 + + // SECP256K1PKLen is the number of bytes in a secp2561k public key + SECP256K1PKLen = 33 ) // FactorySECP256K1 ... diff --git a/utils/crypto/secp256k1r.go b/utils/crypto/secp256k1r.go index 6de3515..ef50618 100644 --- a/utils/crypto/secp256k1r.go +++ b/utils/crypto/secp256k1r.go @@ -27,6 +27,10 @@ const ( // SECP256K1RSKLen is the number of bytes in a secp2561k recoverable private // key SECP256K1RSKLen = 32 + + // SECP256K1RPKLen is the number of bytes in a secp2561k recoverable public + // key + SECP256K1RPKLen = 33 ) // FactorySECP256K1R ... diff --git a/utils/formatting/cb58_test.go b/utils/formatting/cb58_test.go index 74e7aae..7ecb09a 100644 --- a/utils/formatting/cb58_test.go +++ b/utils/formatting/cb58_test.go @@ -26,6 +26,59 @@ func TestCB58Single(t *testing.T) { } } +func TestCB58UnmarshalJSON(t *testing.T) { + expected := CB58{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255}} + cb58 := CB58{} + err := cb58.UnmarshalJSON([]byte("\"1NVSVezva3bAtJesnUj\"")) + if err != nil { + t.Fatalf("CB58.UnmarshalJSON unexpected error unmarshalling: %s", err) + } else if !bytes.Equal(cb58.Bytes, expected.Bytes) { + t.Fatalf("CB58.UnmarshalJSON got 0x%x, expected 0x%x", cb58, expected) + } +} + +func TestCB58UnmarshalJSONNull(t *testing.T) { + cb58 := CB58{} + err := cb58.UnmarshalJSON([]byte("null")) + if err != nil { + t.Fatalf("CB58.UnmarshalJSON unexpected error unmarshalling null: %s", err) + } +} + +func TestCB58UnmarshalJSONError(t *testing.T) { + tests := []struct { + in string + expected error + }{ + {"", errMissingQuotes}, + {"\"foo", errMissingQuotes}, + {"foo", errMissingQuotes}, + {"foo\"", errMissingQuotes}, + {"\"foo\"", 
errMissingChecksum}, + {"\"foobar\"", errBadChecksum}, + } + cb58 := CB58{} + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + err := cb58.UnmarshalJSON([]byte(tt.in)) + if err != tt.expected { + t.Errorf("got error %q, expected error %q", err, tt.expected) + } + }) + } +} + +func TestCB58MarshalJSONError(t *testing.T) { + cb58 := CB58{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255}} + expected := []byte("\"1NVSVezva3bAtJesnUj\"") + result, err := cb58.MarshalJSON() + if err != nil { + t.Fatalf("CB58.MarshalJSON unexpected error: %s", err) + } else if !bytes.Equal(result, expected) { + t.Fatalf("CB58.MarshalJSON got %q, expected %q", result, expected) + } +} + func TestCB58ParseBytes(t *testing.T) { ui := "1NVSVezva3bAtJesnUj" cb58 := CB58{} diff --git a/utils/ip.go b/utils/ip.go index cca055d..8a8985d 100644 --- a/utils/ip.go +++ b/utils/ip.go @@ -8,7 +8,6 @@ import ( "fmt" "net" "strconv" - "strings" ) var ( @@ -33,21 +32,21 @@ func (ipDesc IPDesc) PortString() string { } func (ipDesc IPDesc) String() string { - return fmt.Sprintf("%s%s", ipDesc.IP, ipDesc.PortString()) + return net.JoinHostPort(ipDesc.IP.String(), fmt.Sprintf("%d", ipDesc.Port)) } // ToIPDesc ... -// TODO: this was kinda hacked together, it should be verified. func ToIPDesc(str string) (IPDesc, error) { - parts := strings.Split(str, ":") - if len(parts) != 2 { + host, portStr, err := net.SplitHostPort(str) + if err != nil { return IPDesc{}, errBadIP } - port, err := strconv.ParseUint(parts[1], 10 /*=base*/, 16 /*=size*/) + port, err := strconv.ParseUint(portStr, 10 /*=base*/, 16 /*=size*/) if err != nil { + // TODO: Should this return a locally defined error? (e.g. errBadPort) return IPDesc{}, err } - ip := net.ParseIP(parts[0]) + ip := net.ParseIP(host) if ip == nil { return IPDesc{}, errBadIP } diff --git a/utils/ip_test.go b/utils/ip_test.go new file mode 100644 index 0000000..179014f --- /dev/null +++ b/utils/ip_test.go @@ -0,0 +1,151 @@ +// (c) 2020, Alex Willmer. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package utils + +import ( + "fmt" + "net" + "testing" +) + +func TestIPDescEqual(t *testing.T) { + tests := []struct { + ipDesc1 IPDesc + ipDesc2 IPDesc + result bool + }{ + // Expected equal + { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("127.0.0.1"), 0}, + true, + }, { + IPDesc{net.ParseIP("::1"), 0}, + IPDesc{net.ParseIP("::1"), 0}, + true, + }, { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("::ffff:127.0.0.1"), 0}, + true, + }, + + // Expected unequal + { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("1.2.3.4"), 0}, + false, + }, { + IPDesc{net.ParseIP("::1"), 0}, + IPDesc{net.ParseIP("2001::1"), 0}, + false, + }, { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("127.0.0.1"), 1}, + false, + }, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + if tt.ipDesc1.IP == nil { + t.Error("ipDesc1 nil") + } else if tt.ipDesc2.IP == nil { + t.Error("ipDesc2 nil") + } + result := tt.ipDesc1.Equal(tt.ipDesc2) + if result && result != tt.result { + t.Error("Expected IPDesc to be equal, but they were not") + } + if !result && result != tt.result { + t.Error("Expected IPDesc to be unequal, but they were equal") + } + }) + } +} + +func TestIPDescPortString(t *testing.T) { + tests := []struct { + ipDesc IPDesc + result string + }{ + {IPDesc{net.ParseIP("127.0.0.1"), 0}, ":0"}, + {IPDesc{net.ParseIP("::1"), 42}, ":42"}, + {IPDesc{net.ParseIP("::ffff:127.0.0.1"), 65535}, ":65535"}, + {IPDesc{net.IP{}, 1234}, ":1234"}, + } + for _, tt := range tests { + t.Run(tt.result, func(t *testing.T) { + if result := tt.ipDesc.PortString(); result != tt.result { + t.Errorf("Expected %q, got %q", tt.result, result) + } + }) + } +} + +func TestIPDescString(t *testing.T) { + tests := []struct { + ipDesc IPDesc + result string + }{ + {IPDesc{net.ParseIP("127.0.0.1"), 0}, "127.0.0.1:0"}, + {IPDesc{net.ParseIP("::1"), 42}, "[::1]:42"}, + 
{IPDesc{net.ParseIP("::ffff:127.0.0.1"), 65535}, "127.0.0.1:65535"}, + {IPDesc{net.IP{}, 1234}, ":1234"}, + } + for _, tt := range tests { + t.Run(tt.result, func(t *testing.T) { + if result := tt.ipDesc.String(); result != tt.result { + t.Errorf("Expected %q, got %q", tt.result, result) + } + }) + } +} + +func TestToIPDescError(t *testing.T) { + tests := []struct { + in string + out IPDesc + }{ + {"", IPDesc{}}, + {":", IPDesc{}}, + {"abc:", IPDesc{}}, + {":abc", IPDesc{}}, + {"abc:abc", IPDesc{}}, + {"127.0.0.1:", IPDesc{}}, + {":1", IPDesc{}}, + {"::1", IPDesc{}}, + {"::1:42", IPDesc{}}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + result, err := ToIPDesc(tt.in) + if err == nil { + t.Errorf("Unexpected success") + } + if !tt.out.Equal(result) { + t.Errorf("Expected %v, got %v", tt.out, result) + } + }) + } +} + +func TestToIPDesc(t *testing.T) { + tests := []struct { + in string + out IPDesc + }{ + {"127.0.0.1:42", IPDesc{net.ParseIP("127.0.0.1"), 42}}, + {"[::1]:42", IPDesc{net.ParseIP("::1"), 42}}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + result, err := ToIPDesc(tt.in) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if !tt.out.Equal(result) { + t.Errorf("Expected %#v, got %#v", tt.out, result) + } + }) + } +} diff --git a/utils/logging/log.go b/utils/logging/log.go index 04d69fb..a9e6aec 100644 --- a/utils/logging/log.go +++ b/utils/logging/log.go @@ -171,7 +171,7 @@ func (l *Log) format(level Level, format string, args ...interface{}) string { return fmt.Sprintf("%s[%s]%s %s\n", level, - time.Now().Format("01-02|15:04:05.000"), + time.Now().Format("01-02|15:04:05"), prefix, text) } diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go index c8428b5..47f65f9 100644 --- a/utils/math/safe_math_test.go +++ b/utils/math/safe_math_test.go @@ -10,6 +10,28 @@ import ( const maxUint64 uint64 = math.MaxUint64 +func TestMax64(t *testing.T) { + actual := Max64(0, maxUint64) + if 
actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } + actual = Max64(maxUint64, 0) + if actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } +} + +func TestMin64(t *testing.T) { + actual := Min64(0, maxUint64) + if actual != 0 { + t.Fatalf("Expected %d, got %d", 0, actual) + } + actual = Min64(maxUint64, 0) + if actual != 0 { + t.Fatalf("Expected %d, got %d", 0, actual) + } +} + func TestAdd64(t *testing.T) { sum, err := Add64(0, maxUint64) if err != nil { @@ -51,6 +73,20 @@ func TestAdd64(t *testing.T) { } } +func TestSub64(t *testing.T) { + actual, err := Sub64(2, 1) + if err != nil { + t.Fatalf("Sub64 failed unexpectedly") + } else if actual != 1 { + t.Fatalf("Expected %d, got %d", 1, actual) + } + + _, err = Sub64(1, 2) + if err == nil { + t.Fatalf("Sub64 did not fail in the manner expected") + } +} + func TestMul64(t *testing.T) { if prod, err := Mul64(maxUint64, 0); err != nil { t.Fatalf("Mul64 failed unexpectedly") @@ -68,3 +104,15 @@ func TestMul64(t *testing.T) { t.Fatalf("Mul64 overflowed") } } + +func TestDiff64(t *testing.T) { + actual := Diff64(0, maxUint64) + if actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } + + actual = Diff64(maxUint64, 0) + if actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } +} diff --git a/utils/timer/clock_test.go b/utils/timer/clock_test.go new file mode 100644 index 0000000..ef8eb67 --- /dev/null +++ b/utils/timer/clock_test.go @@ -0,0 +1,37 @@ +package timer + +import ( + "testing" + "time" +) + +func TestClockSet(t *testing.T) { + clock := Clock{} + clock.Set(time.Unix(1000000, 0)) + if clock.faked == false { + t.Error("Fake time was set, but .faked flag was not set") + } + if !clock.Time().Equal(time.Unix(1000000, 0)) { + t.Error("Fake time was set, but not returned") + } +} + +func TestClockSync(t *testing.T) { + clock := Clock{true, time.Unix(0, 0)} + clock.Sync() + if clock.faked == true { + t.Error("Clock 
was synced, but .faked flag was set") + } + if clock.Time().Equal(time.Unix(0, 0)) { + t.Error("Clock was synced, but returned a fake time") + } +} + +func TestClockUnix(t *testing.T) { + clock := Clock{true, time.Unix(-14159040, 0)} + actual := clock.Unix() + if actual != 0 { + // We are Unix of 1970s, Moon landings are irrelevant + t.Errorf("Expected time prior to Unix epoch to be clamped to 0, got %d", actual) + } +} diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index da9bfc7..cd00f98 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -24,6 +24,8 @@ const ( IntLen = 4 // LongLen is the number of bytes per long LongLen = 8 + // BoolLen is the number of bytes per bool + BoolLen = 1 ) var ( @@ -242,7 +244,9 @@ func (p *Packer) PackFixedByteSlices(byteSlices [][]byte) { } } -// UnpackFixedByteSlices unpack a byte slice slice to the byte array +// UnpackFixedByteSlices returns a byte slice slice from the byte array. +// Each byte slice has the specified size. The number of byte slices is +// read from the byte array. 
func (p *Packer) UnpackFixedByteSlices(size int) [][]byte { sliceSize := p.UnpackInt() bytes := [][]byte(nil) diff --git a/utils/wrappers/packing_test.go b/utils/wrappers/packing_test.go index a97463f..6937d27 100644 --- a/utils/wrappers/packing_test.go +++ b/utils/wrappers/packing_test.go @@ -5,10 +5,61 @@ package wrappers import ( "bytes" + "reflect" "testing" ) -func TestPackerByte(t *testing.T) { +const ( + ByteSentinal = 0 + ShortSentinal = 0 + IntSentinal = 0 + LongSentinal = 0 + BoolSentinal = false +) + +func TestPackerCheckSpace(t *testing.T) { + p := Packer{Offset: -1} + p.CheckSpace(1) + if !p.Errored() { + t.Fatal("Expected errNegativeOffset") + } + + p = Packer{} + p.CheckSpace(-1) + if !p.Errored() { + t.Fatal("Expected errInvalidInput") + } + + p = Packer{Bytes: []byte{0x01}, Offset: 1} + p.CheckSpace(1) + if !p.Errored() { + t.Fatal("Expected errBadLength") + } + + p = Packer{Bytes: []byte{0x01}, Offset: 2} + p.CheckSpace(0) + if !p.Errored() { + t.Fatal("Expected errBadLength, due to out of bounds offset") + } +} + +func TestPackerExpand(t *testing.T) { + p := Packer{Bytes: []byte{0x01}, Offset: 2} + p.Expand(1) + if !p.Errored() { + t.Fatal("packer.Expand didn't notice packer had out of bounds offset") + } + + p = Packer{Bytes: []byte{0x01, 0x02, 0x03}, Offset: 0} + p.Expand(1) + if p.Errored() { + t.Fatalf("packer.Expand unexpectedly had error %s", p.Err) + } else if len(p.Bytes) != 3 { + t.Fatalf("packer.Expand modified byte array, when it didn't need to") + } +} + +func TestPackerPackByte(t *testing.T) { p := Packer{MaxSize: 1} p.PackByte(0x01) @@ -25,9 +76,37 @@ func TestPackerByte(t *testing.T) { if !bytes.Equal(p.Bytes, expected) { t.Fatalf("Packer.PackByte wrote:\n%v\nExpected:\n%v", p.Bytes, expected) } + + p.PackByte(0x02) + if !p.Errored() { + t.Fatal("Packer.PackByte did not fail when attempt was beyond p.MaxSize") + } } -func TestPackerShort(t *testing.T) { +func TestPackerUnpackByte(t *testing.T) { + var ( + p = Packer{Bytes: 
[]byte{0x01}, Offset: 0} + actual = p.UnpackByte() + expected byte = 1 + expectedLen = ByteLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackByte unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackByte returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackByte left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackByte() + if !p.Errored() { + t.Fatalf("Packer.UnpackByte should have set error, due to attempted out of bounds read") + } else if actual != ByteSentinal { + t.Fatalf("Packer.UnpackByte returned %d, expected sentinal value %d", actual, ByteSentinal) + } +} + +func TestPackerPackShort(t *testing.T) { p := Packer{MaxSize: 2} p.PackShort(0x0102) @@ -46,7 +125,30 @@ func TestPackerShort(t *testing.T) { } } -func TestPackerInt(t *testing.T) { +func TestPackerUnpackShort(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01, 0x02}, Offset: 0} + actual = p.UnpackShort() + expected uint16 = 0x0102 + expectedLen = ShortLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackShort unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackShort returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackShort left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackShort() + if !p.Errored() { + t.Fatalf("Packer.UnpackShort should have set error, due to attempted out of bounds read") + } else if actual != ShortSentinal { + t.Fatalf("Packer.UnpackShort returned %d, expected sentinal value %d", actual, ShortSentinal) + } +} + +func TestPackerPackInt(t *testing.T) { p := Packer{MaxSize: 4} p.PackInt(0x01020304) @@ -63,9 +165,37 @@ func TestPackerInt(t *testing.T) { if !bytes.Equal(p.Bytes, expected) { t.Fatalf("Packer.PackInt wrote:\n%v\nExpected:\n%v", p.Bytes, expected) } + + p.PackInt(0x05060708) + if !p.Errored() { + t.Fatal("Packer.PackInt did 
not fail when attempt was beyond p.MaxSize") + } } -func TestPackerLong(t *testing.T) { +func TestPackerUnpackInt(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04}, Offset: 0} + actual = p.UnpackInt() + expected uint32 = 0x01020304 + expectedLen = IntLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackInt unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackInt returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackInt left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackInt() + if !p.Errored() { + t.Fatalf("Packer.UnpackInt should have set error, due to attempted out of bounds read") + } else if actual != IntSentinal { + t.Fatalf("Packer.UnpackInt returned %d, expected sentinal value %d", actual, IntSentinal) + } +} + +func TestPackerPackLong(t *testing.T) { p := Packer{MaxSize: 8} p.PackLong(0x0102030405060708) @@ -82,6 +212,175 @@ func TestPackerLong(t *testing.T) { if !bytes.Equal(p.Bytes, expected) { t.Fatalf("Packer.PackLong wrote:\n%v\nExpected:\n%v", p.Bytes, expected) } + + p.PackLong(0x090a0b0c0d0e0f00) + if !p.Errored() { + t.Fatal("Packer.PackLong did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackLong(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, Offset: 0} + actual = p.UnpackLong() + expected uint64 = 0x0102030405060708 + expectedLen = LongLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackLong unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackLong returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackLong left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackLong() + if !p.Errored() { + t.Fatalf("Packer.UnpackLong should have set error, due to attempted out of bounds read") + } else if actual != 
LongSentinal { + t.Fatalf("Packer.UnpackLong returned %d, expected sentinal value %d", actual, LongSentinal) + } +} + +func TestPackerPackFixedBytes(t *testing.T) { + p := Packer{MaxSize: 3} + + p.PackFixedBytes([]byte("Ava")) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 3 { + t.Fatalf("Packer.PackFixedBytes wrote %d byte(s) but expected %d byte(s)", size, 3) + } + + expected := []byte("Ava") + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackFixedBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackFixedBytes([]byte("Ava")) + if !p.Errored() { + t.Fatal("Packer.PackFixedBytes did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackFixedBytes(t *testing.T) { + var ( + p = Packer{Bytes: []byte("Ava")} + actual = p.UnpackFixedBytes(3) + expected = []byte("Ava") + expectedLen = 3 + ) + if p.Errored() { + t.Fatalf("Packer.UnpackFixedBytes unexpectedly raised %s", p.Err) + } else if !bytes.Equal(actual, expected) { + t.Fatalf("Packer.UnpackFixedBytes returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackFixedBytes left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackFixedBytes(3) + if !p.Errored() { + t.Fatalf("Packer.UnpackFixedBytes should have set error, due to attempted out of bounds read") + } else if actual != nil { + t.Fatalf("Packer.UnpackFixedBytes returned %v, expected sentinal value %v", actual, nil) + } +} + +func TestPackerPackBytes(t *testing.T) { + p := Packer{MaxSize: 7} + + p.PackBytes([]byte("Ava")) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 7 { + t.Fatalf("Packer.PackBytes wrote %d byte(s) but expected %d byte(s)", size, 7) + } + + expected := []byte("\x00\x00\x00\x03Ava") + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackBytes([]byte("Ava")) + if !p.Errored() { + 
t.Fatal("Packer.PackBytes did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackBytes(t *testing.T) { + var ( + p = Packer{Bytes: []byte("\x00\x00\x00\x03Ava")} + actual = p.UnpackBytes() + expected = []byte("Ava") + expectedLen = 7 + ) + if p.Errored() { + t.Fatalf("Packer.UnpackBytes unexpectedly raised %s", p.Err) + } else if !bytes.Equal(actual, expected) { + t.Fatalf("Packer.UnpackBytes returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackBytes left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackBytes() + if !p.Errored() { + t.Fatalf("Packer.UnpackBytes should have set error, due to attempted out of bounds read") + } else if actual != nil { + t.Fatalf("Packer.UnpackBytes returned %v, expected sentinal value %v", actual, nil) + } +} + +func TestPackerPackFixedByteSlices(t *testing.T) { + p := Packer{MaxSize: 10} + + p.PackFixedByteSlices([][]byte{[]byte("Ava"), []byte("Eva")}) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 10 { + t.Fatalf("Packer.PackFixedByteSlices wrote %d byte(s) but expected %d byte(s)", size, 13) + } + + expected := []byte("\x00\x00\x00\x02AvaEva") + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackPackFixedByteSlicesBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackFixedByteSlices([][]byte{[]byte("Ava"), []byte("Eva")}) + if !p.Errored() { + t.Fatal("Packer.PackFixedByteSlices did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackFixedByteSlices(t *testing.T) { + var ( + p = Packer{Bytes: []byte("\x00\x00\x00\x02AvaEva")} + actual = p.UnpackFixedByteSlices(3) + expected = [][]byte{[]byte("Ava"), []byte("Eva")} + expectedLen = 10 + ) + if p.Errored() { + t.Fatalf("Packer.UnpackFixedByteSlices unexpectedly raised %s", p.Err) + } else if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Packer.UnpackFixedByteSlices returned %d, but expected %d", 
actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackFixedByteSlices left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackFixedByteSlices(3) + if !p.Errored() { + t.Fatalf("Packer.UnpackFixedByteSlices should have set error, due to attempted out of bounds read") + } else if actual != nil { + t.Fatalf("Packer.UnpackFixedByteSlices returned %v, expected sentinal value %v", actual, nil) + } } func TestPackerString(t *testing.T) { @@ -151,3 +450,59 @@ func TestPackBool(t *testing.T) { t.Fatal("got back wrong values") } } + +func TestPackerPackBool(t *testing.T) { + p := Packer{MaxSize: 1} + + p.PackBool(true) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 1 { + t.Fatalf("Packer.PackBool wrote %d byte(s) but expected %d byte(s)", size, 1) + } + + expected := []byte{0x01} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackBool wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackBool(false) + if !p.Errored() { + t.Fatal("Packer.PackLong did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackBool(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01}, Offset: 0} + actual = p.UnpackBool() + expected bool = true + expectedLen = BoolLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackBool unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackBool returned %t, but expected %t", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackBool left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackBool() + if !p.Errored() { + t.Fatalf("Packer.UnpackBool should have set error, due to attempted out of bounds read") + } else if actual != BoolSentinal { + t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal) + } + + p = Packer{Bytes: []byte{0x42}, Offset: 0} + expected = false + actual = p.UnpackBool() + if !p.Errored() { + 
t.Fatalf("Packer.UnpackBool id not raise error for invalid boolean value %v", p.Bytes) + } else if actual != expected { + t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal) + } +} diff --git a/vms/avm/base_tx.go b/vms/avm/base_tx.go index 578aa23..33cba51 100644 --- a/vms/avm/base_tx.go +++ b/vms/avm/base_tx.go @@ -6,10 +6,12 @@ package avm import ( "errors" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( @@ -27,31 +29,17 @@ var ( // BaseTx is the basis of all transactions. type BaseTx struct { - metadata + ava.Metadata - NetID uint32 `serialize:"true"` // ID of the network this chain lives on - BCID ids.ID `serialize:"true"` // ID of the chain on which this transaction exists (prevents replay attacks) - Outs []*TransferableOutput `serialize:"true"` // The outputs of this transaction - Ins []*TransferableInput `serialize:"true"` // The inputs to this transaction + NetID uint32 `serialize:"true" json:"networkID"` // ID of the network this chain lives on + BCID ids.ID `serialize:"true" json:"blockchainID"` // ID of the chain on which this transaction exists (prevents replay attacks) + Outs []*ava.TransferableOutput `serialize:"true" json:"outputs"` // The outputs of this transaction + Ins []*ava.TransferableInput `serialize:"true" json:"inputs"` // The inputs to this transaction } -// NetworkID is the ID of the network on which this transaction exists -func (t *BaseTx) NetworkID() uint32 { return t.NetID } - -// ChainID is the ID of the chain on which this transaction exists -func (t *BaseTx) ChainID() ids.ID { return t.BCID } - -// Outputs track which outputs this transaction is producing. The returned array -// should not be modified. 
-func (t *BaseTx) Outputs() []*TransferableOutput { return t.Outs } - -// Inputs track which UTXOs this transaction is consuming. The returned array -// should not be modified. -func (t *BaseTx) Inputs() []*TransferableInput { return t.Ins } - // InputUTXOs track which UTXOs this transaction is consuming. -func (t *BaseTx) InputUTXOs() []*UTXOID { - utxos := []*UTXOID(nil) +func (t *BaseTx) InputUTXOs() []*ava.UTXOID { + utxos := []*ava.UTXOID(nil) for _, in := range t.Ins { utxos = append(utxos, &in.UTXOID) } @@ -67,20 +55,21 @@ func (t *BaseTx) AssetIDs() ids.Set { return assets } +// NumCredentials returns the number of expected credentials +func (t *BaseTx) NumCredentials() int { return len(t.Ins) } + // UTXOs returns the UTXOs transaction is producing. -func (t *BaseTx) UTXOs() []*UTXO { +func (t *BaseTx) UTXOs() []*ava.UTXO { txID := t.ID() - utxos := make([]*UTXO, len(t.Outs)) + utxos := make([]*ava.UTXO, len(t.Outs)) for i, out := range t.Outs { - utxos[i] = &UTXO{ - UTXOID: UTXOID{ + utxos[i] = &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: uint32(i), }, - Asset: Asset{ - ID: out.AssetID(), - }, - Out: out.Out, + Asset: ava.Asset{ID: out.AssetID()}, + Out: out.Out, } } return utxos @@ -97,12 +86,14 @@ func (t *BaseTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error return errWrongChainID } + fc := ava.NewFlowChecker() for _, out := range t.Outs { if err := out.Verify(); err != nil { return err } + fc.Produce(out.AssetID(), out.Output().Amount()) } - if !IsSortedTransferableOutputs(t.Outs, c) { + if !ava.IsSortedTransferableOutputs(t.Outs, c) { return errOutputsNotSorted } @@ -110,101 +101,37 @@ func (t *BaseTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error if err := in.Verify(); err != nil { return err } + fc.Consume(in.AssetID(), in.Input().Amount()) } - if !isSortedAndUniqueTransferableInputs(t.Ins) { + if !ava.IsSortedAndUniqueTransferableInputs(t.Ins) { return errInputsNotSortedUnique } - consumedFunds := 
map[[32]byte]uint64{} - for _, in := range t.Ins { - assetID := in.AssetID() - amount := in.Input().Amount() + // TODO: Add the Tx fee to the produced side - var err error - assetIDKey := assetID.Key() - consumedFunds[assetIDKey], err = math.Add64(consumedFunds[assetIDKey], amount) - - if err != nil { - return errInputOverflow - } - } - producedFunds := map[[32]byte]uint64{} - for _, out := range t.Outs { - assetID := out.AssetID() - amount := out.Output().Amount() - - var err error - assetIDKey := assetID.Key() - producedFunds[assetIDKey], err = math.Add64(producedFunds[assetIDKey], amount) - - if err != nil { - return errOutputOverflow - } + if err := fc.Verify(); err != nil { + return err } - // TODO: Add the Tx fee to the producedFunds - - for assetID, producedAssetAmount := range producedFunds { - consumedAssetAmount := consumedFunds[assetID] - if producedAssetAmount > consumedAssetAmount { - return errInsufficientFunds - } - } - - return t.metadata.Verify() + return t.Metadata.Verify() } // SemanticVerify that this transaction is valid to be spent. 
-func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { +func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { for i, in := range t.Ins { cred := creds[i] - fxIndex, err := vm.getFx(cred.Cred) + fxIndex, err := vm.getFx(cred) if err != nil { return err } fx := vm.fxs[fxIndex].Fx - utxoID := in.InputID() - utxo, err := vm.state.UTXO(utxoID) - if err == nil { - utxoAssetID := utxo.AssetID() - inAssetID := in.AssetID() - if !utxoAssetID.Equals(inAssetID) { - return errAssetIDMismatch - } - - if !vm.verifyFxUsage(fxIndex, inAssetID) { - return errIncompatibleFx - } - - err = fx.VerifyTransfer(uTx, utxo.Out, in.In, cred.Cred) - if err == nil { - continue - } + utxo, err := vm.getUTXO(&in.UTXOID) + if err != nil { return err } - inputTx, inputIndex := in.InputSource() - parent := UniqueTx{ - vm: vm, - txID: inputTx, - } - - if err := parent.Verify(); err != nil { - return errMissingUTXO - } else if status := parent.Status(); status.Decided() { - return errMissingUTXO - } - - utxos := parent.UTXOs() - - if uint32(len(utxos)) <= inputIndex || int(inputIndex) < 0 { - return errInvalidUTXO - } - - utxo = utxos[int(inputIndex)] - utxoAssetID := utxo.AssetID() inAssetID := in.AssetID() if !utxoAssetID.Equals(inAssetID) { @@ -215,9 +142,12 @@ func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) erro return errIncompatibleFx } - if err := fx.VerifyTransfer(uTx, utxo.Out, in.In, cred.Cred); err != nil { + if err := fx.VerifyTransfer(uTx, in.In, cred, utxo.Out); err != nil { return err } } return nil } + +// ExecuteWithSideEffects writes the batch with any additional side effects +func (t *BaseTx) ExecuteWithSideEffects(_ *VM, batch database.Batch) error { return batch.Write() } diff --git a/vms/avm/base_tx_test.go b/vms/avm/base_tx_test.go index 1a9b5a6..9033b3b 100644 --- a/vms/avm/base_tx_test.go +++ b/vms/avm/base_tx_test.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/gecko/ids" 
"github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -36,7 +37,7 @@ func TestBaseTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, @@ -65,7 +66,7 @@ func TestBaseTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x06, + 0x00, 0x00, 0x00, 0x05, // amount: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, // number of signatures: @@ -77,48 +78,46 @@ func TestBaseTxSerialization(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: 
[]*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, }} c := codec.NewDefault() c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) b, err := c.Marshal(&tx.UnsignedTx) @@ -137,56 +136,40 @@ func TestBaseTxGetters(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: 
&secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) txID := tx.ID() - if netID := tx.NetworkID(); netID != networkID { - t.Fatalf("Wrong network ID returned") - } else if bcID := tx.ChainID(); !bcID.Equals(chainID) { - t.Fatalf("Wrong chain ID returned") - } else if outs := tx.Outputs(); len(outs) != 1 { - t.Fatalf("Outputs returned wrong number of outs") - } else if out := outs[0]; out != tx.Outs[0] { - t.Fatalf("Outputs returned wrong output") - } else if ins := tx.Inputs(); len(ins) != 1 { - t.Fatalf("Inputs returned wrong number of ins") - } else if in := ins[0]; in != tx.Ins[0] { - t.Fatalf("Inputs returned wrong input") - } else if assets := tx.AssetIDs(); assets.Len() != 1 { + if assets := tx.AssetIDs(); assets.Len() != 1 { t.Fatalf("Wrong number of assets returned") } else if !assets.Contains(asset) { t.Fatalf("Wrong asset returned") @@ -198,8 +181,6 @@ func TestBaseTxGetters(t *testing.T) { t.Fatalf("Wrong output index returned") } else if assetID := utxo.AssetID(); !assetID.Equals(asset) { t.Fatalf("Wrong asset ID returned") - } else if utxoOut := utxo.Out; utxoOut != out.Out { - t.Fatalf("Wrong output returned") } } @@ -208,47 +189,45 @@ func TestBaseTxSyntacticVerify(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) 
c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -262,10 +241,12 @@ func TestBaseTxSyntacticVerifyNil(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) 
+ c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := (*BaseTx)(nil) @@ -279,47 +260,45 @@ func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: 0, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: 
ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -333,47 +312,45 @@ func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: ids.Empty, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: 
[]uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -387,38 +364,36 @@ func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - nil, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + Outs: []*ava.TransferableOutput{nil}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + 
OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -432,18 +407,20 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, + Outs: []*ava.TransferableOutput{ + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 2, OutputOwners: secp256k1fx.OutputOwners{ @@ -452,8 +429,8 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }, }, - &TransferableOutput{ - Asset: Asset{ID: asset}, + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 1, OutputOwners: secp256k1fx.OutputOwners{ @@ -463,9 +440,9 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }, }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, @@ -474,7 +451,7 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }), OutputIndex: 1, }, - Asset: Asset{ID: asset}, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 54321, Input: secp256k1fx.Input{ @@ -496,30 +473,28 @@ func TestBaseTxSyntacticVerifyInvalidInput(t 
*testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - nil, - }, + }}, + Ins: []*ava.TransferableInput{nil}, } tx.Initialize([]byte{}) @@ -533,30 +508,30 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + 
Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ + }}, + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, @@ -565,7 +540,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }), OutputIndex: 0, }, - Asset: Asset{ID: asset}, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: math.MaxUint64, Input: secp256k1fx.Input{ @@ -573,8 +548,8 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }, }, }, - &TransferableInput{ - UTXOID: UTXOID{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, @@ -583,7 +558,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }), OutputIndex: 1, }, - Asset: Asset{ID: asset}, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 1, Input: secp256k1fx.Input{ @@ -605,18 +580,20 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, + Outs: 
[]*ava.TransferableOutput{ + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 2, OutputOwners: secp256k1fx.OutputOwners{ @@ -625,8 +602,8 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }, }, }, - &TransferableOutput{ - Asset: Asset{ID: asset}, + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: math.MaxUint64, OutputOwners: secp256k1fx.OutputOwners{ @@ -636,26 +613,24 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }, }, }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -669,47 +644,45 @@ func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + 
c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: math.MaxUint64, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -723,47 +696,45 @@ func TestBaseTxSyntacticVerifyUninitialized(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) 
c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } if err := tx.SyntacticVerify(ctx, c, 0); err == nil { @@ -797,29 +768,25 @@ func TestBaseTxSemanticVerify(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: 
&BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -834,11 +801,9 @@ func TestBaseTxSemanticVerify(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -849,11 +814,11 @@ func TestBaseTxSemanticVerify(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err != nil { @@ -885,37 +850,31 @@ func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { } vm.batchTimeout = 0 - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: 
[]*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &testVerifiable{}, - }) + tx.Creds = append(tx.Creds, &ava.TestVerifiable{}) b, err := vm.codec.Marshal(tx) if err != nil { @@ -924,11 +883,11 @@ func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -960,33 +919,29 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { } vm.batchTimeout = 0 - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1001,11 +956,9 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - 
fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1016,11 +969,11 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1043,14 +996,14 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ - ID: ids.NewID([32]byte{1}), - Fx: &testFx{}, - }, &common.Fx{ ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }, + &common.Fx{ + ID: ids.NewID([32]byte{1}), + Fx: &testFx{}, + }, }, ) if err != nil { @@ -1064,26 +1017,22 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { codec: vm.codec, } - cr.RegisterType(&TestTransferable{}) + cr.RegisterType(&ava.TestTransferable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &TestTransferable{}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, }, - }, - }}} + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &ava.TestTransferable{}, + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1098,11 +1047,9 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1113,11 +1060,11 @@ func 
TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1151,35 +1098,29 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - [crypto.SECP256K1RSigLen]byte{}, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, }, }) @@ -1190,11 +1131,11 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1228,29 +1169,25 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: 
ids.Empty, - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1265,11 +1202,9 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1280,11 +1215,11 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1318,29 +1253,25 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: math.MaxUint32, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: math.MaxUint32, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + 
Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1355,11 +1286,9 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1370,11 +1299,11 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1407,44 +1336,36 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: 
[]*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1459,11 +1380,9 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1486,29 +1405,25 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 2, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err = vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1522,11 +1437,9 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { fixedSig = [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: 
[][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1537,11 +1450,11 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1574,44 +1487,36 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1626,11 +1531,9 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - 
pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1653,29 +1556,25 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err = vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1689,11 +1588,9 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { fixedSig = [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1704,11 +1601,11 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1751,48 +1648,40 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { codec: vm.codec, } - cr.RegisterType(&testVerifiable{}) + cr.RegisterType(&ava.TestVerifiable{}) genesisTx := 
GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1807,11 +1696,9 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1834,33 +1721,27 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := 
&Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &testVerifiable{}, - }) + tx.Creds = append(tx.Creds, &ava.TestVerifiable{}) b, err = vm.codec.Marshal(tx) if err != nil { @@ -1869,11 +1750,11 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1916,48 +1797,40 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { codec: vm.codec, } - cr.RegisterType(&testVerifiable{}) + cr.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - 
}, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1972,11 +1845,9 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1999,35 +1870,29 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: 
[][crypto.SECP256K1RSigLen]byte{ - [crypto.SECP256K1RSigLen]byte{}, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, }, }) @@ -2038,11 +1903,11 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { diff --git a/vms/avm/create_asset_tx.go b/vms/avm/create_asset_tx.go index c606b6b..9f95a15 100644 --- a/vms/avm/create_asset_tx.go +++ b/vms/avm/create_asset_tx.go @@ -10,6 +10,7 @@ import ( "unicode" "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" ) @@ -32,10 +33,10 @@ var ( // CreateAssetTx is a transaction that creates a new asset. type CreateAssetTx struct { BaseTx `serialize:"true"` - Name string `serialize:"true"` - Symbol string `serialize:"true"` - Denomination byte `serialize:"true"` - States []*InitialState `serialize:"true"` + Name string `serialize:"true" json:"name"` + Symbol string `serialize:"true" json:"symbol"` + Denomination byte `serialize:"true" json:"denomination"` + States []*InitialState `serialize:"true" json:"initialStates"` } // InitialStates track which virtual machines, and the initial state of these @@ -43,18 +44,18 @@ type CreateAssetTx struct { func (t *CreateAssetTx) InitialStates() []*InitialState { return t.States } // UTXOs returns the UTXOs transaction is producing. 
-func (t *CreateAssetTx) UTXOs() []*UTXO { +func (t *CreateAssetTx) UTXOs() []*ava.UTXO { txID := t.ID() utxos := t.BaseTx.UTXOs() for _, state := range t.States { for _, out := range state.Outs { - utxos = append(utxos, &UTXO{ - UTXOID: UTXOID{ + utxos = append(utxos, &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: uint32(len(utxos)), }, - Asset: Asset{ + Asset: ava.Asset{ ID: txID, }, Out: out, @@ -110,10 +111,5 @@ func (t *CreateAssetTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs return nil } -// SemanticVerify that this transaction is well-formed. -func (t *CreateAssetTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { - return t.BaseTx.SemanticVerify(vm, uTx, creds) -} - // Sort ... func (t *CreateAssetTx) Sort() { sortInitialStates(t.States) } diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go index 2dabd5c..a26a815 100644 --- a/vms/avm/create_asset_tx_test.go +++ b/vms/avm/create_asset_tx_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" @@ -33,7 +34,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // output: - 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, @@ -57,7 +58,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // input: - 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, 0x00, 
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, // name: @@ -72,7 +73,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x01, // InitialStates[0]: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, @@ -92,64 +93,60 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, }), - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: ids.NewID([32]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - Locktime: 54321, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID([20]byte{ - 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, - 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, - 0x6d, 0x55, 0xa9, 0x55, - }), - ids.NewShortID([20]byte{ - 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, - 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, - 0x43, 0xab, 0x08, 0x59, - }), - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 
0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), }, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, - 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, - 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, - 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, - }), - OutputIndex: 5, - }, - Asset: Asset{ - ID: ids.NewID([32]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }), - }, - In: &secp256k1fx.TransferInput{ - Amt: 123456789, - Input: secp256k1fx.Input{ - SigIndices: []uint32{3, 7}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, + 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, + 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, + 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, + }), + OutputIndex: 5, + }, + Asset: ava.Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + In: &secp256k1fx.TransferInput{ + Amt: 123456789, + Input: secp256k1fx.Input{ + SigIndices: []uint32{3, 7}, }, }, - }, + }}, }, Name: "Volatility Index", Symbol: "VIX", @@ -186,10 +183,12 @@ func TestCreateAssetTxSerialization(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) 
c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) b, err := c.Marshal(&tx.UnsignedTx) diff --git a/vms/avm/credential.go b/vms/avm/credential.go deleted file mode 100644 index d5fb8ee..0000000 --- a/vms/avm/credential.go +++ /dev/null @@ -1,36 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "errors" - - "github.com/ava-labs/gecko/vms/components/verify" -) - -var ( - errNilCredential = errors.New("nil credential is not valid") - errNilFxCredential = errors.New("nil feature extension credential is not valid") -) - -// Credential ... -type Credential struct { - Cred verify.Verifiable `serialize:"true"` -} - -// Credential returns the feature extension credential that this Credential is -// using. -func (cred *Credential) Credential() verify.Verifiable { return cred.Cred } - -// Verify implements the verify.Verifiable interface -func (cred *Credential) Verify() error { - switch { - case cred == nil: - return errNilCredential - case cred.Cred == nil: - return errNilFxCredential - default: - return cred.Cred.Verify() - } -} diff --git a/vms/avm/credential_test.go b/vms/avm/credential_test.go deleted file mode 100644 index 867a89f..0000000 --- a/vms/avm/credential_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "testing" -) - -func TestCredentialVerifyNil(t *testing.T) { - cred := (*Credential)(nil) - if err := cred.Verify(); err == nil { - t.Fatalf("Should have errored due to nil credential") - } -} - -func TestCredentialVerifyNilFx(t *testing.T) { - cred := &Credential{} - if err := cred.Verify(); err == nil { - t.Fatalf("Should have errored due to nil fx credential") - } -} - -func TestCredential(t *testing.T) { - cred := &Credential{ - Cred: &testVerifiable{}, - } - - if err := cred.Verify(); err != nil { - t.Fatal(err) - } - - if cred.Credential() != cred.Cred { - t.Fatalf("Should have returned the fx credential") - } -} diff --git a/vms/avm/export_tx.go b/vms/avm/export_tx.go new file mode 100644 index 0000000..d5222f4 --- /dev/null +++ b/vms/avm/export_tx.go @@ -0,0 +1,144 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" +) + +// ExportTx is the basis of all transactions. +type ExportTx struct { + BaseTx `serialize:"true"` + + Outs []*ava.TransferableOutput `serialize:"true" json:"exportedOutputs"` // The outputs this transaction is sending to the other chain +} + +// SyntacticVerify that this transaction is well-formed. 
+func (t *ExportTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error { + switch { + case t == nil: + return errNilTx + case t.NetID != ctx.NetworkID: + return errWrongNetworkID + case !t.BCID.Equals(ctx.ChainID): + return errWrongChainID + } + + fc := ava.NewFlowChecker() + for _, out := range t.BaseTx.Outs { + if err := out.Verify(); err != nil { + return err + } + fc.Produce(out.AssetID(), out.Output().Amount()) + } + if !ava.IsSortedTransferableOutputs(t.BaseTx.Outs, c) { + return errOutputsNotSorted + } + + for _, out := range t.Outs { + if err := out.Verify(); err != nil { + return err + } + fc.Produce(out.AssetID(), out.Output().Amount()) + } + if !ava.IsSortedTransferableOutputs(t.Outs, c) { + return errOutputsNotSorted + } + + for _, in := range t.Ins { + if err := in.Verify(); err != nil { + return err + } + fc.Consume(in.AssetID(), in.Input().Amount()) + } + if !ava.IsSortedAndUniqueTransferableInputs(t.Ins) { + return errInputsNotSortedUnique + } + + // TODO: Add the Tx fee to the produced side + + if err := fc.Verify(); err != nil { + return err + } + + return t.Metadata.Verify() +} + +// SemanticVerify that this transaction is valid to be spent. 
+func (t *ExportTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { + for i, in := range t.Ins { + cred := creds[i] + + fxIndex, err := vm.getFx(cred) + if err != nil { + return err + } + fx := vm.fxs[fxIndex].Fx + + utxo, err := vm.getUTXO(&in.UTXOID) + if err != nil { + return err + } + + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + + if !vm.verifyFxUsage(fxIndex, inAssetID) { + return errIncompatibleFx + } + + if err := fx.VerifyTransfer(uTx, in.In, cred, utxo.Out); err != nil { + return err + } + } + + for _, out := range t.Outs { + if !out.AssetID().Equals(vm.ava) { + return errWrongAssetID + } + } + + return nil +} + +// ExecuteWithSideEffects writes the batch with any additional side effects +func (t *ExportTx) ExecuteWithSideEffects(vm *VM, batch database.Batch) error { + txID := t.ID() + + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, vm.codec) + for i, out := range t.Outs { + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(t.BaseTx.Outs) + i), + }, + Asset: ava.Asset{ID: out.AssetID()}, + Out: out.Out, + } + if err := state.FundAVMUTXO(utxo); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} diff --git a/vms/avm/export_tx_test.go b/vms/avm/export_tx_test.go new file mode 100644 index 0000000..98df9b0 --- /dev/null +++ b/vms/avm/export_tx_test.go @@ -0,0 +1,389 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestExportTxSerialization(t *testing.T) { + expected := []byte{ + // txID: + 0x00, 0x00, 0x00, 0x04, + // networkID: + 0x00, 0x00, 0x00, 0x02, + // blockchainID: + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + // number of outs: + 0x00, 0x00, 0x00, 0x00, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // utxoID: + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + // output index + 0x00, 0x00, 0x00, 0x00, + // assetID: + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + // input: + // input ID: + 0x00, 0x00, 0x00, 0x05, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, + // num sig indices: + 0x00, 0x00, 0x00, 0x01, + // sig index[0]: + 0x00, 0x00, 0x00, 0x00, + // number of exported outs: + 0x00, 0x00, 0x00, 0x00, + } + + tx := &Tx{UnsignedTx: &ExportTx{BaseTx: BaseTx{ + NetID: 2, + BCID: ids.NewID([32]byte{ + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + }), + 
Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + })}, + Asset: ava.Asset{ID: ids.NewID([32]byte{ + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + })}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }}} + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) + c.RegisterType(&secp256k1fx.Credential{}) + + b, err := c.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +// Test issuing an export transaction. 
+func TestIssueExportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaID := genesisTx.ID() + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + vm := &VM{ + ava: avaID, + platform: platformID, + } + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + tx := &Tx{UnsignedTx: &ExportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: avaID, + OutputIndex: 1, + }, + Asset: ava.Asset{ID: avaID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: avaID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != 
common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + + parsedTx := txs[0] + if err := parsedTx.Verify(); err != nil { + t.Fatal(err) + } + parsedTx.Accept() + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxo := ava.UTXOID{ + TxID: tx.ID(), + OutputIndex: 0, + } + utxoID := utxo.InputID() + if _, err := state.AVMUTXO(utxoID); err != nil { + t.Fatal(err) + } +} + +// Test force accepting an export transaction. +func TestClearForceAcceptedExportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaID := genesisTx.ID() + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + vm := &VM{ + ava: avaID, + platform: platformID, + } + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + tx := &Tx{UnsignedTx: &ExportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: avaID, + OutputIndex: 1, + }, + Asset: ava.Asset{ID: avaID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: avaID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + + parsedTx := txs[0] + if err := parsedTx.Verify(); err != nil { + t.Fatal(err) + } + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxo := ava.UTXOID{ + TxID: tx.ID(), + OutputIndex: 0, + } + utxoID := utxo.InputID() + if err := state.SpendAVMUTXO(utxoID); err != nil { + t.Fatal(err) + } + + vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + parsedTx.Accept() + + smDB = vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state = ava.NewPrefixedState(smDB, vm.codec) + + if _, err := state.AVMUTXO(utxoID); err == nil { + t.Fatalf("should have failed to read the utxo") + } +} diff --git a/vms/avm/factory.go b/vms/avm/factory.go index b76606d..b8896ad 100644 --- a/vms/avm/factory.go +++ b/vms/avm/factory.go @@ -13,7 +13,15 @@ var ( ) // Factory ... -type Factory struct{} +type Factory struct { + AVA ids.ID + Platform ids.ID +} // New ... 
-func (f *Factory) New() interface{} { return &VM{} } +func (f *Factory) New() interface{} { + return &VM{ + ava: f.AVA, + platform: f.Platform, + } +} diff --git a/vms/avm/fx.go b/vms/avm/fx.go index cc4d8e4..432b177 100644 --- a/vms/avm/fx.go +++ b/vms/avm/fx.go @@ -23,27 +23,18 @@ type Fx interface { // provided utxo with no restrictions on the destination. If the transaction // can't spend the output based on the input and credential, a non-nil error // should be returned. - VerifyTransfer(tx, utxo, in, cred interface{}) error + VerifyTransfer(tx, in, cred, utxo interface{}) error // VerifyOperation verifies that the specified transaction can spend the // provided utxos conditioned on the result being restricted to the provided // outputs. If the transaction can't spend the output based on the input and // credential, a non-nil error should be returned. - VerifyOperation(tx interface{}, utxos, ins, creds, outs []interface{}) error + VerifyOperation(tx, op, cred interface{}, utxos []interface{}) error } -// FxTransferable is the interface a feature extension must provide to transfer -// value between features extensions. -type FxTransferable interface { +// FxOperation ... +type FxOperation interface { verify.Verifiable - // Amount returns how much value this output consumes of the asset in its - // transaction. 
- Amount() uint64 -} - -// FxAddressable is the interface a feature extension must provide to be able to -// be tracked as a part of the utxo set for a set of addresses -type FxAddressable interface { - Addresses() [][]byte + Outs() []verify.Verifiable } diff --git a/vms/avm/fx_test.go b/vms/avm/fx_test.go index a0863b2..59639e9 100644 --- a/vms/avm/fx_test.go +++ b/vms/avm/fx_test.go @@ -9,6 +9,6 @@ type testFx struct { func (fx *testFx) Initialize(_ interface{}) error { return fx.initialize } func (fx *testFx) VerifyTransfer(_, _, _, _ interface{}) error { return fx.verifyTransfer } -func (fx *testFx) VerifyOperation(_ interface{}, _, _, _, _ []interface{}) error { +func (fx *testFx) VerifyOperation(_, _, _ interface{}, _ []interface{}) error { return fx.verifyOperation } diff --git a/vms/avm/import_tx.go b/vms/avm/import_tx.go new file mode 100644 index 0000000..09dec6e --- /dev/null +++ b/vms/avm/import_tx.go @@ -0,0 +1,168 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" +) + +// ImportTx is a transaction that imports an asset from another blockchain. +type ImportTx struct { + BaseTx `serialize:"true"` + + Ins []*ava.TransferableInput `serialize:"true" json:"importedInputs"` // The inputs to this transaction +} + +// InputUTXOs track which UTXOs this transaction is consuming. 
+func (t *ImportTx) InputUTXOs() []*ava.UTXOID { + utxos := t.BaseTx.InputUTXOs() + for _, in := range t.Ins { + in.Symbol = true + utxos = append(utxos, &in.UTXOID) + } + return utxos +} + +// AssetIDs returns the IDs of the assets this transaction depends on +func (t *ImportTx) AssetIDs() ids.Set { + assets := t.BaseTx.AssetIDs() + for _, in := range t.Ins { + assets.Add(in.AssetID()) + } + return assets +} + +// NumCredentials returns the number of expected credentials +func (t *ImportTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.Ins) } + +var ( + errNoImportInputs = errors.New("no import inputs") +) + +// SyntacticVerify that this transaction is well-formed. +func (t *ImportTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { + switch { + case t == nil: + return errNilTx + case t.NetID != ctx.NetworkID: + return errWrongNetworkID + case !t.BCID.Equals(ctx.ChainID): + return errWrongChainID + case len(t.Ins) == 0: + return errNoImportInputs + } + + fc := ava.NewFlowChecker() + for _, out := range t.Outs { + if err := out.Verify(); err != nil { + return err + } + fc.Produce(out.AssetID(), out.Output().Amount()) + } + if !ava.IsSortedTransferableOutputs(t.Outs, c) { + return errOutputsNotSorted + } + + for _, in := range t.BaseTx.Ins { + if err := in.Verify(); err != nil { + return err + } + fc.Consume(in.AssetID(), in.Input().Amount()) + } + if !ava.IsSortedAndUniqueTransferableInputs(t.BaseTx.Ins) { + return errInputsNotSortedUnique + } + + for _, in := range t.Ins { + if err := in.Verify(); err != nil { + return err + } + fc.Consume(in.AssetID(), in.Input().Amount()) + } + if !ava.IsSortedAndUniqueTransferableInputs(t.Ins) { + return errInputsNotSortedUnique + } + + // TODO: Add the Tx fee to the produced side + + return fc.Verify() +} + +// SemanticVerify that this transaction is well-formed. 
+func (t *ImportTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { + if err := t.BaseTx.SemanticVerify(vm, uTx, creds); err != nil { + return err + } + + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + state := ava.NewPrefixedState(smDB, vm.codec) + + offset := t.BaseTx.NumCredentials() + for i, in := range t.Ins { + cred := creds[i+offset] + + fxIndex, err := vm.getFx(cred) + if err != nil { + return err + } + fx := vm.fxs[fxIndex].Fx + + utxoID := in.UTXOID.InputID() + utxo, err := state.PlatformUTXO(utxoID) + if err != nil { + return err + } + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + if !utxoAssetID.Equals(vm.ava) { + return errWrongAssetID + } + + if !vm.verifyFxUsage(fxIndex, inAssetID) { + return errIncompatibleFx + } + + if err := fx.VerifyTransfer(uTx, in.In, cred, utxo.Out); err != nil { + return err + } + } + return nil +} + +// ExecuteWithSideEffects writes the batch with any additional side effects +func (t *ImportTx) ExecuteWithSideEffects(vm *VM, batch database.Batch) error { + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, vm.codec) + for _, in := range t.Ins { + utxoID := in.UTXOID.InputID() + if err := state.SpendPlatformUTXO(utxoID); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} diff --git a/vms/avm/import_tx_test.go b/vms/avm/import_tx_test.go new file mode 100644 index 0000000..eb8f25b --- /dev/null +++ b/vms/avm/import_tx_test.go @@ -0,0 +1,366 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestImportTxSerialization(t *testing.T) { + expected := []byte{ + // txID: + 0x00, 0x00, 0x00, 0x03, + // networkID: + 0x00, 0x00, 0x00, 0x02, + // blockchainID: + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + // number of base outs: + 0x00, 0x00, 0x00, 0x00, + // number of base inputs: + 0x00, 0x00, 0x00, 0x00, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // utxoID: + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + // output index + 0x00, 0x00, 0x00, 0x00, + // assetID: + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + // input: + // input ID: + 0x00, 0x00, 0x00, 0x05, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, + // num sig indices: + 0x00, 0x00, 0x00, 0x01, + // sig index[0]: + 0x00, 0x00, 0x00, 0x00, + } + + tx := &Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: 2, + BCID: ids.NewID([32]byte{ + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + 
}), + }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + })}, + Asset: ava.Asset{ID: ids.NewID([32]byte{ + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + })}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }} + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) + c.RegisterType(&secp256k1fx.Credential{}) + + b, err := c.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +// Test issuing an import transaction. 
+func TestIssueImportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaID := genesisTx.ID() + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + vm := &VM{ + ava: avaID, + platform: platformID, + } + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + utxoID := ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + }), + } + + tx := &Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: utxoID, + Asset: ava.Asset{ID: avaID}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err == nil { + t.Fatal(err) + } + + // Provide the platform UTXO: + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + + utxo := 
&ava.UTXO{ + UTXOID: utxoID, + Asset: ava.Asset{ID: avaID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + } + + state := ava.NewPrefixedState(smDB, vm.codec) + if err := state.FundPlatformUTXO(utxo); err != nil { + t.Fatal(err) + } + + vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + t.Fatalf("should have issued the transaction correctly but errored: %s", err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + + parsedTx := txs[0] + parsedTx.Accept() + + smDB = vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state = ava.NewPrefixedState(smDB, vm.codec) + if _, err := state.PlatformUTXO(utxoID.InputID()); err == nil { + t.Fatalf("shouldn't have been able to read the utxo") + } +} + +// Test force accepting an import transaction. 
+func TestForceAcceptImportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{platform: platformID} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + utxoID := ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + }), + } + + tx := &Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: utxoID, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + parsedTx, err := vm.ParseTx(tx.Bytes()) + if err != nil { + t.Fatal(err) + } + + if err := parsedTx.Verify(); err == nil { + t.Fatalf("Should have failed verification") + } + + 
parsedTx.Accept() + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state := ava.NewPrefixedState(smDB, vm.codec) + utxoSource := utxoID.InputID() + if _, err := state.PlatformUTXO(utxoSource); err == nil { + t.Fatalf("shouldn't have been able to read the utxo") + } +} diff --git a/vms/avm/initial_state.go b/vms/avm/initial_state.go index 58dae84..c3d4b16 100644 --- a/vms/avm/initial_state.go +++ b/vms/avm/initial_state.go @@ -19,8 +19,8 @@ var ( // InitialState ... type InitialState struct { - FxID uint32 `serialize:"true"` - Outs []verify.Verifiable `serialize:"true"` + FxID uint32 `serialize:"true" json:"fxID"` + Outs []verify.Verifiable `serialize:"true" json:"outputs"` } // Verify implements the verify.Verifiable interface diff --git a/vms/avm/initial_state_test.go b/vms/avm/initial_state_test.go index 267947e..67c4b15 100644 --- a/vms/avm/initial_state_test.go +++ b/vms/avm/initial_state_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" @@ -52,14 +53,12 @@ func TestInitialStateVerifyNilOutput(t *testing.T) { func TestInitialStateVerifyInvalidOutput(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&testVerifiable{}) + c.RegisterType(&ava.TestVerifiable{}) numFxs := 1 is := InitialState{ FxID: 0, - Outs: []verify.Verifiable{ - &testVerifiable{err: errors.New("")}, - }, + Outs: []verify.Verifiable{&ava.TestVerifiable{Err: errors.New("")}}, } if err := is.Verify(c, numFxs); err == nil { t.Fatalf("Should have errored due to an invalid output") @@ -68,14 +67,14 @@ func TestInitialStateVerifyInvalidOutput(t *testing.T) { func TestInitialStateVerifyUnsortedOutputs(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&TestTransferable{}) + 
c.RegisterType(&ava.TestTransferable{}) numFxs := 1 is := InitialState{ FxID: 0, Outs: []verify.Verifiable{ - &TestTransferable{Val: 1}, - &TestTransferable{Val: 0}, + &ava.TestTransferable{Val: 1}, + &ava.TestTransferable{Val: 0}, }, } if err := is.Verify(c, numFxs); err == nil { diff --git a/vms/avm/operables.go b/vms/avm/operables.go deleted file mode 100644 index 7aac3a0..0000000 --- a/vms/avm/operables.go +++ /dev/null @@ -1,116 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "bytes" - "errors" - "sort" - - "github.com/ava-labs/gecko/utils" - "github.com/ava-labs/gecko/vms/components/codec" - "github.com/ava-labs/gecko/vms/components/verify" -) - -var ( - errNilOperableOutput = errors.New("nil operable output is not valid") - errNilOperableFxOutput = errors.New("nil operable feature extension output is not valid") - - errNilOperableInput = errors.New("nil operable input is not valid") - errNilOperableFxInput = errors.New("nil operable feature extension input is not valid") -) - -// OperableOutput ... -type OperableOutput struct { - Out verify.Verifiable `serialize:"true"` -} - -// Output returns the feature extension output that this Output is using. 
-func (out *OperableOutput) Output() verify.Verifiable { return out.Out } - -// Verify implements the verify.Verifiable interface -func (out *OperableOutput) Verify() error { - switch { - case out == nil: - return errNilOperableOutput - case out.Out == nil: - return errNilOperableFxOutput - default: - return out.Out.Verify() - } -} - -type innerSortOperableOutputs struct { - outs []*OperableOutput - codec codec.Codec -} - -func (outs *innerSortOperableOutputs) Less(i, j int) bool { - iOut := outs.outs[i] - jOut := outs.outs[j] - - iBytes, err := outs.codec.Marshal(&iOut.Out) - if err != nil { - return false - } - jBytes, err := outs.codec.Marshal(&jOut.Out) - if err != nil { - return false - } - return bytes.Compare(iBytes, jBytes) == -1 -} -func (outs *innerSortOperableOutputs) Len() int { return len(outs.outs) } -func (outs *innerSortOperableOutputs) Swap(i, j int) { o := outs.outs; o[j], o[i] = o[i], o[j] } - -func sortOperableOutputs(outs []*OperableOutput, c codec.Codec) { - sort.Sort(&innerSortOperableOutputs{outs: outs, codec: c}) -} -func isSortedOperableOutputs(outs []*OperableOutput, c codec.Codec) bool { - return sort.IsSorted(&innerSortOperableOutputs{outs: outs, codec: c}) -} - -// OperableInput ... -type OperableInput struct { - UTXOID `serialize:"true"` - - In verify.Verifiable `serialize:"true"` -} - -// Input returns the feature extension input that this Input is using. 
-func (in *OperableInput) Input() verify.Verifiable { return in.In } - -// Verify implements the verify.Verifiable interface -func (in *OperableInput) Verify() error { - switch { - case in == nil: - return errNilOperableInput - case in.In == nil: - return errNilOperableFxInput - default: - return verify.All(&in.UTXOID, in.In) - } -} - -type innerSortOperableInputs []*OperableInput - -func (ins innerSortOperableInputs) Less(i, j int) bool { - iID, iIndex := ins[i].InputSource() - jID, jIndex := ins[j].InputSource() - - switch bytes.Compare(iID.Bytes(), jID.Bytes()) { - case -1: - return true - case 0: - return iIndex < jIndex - default: - return false - } -} -func (ins innerSortOperableInputs) Len() int { return len(ins) } -func (ins innerSortOperableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] } - -func sortOperableInputs(ins []*OperableInput) { sort.Sort(innerSortOperableInputs(ins)) } -func isSortedAndUniqueOperableInputs(ins []*OperableInput) bool { - return utils.IsSortedAndUnique(innerSortOperableInputs(ins)) -} diff --git a/vms/avm/operables_test.go b/vms/avm/operables_test.go deleted file mode 100644 index 98e0996..0000000 --- a/vms/avm/operables_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "testing" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/codec" -) - -func TestOperableOutputVerifyNil(t *testing.T) { - oo := (*OperableOutput)(nil) - if err := oo.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable output") - } -} - -func TestOperableOutputVerifyNilFx(t *testing.T) { - oo := &OperableOutput{} - if err := oo.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable fx output") - } -} - -func TestOperableOutputVerify(t *testing.T) { - oo := &OperableOutput{ - Out: &testVerifiable{}, - } - if err := oo.Verify(); err != nil { - t.Fatal(err) - } - if oo.Output() != oo.Out { - t.Fatalf("Should have returned the fx output") - } -} - -func TestOperableOutputSorting(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&TestTransferable{}) - c.RegisterType(&testVerifiable{}) - - outs := []*OperableOutput{ - &OperableOutput{ - Out: &TestTransferable{Val: 1}, - }, - &OperableOutput{ - Out: &TestTransferable{Val: 0}, - }, - &OperableOutput{ - Out: &TestTransferable{Val: 0}, - }, - &OperableOutput{ - Out: &testVerifiable{}, - }, - } - - if isSortedOperableOutputs(outs, c) { - t.Fatalf("Shouldn't be sorted") - } - sortOperableOutputs(outs, c) - if !isSortedOperableOutputs(outs, c) { - t.Fatalf("Should be sorted") - } - if result := outs[0].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[1].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[2].Out.(*TestTransferable).Val; result != 1 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if _, ok := outs[3].Out.(*testVerifiable); !ok { - t.Fatalf("testVerifiable expected") - } -} - -func TestOperableInputVerifyNil(t *testing.T) { - oi := (*OperableInput)(nil) - if err := oi.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable input") 
- } -} - -func TestOperableInputVerifyNilFx(t *testing.T) { - oi := &OperableInput{} - if err := oi.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable fx input") - } -} - -func TestOperableInputVerify(t *testing.T) { - oi := &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - }, - In: &testVerifiable{}, - } - if err := oi.Verify(); err != nil { - t.Fatal(err) - } - if oi.Input() != oi.In { - t.Fatalf("Should have returned the fx input") - } -} - -func TestOperableInputSorting(t *testing.T) { - ins := []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, - }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{1}), - OutputIndex: 1, - }, - In: &testVerifiable{}, - }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - In: &testVerifiable{}, - }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{1}), - OutputIndex: 0, - }, - In: &testVerifiable{}, - }, - } - if isSortedAndUniqueOperableInputs(ins) { - t.Fatalf("Shouldn't be sorted") - } - sortOperableInputs(ins) - if !isSortedAndUniqueOperableInputs(ins) { - t.Fatalf("Should be sorted") - } - if result := ins[0].OutputIndex; result != 0 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 0, result) - } - if result := ins[1].OutputIndex; result != 1 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 1, result) - } - if result := ins[2].OutputIndex; result != 0 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 0, result) - } - if result := ins[3].OutputIndex; result != 1 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 1, result) - } - if result := ins[0].TxID; !result.Equals(ids.Empty) { - t.Fatalf("OutputIndex expected: %s ; result: %s", ids.Empty, result) - } - if result := ins[0].TxID; !result.Equals(ids.Empty) { - t.Fatalf("OutputIndex expected: %s ; result: %s", ids.Empty, result) - } - ins = append(ins, &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, 
- OutputIndex: 1, - }, - In: &testVerifiable{}, - }) - if isSortedAndUniqueOperableInputs(ins) { - t.Fatalf("Shouldn't be unique") - } -} diff --git a/vms/avm/operation.go b/vms/avm/operation.go index 516e8fa..3b5fc9a 100644 --- a/vms/avm/operation.go +++ b/vms/avm/operation.go @@ -9,20 +9,23 @@ import ( "sort" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( - errNilOperation = errors.New("nil operation is not valid") - errEmptyOperation = errors.New("empty operation is not valid") + errNilOperation = errors.New("nil operation is not valid") + errNilFxOperation = errors.New("nil fx operation is not valid") + errNotSortedAndUniqueUTXOIDs = errors.New("utxo IDs not sorted and unique") ) // Operation ... type Operation struct { - Asset `serialize:"true"` + ava.Asset `serialize:"true"` - Ins []*OperableInput `serialize:"true"` - Outs []*OperableOutput `serialize:"true"` + UTXOIDs []*ava.UTXOID `serialize:"true" json:"inputIDs"` + Op FxOperation `serialize:"true" json:"operation"` } // Verify implements the verify.Verifiable interface @@ -30,29 +33,13 @@ func (op *Operation) Verify(c codec.Codec) error { switch { case op == nil: return errNilOperation - case len(op.Ins) == 0 && len(op.Outs) == 0: - return errEmptyOperation + case op.Op == nil: + return errNilFxOperation + case !ava.IsSortedAndUniqueUTXOIDs(op.UTXOIDs): + return errNotSortedAndUniqueUTXOIDs + default: + return verify.All(&op.Asset, op.Op) } - - for _, in := range op.Ins { - if err := in.Verify(); err != nil { - return err - } - } - if !isSortedAndUniqueOperableInputs(op.Ins) { - return errInputsNotSortedUnique - } - - for _, out := range op.Outs { - if err := out.Verify(); err != nil { - return err - } - } - if !isSortedOperableOutputs(op.Outs, c) { - return errOutputsNotSorted - } - - return op.Asset.Verify() } type innerSortOperation struct { diff --git 
a/vms/avm/operation_test.go b/vms/avm/operation_test.go index 9215448..8948388 100644 --- a/vms/avm/operation_test.go +++ b/vms/avm/operation_test.go @@ -7,9 +7,19 @@ import ( "testing" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) +type testOperable struct { + ava.TestTransferable `serialize:"true"` + + Outputs []verify.Verifiable `serialize:"true"` +} + +func (o *testOperable) Outs() []verify.Verifiable { return o.Outputs } + func TestOperationVerifyNil(t *testing.T) { c := codec.NewDefault() op := (*Operation)(nil) @@ -21,106 +31,45 @@ func TestOperationVerifyNil(t *testing.T) { func TestOperationVerifyEmpty(t *testing.T) { c := codec.NewDefault() op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, + Asset: ava.Asset{ID: ids.Empty}, } if err := op.Verify(c); err == nil { t.Fatalf("Should have errored due to empty operation") } } -func TestOperationVerifyInvalidInput(t *testing.T) { +func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { c := codec.NewDefault() op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{}, - }, - } - if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to an invalid input") - } -} - -func TestOperationVerifyInvalidOutput(t *testing.T) { - c := codec.NewDefault() - op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Outs: []*OperableOutput{ - &OperableOutput{}, - }, - } - if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to an invalid output") - } -} - -func TestOperationVerifyInputsNotSorted(t *testing.T) { - c := codec.NewDefault() - op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: 
ids.Empty, + OutputIndex: 1, }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - In: &testVerifiable{}, + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, }, }, + Op: &testOperable{}, } if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to unsorted inputs") - } -} - -func TestOperationVerifyOutputsNotSorted(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&TestTransferable{}) - - op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Outs: []*OperableOutput{ - &OperableOutput{ - Out: &TestTransferable{Val: 1}, - }, - &OperableOutput{ - Out: &TestTransferable{Val: 0}, - }, - }, - } - if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to unsorted outputs") + t.Fatalf("Should have errored due to unsorted utxoIDs") } } func TestOperationVerify(t *testing.T) { c := codec.NewDefault() op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Outs: []*OperableOutput{ - &OperableOutput{ - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, }, }, + Op: &testOperable{}, } if err := op.Verify(c); err != nil { t.Fatal(err) @@ -129,36 +78,28 @@ func TestOperationVerify(t *testing.T) { func TestOperationSorting(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&testVerifiable{}) + c.RegisterType(&testOperable{}) ops := []*Operation{ &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, }, }, + Op: &testOperable{}, }, &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + 
TxID: ids.Empty, + OutputIndex: 0, }, }, + Op: &testOperable{}, }, } if isSortedAndUniqueOperations(ops, c) { @@ -169,18 +110,14 @@ func TestOperationSorting(t *testing.T) { t.Fatalf("Should be sorted") } ops = append(ops, &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, }, }, + Op: &testOperable{}, }) if isSortedAndUniqueOperations(ops, c) { t.Fatalf("Shouldn't be unique") diff --git a/vms/avm/operation_tx.go b/vms/avm/operation_tx.go index 07d8947..9384f8d 100644 --- a/vms/avm/operation_tx.go +++ b/vms/avm/operation_tx.go @@ -8,11 +8,14 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( errOperationsNotSortedUnique = errors.New("operations not sorted and unique") + errNoOperations = errors.New("an operationTx must have at least one operation") errDoubleSpend = errors.New("inputs attempt to double spend an input") ) @@ -20,7 +23,7 @@ var ( // OperationTx is a transaction with no credentials. type OperationTx struct { BaseTx `serialize:"true"` - Ops []*Operation `serialize:"true"` + Ops []*Operation `serialize:"true" json:"operations"` } // Operations track which ops this transaction is performing. The returned array @@ -28,12 +31,10 @@ type OperationTx struct { func (t *OperationTx) Operations() []*Operation { return t.Ops } // InputUTXOs track which UTXOs this transaction is consuming. 
-func (t *OperationTx) InputUTXOs() []*UTXOID { +func (t *OperationTx) InputUTXOs() []*ava.UTXOID { utxos := t.BaseTx.InputUTXOs() for _, op := range t.Ops { - for _, in := range op.Ins { - utxos = append(utxos, &in.UTXOID) - } + utxos = append(utxos, op.UTXOIDs...) } return utxos } @@ -47,23 +48,24 @@ func (t *OperationTx) AssetIDs() ids.Set { return assets } +// NumCredentials returns the number of expected credentials +func (t *OperationTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.Ops) } + // UTXOs returns the UTXOs transaction is producing. -func (t *OperationTx) UTXOs() []*UTXO { +func (t *OperationTx) UTXOs() []*ava.UTXO { txID := t.ID() utxos := t.BaseTx.UTXOs() for _, op := range t.Ops { asset := op.AssetID() - for _, out := range op.Outs { - utxos = append(utxos, &UTXO{ - UTXOID: UTXOID{ + for _, out := range op.Op.Outs() { + utxos = append(utxos, &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: uint32(len(utxos)), }, - Asset: Asset{ - ID: asset, - }, - Out: out.Out, + Asset: ava.Asset{ID: asset}, + Out: out, }) } } @@ -76,6 +78,8 @@ func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs i switch { case t == nil: return errNilTx + case len(t.Ops) == 0: + return errNoOperations } if err := t.BaseTx.SyntacticVerify(ctx, c, numFxs); err != nil { @@ -91,8 +95,8 @@ func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs i if err := op.Verify(c); err != nil { return err } - for _, in := range op.Ins { - inputID := in.InputID() + for _, utxoID := range op.UTXOIDs { + inputID := utxoID.InputID() if inputs.Contains(inputID) { return errDoubleSpend } @@ -106,77 +110,30 @@ func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs i } // SemanticVerify that this transaction is well-formed. 
-func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { +func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { if err := t.BaseTx.SemanticVerify(vm, uTx, creds); err != nil { return err } - offset := len(t.BaseTx.Ins) - for _, op := range t.Ops { + + offset := t.BaseTx.NumCredentials() + for i, op := range t.Ops { opAssetID := op.AssetID() utxos := []interface{}{} - ins := []interface{}{} - credIntfs := []interface{}{} - outs := []interface{}{} - - for i, in := range op.Ins { - ins = append(ins, in.In) - - cred := creds[i+offset] - credIntfs = append(credIntfs, cred.Cred) - - utxoID := in.InputID() - utxo, err := vm.state.UTXO(utxoID) - if err == nil { - utxoAssetID := utxo.AssetID() - if !utxoAssetID.Equals(opAssetID) { - return errAssetIDMismatch - } - - utxos = append(utxos, utxo.Out) - continue + for _, utxoID := range op.UTXOIDs { + utxo, err := vm.getUTXO(utxoID) + if err != nil { + return err } - inputTx, inputIndex := in.InputSource() - parent := UniqueTx{ - vm: vm, - txID: inputTx, - } - - if err := parent.Verify(); err != nil { - return errMissingUTXO - } else if status := parent.Status(); status.Decided() { - return errMissingUTXO - } - - parentUTXOs := parent.UTXOs() - - if uint32(len(parentUTXOs)) <= inputIndex || int(inputIndex) < 0 { - return errInvalidUTXO - } - - utxo = parentUTXOs[int(inputIndex)] - utxoAssetID := utxo.AssetID() if !utxoAssetID.Equals(opAssetID) { return errAssetIDMismatch } utxos = append(utxos, utxo.Out) } - offset += len(op.Ins) - for _, out := range op.Outs { - outs = append(outs, out.Out) - } - var fxObj interface{} - switch { - case len(ins) > 0: - fxObj = ins[0] - case len(outs) > 0: - fxObj = outs[0] - } - - fxIndex, err := vm.getFx(fxObj) + fxIndex, err := vm.getFx(op.Op) if err != nil { return err } @@ -186,8 +143,7 @@ func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) return errIncompatibleFx } - err = 
fx.VerifyOperation(uTx, utxos, ins, credIntfs, outs) - if err != nil { + if err := fx.VerifyOperation(uTx, op.Op, creds[offset+i], utxos); err != nil { return err } } diff --git a/vms/avm/prefixed_state.go b/vms/avm/prefixed_state.go index 1314857..8a1898d 100644 --- a/vms/avm/prefixed_state.go +++ b/vms/avm/prefixed_state.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/vms/components/ava" ) const ( @@ -37,31 +38,31 @@ func (s *prefixedState) UniqueTx(tx *UniqueTx) *UniqueTx { } // Tx attempts to load a transaction from storage. -func (s *prefixedState) Tx(id ids.ID) (*Tx, error) { return s.state.Tx(s.uniqueID(id, txID, s.tx)) } +func (s *prefixedState) Tx(id ids.ID) (*Tx, error) { return s.state.Tx(uniqueID(id, txID, s.tx)) } // SetTx saves the provided transaction to storage. func (s *prefixedState) SetTx(id ids.ID, tx *Tx) error { - return s.state.SetTx(s.uniqueID(id, txID, s.tx), tx) + return s.state.SetTx(uniqueID(id, txID, s.tx), tx) } // UTXO attempts to load a utxo from storage. -func (s *prefixedState) UTXO(id ids.ID) (*UTXO, error) { - return s.state.UTXO(s.uniqueID(id, utxoID, s.utxo)) +func (s *prefixedState) UTXO(id ids.ID) (*ava.UTXO, error) { + return s.state.UTXO(uniqueID(id, utxoID, s.utxo)) } // SetUTXO saves the provided utxo to storage. -func (s *prefixedState) SetUTXO(id ids.ID, utxo *UTXO) error { - return s.state.SetUTXO(s.uniqueID(id, utxoID, s.utxo), utxo) +func (s *prefixedState) SetUTXO(id ids.ID, utxo *ava.UTXO) error { + return s.state.SetUTXO(uniqueID(id, utxoID, s.utxo), utxo) } // Status returns the status of the provided transaction id from storage. func (s *prefixedState) Status(id ids.ID) (choices.Status, error) { - return s.state.Status(s.uniqueID(id, txStatusID, s.txStatus)) + return s.state.Status(uniqueID(id, txStatusID, s.txStatus)) } // SetStatus saves the provided status to storage. 
func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) error { - return s.state.SetStatus(s.uniqueID(id, txStatusID, s.txStatus), status) + return s.state.SetStatus(uniqueID(id, txStatusID, s.txStatus), status) } // DBInitialized returns the status of this database. If the database is @@ -76,21 +77,12 @@ func (s *prefixedState) SetDBInitialized(status choices.Status) error { // Funds returns the mapping from the 32 byte representation of an address to a // list of utxo IDs that reference the address. func (s *prefixedState) Funds(id ids.ID) ([]ids.ID, error) { - return s.state.IDs(s.uniqueID(id, fundsID, s.funds)) + return s.state.IDs(uniqueID(id, fundsID, s.funds)) } // SetFunds saves the mapping from address to utxo IDs to storage. func (s *prefixedState) SetFunds(id ids.ID, idSlice []ids.ID) error { - return s.state.SetIDs(s.uniqueID(id, fundsID, s.funds), idSlice) -} - -func (s *prefixedState) uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { - if cachedIDIntf, found := cacher.Get(id); found { - return cachedIDIntf.(ids.ID) - } - uID := id.Prefix(prefix) - cacher.Put(id, uID) - return uID + return s.state.SetIDs(uniqueID(id, fundsID, s.funds), idSlice) } // SpendUTXO consumes the provided utxo. 
@@ -103,7 +95,7 @@ func (s *prefixedState) SpendUTXO(utxoID ids.ID) error { return err } - addressable, ok := utxo.Out.(FxAddressable) + addressable, ok := utxo.Out.(ava.Addressable) if !ok { return nil } @@ -126,13 +118,13 @@ func (s *prefixedState) removeUTXO(addrs [][]byte, utxoID ids.ID) error { } // FundUTXO adds the provided utxo to the database -func (s *prefixedState) FundUTXO(utxo *UTXO) error { +func (s *prefixedState) FundUTXO(utxo *ava.UTXO) error { utxoID := utxo.InputID() if err := s.SetUTXO(utxoID, utxo); err != nil { return err } - addressable, ok := utxo.Out.(FxAddressable) + addressable, ok := utxo.Out.(ava.Addressable) if !ok { return nil } diff --git a/vms/avm/prefixed_state_test.go b/vms/avm/prefixed_state_test.go index 2b5d739..8a69f91 100644 --- a/vms/avm/prefixed_state_test.go +++ b/vms/avm/prefixed_state_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -18,40 +19,36 @@ func TestPrefixedSetsAndGets(t *testing.T) { vm := GenesisVM(t) state := vm.state - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + Out: &ava.TestVerifiable{}, } - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: 
ids.Empty, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(tx.UnsignedTx) if err != nil { @@ -66,11 +63,9 @@ func TestPrefixedSetsAndGets(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -118,15 +113,15 @@ func TestPrefixedFundingNoAddresses(t *testing.T) { vm := GenesisVM(t) state := vm.state - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + Out: &ava.TestVerifiable{}, } if err := state.FundUTXO(utxo); err != nil { @@ -143,12 +138,12 @@ func TestPrefixedFundingAddresses(t *testing.T) { vm.codec.RegisterType(&testAddressable{}) - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, + Asset: ava.Asset{ID: ids.Empty}, Out: &testAddressable{ Addrs: [][]byte{ []byte{0}, diff --git a/vms/avm/service.go b/vms/avm/service.go index 94121ad..49c96c4 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -8,16 +8,15 @@ import ( "errors" "fmt" "net/http" - "sort" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/utils" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/json" 
"github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -75,6 +74,10 @@ type GetTxStatusReply struct { Status choices.Status `json:"status"` } +var ( + errNilTxID = errors.New("nil transaction ID") +) + // GetTxStatus returns the status of the specified transaction func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { service.vm.ctx.Log.Verbo("GetTxStatus called with %s", args.TxID) @@ -163,7 +166,7 @@ func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescr if status := tx.Status(); !status.Fetched() { return errUnknownAssetID } - createAssetTx, ok := tx.t.tx.UnsignedTx.(*CreateAssetTx) + createAssetTx, ok := tx.UnsignedTx.(*CreateAssetTx) if !ok { return errTxNotCreateAsset } @@ -214,7 +217,7 @@ func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply for _, utxo := range utxos { if utxo.AssetID().Equals(assetID) { - transferable, ok := utxo.Out.(FxTransferable) + transferable, ok := utxo.Out.(ava.Transferable) if !ok { continue } @@ -596,7 +599,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) amountSpent := uint64(0) time := service.vm.clock.Unix() - ins := []*TransferableInput{} + ins := []*ava.TransferableInput{} keys := [][]*crypto.PrivateKeySECP256K1R{} for _, utxo := range utxos { if !utxo.AssetID().Equals(assetID) { @@ -606,7 +609,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) if err != nil { continue } - input, ok := inputIntf.(FxTransferable) + input, ok := inputIntf.(ava.Transferable) if !ok { continue } @@ -616,9 +619,9 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) } amountSpent = spent - in := &TransferableInput{ + in := &ava.TransferableInput{ UTXOID: utxo.UTXOID, - Asset: Asset{ID: assetID}, + Asset: 
ava.Asset{ID: assetID}, In: input, } @@ -634,44 +637,36 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) return errInsufficientFunds } - SortTransferableInputsWithSigners(ins, keys) + ava.SortTransferableInputsWithSigners(ins, keys) - outs := []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: uint64(args.Amount), - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, + outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, }, }, - } + }} if amountSpent > uint64(args.Amount) { changeAddr := kc.Keys[0].PublicKey().Address() - outs = append(outs, - &TransferableOutput{ - Asset: Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - uint64(args.Amount), - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{changeAddr}, - }, + outs = append(outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, }, }, - ) + }) } - SortTransferableOutputs(outs, service.vm.codec) + ava.SortTransferableOutputs(outs, service.vm.codec) tx := Tx{ UnsignedTx: &BaseTx{ @@ -700,7 +695,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) cred.Sigs = append(cred.Sigs, fixedSig) } - tx.Creds = append(tx.Creds, &Credential{Cred: cred}) + tx.Creds = append(tx.Creds, cred) } b, err := service.vm.codec.Marshal(tx) @@ -717,42 +712,6 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) return nil } -type 
innerSortTransferableInputsWithSigners struct { - ins []*TransferableInput - signers [][]*crypto.PrivateKeySECP256K1R -} - -func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { - iID, iIndex := ins.ins[i].InputSource() - jID, jIndex := ins.ins[j].InputSource() - - switch bytes.Compare(iID.Bytes(), jID.Bytes()) { - case -1: - return true - case 0: - return iIndex < jIndex - default: - return false - } -} -func (ins *innerSortTransferableInputsWithSigners) Len() int { return len(ins.ins) } -func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { - ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] - ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] -} - -// SortTransferableInputsWithSigners sorts the inputs and signers based on the -// input's utxo ID -func SortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) { - sort.Sort(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) -} - -// IsSortedAndUniqueTransferableInputsWithSigners returns true if the inputs are -// sorted and unique -func IsSortedAndUniqueTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) bool { - return utils.IsSortedAndUnique(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) -} - // CreateMintTxArgs are arguments for passing into CreateMintTx requests type CreateMintTxArgs struct { Amount json.Uint64 `json:"amount"` @@ -828,47 +787,35 @@ func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, re continue } - tx := Tx{ - UnsignedTx: &OperationTx{ - BaseTx: BaseTx{ - NetID: service.vm.ctx.NetworkID, - BCID: service.vm.ctx.ChainID, - }, - Ops: []*Operation{ - &Operation{ - Asset: Asset{ - ID: assetID, + tx := Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + }, + Ops: []*Operation{ + &Operation{ + Asset: ava.Asset{ID: assetID}, + UTXOIDs: 
[]*ava.UTXOID{ + &utxo.UTXOID, + }, + Op: &secp256k1fx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: sigs, }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: utxo.UTXOID, - In: &secp256k1fx.MintInput{ - Input: secp256k1fx.Input{ - SigIndices: sigs, - }, - }, - }, + MintOutput: secp256k1fx.MintOutput{ + OutputOwners: out.OutputOwners, }, - Outs: []*OperableOutput{ - &OperableOutput{ - &secp256k1fx.MintOutput{ - OutputOwners: out.OutputOwners, - }, - }, - &OperableOutput{ - &secp256k1fx.TransferOutput{ - Amt: uint64(args.Amount), - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, + TransferOutput: secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, }, }, }, }, }, - } + }} txBytes, err := service.vm.codec.Marshal(&tx) if err != nil { @@ -922,71 +869,77 @@ func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply return fmt.Errorf("problem creating transaction: %w", err) } - inputUTXOs := tx.InputUTXOs() - if len(inputUTXOs) != 1 { + opTx, ok := tx.UnsignedTx.(*OperationTx) + if !ok { + return errors.New("transaction must be a mint transaction") + } + if len(opTx.Ins) != 0 { return errCanOnlySignSingleInputTxs } - inputUTXO := inputUTXOs[0] + if len(opTx.Ops) != 1 { + return errCanOnlySignSingleInputTxs + } + op := opTx.Ops[0] - inputTxID, utxoIndex := inputUTXO.InputSource() - utx := UniqueTx{ - vm: service.vm, - txID: inputTxID, + if len(op.UTXOIDs) != 1 { + return errCanOnlySignSingleInputTxs } - if !utx.Status().Fetched() { - return errUnknownUTXO - } - utxos := utx.UTXOs() - if uint32(len(utxos)) <= utxoIndex { - return errInvalidUTXO + inputUTXO := op.UTXOIDs[0] + + utxo, err := service.vm.getUTXO(inputUTXO) + if err != nil { + return err } - utxo := utxos[int(utxoIndex)] - - i := -1 - size := 0 - switch out := utxo.Out.(type) { - case *secp256k1fx.MintOutput: - size = int(out.Threshold) - 
for j, addr := range out.Addrs { - if bytes.Equal(addr.Bytes(), minter) { - i = j - break - } - } - default: + out, ok := utxo.Out.(*secp256k1fx.MintOutput) + if !ok { return errUnknownOutputType } - if i == -1 { - return errUnneededAddress + secpOp, ok := op.Op.(*secp256k1fx.MintOperation) + if !ok { + return errors.New("unknown mint operation") + } + + sigIndex := -1 + size := int(out.Threshold) + for i, addrIndex := range secpOp.MintInput.SigIndices { + if addrIndex >= uint32(len(out.Addrs)) { + return errors.New("input output mismatch") + } + if bytes.Equal(out.Addrs[int(addrIndex)].Bytes(), minter) { + sigIndex = i + break + } + } + if sigIndex == -1 { + return errUnneededAddress } if len(tx.Creds) == 0 { - tx.Creds = append(tx.Creds, &Credential{Cred: &secp256k1fx.Credential{}}) + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{}) } - cred := tx.Creds[0] - switch cred := cred.Cred.(type) { - case *secp256k1fx.Credential: - if len(cred.Sigs) != size { - cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, size) - } - - unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) - if err != nil { - return fmt.Errorf("problem creating transaction: %w", err) - } - - sig, err := sk.Sign(unsignedBytes) - if err != nil { - return fmt.Errorf("problem signing transaction: %w", err) - } - copy(cred.Sigs[i][:], sig) - default: + cred, ok := tx.Creds[0].(*secp256k1fx.Credential) + if !ok { return errUnknownCredentialType } + if len(cred.Sigs) != size { + cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, size) + } + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + sig, err := sk.Sign(unsignedBytes) + if err != nil { + return fmt.Errorf("problem signing transaction: %w", err) + } + copy(cred.Sigs[sigIndex][:], sig) + txBytes, err := service.vm.codec.Marshal(&tx) if err != nil { return fmt.Errorf("problem creating transaction: %w", err) @@ -994,3 +947,317 @@ func 
(service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply reply.Tx.Bytes = txBytes return nil } + +// SendImportArgs are arguments for passing into SendImport requests +type SendImportArgs struct { + Username string `json:"username"` + Password string `json:"password"` + To string `json:"to"` +} + +// SendImportReply defines the SendImport replies returned from the API +type SendImportReply struct { + TxID ids.ID `json:"txID"` +} + +// SendImport returns the ID of the newly created atomic transaction +func (service *Service) SendImport(_ *http.Request, args *SendImportArgs, reply *SendImportReply) error { + service.vm.ctx.Log.Verbo("SendExport called with username: %s", args.Username) + + toBytes, err := service.vm.Parse(args.To) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + to, err := ids.ToShortID(toBytes) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + addresses, _ := user.Addresses(db) + + addrs := ids.Set{} + addrs.Add(addresses...) 
+ utxos, err := service.vm.GetAtomicUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) + } + + kc := secp256k1fx.NewKeychain() + for _, addr := range addresses { + sk, err := user.Key(db, addr) + if err != nil { + return fmt.Errorf("problem retrieving private key: %w", err) + } + kc.Add(sk) + } + + amount := uint64(0) + time := service.vm.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(service.vm.ava) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amount, input.Amount()) + if err != nil { + return errSpendOverflow + } + amount = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: service.vm.ava}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }} + + tx := Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + Outs: outs, + }, + Ins: ins, + }} + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + 
+ cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + txID, err := service.vm.IssueTx(b, nil) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} + +// SendExportArgs are arguments for passing into SendExport requests +type SendExportArgs struct { + Username string `json:"username"` + Password string `json:"password"` + Amount json.Uint64 `json:"amount"` + To string `json:"to"` +} + +// SendExportReply defines the Send replies returned from the API +type SendExportReply struct { + TxID ids.ID `json:"txID"` +} + +// SendExport returns the ID of the newly created atomic transaction +func (service *Service) SendExport(_ *http.Request, args *SendExportArgs, reply *SendExportReply) error { + service.vm.ctx.Log.Verbo("SendExport called with username: %s", args.Username) + + if args.Amount == 0 { + return errInvalidAmount + } + + toBytes, err := service.vm.Parse(args.To) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + to, err := ids.ToShortID(toBytes) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + addresses, _ := user.Addresses(db) + + addrs := ids.Set{} + addrs.Add(addresses...) 
+ utxos, err := service.vm.GetUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem retrieving user's UTXOs: %w", err) + } + + kc := secp256k1fx.NewKeychain() + for _, addr := range addresses { + sk, err := user.Key(db, addr) + if err != nil { + return fmt.Errorf("problem retrieving private key: %w", err) + } + kc.Add(sk) + } + + amountSpent := uint64(0) + time := service.vm.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(service.vm.ava) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + return errSpendOverflow + } + amountSpent = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: service.vm.ava}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + + if amountSpent >= uint64(args.Amount) { + break + } + } + + if amountSpent < uint64(args.Amount) { + return errInsufficientFunds + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + exportOuts := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }} + + outs := []*ava.TransferableOutput{} + if amountSpent > uint64(args.Amount) { + changeAddr := kc.Keys[0].PublicKey().Address() + outs = append(outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + }, + }) + } + + ava.SortTransferableOutputs(outs, service.vm.codec) + + tx := 
Tx{UnsignedTx: &ExportTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + Outs: outs, + Ins: ins, + }, + Outs: exportOuts, + }} + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + txID, err := service.vm.IssueTx(b, nil) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 16be290..9221634 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -136,7 +136,7 @@ func TestCreateFixedCapAsset(t *testing.T) { t.Fatal(err) } - if reply.AssetID.String() != "27ySRc5CE4obYwkS6kyvj5S8eGxGkr994157Hdo82mKVHTWpUT" { + if reply.AssetID.String() != "wWBk78PGAU4VkXhESr3jiYyMCEzzPPcnVYeEnNr9g4JuvYs2x" { t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) } } @@ -182,7 +182,7 @@ func TestCreateVariableCapAsset(t *testing.T) { t.Fatal(err) } - if reply.AssetID.String() != "2vnRkWvRN3G9JJ7pixBmNdq4pfwRFkpew4kccf27WokYLH9VYY" { + if reply.AssetID.String() != "SscTvpQFCZPNiRXyueDc7LdHT9EstHiva3AK6kuTgHTMd7DsU" { t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) } } diff --git a/vms/avm/state.go b/vms/avm/state.go index b9d045f..6033b8b 100644 --- a/vms/avm/state.go +++ b/vms/avm/state.go @@ -8,169 +8,58 
@@ import ( "github.com/ava-labs/gecko/cache" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/vms/components/ava" ) var ( errCacheTypeMismatch = errors.New("type returned from cache doesn't match the expected type") ) +func uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} + // state is a thin wrapper around a database to provide, caching, serialization, // and de-serialization. -type state struct { - c cache.Cacher - vm *VM -} +type state struct{ ava.State } // Tx attempts to load a transaction from storage. func (s *state) Tx(id ids.ID) (*Tx, error) { - if txIntf, found := s.c.Get(id); found { + if txIntf, found := s.Cache.Get(id); found { if tx, ok := txIntf.(*Tx); ok { return tx, nil } return nil, errCacheTypeMismatch } - bytes, err := s.vm.db.Get(id.Bytes()) + bytes, err := s.DB.Get(id.Bytes()) if err != nil { return nil, err } // The key was in the database tx := &Tx{} - if err := s.vm.codec.Unmarshal(bytes, tx); err != nil { + if err := s.Codec.Unmarshal(bytes, tx); err != nil { return nil, err } tx.Initialize(bytes) - s.c.Put(id, tx) + s.Cache.Put(id, tx) return tx, nil } // SetTx saves the provided transaction to storage. func (s *state) SetTx(id ids.ID, tx *Tx) error { if tx == nil { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) } - s.c.Put(id, tx) - return s.vm.db.Put(id.Bytes(), tx.Bytes()) -} - -// UTXO attempts to load a utxo from storage. 
-func (s *state) UTXO(id ids.ID) (*UTXO, error) { - if utxoIntf, found := s.c.Get(id); found { - if utxo, ok := utxoIntf.(*UTXO); ok { - return utxo, nil - } - return nil, errCacheTypeMismatch - } - - bytes, err := s.vm.db.Get(id.Bytes()) - if err != nil { - return nil, err - } - - // The key was in the database - utxo := &UTXO{} - if err := s.vm.codec.Unmarshal(bytes, utxo); err != nil { - return nil, err - } - - s.c.Put(id, utxo) - return utxo, nil -} - -// SetUTXO saves the provided utxo to storage. -func (s *state) SetUTXO(id ids.ID, utxo *UTXO) error { - if utxo == nil { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) - } - - bytes, err := s.vm.codec.Marshal(utxo) - if err != nil { - return err - } - - s.c.Put(id, utxo) - return s.vm.db.Put(id.Bytes(), bytes) -} - -// Status returns a status from storage. -func (s *state) Status(id ids.ID) (choices.Status, error) { - if statusIntf, found := s.c.Get(id); found { - if status, ok := statusIntf.(choices.Status); ok { - return status, nil - } - return choices.Unknown, errCacheTypeMismatch - } - - bytes, err := s.vm.db.Get(id.Bytes()) - if err != nil { - return choices.Unknown, err - } - - var status choices.Status - s.vm.codec.Unmarshal(bytes, &status) - - s.c.Put(id, status) - return status, nil -} - -// SetStatus saves a status in storage. 
-func (s *state) SetStatus(id ids.ID, status choices.Status) error { - if status == choices.Unknown { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) - } - - s.c.Put(id, status) - - bytes, err := s.vm.codec.Marshal(status) - if err != nil { - return err - } - return s.vm.db.Put(id.Bytes(), bytes) -} - -// IDs returns a slice of IDs from storage -func (s *state) IDs(id ids.ID) ([]ids.ID, error) { - if idsIntf, found := s.c.Get(id); found { - if idSlice, ok := idsIntf.([]ids.ID); ok { - return idSlice, nil - } - return nil, errCacheTypeMismatch - } - - bytes, err := s.vm.db.Get(id.Bytes()) - if err != nil { - return nil, err - } - - idSlice := []ids.ID(nil) - if err := s.vm.codec.Unmarshal(bytes, &idSlice); err != nil { - return nil, err - } - - s.c.Put(id, idSlice) - return idSlice, nil -} - -// SetIDs saves a slice of IDs to the database. -func (s *state) SetIDs(id ids.ID, idSlice []ids.ID) error { - if len(idSlice) == 0 { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) - } - - s.c.Put(id, idSlice) - - bytes, err := s.vm.codec.Marshal(idSlice) - if err != nil { - return err - } - - return s.vm.db.Put(id.Bytes(), bytes) + s.Cache.Put(id, tx) + return s.DB.Put(id.Bytes(), tx.Bytes()) } diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index 0485a1e..212fc18 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -67,7 +68,7 @@ func TestStateIDs(t *testing.T) { } } - state.c.Flush() + state.Cache.Flush() result, err = state.IDs(ids.Empty) if err != nil { @@ -94,7 +95,7 @@ func TestStateIDs(t *testing.T) { t.Fatalf("Should have errored during cache lookup") } - state.c.Flush() + state.Cache.Flush() result, err = state.IDs(ids.Empty) if err == nil { @@ -174,19 +175,19 @@ func TestStateUTXOs(t *testing.T) 
{ vm := GenesisVM(t) state := vm.state.state - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) if _, err := state.UTXO(ids.Empty); err == nil { t.Fatalf("Should have errored when reading utxo") } - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + Out: &ava.TestVerifiable{}, } if err := state.SetUTXO(ids.Empty, utxo); err != nil { @@ -202,7 +203,7 @@ func TestStateUTXOs(t *testing.T) { t.Fatalf("Wrong UTXO returned") } - state.c.Flush() + state.Cache.Flush() result, err = state.UTXO(ids.Empty) if err != nil { @@ -221,7 +222,7 @@ func TestStateUTXOs(t *testing.T) { t.Fatalf("Should have errored when reading utxo") } - if err := state.SetUTXO(ids.Empty, &UTXO{}); err == nil { + if err := state.SetUTXO(ids.Empty, &ava.UTXO{}); err == nil { t.Fatalf("Should have errored packing the utxo") } @@ -233,7 +234,7 @@ func TestStateUTXOs(t *testing.T) { t.Fatalf("Should have errored when reading utxo") } - state.c.Flush() + state.Cache.Flush() if _, err := state.UTXO(ids.Empty); err == nil { t.Fatalf("Should have errored when reading utxo") @@ -244,35 +245,31 @@ func TestStateTXs(t *testing.T) { vm := GenesisVM(t) state := vm.state.state - vm.codec.RegisterType(&TestTransferable{}) + vm.codec.RegisterType(&ava.TestTransferable{}) if _, err := state.Tx(ids.Empty); err == nil { t.Fatalf("Should have errored when reading tx") } - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + 
TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(tx.UnsignedTx) if err != nil { @@ -287,11 +284,9 @@ func TestStateTXs(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -314,7 +309,7 @@ func TestStateTXs(t *testing.T) { t.Fatalf("Wrong Tx returned") } - state.c.Flush() + state.Cache.Flush() result, err = state.Tx(ids.Empty) if err != nil { @@ -341,7 +336,7 @@ func TestStateTXs(t *testing.T) { t.Fatalf("Should have errored when reading tx") } - state.c.Flush() + state.Cache.Flush() if _, err := state.Tx(ids.Empty); err == nil { t.Fatalf("Should have errored when reading tx") diff --git a/vms/avm/static_service.go b/vms/avm/static_service.go index 47ef942..3fd58f3 100644 --- a/vms/avm/static_service.go +++ b/vms/avm/static_service.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" @@ -44,15 +45,24 @@ type BuildGenesisReply struct { // BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is // referenced in the UTXO. 
func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { + errs := wrappers.Errs{} + c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + errs.Add( + c.RegisterType(&BaseTx{}), + c.RegisterType(&CreateAssetTx{}), + c.RegisterType(&OperationTx{}), + c.RegisterType(&ImportTx{}), + c.RegisterType(&ExportTx{}), + c.RegisterType(&secp256k1fx.TransferInput{}), + c.RegisterType(&secp256k1fx.MintOutput{}), + c.RegisterType(&secp256k1fx.TransferOutput{}), + c.RegisterType(&secp256k1fx.MintOperation{}), + c.RegisterType(&secp256k1fx.Credential{}), + ) + if errs.Errored() { + return errs.Err + } g := Genesis{} for assetAlias, assetDefinition := range args.GenesisData { @@ -67,78 +77,75 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl Denomination: byte(assetDefinition.Denomination), }, } - for assetType, initialStates := range assetDefinition.InitialState { - switch assetType { - case "fixedCap": - initialState := &InitialState{ - FxID: 0, // TODO: Should lookup secp256k1fx FxID - } - for _, state := range initialStates { - b, err := json.Marshal(state) - if err != nil { - return err - } - holder := Holder{} - if err := json.Unmarshal(b, &holder); err != nil { - return err - } - cb58 := formatting.CB58{} - if err := cb58.FromString(holder.Address); err != nil { - return err - } - addr, err := ids.ToShortID(cb58.Bytes) - if err != nil { - return err - } - initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ - Amt: uint64(holder.Amount), - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }) - } - initialState.Sort(c) - asset.States = 
append(asset.States, initialState) - case "variableCap": - initialState := &InitialState{ - FxID: 0, // TODO: Should lookup secp256k1fx FxID - } - for _, state := range initialStates { - b, err := json.Marshal(state) - if err != nil { - return err - } - owners := Owners{} - if err := json.Unmarshal(b, &owners); err != nil { - return err - } - - out := &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - }, - } - for _, address := range owners.Minters { + if len(assetDefinition.InitialState) > 0 { + initialState := &InitialState{ + FxID: 0, // TODO: Should lookup secp256k1fx FxID + } + for assetType, initialStates := range assetDefinition.InitialState { + switch assetType { + case "fixedCap": + for _, state := range initialStates { + b, err := json.Marshal(state) + if err != nil { + return err + } + holder := Holder{} + if err := json.Unmarshal(b, &holder); err != nil { + return err + } cb58 := formatting.CB58{} - if err := cb58.FromString(address); err != nil { + if err := cb58.FromString(holder.Address); err != nil { return err } addr, err := ids.ToShortID(cb58.Bytes) if err != nil { return err } - out.Addrs = append(out.Addrs, addr) + initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ + Amt: uint64(holder.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }) } - out.Sort() + case "variableCap": + for _, state := range initialStates { + b, err := json.Marshal(state) + if err != nil { + return err + } + owners := Owners{} + if err := json.Unmarshal(b, &owners); err != nil { + return err + } - initialState.Outs = append(initialState.Outs, out) + out := &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + } + for _, address := range owners.Minters { + cb58 := formatting.CB58{} + if err := cb58.FromString(address); err != nil { + return err + } + addr, err := ids.ToShortID(cb58.Bytes) + if err != nil { + return err + } + 
out.Addrs = append(out.Addrs, addr) + } + out.Sort() + + initialState.Outs = append(initialState.Outs, out) + } + default: + return errUnknownAssetType } - initialState.Sort(c) - asset.States = append(asset.States, initialState) - default: - return errUnknownAssetType } + initialState.Sort(c) + asset.States = append(asset.States, initialState) } asset.Sort() g.Txs = append(g.Txs, &asset) diff --git a/vms/avm/static_service_test.go b/vms/avm/static_service_test.go index 612132e..fd9acc0 100644 --- a/vms/avm/static_service_test.go +++ b/vms/avm/static_service_test.go @@ -5,8 +5,6 @@ package avm import ( "testing" - - "github.com/ava-labs/gecko/utils/formatting" ) func TestBuildGenesis(t *testing.T) { @@ -79,20 +77,4 @@ func TestBuildGenesis(t *testing.T) { if err != nil { t.Fatal(err) } - - expected := "1112YAVd1YsJ7JBDMQssciuuu9ySgebznWfmfT8JSw5vUKERtP4WGyitE7z38J8tExNmvK2kuwHsUP3erfcncXBWmJkdnd9nDJoj9tCiQHJmW1pstNQn3zXHdTnw6KJcG8Ro36ahknQkuy9ZSXgnZtpFhqUuwSd7mPj8vzZcqJMXLXorCBfvhwypTbZKogM9tUshyUfngfkg256ZsoU2ufMjhTG14PBBrgJkXD2F38uVSXWvYbubMVWDZbDnUzbyD3Azrs2Hydf8Paio6aNjwfwc1py61oXS5ehC55wiYbKpfzwE4px3bfYBu9yV6rvhivksB56vop9LEo8Pdo71tFAMkhR5toZmYcqRKyLXAnYqonUgmPsyxNwU22as8oscT5dj3Qxy1jsg6bEp6GwQepNqsWufGYx6Hiby2r5hyRZeYdk6xsXMPGBSBWUXhKX3ReTxBnjcrVE2Zc3G9eMvRho1tKzt7ppkutpcQemdDy2dxGryMqaFmPJaTaqcH2vB197KgVFbPgmHZY3ufUdfpVzzHax365pwCmzQD2PQh8hCqEP7rfV5e8uXKQiSynngoNDM4ak145zTpcUaX8htMGinfs45aKQvo5WHcD6ccRnHzc7dyXN8xJRnMznsuRN7D6k66DdbfDYhc2NbVUgXRAF4wSNTtsuZGxCGTEjQyYaoUoJowGXvnxmXAWHvLyMJswNizBeYgw1agRg5qB4AEKX96BFXhJq3MbsBRiypLR6nSuZgPFhCrLdBtstxEC2SPQNuUVWW9Qy68dDWQ3Fxx95n1pnjVru9wDJFoemg2imXRR" - - cb58 := formatting.CB58{} - if err := cb58.FromString(expected); err != nil { - t.Fatal(err) - } - expectedBytes := cb58.Bytes - - if result := reply.Bytes.String(); result != expected { - t.Fatalf("Create genesis returned unexpected bytes:\n\n%s\n\n%s\n\n%s", - reply.Bytes, - formatting.DumpBytes{Bytes: reply.Bytes.Bytes}, - formatting.DumpBytes{Bytes: 
expectedBytes}, - ) - } } diff --git a/vms/avm/tx.go b/vms/avm/tx.go index 4fced32..c35fd80 100644 --- a/vms/avm/tx.go +++ b/vms/avm/tx.go @@ -6,10 +6,12 @@ package avm import ( "errors" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( @@ -22,16 +24,14 @@ type UnsignedTx interface { ID() ids.ID Bytes() []byte - NetworkID() uint32 - ChainID() ids.ID - Outputs() []*TransferableOutput - Inputs() []*TransferableInput - AssetIDs() ids.Set - InputUTXOs() []*UTXOID - UTXOs() []*UTXO + NumCredentials() int + InputUTXOs() []*ava.UTXOID + UTXOs() []*ava.UTXO + SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error - SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error + SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error + ExecuteWithSideEffects(vm *VM, batch database.Batch) error } // Tx is the core operation that can be performed. The tx uses the UTXO model. @@ -40,14 +40,14 @@ type UnsignedTx interface { // attempting to consume and the inputs consume sufficient state to produce the // outputs. type Tx struct { - UnsignedTx `serialize:"true"` + UnsignedTx `serialize:"true" json:"unsignedTx"` - Creds []*Credential `serialize:"true"` // The credentials of this transaction + Creds []verify.Verifiable `serialize:"true" json:"credentials"` // The credentials of this transaction } // Credentials describes the authorization that allows the Inputs to consume the // specified UTXOs. The returned array should not be modified. -func (t *Tx) Credentials() []*Credential { return t.Creds } +func (t *Tx) Credentials() []verify.Verifiable { return t.Creds } // SyntacticVerify verifies that this transaction is well-formed. 
func (t *Tx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { @@ -66,8 +66,7 @@ func (t *Tx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error } } - numInputs := len(t.InputUTXOs()) - if numInputs != len(t.Creds) { + if numCreds := t.UnsignedTx.NumCredentials(); numCreds != len(t.Creds) { return errWrongNumberOfCredentials } return nil diff --git a/vms/avm/tx_test.go b/vms/avm/tx_test.go index bc01f36..a26f595 100644 --- a/vms/avm/tx_test.go +++ b/vms/avm/tx_test.go @@ -8,7 +8,9 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -25,6 +27,13 @@ func TestTxEmpty(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) + c.RegisterType(&secp256k1fx.Credential{}) tx := &Tx{} if err := tx.SyntacticVerify(ctx, c, 1); err == nil { @@ -37,42 +46,36 @@ func TestTxInvalidCredential(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) - c.RegisterType(&testVerifiable{}) + c.RegisterType(&ava.TestVerifiable{}) tx := &Tx{ - UnsignedTx: &OperationTx{BaseTx: BaseTx{ + UnsignedTx: &BaseTx{ NetID: 
networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}, - Creds: []*Credential{ - &Credential{ - Cred: &testVerifiable{err: errUnneededAddress}, - }, + }}, }, + Creds: []verify.Verifiable{&ava.TestVerifiable{Err: errUnneededAddress}}, } b, err := c.Marshal(tx) @@ -91,26 +94,26 @@ func TestTxInvalidUnsignedTx(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) - c.RegisterType(&testVerifiable{}) + c.RegisterType(&ava.TestVerifiable{}) tx := &Tx{ - UnsignedTx: &OperationTx{BaseTx: BaseTx{ + UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, }, - Asset: Asset{ - ID: asset, - }, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 20 * units.KiloAva, Input: secp256k1fx.Input{ @@ -120,14 +123,12 @@ func TestTxInvalidUnsignedTx(t *testing.T) { }, }, }, - &TransferableInput{ - UTXOID: UTXOID{ + &ava.TransferableInput{ + UTXOID: 
ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, }, - Asset: Asset{ - ID: asset, - }, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 20 * units.KiloAva, Input: secp256k1fx.Input{ @@ -138,14 +139,10 @@ func TestTxInvalidUnsignedTx(t *testing.T) { }, }, }, - }}, - Creds: []*Credential{ - &Credential{ - Cred: &testVerifiable{}, - }, - &Credential{ - Cred: &testVerifiable{}, - }, + }, + Creds: []verify.Verifiable{ + &ava.TestVerifiable{}, + &ava.TestVerifiable{}, }, } @@ -165,60 +162,47 @@ func TestTxInvalidNumberOfCredentials(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) - c.RegisterType(&testVerifiable{}) + c.RegisterType(&ava.TestVerifiable{}) tx := &Tx{ - UnsignedTx: &OperationTx{ - BaseTx: BaseTx{ - NetID: networkID, - BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, + UnsignedTx: &BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.Empty, OutputIndex: 0}, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + }, + }, + &ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.Empty, OutputIndex: 1}, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: 
secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, }, }, - Ops: []*Operation{ - &Operation{ - Asset: Asset{ - ID: asset, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, - }, - }, - }, - }, - }, - Creds: []*Credential{ - &Credential{ - Cred: &testVerifiable{}, - }, }, + Creds: []verify.Verifiable{&ava.TestVerifiable{}}, } b, err := c.Marshal(tx) @@ -231,76 +215,3 @@ func TestTxInvalidNumberOfCredentials(t *testing.T) { t.Fatalf("Tx should have failed due to an invalid unsigned tx") } } - -func TestTxDocumentation(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - - txBytes := []byte{ - // unsigned transaction: - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, - 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, - 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, - 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, - 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, - 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, - 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, - 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, - 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, - 0x6d, 0x55, 0xa9, 0x55, 0xc3, 0x34, 0x41, 0x28, - 0xe0, 0x60, 0x12, 0x8e, 0xde, 0x35, 0x23, 0xa2, - 0x4a, 0x46, 0x1c, 0x89, 0x43, 0xab, 0x08, 0x59, - 0x00, 0x00, 0x00, 0x01, 0xf1, 0xe1, 0xd1, 0xc1, - 0xb1, 0xa1, 0x91, 0x81, 
0x71, 0x61, 0x51, 0x41, - 0x31, 0x21, 0x11, 0x01, 0xf0, 0xe0, 0xd0, 0xc0, - 0xb0, 0xa0, 0x90, 0x80, 0x70, 0x60, 0x50, 0x40, - 0x30, 0x20, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, - 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, - // number of credentials: - 0x00, 0x00, 0x00, 0x01, - // credential[0]: - 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1e, 0x1d, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2e, 0x2d, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, - 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, - 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, - 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, - 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5e, 0x5d, - 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, - 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6e, 0x6d, - 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, - 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, - 0x7f, 0x00, - } - - tx := Tx{} - err := c.Unmarshal(txBytes, &tx) - if err != nil { - t.Fatal(err) - } -} diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go index c7e5115..788ef52 100644 --- a/vms/avm/unique_tx.go +++ b/vms/avm/unique_tx.go @@ -9,10 +9,12 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/vms/components/ava" ) var ( errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + 
errWrongAssetID = errors.New("asset ID must be AVA in the atomic tx") errMissingUTXO = errors.New("missing utxo") errUnknownTx = errors.New("transaction is unknown") errRejectedTx = errors.New("transaction is rejected") @@ -21,19 +23,22 @@ var ( // UniqueTx provides a de-duplication service for txs. This only provides a // performance boost type UniqueTx struct { + *TxState + vm *VM txID ids.ID - t *txState } -type txState struct { +// TxState ... +type TxState struct { + *Tx + unique, verifiedTx, verifiedState bool validity error - tx *Tx inputs ids.Set - inputUTXOs []*UTXOID - utxos []*UTXO + inputUTXOs []*ava.UTXOID + utxos []*ava.UTXO deps []snowstorm.Tx status choices.Status @@ -42,51 +47,51 @@ type txState struct { } func (tx *UniqueTx) refresh() { - if tx.t == nil { - tx.t = &txState{} + if tx.TxState == nil { + tx.TxState = &TxState{} } - if tx.t.unique { + if tx.unique { return } unique := tx.vm.state.UniqueTx(tx) - prevTx := tx.t.tx + prevTx := tx.Tx if unique == tx { // If no one was in the cache, make sure that there wasn't an // intermediate object whose state I must reflect if status, err := tx.vm.state.Status(tx.ID()); err == nil { - tx.t.status = status - tx.t.unique = true + tx.status = status + tx.unique = true } } else { // If someone is in the cache, they must be up to date // This ensures that every unique tx object points to the same tx state - tx.t = unique.t + tx.TxState = unique.TxState } - if tx.t.tx != nil { + if tx.Tx != nil { return } if prevTx == nil { if innerTx, err := tx.vm.state.Tx(tx.ID()); err == nil { - tx.t.tx = innerTx + tx.Tx = innerTx } } else { - tx.t.tx = prevTx + tx.Tx = prevTx } } // Evict is called when this UniqueTx will no longer be returned from a cache // lookup -func (tx *UniqueTx) Evict() { tx.t.unique = false } // Lock is already held here +func (tx *UniqueTx) Evict() { tx.unique = false } // Lock is already held here func (tx *UniqueTx) setStatus(status choices.Status) error { tx.refresh() - if tx.t.status == 
status { + if tx.status == status { return nil } - tx.t.status = status + tx.status = status return tx.vm.state.SetStatus(tx.ID(), status) } @@ -95,13 +100,20 @@ func (tx *UniqueTx) ID() ids.ID { return tx.txID } // Accept is called when the transaction was finalized as accepted by consensus func (tx *UniqueTx) Accept() { + defer tx.vm.db.Abort() + if err := tx.setStatus(choices.Accepted); err != nil { tx.vm.ctx.Log.Error("Failed to accept tx %s due to %s", tx.txID, err) return } // Remove spent utxos - for _, utxoID := range tx.InputIDs().List() { + for _, utxo := range tx.InputUTXOs() { + if utxo.Symbolic() { + // If the UTXO is symbolic, it can't be spent + continue + } + utxoID := utxo.InputID() if err := tx.vm.state.SpendUTXO(utxoID); err != nil { tx.vm.ctx.Log.Error("Failed to spend utxo %s due to %s", utxoID, err) return @@ -117,23 +129,32 @@ func (tx *UniqueTx) Accept() { } txID := tx.ID() - tx.vm.ctx.Log.Verbo("Accepting Tx: %s", txID) - - if err := tx.vm.db.Commit(); err != nil { - tx.vm.ctx.Log.Error("Failed to commit accept %s due to %s", tx.txID, err) + commitBatch, err := tx.vm.db.CommitBatch() + if err != nil { + tx.vm.ctx.Log.Error("Failed to calculate CommitBatch for %s due to %s", txID, err) + return } + if err := tx.ExecuteWithSideEffects(tx.vm, commitBatch); err != nil { + tx.vm.ctx.Log.Error("Failed to commit accept %s due to %s", txID, err) + return + } + + tx.vm.ctx.Log.Verbo("Accepted Tx: %s", txID) + tx.vm.pubsub.Publish("accepted", txID) - tx.t.deps = nil // Needed to prevent a memory leak + tx.deps = nil // Needed to prevent a memory leak - if tx.t.onDecide != nil { - tx.t.onDecide(choices.Accepted) + if tx.onDecide != nil { + tx.onDecide(choices.Accepted) } } // Reject is called when the transaction was finalized as rejected by consensus func (tx *UniqueTx) Reject() { + defer tx.vm.db.Abort() + if err := tx.setStatus(choices.Rejected); err != nil { tx.vm.ctx.Log.Error("Failed to reject tx %s due to %s", tx.txID, err) return @@ -148,86 
+169,89 @@ func (tx *UniqueTx) Reject() { tx.vm.pubsub.Publish("rejected", txID) - tx.t.deps = nil // Needed to prevent a memory leak + tx.deps = nil // Needed to prevent a memory leak - if tx.t.onDecide != nil { - tx.t.onDecide(choices.Rejected) + if tx.onDecide != nil { + tx.onDecide(choices.Rejected) } } // Status returns the current status of this transaction func (tx *UniqueTx) Status() choices.Status { tx.refresh() - return tx.t.status + return tx.status } // Dependencies returns the set of transactions this transaction builds on func (tx *UniqueTx) Dependencies() []snowstorm.Tx { tx.refresh() - if tx.t.tx == nil || len(tx.t.deps) != 0 { - return tx.t.deps + if tx.Tx == nil || len(tx.deps) != 0 { + return tx.deps } txIDs := ids.Set{} for _, in := range tx.InputUTXOs() { + if in.Symbolic() { + continue + } txID, _ := in.InputSource() if !txIDs.Contains(txID) { txIDs.Add(txID) - tx.t.deps = append(tx.t.deps, &UniqueTx{ + tx.deps = append(tx.deps, &UniqueTx{ vm: tx.vm, txID: txID, }) } } - for _, assetID := range tx.t.tx.AssetIDs().List() { + for _, assetID := range tx.Tx.AssetIDs().List() { if !txIDs.Contains(assetID) { txIDs.Add(assetID) - tx.t.deps = append(tx.t.deps, &UniqueTx{ + tx.deps = append(tx.deps, &UniqueTx{ vm: tx.vm, txID: assetID, }) } } - return tx.t.deps + return tx.deps } // InputIDs returns the set of utxoIDs this transaction consumes func (tx *UniqueTx) InputIDs() ids.Set { tx.refresh() - if tx.t.tx == nil || tx.t.inputs.Len() != 0 { - return tx.t.inputs + if tx.Tx == nil || tx.inputs.Len() != 0 { + return tx.inputs } for _, utxo := range tx.InputUTXOs() { - tx.t.inputs.Add(utxo.InputID()) + tx.inputs.Add(utxo.InputID()) } - return tx.t.inputs + return tx.inputs } // InputUTXOs returns the utxos that will be consumed on tx acceptance -func (tx *UniqueTx) InputUTXOs() []*UTXOID { +func (tx *UniqueTx) InputUTXOs() []*ava.UTXOID { tx.refresh() - if tx.t.tx == nil || len(tx.t.inputUTXOs) != 0 { - return tx.t.inputUTXOs + if tx.Tx == nil || 
len(tx.inputUTXOs) != 0 { + return tx.inputUTXOs } - tx.t.inputUTXOs = tx.t.tx.InputUTXOs() - return tx.t.inputUTXOs + tx.inputUTXOs = tx.Tx.InputUTXOs() + return tx.inputUTXOs } // UTXOs returns the utxos that will be added to the UTXO set on tx acceptance -func (tx *UniqueTx) UTXOs() []*UTXO { +func (tx *UniqueTx) UTXOs() []*ava.UTXO { tx.refresh() - if tx.t.tx == nil || len(tx.t.utxos) != 0 { - return tx.t.utxos + if tx.Tx == nil || len(tx.utxos) != 0 { + return tx.utxos } - tx.t.utxos = tx.t.tx.UTXOs() - return tx.t.utxos + tx.utxos = tx.Tx.UTXOs() + return tx.utxos } // Bytes returns the binary representation of this transaction func (tx *UniqueTx) Bytes() []byte { tx.refresh() - return tx.t.tx.Bytes() + return tx.Tx.Bytes() } // Verify the validity of this transaction @@ -248,39 +272,39 @@ func (tx *UniqueTx) Verify() error { func (tx *UniqueTx) SyntacticVerify() error { tx.refresh() - if tx.t.tx == nil { + if tx.Tx == nil { return errUnknownTx } - if tx.t.verifiedTx { - return tx.t.validity + if tx.verifiedTx { + return tx.validity } - tx.t.verifiedTx = true - tx.t.validity = tx.t.tx.SyntacticVerify(tx.vm.ctx, tx.vm.codec, len(tx.vm.fxs)) - return tx.t.validity + tx.verifiedTx = true + tx.validity = tx.Tx.SyntacticVerify(tx.vm.ctx, tx.vm.codec, len(tx.vm.fxs)) + return tx.validity } // SemanticVerify the validity of this transaction func (tx *UniqueTx) SemanticVerify() error { tx.SyntacticVerify() - if tx.t.validity != nil || tx.t.verifiedState { - return tx.t.validity + if tx.validity != nil || tx.verifiedState { + return tx.validity } - tx.t.verifiedState = true - tx.t.validity = tx.t.tx.SemanticVerify(tx.vm, tx) - - if tx.t.validity == nil { - tx.vm.pubsub.Publish("verified", tx.ID()) + if err := tx.Tx.SemanticVerify(tx.vm, tx); err != nil { + return err } - return tx.t.validity + + tx.verifiedState = true + tx.vm.pubsub.Publish("verified", tx.ID()) + return nil } // UnsignedBytes returns the unsigned bytes of the transaction func (tx *UniqueTx) 
UnsignedBytes() []byte { - b, err := tx.vm.codec.Marshal(&tx.t.tx.UnsignedTx) + b, err := tx.vm.codec.Marshal(&tx.UnsignedTx) tx.vm.ctx.Log.AssertNoError(err) return b } diff --git a/vms/avm/utxo_id.go b/vms/avm/utxo_id.go deleted file mode 100644 index 9852c5d..0000000 --- a/vms/avm/utxo_id.go +++ /dev/null @@ -1,48 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "errors" - - "github.com/ava-labs/gecko/ids" -) - -var ( - errNilUTXOID = errors.New("nil utxo ID is not valid") - errNilTxID = errors.New("nil tx ID is not valid") -) - -// UTXOID ... -type UTXOID struct { - // Serialized: - TxID ids.ID `serialize:"true"` - OutputIndex uint32 `serialize:"true"` - - // Cached: - id ids.ID -} - -// InputSource returns the source of the UTXO that this input is spending -func (utxo *UTXOID) InputSource() (ids.ID, uint32) { return utxo.TxID, utxo.OutputIndex } - -// InputID returns a unique ID of the UTXO that this input is spending -func (utxo *UTXOID) InputID() ids.ID { - if utxo.id.IsZero() { - utxo.id = utxo.TxID.Prefix(uint64(utxo.OutputIndex)) - } - return utxo.id -} - -// Verify implements the verify.Verifiable interface -func (utxo *UTXOID) Verify() error { - switch { - case utxo == nil: - return errNilUTXOID - case utxo.TxID.IsZero(): - return errNilTxID - default: - return nil - } -} diff --git a/vms/avm/verifiable_test.go b/vms/avm/verifiable_test.go index 65630d2..6e6337a 100644 --- a/vms/avm/verifiable_test.go +++ b/vms/avm/verifiable_test.go @@ -3,20 +3,10 @@ package avm -type testVerifiable struct{ err error } - -func (v *testVerifiable) Verify() error { return v.err } - -type TestTransferable struct { - testVerifiable - - Val uint64 `serialize:"true"` -} - -func (t *TestTransferable) Amount() uint64 { return t.Val } +import "github.com/ava-labs/gecko/vms/components/ava" type testAddressable struct { - TestTransferable `serialize:"true"` + ava.TestTransferable 
`serialize:"true"` Addrs [][]byte `serialize:"true"` } diff --git a/vms/avm/vm.go b/vms/avm/vm.go index ce29921..e6af76d 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -21,8 +21,10 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowstorm" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" cjson "github.com/ava-labs/gecko/utils/json" @@ -49,6 +51,9 @@ var ( type VM struct { ids.Aliaser + ava ids.ID + platform ids.ID + // Contains information of where this VM is executing ctx *snow.Context @@ -111,36 +116,24 @@ func (vm *VM) Initialize( vm.Aliaser.Initialize() vm.pubsub = cjson.NewPubSubServer(ctx) + c := codec.NewDefault() errs := wrappers.Errs{} errs.Add( vm.pubsub.Register("accepted"), vm.pubsub.Register("rejected"), vm.pubsub.Register("verified"), + + c.RegisterType(&BaseTx{}), + c.RegisterType(&CreateAssetTx{}), + c.RegisterType(&OperationTx{}), + c.RegisterType(&ImportTx{}), + c.RegisterType(&ExportTx{}), ) if errs.Errored() { return errs.Err } - vm.state = &prefixedState{ - state: &state{ - c: &cache.LRU{Size: stateCacheSize}, - vm: vm, - }, - - tx: &cache.LRU{Size: idCacheSize}, - utxo: &cache.LRU{Size: idCacheSize}, - txStatus: &cache.LRU{Size: idCacheSize}, - funds: &cache.LRU{Size: idCacheSize}, - - uniqueTx: &cache.EvictableLRU{Size: txCacheSize}, - } - - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - vm.fxs = make([]*parsedFx, len(fxs)) for i, fxContainer := range fxs { if fxContainer == nil { @@ -166,6 +159,21 @@ func (vm *VM) Initialize( vm.codec = c + vm.state = &prefixedState{ + state: &state{State: ava.State{ + Cache: &cache.LRU{Size: stateCacheSize}, + DB: vm.db, + Codec: vm.codec, + }}, + + tx: 
&cache.LRU{Size: idCacheSize}, + utxo: &cache.LRU{Size: idCacheSize}, + txStatus: &cache.LRU{Size: idCacheSize}, + funds: &cache.LRU{Size: idCacheSize}, + + uniqueTx: &cache.EvictableLRU{Size: txCacheSize}, + } + if err := vm.initAliases(genesisBytes); err != nil { return err } @@ -251,7 +259,10 @@ func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) { ****************************************************************************** */ -// IssueTx attempts to send a transaction to consensus +// IssueTx attempts to send a transaction to consensus. +// If onDecide is specified, the function will be called when the transaction is +// either accepted or rejected with the appropriate status. This function will +// go out of scope when the transaction is removed from memory. func (vm *VM) IssueTx(b []byte, onDecide func(choices.Status)) (ids.ID, error) { tx, err := vm.parseTx(b) if err != nil { @@ -261,20 +272,45 @@ func (vm *VM) IssueTx(b []byte, onDecide func(choices.Status)) (ids.ID, error) { return ids.ID{}, err } vm.issueTx(tx) - tx.t.onDecide = onDecide + tx.onDecide = onDecide return tx.ID(), nil } +// GetAtomicUTXOs returns the utxos that at least one of the provided addresses is +// referenced in. +func (vm *VM) GetAtomicUTXOs(addrs ids.Set) ([]*ava.UTXO, error) { + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxoIDs := ids.Set{} + for _, addr := range addrs.List() { + utxos, _ := state.PlatformFunds(addr) + utxoIDs.Add(utxos...) + } + + utxos := []*ava.UTXO{} + for _, utxoID := range utxoIDs.List() { + utxo, err := state.PlatformUTXO(utxoID) + if err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + return utxos, nil +} + // GetUTXOs returns the utxos that at least one of the provided addresses is // referenced in. 
-func (vm *VM) GetUTXOs(addrs ids.Set) ([]*UTXO, error) { +func (vm *VM) GetUTXOs(addrs ids.Set) ([]*ava.UTXO, error) { utxoIDs := ids.Set{} for _, addr := range addrs.List() { utxos, _ := vm.state.Funds(addr) utxoIDs.Add(utxos...) } - utxos := []*UTXO{} + utxos := []*ava.UTXO{} for _, utxoID := range utxoIDs.List() { utxo, err := vm.state.UTXO(utxoID) if err != nil { @@ -297,6 +333,9 @@ func (vm *VM) Clock() *timer.Clock { return &vm.clock } // Codec returns a reference to the internal codec of this VM func (vm *VM) Codec() codec.Codec { return vm.codec } +// Logger returns a reference to the internal logger of this VM +func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } + /* ****************************************************************************** ********************************** Timer API ********************************* @@ -399,18 +438,18 @@ func (vm *VM) parseTx(b []byte) (*UniqueTx, error) { rawTx.Initialize(b) tx := &UniqueTx{ + TxState: &TxState{ + Tx: rawTx, + }, vm: vm, txID: rawTx.ID(), - t: &txState{ - tx: rawTx, - }, } if err := tx.SyntacticVerify(); err != nil { return nil, err } if tx.Status() == choices.Unknown { - if err := vm.state.SetTx(tx.ID(), tx.t.tx); err != nil { + if err := vm.state.SetTx(tx.ID(), tx.Tx); err != nil { return nil, err } tx.setStatus(choices.Processing) @@ -429,6 +468,32 @@ func (vm *VM) issueTx(tx snowstorm.Tx) { } } +func (vm *VM) getUTXO(utxoID *ava.UTXOID) (*ava.UTXO, error) { + inputID := utxoID.InputID() + utxo, err := vm.state.UTXO(inputID) + if err == nil { + return utxo, nil + } + + inputTx, inputIndex := utxoID.InputSource() + parent := UniqueTx{ + vm: vm, + txID: inputTx, + } + + if err := parent.Verify(); err != nil { + return nil, errMissingUTXO + } else if status := parent.Status(); status.Decided() { + return nil, errMissingUTXO + } + + parentUTXOs := parent.UTXOs() + if uint32(len(parentUTXOs)) <= inputIndex || int(inputIndex) < 0 { + return nil, errInvalidUTXO + } + return 
parentUTXOs[int(inputIndex)], nil +} + func (vm *VM) getFx(val interface{}) (int, error) { valType := reflect.TypeOf(val) fx, exists := vm.typeToFxIndex[valType] @@ -446,7 +511,7 @@ func (vm *VM) verifyFxUsage(fxID int, assetID ids.ID) bool { if status := tx.Status(); !status.Fetched() { return false } - createAssetTx, ok := tx.t.tx.UnsignedTx.(*CreateAssetTx) + createAssetTx, ok := tx.UnsignedTx.(*CreateAssetTx) if !ok { return false } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 39c3ca6..e629c4b 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -15,7 +15,11 @@ import ( "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/nftfx" + "github.com/ava-labs/gecko/vms/propertyfx" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -50,10 +54,12 @@ func GetFirstTxFromGenesisTest(genesisBytes []byte, t *testing.T) *Tx { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) genesis := Genesis{} @@ -188,7 +194,7 @@ func GenesisVM(t *testing.T) *VM { func TestTxSerialization(t *testing.T) { expected := []byte{ // txID: - 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x01, // networkID: 0x00, 0x00, 0xa8, 0x66, // chainID: @@ -205,7 +211,7 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 
0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, @@ -226,7 +232,7 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, @@ -247,7 +253,7 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, @@ -263,20 +269,24 @@ func TestTxSerialization(t *testing.T) { 0x92, 0xf0, 0xee, 0x31, // number of inputs: 0x00, 0x00, 0x00, 0x00, - // number of operations: + // name length: + 0x00, 0x04, + // name: + 'n', 'a', 'm', 'e', + // symbol length: + 0x00, 0x04, + // symbol: + 's', 'y', 'm', 'b', + // denomination + 0x00, + // number of initial states: 0x00, 0x00, 0x00, 0x01, - // operation[0]: - // assetID: - 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - // number of inputs: + // fx index: 0x00, 0x00, 0x00, 0x00, // number of outputs: 0x00, 0x00, 0x00, 0x01, // fxID: - 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x06, // secp256k1 Mint Output: // threshold: 0x00, 0x00, 0x00, 0x01, @@ -290,23 +300,22 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } - unsignedTx := &OperationTx{ + unsignedTx := &CreateAssetTx{ BaseTx: BaseTx{ NetID: networkID, BCID: chainID, }, - Ops: []*Operation{ - &Operation{ - Asset: Asset{ - ID: asset, - }, - Outs: []*OperableOutput{ - &OperableOutput{ - Out: &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - 
Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Name: "name", + Symbol: "symb", + Denomination: 0, + States: []*InitialState{ + &InitialState{ + FxID: 0, + Outs: []verify.Verifiable{ + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, }, @@ -317,10 +326,8 @@ func TestTxSerialization(t *testing.T) { for _, key := range keys { addr := key.PublicKey().Address() - unsignedTx.Outs = append(unsignedTx.Outs, &TransferableOutput{ - Asset: Asset{ - ID: asset, - }, + unsignedTx.Outs = append(unsignedTx.Outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 20 * units.KiloAva, OutputOwners: secp256k1fx.OutputOwners{ @@ -335,10 +342,12 @@ func TestTxSerialization(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) b, err := c.Marshal(tx) @@ -441,29 +450,25 @@ func TestIssueTx(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - newTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + newTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: 
&secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&newTx.UnsignedTx) if err != nil { @@ -478,11 +483,9 @@ func TestIssueTx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - newTx.Creds = append(newTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + newTx.Creds = append(newTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -544,10 +547,12 @@ func TestGenesisGetUTXOs(t *testing.T) { ctx.Lock.Unlock() if len(utxos) != 7 { - t.Fatalf("Wrong number of utxos (%d) returned", len(utxos)) + t.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", 7, len(utxos)) } } +// Test issuing a transaction that consumes a currently pending UTXO. The +// transaction should be issued successfully. func TestIssueDependentTx(t *testing.T) { genesisBytes := BuildGenesisTest(t) @@ -574,39 +579,35 @@ func TestIssueDependentTx(t *testing.T) { key := keys[0] - firstTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + firstTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: genesisTx.ID()}, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{key.PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&firstTx.UnsignedTx) if err != nil { @@ -620,11 +621,9 @@ func TestIssueDependentTx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - firstTx.Creds = append(firstTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + firstTx.Creds = append(firstTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -639,27 +638,25 @@ func TestIssueDependentTx(t *testing.T) { t.Fatal(err) } - secondTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + secondTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: firstTx.ID(), - OutputIndex: 0, - }, - Asset: Asset{ID: genesisTx.ID()}, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: firstTx.ID(), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err = vm.codec.Marshal(&secondTx.UnsignedTx) if err != nil { @@ -673,11 +670,9 @@ func TestIssueDependentTx(t *testing.T) { fixedSig = [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - secondTx.Creds = append(secondTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + secondTx.Creds = append(secondTx.Creds, &secp256k1fx.Credential{ 
+ Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -703,3 +698,310 @@ func TestIssueDependentTx(t *testing.T) { t.Fatalf("Should have returned %d tx(s)", 2) } } + +// Test issuing a transaction that creates an NFT family +func TestIssueNFT(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.Empty.Prefix(0), + Fx: &secp256k1fx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty.Prefix(1), + Fx: &nftfx.Fx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Name: "Team Rocket", + Symbol: "TR", + Denomination: 0, + States: []*InitialState{&InitialState{ + FxID: 1, + Outs: []verify.Verifiable{ + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }}, + }} + + b, err := vm.codec.Marshal(createAssetTx) + if err != nil { + t.Fatal(err) + } + createAssetTx.Initialize(b) + + if _, err = vm.IssueTx(createAssetTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + mintNFTTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: createAssetTx.ID(), + OutputIndex: 0, + }}, + Op: &nftfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + GroupID: 1, + Payload: []byte{'h', 'e', 'l', 'l', 'o'}, + Outputs: []*secp256k1fx.OutputOwners{ + &secp256k1fx.OutputOwners{}, + }, + }, + }}, + 
}} + + unsignedBytes, err := vm.codec.Marshal(&mintNFTTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + mintNFTTx.Creds = append(mintNFTTx.Creds, &nftfx.Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }}, + }) + + b, err = vm.codec.Marshal(mintNFTTx) + if err != nil { + t.Fatal(err) + } + mintNFTTx.Initialize(b) + + if _, err = vm.IssueTx(mintNFTTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + transferNFTTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: mintNFTTx.ID(), + OutputIndex: 0, + }}, + Op: &nftfx.TransferOperation{ + Input: secp256k1fx.Input{}, + Output: nftfx.TransferOutput{ + GroupID: 1, + Payload: []byte{'h', 'e', 'l', 'l', 'o'}, + OutputOwners: secp256k1fx.OutputOwners{}, + }, + }, + }}, + }} + + transferNFTTx.Creds = append(transferNFTTx.Creds, &nftfx.Credential{}) + + b, err = vm.codec.Marshal(transferNFTTx) + if err != nil { + t.Fatal(err) + } + transferNFTTx.Initialize(b) + + if _, err = vm.IssueTx(transferNFTTx.Bytes(), nil); err != nil { + t.Fatal(err) + } +} + +// Test issuing a transaction that creates an Property family +func TestIssueProperty(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.Empty.Prefix(0), + Fx: &secp256k1fx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty.Prefix(1), + Fx: &nftfx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty.Prefix(2), + Fx: &propertyfx.Fx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 
0 + + createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Name: "Team Rocket", + Symbol: "TR", + Denomination: 0, + States: []*InitialState{&InitialState{ + FxID: 2, + Outs: []verify.Verifiable{ + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }}, + }} + + b, err := vm.codec.Marshal(createAssetTx) + if err != nil { + t.Fatal(err) + } + createAssetTx.Initialize(b) + + if _, err = vm.IssueTx(createAssetTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + mintPropertyTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: createAssetTx.ID(), + OutputIndex: 0, + }}, + Op: &propertyfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + OwnedOutput: propertyfx.OwnedOutput{}, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&mintPropertyTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + mintPropertyTx.Creds = append(mintPropertyTx.Creds, &propertyfx.Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }}, + }) + + b, err = vm.codec.Marshal(mintPropertyTx) + if err != nil { + t.Fatal(err) + } + mintPropertyTx.Initialize(b) + + if _, err = vm.IssueTx(mintPropertyTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + burnPropertyTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: 
[]*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: mintPropertyTx.ID(), + OutputIndex: 1, + }}, + Op: &propertyfx.BurnOperation{Input: secp256k1fx.Input{}}, + }}, + }} + + burnPropertyTx.Creds = append(burnPropertyTx.Creds, &propertyfx.Credential{}) + + b, err = vm.codec.Marshal(burnPropertyTx) + if err != nil { + t.Fatal(err) + } + burnPropertyTx.Initialize(b) + + if _, err = vm.IssueTx(burnPropertyTx.Bytes(), nil); err != nil { + t.Fatal(err) + } +} diff --git a/vms/avm/asset.go b/vms/components/ava/asset.go similarity index 91% rename from vms/avm/asset.go rename to vms/components/ava/asset.go index fc9ef07..710e6e4 100644 --- a/vms/avm/asset.go +++ b/vms/components/ava/asset.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "errors" @@ -16,7 +16,7 @@ var ( // Asset ... type Asset struct { - ID ids.ID `serialize:"true"` + ID ids.ID `serialize:"true" json:"assetID"` } // AssetID returns the ID of the contained asset diff --git a/vms/avm/asset_test.go b/vms/components/ava/asset_test.go similarity index 99% rename from vms/avm/asset_test.go rename to vms/components/ava/asset_test.go index 209cc81..40d6ea8 100644 --- a/vms/avm/asset_test.go +++ b/vms/components/ava/asset_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "testing" diff --git a/vms/components/ava/flow_checker.go b/vms/components/ava/flow_checker.go new file mode 100644 index 0000000..321f8dd --- /dev/null +++ b/vms/components/ava/flow_checker.go @@ -0,0 +1,57 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ava + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errInsufficientFunds = errors.New("insufficient funds") +) + +// FlowChecker ... +type FlowChecker struct { + consumed, produced map[[32]byte]uint64 + errs wrappers.Errs +} + +// NewFlowChecker ... +func NewFlowChecker() *FlowChecker { + return &FlowChecker{ + consumed: make(map[[32]byte]uint64), + produced: make(map[[32]byte]uint64), + } +} + +// Consume ... +func (fc *FlowChecker) Consume(assetID ids.ID, amount uint64) { fc.add(fc.consumed, assetID, amount) } + +// Produce ... +func (fc *FlowChecker) Produce(assetID ids.ID, amount uint64) { fc.add(fc.produced, assetID, amount) } + +func (fc *FlowChecker) add(value map[[32]byte]uint64, assetID ids.ID, amount uint64) { + var err error + assetIDKey := assetID.Key() + value[assetIDKey], err = math.Add64(value[assetIDKey], amount) + fc.errs.Add(err) +} + +// Verify ... +func (fc *FlowChecker) Verify() error { + if !fc.errs.Errored() { + for assetID, producedAssetAmount := range fc.produced { + consumedAssetAmount := fc.consumed[assetID] + if producedAssetAmount > consumedAssetAmount { + fc.errs.Add(errInsufficientFunds) + break + } + } + } + return fc.errs.Err +} diff --git a/vms/avm/metadata.go b/vms/components/ava/metadata.go similarity index 74% rename from vms/avm/metadata.go rename to vms/components/ava/metadata.go index fb29b44..3ae9228 100644 --- a/vms/avm/metadata.go +++ b/vms/components/ava/metadata.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "errors" @@ -15,25 +15,26 @@ var ( errMetadataNotInitialize = errors.New("metadata was never initialized and is not valid") ) -type metadata struct { +// Metadata ... 
+type Metadata struct { id ids.ID // The ID of this data bytes []byte // Byte representation of this data } -// Bytes returns the binary representation of this data -func (md *metadata) Initialize(bytes []byte) { +// Initialize set the bytes and ID +func (md *Metadata) Initialize(bytes []byte) { md.id = ids.NewID(hashing.ComputeHash256Array(bytes)) md.bytes = bytes } // ID returns the unique ID of this data -func (md *metadata) ID() ids.ID { return md.id } +func (md *Metadata) ID() ids.ID { return md.id } // Bytes returns the binary representation of this data -func (md *metadata) Bytes() []byte { return md.bytes } +func (md *Metadata) Bytes() []byte { return md.bytes } // Verify implements the verify.Verifiable interface -func (md *metadata) Verify() error { +func (md *Metadata) Verify() error { switch { case md == nil: return errNilMetadata diff --git a/vms/avm/metadata_test.go b/vms/components/ava/metadata_test.go similarity index 88% rename from vms/avm/metadata_test.go rename to vms/components/ava/metadata_test.go index 09c559b..bd6563b 100644 --- a/vms/avm/metadata_test.go +++ b/vms/components/ava/metadata_test.go @@ -1,21 +1,21 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "testing" ) func TestMetaDataVerifyNil(t *testing.T) { - md := (*metadata)(nil) + md := (*Metadata)(nil) if err := md.Verify(); err == nil { t.Fatalf("Should have errored due to nil metadata") } } func TestMetaDataVerifyUninitialized(t *testing.T) { - md := &metadata{} + md := &Metadata{} if err := md.Verify(); err == nil { t.Fatalf("Should have errored due to uninitialized metadata") } diff --git a/vms/components/ava/prefixed_state.go b/vms/components/ava/prefixed_state.go new file mode 100644 index 0000000..dd7f3e8 --- /dev/null +++ b/vms/components/ava/prefixed_state.go @@ -0,0 +1,208 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ava + +import ( + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/vms/components/codec" +) + +// Addressable is the interface a feature extension must provide to be able to +// be tracked as a part of the utxo set for a set of addresses +type Addressable interface { + Addresses() [][]byte +} + +const ( + platformUTXOID uint64 = iota + platformStatusID + platformFundsID + avmUTXOID + avmStatusID + avmFundsID +) + +const ( + stateCacheSize = 10000 + idCacheSize = 10000 +) + +type chainState struct { + *State + + utxoIDPrefix, statusIDPrefix, fundsIDPrefix uint64 + utxoID, statusID, fundsID cache.Cacher +} + +// UTXO attempts to load a utxo from platform's storage. +func (s *chainState) UTXO(id ids.ID) (*UTXO, error) { + return s.State.UTXO(UniqueID(id, s.utxoIDPrefix, s.utxoID)) +} + +// Funds returns the mapping from the 32 byte representation of an +// address to a list of utxo IDs that reference the address. +func (s *chainState) Funds(id ids.ID) ([]ids.ID, error) { + return s.IDs(UniqueID(id, s.fundsIDPrefix, s.fundsID)) +} + +// SpendUTXO consumes the provided platform utxo. 
+func (s *chainState) SpendUTXO(utxoID ids.ID) error { + utxo, err := s.UTXO(utxoID) + if err != nil { + return s.setStatus(utxoID, choices.Accepted) + } else if err := s.setUTXO(utxoID, nil); err != nil { + return err + } + + if addressable, ok := utxo.Out.(Addressable); ok { + return s.removeUTXO(addressable.Addresses(), utxoID) + } + return nil +} + +// FundUTXO adds the provided utxo to the database +func (s *chainState) FundUTXO(utxo *UTXO) error { + utxoID := utxo.InputID() + if _, err := s.status(utxoID); err == nil { + return s.setStatus(utxoID, choices.Unknown) + } else if err := s.setUTXO(utxoID, utxo); err != nil { + return err + } + + if addressable, ok := utxo.Out.(Addressable); ok { + return s.addUTXO(addressable.Addresses(), utxoID) + } + return nil +} + +// setUTXO saves the provided utxo to this chain's storage. +func (s *chainState) setUTXO(id ids.ID, utxo *UTXO) error { + return s.SetUTXO(UniqueID(id, s.utxoIDPrefix, s.utxoID), utxo) +} + +func (s *chainState) status(id ids.ID) (choices.Status, error) { + return s.Status(UniqueID(id, s.statusIDPrefix, s.statusID)) +} + +// setStatus saves the provided status to this chain's storage. +func (s *chainState) setStatus(id ids.ID, status choices.Status) error { + return s.State.SetStatus(UniqueID(id, s.statusIDPrefix, s.statusID), status) +} + +func (s *chainState) removeUTXO(addrs [][]byte, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := ids.NewID(hashing.ComputeHash256Array(addr)) + utxos := ids.Set{} + funds, _ := s.Funds(addrID) + utxos.Add(funds...) + utxos.Remove(utxoID) + if err := s.setFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} + +func (s *chainState) addUTXO(addrs [][]byte, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := ids.NewID(hashing.ComputeHash256Array(addr)) + utxos := ids.Set{} + funds, _ := s.Funds(addrID) + utxos.Add(funds...)
+ utxos.Add(utxoID) + if err := s.setFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} + +func (s *chainState) setFunds(id ids.ID, idSlice []ids.ID) error { + return s.SetIDs(UniqueID(id, s.fundsIDPrefix, s.fundsID), idSlice) +} + +// PrefixedState wraps a state object. By prefixing the state, there will +// be no collisions between different types of objects that have the same hash. +type PrefixedState struct { + platform, avm chainState +} + +// NewPrefixedState ... +func NewPrefixedState(db database.Database, codec codec.Codec) *PrefixedState { + state := &State{ + Cache: &cache.LRU{Size: stateCacheSize}, + DB: db, + Codec: codec, + } + return &PrefixedState{ + platform: chainState{ + State: state, + + utxoIDPrefix: platformUTXOID, + statusIDPrefix: platformStatusID, + fundsIDPrefix: platformFundsID, + + utxoID: &cache.LRU{Size: idCacheSize}, + statusID: &cache.LRU{Size: idCacheSize}, + fundsID: &cache.LRU{Size: idCacheSize}, + }, + avm: chainState{ + State: state, + + utxoIDPrefix: avmUTXOID, + statusIDPrefix: avmStatusID, + fundsIDPrefix: avmFundsID, + + utxoID: &cache.LRU{Size: idCacheSize}, + statusID: &cache.LRU{Size: idCacheSize}, + fundsID: &cache.LRU{Size: idCacheSize}, + }, + } +} + +// PlatformUTXO attempts to load a utxo from platform's storage. +func (s *PrefixedState) PlatformUTXO(id ids.ID) (*UTXO, error) { + return s.platform.UTXO(id) +} + +// PlatformFunds returns the mapping from the 32 byte representation of an +// address to a list of utxo IDs that reference the address. +func (s *PrefixedState) PlatformFunds(id ids.ID) ([]ids.ID, error) { + return s.platform.Funds(id) +} + +// SpendPlatformUTXO consumes the provided platform utxo. 
+func (s *PrefixedState) SpendPlatformUTXO(utxoID ids.ID) error { + return s.platform.SpendUTXO(utxoID) +} + +// FundPlatformUTXO adds the provided utxo to the database +func (s *PrefixedState) FundPlatformUTXO(utxo *UTXO) error { + return s.platform.FundUTXO(utxo) +} + +// AVMUTXO attempts to load a utxo from avm's storage. +func (s *PrefixedState) AVMUTXO(id ids.ID) (*UTXO, error) { + return s.avm.UTXO(id) +} + +// AVMFunds returns the mapping from the 32 byte representation of an +// address to a list of utxo IDs that reference the address. +func (s *PrefixedState) AVMFunds(id ids.ID) ([]ids.ID, error) { + return s.avm.Funds(id) +} + +// SpendAVMUTXO consumes the provided avm utxo. +func (s *PrefixedState) SpendAVMUTXO(utxoID ids.ID) error { + return s.avm.SpendUTXO(utxoID) +} + +// FundAVMUTXO adds the provided utxo to the database +func (s *PrefixedState) FundAVMUTXO(utxo *UTXO) error { + return s.avm.FundUTXO(utxo) +} diff --git a/vms/components/ava/state.go b/vms/components/ava/state.go new file mode 100644 index 0000000..a9c5424 --- /dev/null +++ b/vms/components/ava/state.go @@ -0,0 +1,153 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ava + +import ( + "errors" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + errCacheTypeMismatch = errors.New("type returned from cache doesn't match the expected type") +) + +// UniqueID returns a unique identifier +func UniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} + +// State is a thin wrapper around a database to provide caching, serialization, +// and de-serialization.
+type State struct { + Cache cache.Cacher + DB database.Database + Codec codec.Codec +} + +// UTXO attempts to load a utxo from storage. +func (s *State) UTXO(id ids.ID) (*UTXO, error) { + if utxoIntf, found := s.Cache.Get(id); found { + if utxo, ok := utxoIntf.(*UTXO); ok { + return utxo, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.DB.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + utxo := &UTXO{} + if err := s.Codec.Unmarshal(bytes, utxo); err != nil { + return nil, err + } + + s.Cache.Put(id, utxo) + return utxo, nil +} + +// SetUTXO saves the provided utxo to storage. +func (s *State) SetUTXO(id ids.ID, utxo *UTXO) error { + if utxo == nil { + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) + } + + bytes, err := s.Codec.Marshal(utxo) + if err != nil { + return err + } + + s.Cache.Put(id, utxo) + return s.DB.Put(id.Bytes(), bytes) +} + +// Status returns a status from storage. +func (s *State) Status(id ids.ID) (choices.Status, error) { + if statusIntf, found := s.Cache.Get(id); found { + if status, ok := statusIntf.(choices.Status); ok { + return status, nil + } + return choices.Unknown, errCacheTypeMismatch + } + + bytes, err := s.DB.Get(id.Bytes()) + if err != nil { + return choices.Unknown, err + } + + var status choices.Status + s.Codec.Unmarshal(bytes, &status) + + s.Cache.Put(id, status) + return status, nil +} + +// SetStatus saves a status in storage. 
+func (s *State) SetStatus(id ids.ID, status choices.Status) error { + if status == choices.Unknown { + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) + } + + s.Cache.Put(id, status) + + bytes, err := s.Codec.Marshal(status) + if err != nil { + return err + } + return s.DB.Put(id.Bytes(), bytes) +} + +// IDs returns a slice of IDs from storage +func (s *State) IDs(id ids.ID) ([]ids.ID, error) { + if idsIntf, found := s.Cache.Get(id); found { + if idSlice, ok := idsIntf.([]ids.ID); ok { + return idSlice, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.DB.Get(id.Bytes()) + if err != nil { + return nil, err + } + + idSlice := []ids.ID(nil) + if err := s.Codec.Unmarshal(bytes, &idSlice); err != nil { + return nil, err + } + + s.Cache.Put(id, idSlice) + return idSlice, nil +} + +// SetIDs saves a slice of IDs to the database. +func (s *State) SetIDs(id ids.ID, idSlice []ids.ID) error { + if len(idSlice) == 0 { + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) + } + + s.Cache.Put(id, idSlice) + + bytes, err := s.Codec.Marshal(idSlice) + if err != nil { + return err + } + + return s.DB.Put(id.Bytes(), bytes) +} diff --git a/vms/components/ava/test_verifiable.go b/vms/components/ava/test_verifiable.go new file mode 100644 index 0000000..34dce1d --- /dev/null +++ b/vms/components/ava/test_verifiable.go @@ -0,0 +1,20 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ava + +// TestVerifiable ... +type TestVerifiable struct{ Err error } + +// Verify ... +func (v *TestVerifiable) Verify() error { return v.Err } + +// TestTransferable ... +type TestTransferable struct { + TestVerifiable + + Val uint64 `serialize:"true"` +} + +// Amount ... 
+func (t *TestTransferable) Amount() uint64 { return t.Val } diff --git a/vms/avm/transferables.go b/vms/components/ava/transferables.go similarity index 61% rename from vms/avm/transferables.go rename to vms/components/ava/transferables.go index e5f3879..4aa906d 100644 --- a/vms/avm/transferables.go +++ b/vms/components/ava/transferables.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "bytes" @@ -9,6 +9,7 @@ import ( "sort" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/verify" ) @@ -21,15 +22,25 @@ var ( errNilTransferableFxInput = errors.New("nil transferable feature extension input is not valid") ) +// Transferable is the interface a feature extension must provide to transfer +// value between features extensions. +type Transferable interface { + verify.Verifiable + + // Amount returns how much value this output consumes of the asset in its + // transaction. + Amount() uint64 +} + // TransferableOutput ... type TransferableOutput struct { Asset `serialize:"true"` - Out FxTransferable `serialize:"true"` + Out Transferable `serialize:"true" json:"output"` } // Output returns the feature extension output that this Output is using. -func (out *TransferableOutput) Output() FxTransferable { return out.Out } +func (out *TransferableOutput) Output() Transferable { return out.Out } // Verify implements the verify.Verifiable interface func (out *TransferableOutput) Verify() error { @@ -90,11 +101,11 @@ type TransferableInput struct { UTXOID `serialize:"true"` Asset `serialize:"true"` - In FxTransferable `serialize:"true"` + In Transferable `serialize:"true" json:"input"` } // Input returns the feature extension input that this Input is using. 
-func (in *TransferableInput) Input() FxTransferable { return in.In } +func (in *TransferableInput) Input() Transferable { return in.In } // Verify implements the verify.Verifiable interface func (in *TransferableInput) Verify() error { @@ -126,7 +137,46 @@ func (ins innerSortTransferableInputs) Less(i, j int) bool { func (ins innerSortTransferableInputs) Len() int { return len(ins) } func (ins innerSortTransferableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] } -func sortTransferableInputs(ins []*TransferableInput) { sort.Sort(innerSortTransferableInputs(ins)) } -func isSortedAndUniqueTransferableInputs(ins []*TransferableInput) bool { +// SortTransferableInputs ... +func SortTransferableInputs(ins []*TransferableInput) { sort.Sort(innerSortTransferableInputs(ins)) } + +// IsSortedAndUniqueTransferableInputs ... +func IsSortedAndUniqueTransferableInputs(ins []*TransferableInput) bool { return utils.IsSortedAndUnique(innerSortTransferableInputs(ins)) } + +type innerSortTransferableInputsWithSigners struct { + ins []*TransferableInput + signers [][]*crypto.PrivateKeySECP256K1R +} + +func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { + iID, iIndex := ins.ins[i].InputSource() + jID, jIndex := ins.ins[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true + case 0: + return iIndex < jIndex + default: + return false + } +} +func (ins *innerSortTransferableInputsWithSigners) Len() int { return len(ins.ins) } +func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { + ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] + ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] +} + +// SortTransferableInputsWithSigners sorts the inputs and signers based on the +// input's utxo ID +func SortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) { + sort.Sort(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) +} + +// 
IsSortedAndUniqueTransferableInputsWithSigners returns true if the inputs are +// sorted and unique +func IsSortedAndUniqueTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) bool { + return utils.IsSortedAndUnique(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) +} diff --git a/vms/avm/transferables_test.go b/vms/components/ava/transferables_test.go similarity index 90% rename from vms/avm/transferables_test.go rename to vms/components/ava/transferables_test.go index 24accc9..80205a6 100644 --- a/vms/avm/transferables_test.go +++ b/vms/components/ava/transferables_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "bytes" @@ -21,11 +21,7 @@ func TestTransferableOutputVerifyNil(t *testing.T) { } func TestTransferableOutputVerifyNilFx(t *testing.T) { - to := &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - } + to := &TransferableOutput{Asset: Asset{ID: ids.Empty}} if err := to.Verify(); err == nil { t.Fatalf("Should have errored due to nil transferable fx output") } @@ -33,12 +29,8 @@ func TestTransferableOutputVerifyNilFx(t *testing.T) { func TestTransferableOutputVerify(t *testing.T) { to := &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{ - Val: 1, - }, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 1}, } if err := to.Verify(); err != nil { t.Fatal(err) @@ -54,34 +46,24 @@ func TestTransferableOutputSorting(t *testing.T) { outs := []*TransferableOutput{ &TransferableOutput{ - Asset: Asset{ - ID: ids.NewID([32]byte{1}), - }, - Out: &TestTransferable{Val: 1}, + Asset: Asset{ID: ids.NewID([32]byte{1})}, + Out: &TestTransferable{Val: 1}, }, &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{Val: 1}, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 1}, }, &TransferableOutput{ - Asset: Asset{ - 
ID: ids.NewID([32]byte{1}), - }, - Out: &TestTransferable{Val: 0}, + Asset: Asset{ID: ids.NewID([32]byte{1})}, + Out: &TestTransferable{Val: 0}, }, &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{Val: 0}, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 0}, }, &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{Val: 0}, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 0}, }, } @@ -243,11 +225,11 @@ func TestTransferableInputSorting(t *testing.T) { }, } - if isSortedAndUniqueTransferableInputs(ins) { + if IsSortedAndUniqueTransferableInputs(ins) { t.Fatalf("Shouldn't be sorted") } - sortTransferableInputs(ins) - if !isSortedAndUniqueTransferableInputs(ins) { + SortTransferableInputs(ins) + if !IsSortedAndUniqueTransferableInputs(ins) { t.Fatalf("Should be sorted") } @@ -260,7 +242,7 @@ func TestTransferableInputSorting(t *testing.T) { In: &TestTransferable{}, }) - if isSortedAndUniqueTransferableInputs(ins) { + if IsSortedAndUniqueTransferableInputs(ins) { t.Fatalf("Shouldn't be unique") } } diff --git a/vms/avm/utxo.go b/vms/components/ava/utxo.go similarity index 90% rename from vms/avm/utxo.go rename to vms/components/ava/utxo.go index e431e55..dee62ec 100644 --- a/vms/avm/utxo.go +++ b/vms/components/ava/utxo.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "errors" @@ -19,7 +19,7 @@ type UTXO struct { UTXOID `serialize:"true"` Asset `serialize:"true"` - Out verify.Verifiable `serialize:"true"` + Out verify.Verifiable `serialize:"true" json:"output"` } // Verify implements the verify.Verifiable interface diff --git a/vms/components/ava/utxo_id.go b/vms/components/ava/utxo_id.go new file mode 100644 index 0000000..b21000a --- /dev/null +++ b/vms/components/ava/utxo_id.go @@ -0,0 +1,83 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package ava + +import ( + "bytes" + "errors" + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils" +) + +var ( + errNilUTXOID = errors.New("nil utxo ID is not valid") + errNilTxID = errors.New("nil tx ID is not valid") +) + +// UTXOID ... +type UTXOID struct { + // Serialized: + TxID ids.ID `serialize:"true" json:"txID"` + OutputIndex uint32 `serialize:"true" json:"outputIndex"` + + // Symbol is false if the UTXO should be part of the DB + Symbol bool + // id is the unique ID of a UTXO, it is calculated from TxID and OutputIndex + id ids.ID +} + +// InputSource returns the source of the UTXO that this input is spending +func (utxo *UTXOID) InputSource() (ids.ID, uint32) { return utxo.TxID, utxo.OutputIndex } + +// InputID returns a unique ID of the UTXO that this input is spending +func (utxo *UTXOID) InputID() ids.ID { + if utxo.id.IsZero() { + utxo.id = utxo.TxID.Prefix(uint64(utxo.OutputIndex)) + } + return utxo.id +} + +// Symbolic returns if this is the ID of a UTXO in the DB, or if it is a +// symbolic input +func (utxo *UTXOID) Symbolic() bool { return utxo.Symbol } + +// Verify implements the verify.Verifiable interface +func (utxo *UTXOID) Verify() error { + switch { + case utxo == nil: + return errNilUTXOID + case utxo.TxID.IsZero(): + return errNilTxID + default: + return nil + } +} + +type innerSortUTXOIDs []*UTXOID + +func (utxos innerSortUTXOIDs) Less(i, j int) bool { + iID, iIndex := utxos[i].InputSource() + jID, jIndex := utxos[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true + case 0: + return iIndex < jIndex + default: + return false + } +} +func (utxos innerSortUTXOIDs) Len() int { return len(utxos) } +func (utxos innerSortUTXOIDs) Swap(i, j int) { utxos[j], utxos[i] = utxos[i], utxos[j] } + +// SortUTXOIDs ... +func SortUTXOIDs(utxos []*UTXOID) { sort.Sort(innerSortUTXOIDs(utxos)) } + +// IsSortedAndUniqueUTXOIDs ... 
+func IsSortedAndUniqueUTXOIDs(utxos []*UTXOID) bool { + return utils.IsSortedAndUnique(innerSortUTXOIDs(utxos)) +} diff --git a/vms/avm/utxo_id_test.go b/vms/components/ava/utxo_id_test.go similarity index 99% rename from vms/avm/utxo_id_test.go rename to vms/components/ava/utxo_id_test.go index fed513f..7944961 100644 --- a/vms/avm/utxo_id_test.go +++ b/vms/components/ava/utxo_id_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "testing" diff --git a/vms/avm/utxo_test.go b/vms/components/ava/utxo_test.go similarity index 95% rename from vms/avm/utxo_test.go rename to vms/components/ava/utxo_test.go index 6f043db..07b067a 100644 --- a/vms/avm/utxo_test.go +++ b/vms/components/ava/utxo_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "bytes" @@ -34,9 +34,6 @@ func TestUTXOVerifyEmpty(t *testing.T) { func TestUTXOSerialize(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) c.RegisterType(&secp256k1fx.MintInput{}) @@ -57,7 +54,7 @@ func TestUTXOSerialize(t *testing.T) { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // output: - 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0x02, 0x03, diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index a80d9bc..efbc5a0 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -324,6 +324,11 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { if 
!ok { return errUnmarshalUnregisteredType } + // Ensure struct actually does implement the interface + fieldType := field.Type() + if !typ.Implements(fieldType) { + return fmt.Errorf("%s does not implement interface %s", typ, fieldType) + } concreteInstancePtr := reflect.New(typ) // instance of the proper type // Unmarshal into the struct if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index 6fc4f25..336837f 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -538,3 +538,42 @@ func TestTooLargeUnmarshal(t *testing.T) { t.Fatalf("Should have errored due to too many bytes provided") } } + +type outerInterface interface { + ToInt() int +} + +type outer struct { + Interface outerInterface `serialize:"true"` +} + +type innerInterface struct{} + +func (it *innerInterface) ToInt() int { + return 0 +} + +type innerNoInterface struct{} + +// Ensure deserializing structs into the wrong interface errors gracefully +func TestUnmarshalInvalidInterface(t *testing.T) { + codec := NewDefault() + + codec.RegisterType(&innerInterface{}) + codec.RegisterType(&innerNoInterface{}) + + { + bytes := []byte{0, 0, 0, 0} + s := outer{} + if err := codec.Unmarshal(bytes, &s); err != nil { + t.Fatal(err) + } + } + { + bytes := []byte{0, 0, 0, 1} + s := outer{} + if err := codec.Unmarshal(bytes, &s); err == nil { + t.Fatalf("should have errored") + } + } +} diff --git a/vms/nftfx/credential.go b/vms/nftfx/credential.go new file mode 100644 index 0000000..bb7cca0 --- /dev/null +++ b/vms/nftfx/credential.go @@ -0,0 +1,10 @@ +package nftfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// Credential ... 
+type Credential struct { + secp256k1fx.Credential `serialize:"true"` +} diff --git a/vms/nftfx/factory.go b/vms/nftfx/factory.go new file mode 100644 index 0000000..5a3b0c2 --- /dev/null +++ b/vms/nftfx/factory.go @@ -0,0 +1,16 @@ +package nftfx + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ID that this Fx uses when labeled +var ( + ID = ids.NewID([32]byte{'n', 'f', 't', 'f', 'x'}) +) + +// Factory ... +type Factory struct{} + +// New ... +func (f *Factory) New() interface{} { return &Fx{} } diff --git a/vms/nftfx/factory_test.go b/vms/nftfx/factory_test.go new file mode 100644 index 0000000..0f533c9 --- /dev/null +++ b/vms/nftfx/factory_test.go @@ -0,0 +1,12 @@ +package nftfx + +import ( + "testing" +) + +func TestFactory(t *testing.T) { + factory := Factory{} + if fx := factory.New(); fx == nil { + t.Fatalf("Factory.New returned nil") + } +} diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go new file mode 100644 index 0000000..1440ff0 --- /dev/null +++ b/vms/nftfx/fx.go @@ -0,0 +1,118 @@ +package nftfx + +import ( + "bytes" + "errors" + + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errWrongTxType = errors.New("wrong tx type") + errWrongUTXOType = errors.New("wrong utxo type") + errWrongOperationType = errors.New("wrong operation type") + errWrongCredentialType = errors.New("wrong credential type") + + errNoUTXOs = errors.New("an operation must consume at least one UTXO") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") + + errWrongUniqueID = errors.New("wrong unique ID provided") + errWrongBytes = errors.New("wrong bytes provided") + + errCantTransfer = errors.New("cant transfer with this fx") +) + +// Fx ... +type Fx struct{ secp256k1fx.Fx } + +// Initialize ... 
+func (fx *Fx) Initialize(vmIntf interface{}) error { + if err := fx.InitializeVM(vmIntf); err != nil { + return err + } + + log := fx.VM.Logger() + log.Debug("Initializing nft fx") + + c := fx.VM.Codec() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&MintOutput{}), + c.RegisterType(&TransferOutput{}), + c.RegisterType(&MintOperation{}), + c.RegisterType(&TransferOperation{}), + c.RegisterType(&Credential{}), + ) + return errs.Err +} + +// VerifyOperation ... +func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { + tx, ok := txIntf.(secp256k1fx.Tx) + switch { + case !ok: + return errWrongTxType + case len(utxosIntf) != 1: + return errWrongNumberOfUTXOs + } + + cred, ok := credIntf.(*Credential) + if !ok { + return errWrongCredentialType + } + + switch op := opIntf.(type) { + case *MintOperation: + return fx.VerifyMintOperation(tx, op, cred, utxosIntf[0]) + case *TransferOperation: + return fx.VerifyTransferOperation(tx, op, cred, utxosIntf[0]) + default: + return errWrongOperationType + } +} + +// VerifyMintOperation ... +func (fx *Fx) VerifyMintOperation(tx secp256k1fx.Tx, op *MintOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*MintOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + switch { + case out.GroupID != op.GroupID: + return errWrongUniqueID + default: + return fx.Fx.VerifyCredentials(tx, &op.MintInput, &cred.Credential, &out.OutputOwners) + } +} + +// VerifyTransferOperation ... 
+func (fx *Fx) VerifyTransferOperation(tx secp256k1fx.Tx, op *TransferOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*TransferOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + switch { + case out.GroupID != op.Output.GroupID: + return errWrongUniqueID + case !bytes.Equal(out.Payload, op.Output.Payload): + return errWrongBytes + default: + return fx.VerifyCredentials(tx, &op.Input, &cred.Credential, &out.OutputOwners) + } +} + +// VerifyTransfer ... +func (fx *Fx) VerifyTransfer(_, _, _, _ interface{}) error { return errCantTransfer } diff --git a/vms/nftfx/fx_test.go b/vms/nftfx/fx_test.go new file mode 100644 index 0000000..d965902 --- /dev/null +++ b/vms/nftfx/fx_test.go @@ -0,0 +1,618 @@ +package nftfx + +import ( + "testing" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + txBytes = []byte{0, 1, 2, 3, 4, 5} + sigBytes = [crypto.SECP256K1RSigLen]byte{ + 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, + 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, + 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, + 0xbc, 0xa3, 0xb2, 0xd2, 0x5d, 0x51, 0xd6, 0x9b, + 0x0f, 0x28, 0x5d, 0xcd, 0x3f, 0x71, 0x17, 0x0a, + 0xf9, 0xbf, 0x2d, 0xb1, 0x10, 0x26, 0x5c, 0xe9, + 0xdc, 0xc3, 0x9d, 0x7a, 0x01, 0x50, 0x9d, 0xe8, + 0x35, 0xbd, 0xcb, 0x29, 0x3a, 0xd1, 0x49, 0x32, + 0x00, + } + addrBytes = [hashing.AddrLen]byte{ + 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, 0x09, + 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, 0x8d, + 0x39, 0x1a, 0xe7, 0xf0, + } +) + +func TestFxInitialize(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + fx := 
Fx{} + err := fx.Initialize(&vm) + if err != nil { + t.Fatal(err) + } +} + +func TestFxInitializeInvalid(t *testing.T) { + fx := Fx{} + err := fx.Initialize(nil) + if err == nil { + t.Fatalf("Should have returned an error") + } +} + +func TestFxVerifyMintOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyMintOperationWrongTx(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(nil, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid tx") + } +} + +func TestFxVerifyMintOperationWrongNumberUTXOs(t 
*testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to not enough utxos") + } +} + +func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to a bad credential") + } +} + +func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: 
[][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + ids.ShortEmpty, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") + } +} + +func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + 
SigIndices: []uint32{0}, + }, + GroupID: 1, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid Group ID") + } +} + +func TestFxVerifyTransferOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") + } +} + +func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ 
+ Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 2, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to a wrong unique id") + } +} + +func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{3}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to the wrong hash being produced") + } +} + +func TestFxVerifyOperationUnknownOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := 
fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, nil, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an unknown operation") + } +} + +func TestFxVerifyTransfer(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { + t.Fatalf("this Fx doesn't support transfers") + } +} diff --git a/vms/nftfx/mint_operation.go b/vms/nftfx/mint_operation.go new file mode 100644 index 0000000..ea6bdbc --- /dev/null +++ b/vms/nftfx/mint_operation.go @@ -0,0 +1,50 @@ +package nftfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errNilMintOperation = errors.New("nil mint operation") +) + +// MintOperation ... +type MintOperation struct { + MintInput secp256k1fx.Input `serialize:"true" json:"mintInput"` + GroupID uint32 `serialize:"true" json:"groupID"` + Payload []byte `serialize:"true" json:"payload"` + Outputs []*secp256k1fx.OutputOwners `serialize:"true" json:"outputs"` +} + +// Outs ... 
+func (op *MintOperation) Outs() []verify.Verifiable { + outs := []verify.Verifiable{} + for _, out := range op.Outputs { + outs = append(outs, &TransferOutput{ + GroupID: op.GroupID, + Payload: op.Payload, + OutputOwners: *out, + }) + } + return outs +} + +// Verify ... +func (op *MintOperation) Verify() error { + switch { + case op == nil: + return errNilMintOperation + case len(op.Payload) > MaxPayloadSize: + return errPayloadTooLarge + } + + for _, out := range op.Outputs { + if err := out.Verify(); err != nil { + return err + } + } + return op.MintInput.Verify() +} diff --git a/vms/nftfx/mint_operation_test.go b/vms/nftfx/mint_operation_test.go new file mode 100644 index 0000000..18513dc --- /dev/null +++ b/vms/nftfx/mint_operation_test.go @@ -0,0 +1,43 @@ +package nftfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestMintOperationVerifyNil(t *testing.T) { + op := (*MintOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("nil operation should have failed verification") + } +} + +func TestMintOperationVerifyTooLargePayload(t *testing.T) { + op := MintOperation{ + Payload: make([]byte, MaxPayloadSize+1), + } + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestMintOperationVerifyInvalidOutput(t *testing.T) { + op := MintOperation{ + Outputs: []*secp256k1fx.OutputOwners{&secp256k1fx.OutputOwners{ + Threshold: 1, + }}, + } + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestMintOperationOuts(t *testing.T) { + op := MintOperation{ + Outputs: []*secp256k1fx.OutputOwners{&secp256k1fx.OutputOwners{}}, + } + if outs := op.Outs(); len(outs) != 1 { + t.Fatalf("Wrong number of outputs returned") + } +} diff --git a/vms/nftfx/mint_output.go b/vms/nftfx/mint_output.go new file mode 100644 index 0000000..6a40c08 --- /dev/null +++ b/vms/nftfx/mint_output.go @@ -0,0 +1,11 @@ +package nftfx + +import ( + 
"github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// MintOutput ... +type MintOutput struct { + GroupID uint32 `serialize:"true" json:"groupID"` + secp256k1fx.OutputOwners `serialize:"true"` +} diff --git a/vms/nftfx/transfer_operation.go b/vms/nftfx/transfer_operation.go new file mode 100644 index 0000000..5e1482b --- /dev/null +++ b/vms/nftfx/transfer_operation.go @@ -0,0 +1,33 @@ +package nftfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errNilTransferOperation = errors.New("nil transfer operation") +) + +// TransferOperation ... +type TransferOperation struct { + Input secp256k1fx.Input `serialize:"true" json:"input"` + Output TransferOutput `serialize:"true" json:"output"` +} + +// Outs ... +func (op *TransferOperation) Outs() []verify.Verifiable { + return []verify.Verifiable{&op.Output} +} + +// Verify ... +func (op *TransferOperation) Verify() error { + switch { + case op == nil: + return errNilTransferOperation + default: + return verify.All(&op.Input, &op.Output) + } +} diff --git a/vms/nftfx/transfer_operation_test.go b/vms/nftfx/transfer_operation_test.go new file mode 100644 index 0000000..80357bb --- /dev/null +++ b/vms/nftfx/transfer_operation_test.go @@ -0,0 +1,32 @@ +package nftfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestTransferOperationVerifyNil(t *testing.T) { + op := (*TransferOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("nil operation should have failed verification") + } +} + +func TestTransferOperationInvalid(t *testing.T) { + op := TransferOperation{Input: secp256k1fx.Input{ + SigIndices: []uint32{1, 0}, + }} + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestTransferOperationOuts(t *testing.T) { + op := TransferOperation{ + Output: TransferOutput{}, + } + if outs := op.Outs(); len(outs) != 1 { + t.Fatalf("Wrong number of 
outputs returned") + } +} diff --git a/vms/nftfx/transfer_output.go b/vms/nftfx/transfer_output.go new file mode 100644 index 0000000..d46cd46 --- /dev/null +++ b/vms/nftfx/transfer_output.go @@ -0,0 +1,36 @@ +package nftfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +const ( + // MaxPayloadSize is the maximum size that can be placed into a payload + MaxPayloadSize = 1 << 10 +) + +var ( + errNilTransferOutput = errors.New("nil transfer output") + errPayloadTooLarge = errors.New("payload too large") +) + +// TransferOutput ... +type TransferOutput struct { + GroupID uint32 `serialize:"true" json:"groupID"` + Payload []byte `serialize:"true" json:"payload"` + secp256k1fx.OutputOwners `serialize:"true"` +} + +// Verify ... +func (out *TransferOutput) Verify() error { + switch { + case out == nil: + return errNilTransferOutput + case len(out.Payload) > MaxPayloadSize: + return errPayloadTooLarge + default: + return out.OutputOwners.Verify() + } +} diff --git a/vms/nftfx/transfer_output_test.go b/vms/nftfx/transfer_output_test.go new file mode 100644 index 0000000..d3d8ca9 --- /dev/null +++ b/vms/nftfx/transfer_output_test.go @@ -0,0 +1,38 @@ +package nftfx + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestTransferOutputVerifyNil(t *testing.T) { + to := (*TransferOutput)(nil) + if err := to.Verify(); err == nil { + t.Fatalf("TransferOutput.Verify should have errored on nil") + } +} + +func TestTransferOutputLargePayload(t *testing.T) { + to := TransferOutput{ + Payload: make([]byte, MaxPayloadSize+1), + } + if err := to.Verify(); err == nil { + t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") + } +} + +func TestTransferOutputInvalidSecp256k1Output(t *testing.T) { + to := TransferOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{ + ids.ShortEmpty, + ids.ShortEmpty, + }, + }, + } + if err := to.Verify(); err == nil { 
+ t.Fatalf("TransferOutput.Verify should have errored on an invalid secp256k1 output owners structure") + } +} diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 46ba21e..9d6d5cf 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -325,9 +325,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, // nonce - defaultStakeAmount, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultStakeAmount, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID defaultKey.PublicKey().Address(), // destination diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_default_subnet_validator_tx.go index bf398c7..10d41fe 100644 --- a/vms/platformvm/add_default_subnet_validator_tx.go +++ b/vms/platformvm/add_default_subnet_validator_tx.go @@ -184,10 +184,7 @@ func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*ve // If this proposal is aborted, chain state doesn't change onAbortDB := versiondb.New(db) - onAccept := func() { - tx.vm.resetTimer() - } - return onCommitDB, onAbortDB, onAccept, nil, nil + return onCommitDB, onAbortDB, tx.vm.resetTimer, nil, nil } // InitiallyPrefersCommit returns true if the proposed validators start time is diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_nondefault_subnet_validator_tx.go index 6173950..531570a 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx.go @@ -162,7 +162,7 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) ( } var subnet *CreateSubnetTx for _, sn := range subnets { - if 
sn.ID.Equals(tx.SubnetID()) { + if sn.id.Equals(tx.SubnetID()) { subnet = sn break } diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go index 2d63f06..c29faf1 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go @@ -28,7 +28,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -48,7 +48,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID+1, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -67,7 +67,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -87,7 +87,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -107,7 +107,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - 
testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -126,7 +126,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())-1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -147,7 +147,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix())-1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -167,7 +167,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix())+1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -187,7 +187,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -212,7 +212,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -235,7 
+235,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -245,7 +245,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } _, _, _, _, err = tx.SemanticVerify(vm.DB) if err != nil { - t.Log(testSubnet1.ID) + t.Log(testSubnet1.id) subnets, err := vm.getSubnets(vm.DB) if err != nil { t.Fatal(err) @@ -253,7 +253,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { if len(subnets) == 0 { t.Fatal("no subnets found") } - t.Logf("subnets[0].ID: %v", subnets[0].ID) + t.Logf("subnets[0].ID: %v", subnets[0].id) t.Fatal(err) } @@ -290,7 +290,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix()), // start validating non-default subnet before default subnet uint64(DSEndTime.Unix()), pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -324,7 +324,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix())-1, // start validating non-default subnet before default subnet uint64(DSEndTime.Unix()), pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -346,7 +346,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix()), uint64(DSEndTime.Unix())+1, // stop validating non-default subnet after stopping validating default subnet pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, 
defaultKey, @@ -368,7 +368,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix()), // same start time as for default subnet uint64(DSEndTime.Unix()), // same end time as for default subnet pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -389,12 +389,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -429,7 +429,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer @@ -451,7 +451,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -465,7 
+465,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { SortByStartTime: false, Txs: []TimedTx{tx}, }, - testSubnet1.ID, + testSubnet1.id, ) // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -475,7 +475,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -494,17 +494,17 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { &EventHeap{ SortByStartTime: false, }, - testSubnet1.ID, + testSubnet1.id, ) // Case 9: Too many signatures tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time keys[0].PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, defaultKey, // tx fee payer @@ -520,12 +520,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case 10: Too few signatures tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time keys[0].PublicKey().Address(), 
// node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[2]}, defaultKey, // tx fee payer @@ -541,12 +541,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case 10: Control Signature from invalid key tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time keys[0].PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], keys[3]}, defaultKey, // tx fee payer @@ -563,12 +563,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case 11: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -582,7 +582,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { SortByStartTime: true, Txs: []TimedTx{tx}, }, - testSubnet1.ID, + testSubnet1.id, ) // Node with ID nodeIDKey.PublicKey().Address() now pending validator for subnet with ID testSubnet1.ID @@ -604,7 +604,7 @@ func 
TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go index b126ec8..2b48707 100644 --- a/vms/platformvm/advance_time_tx.go +++ b/vms/platformvm/advance_time_tx.go @@ -7,6 +7,8 @@ import ( "fmt" "time" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/versiondb" ) @@ -86,7 +88,7 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa return nil, nil, nil, nil, err } - current, pending, err := tx.vm.calculateValidators(db, tx.Timestamp(), DefaultSubnetID) + current, pending, _, _, err := tx.vm.calculateValidators(db, tx.Timestamp(), DefaultSubnetID) if err != nil { return nil, nil, nil, nil, err } @@ -98,48 +100,71 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa return nil, nil, nil, nil, err } - // For each subnet, calculate what current and pending validator sets should be + // For each Subnet, calculate what current and pending validator sets should be // given new timestamp + + // Key: Subnet ID + // Value: IDs of validators that will have started validating this Subnet when + // timestamp is advanced to tx.Timestamp() + startedValidating := make(map[ids.ID]ids.ShortSet, 0) subnets, err := tx.vm.getSubnets(db) if err != nil { return nil, nil, nil, nil, err } for _, subnet := range subnets { - current, pending, err := tx.vm.calculateValidators(db, tx.Timestamp(), subnet.ID) + current, pending, started, _, err := tx.vm.calculateValidators(db, tx.Timestamp(), subnet.id) if err != nil { return nil, nil, nil, nil, err } - - if err := tx.vm.putCurrentValidators(onCommitDB, current, subnet.ID); err != nil { 
+ if err := tx.vm.putCurrentValidators(onCommitDB, current, subnet.id); err != nil { return nil, nil, nil, nil, err } - if err := tx.vm.putPendingValidators(onCommitDB, pending, subnet.ID); err != nil { + if err := tx.vm.putPendingValidators(onCommitDB, pending, subnet.id); err != nil { return nil, nil, nil, nil, err } + startedValidating[subnet.ID()] = started } // If this block is committed, update the validator sets // onAbortDB or onCommitDB should commit (flush to vm.DB) before this is called - updateValidators := func() { + onCommitFunc := func() { + // For each Subnet, update the node's validator manager to reflect current Subnet membership subnets, err := tx.vm.getSubnets(tx.vm.DB) if err != nil { tx.vm.Ctx.Log.Error("failed to get subnets: %s", err) return } for _, subnet := range subnets { - if err := tx.vm.updateValidators(subnet.ID); err != nil { - tx.vm.Ctx.Log.Debug("failed to update validators on the default subnet: %s", err) + if err := tx.vm.updateValidators(subnet.id); err != nil { + tx.vm.Ctx.Log.Debug("failed to update Subnet %s: %s", subnet.id, err) } } if err := tx.vm.updateValidators(DefaultSubnetID); err != nil { - tx.vm.Ctx.Log.Fatal("failed to update validators on the default subnet: %s", err) + tx.vm.Ctx.Log.Fatal("failed to update Default Subnet: %s", err) + } + + // If this node started validating a Subnet, create the blockchains that the Subnet validates + chains, err := tx.vm.getChains(tx.vm.DB) // all blockchains + if err != nil { + tx.vm.Ctx.Log.Error("couldn't get blockchains: %s", err) + return + } + for subnetID, validatorIDs := range startedValidating { + if !validatorIDs.Contains(tx.vm.Ctx.NodeID) { + continue + } + for _, chain := range chains { + if chain.SubnetID.Equals(subnetID) { + tx.vm.createChain(chain) + } + } } } // Specify what the state of the chain will be if this proposal is aborted onAbortDB := versiondb.New(db) // state doesn't change - return onCommitDB, onAbortDB, updateValidators, nil, nil + return 
onCommitDB, onAbortDB, onCommitFunc, nil, nil } // InitiallyPrefersCommit returns true if the proposed time isn't after the diff --git a/vms/platformvm/atomic_block.go b/vms/platformvm/atomic_block.go new file mode 100644 index 0000000..8b973ad --- /dev/null +++ b/vms/platformvm/atomic_block.go @@ -0,0 +1,154 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/vms/components/core" +) + +var ( + errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") +) + +// AtomicTx is an operation that can be decided without being proposed, but must have special control over database commitment +type AtomicTx interface { + initialize(vm *VM) error + + ID() ids.ID + + // UTXOs this tx consumes + InputUTXOs() ids.Set + + // Attempt to verify this transaction with the provided state. The provided + // database can be modified arbitrarily. + SemanticVerify(database.Database) error + + Accept(database.Batch) error +} + +// AtomicBlock being accepted results in the transaction contained in the +// block to be accepted and committed to the chain. 
+type AtomicBlock struct { + CommonDecisionBlock `serialize:"true"` + + Tx AtomicTx `serialize:"true"` + + inputs ids.Set +} + +// initialize this block +func (ab *AtomicBlock) initialize(vm *VM, bytes []byte) error { + if err := ab.CommonDecisionBlock.initialize(vm, bytes); err != nil { + return err + } + return ab.Tx.initialize(vm) +} + +// Reject implements the snowman.Block interface +func (ab *AtomicBlock) conflicts(s ids.Set) bool { + if ab.Status() == choices.Accepted { + return false + } + if ab.inputs.Overlaps(s) { + return true + } + return ab.parentBlock().conflicts(s) +} + +// Verify this block performs a valid state transition. +// +// The parent block must be a proposal +// +// This function also sets onAcceptDB database if the verification passes. +func (ab *AtomicBlock) Verify() error { + parentBlock := ab.parentBlock() + + ab.inputs = ab.Tx.InputUTXOs() + + if parentBlock.conflicts(ab.inputs) { + return errConflictingParentTxs + } + + // AtomicBlock is not a modifier on a proposal block, so its parent must be + // a decision. 
+ parent, ok := parentBlock.(decision) + if !ok { + return errInvalidBlockType + } + + pdb := parent.onAccept() + + ab.onAcceptDB = versiondb.New(pdb) + if err := ab.Tx.SemanticVerify(ab.onAcceptDB); err != nil { + return err + } + + ab.vm.currentBlocks[ab.ID().Key()] = ab + ab.parentBlock().addChild(ab) + return nil +} + +// Accept implements the snowman.Block interface +func (ab *AtomicBlock) Accept() { + ab.vm.Ctx.Log.Verbo("Accepting block with ID %s", ab.ID()) + + ab.CommonBlock.Accept() + + // Update the state of the chain in the database + if err := ab.onAcceptDB.Commit(); err != nil { + ab.vm.Ctx.Log.Error("unable to commit onAcceptDB") + } + + batch, err := ab.vm.DB.CommitBatch() + if err != nil { + ab.vm.Ctx.Log.Fatal("unable to commit vm's DB") + } + defer ab.vm.DB.Abort() + + if err := ab.Tx.Accept(batch); err != nil { + ab.vm.Ctx.Log.Error("unable to atomically commit block") + } + + for _, child := range ab.children { + child.setBaseDatabase(ab.vm.DB) + } + if ab.onAcceptFunc != nil { + ab.onAcceptFunc() + } + + parent := ab.parentBlock() + // remove this block and its parent from memory + parent.free() + ab.free() +} + +// newAtomicBlock returns a new *AtomicBlock where the block's parent, a +// decision block, has ID [parentID]. 
+func (vm *VM) newAtomicBlock(parentID ids.ID, tx AtomicTx) (*AtomicBlock, error) { + ab := &AtomicBlock{ + CommonDecisionBlock: CommonDecisionBlock{ + CommonBlock: CommonBlock{ + Block: core.NewBlock(parentID), + vm: vm, + }, + }, + Tx: tx, + } + + // We serialize this block as a Block so that it can be deserialized into a + // Block + blk := Block(ab) + bytes, err := Codec.Marshal(&blk) + if err != nil { + return nil, err + } + ab.Block.Initialize(bytes, vm.SnowmanVM) + return ab, nil +} diff --git a/vms/platformvm/common_blocks.go b/vms/platformvm/common_blocks.go index 5023a44..9d143f8 100644 --- a/vms/platformvm/common_blocks.go +++ b/vms/platformvm/common_blocks.go @@ -6,10 +6,12 @@ package platformvm import ( "errors" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/vms/components/missing" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/snowman" "github.com/ava-labs/gecko/vms/components/core" ) @@ -87,6 +89,8 @@ type Block interface { // [bytes] is the byte representation of this block initialize(vm *VM, bytes []byte) error + conflicts(ids.Set) bool + // parentBlock returns the parent block, similarly to Parent. However, it // provides the more specific staking.Block interface. 
parentBlock() Block @@ -142,6 +146,14 @@ func (cb *CommonBlock) free() { cb.children = nil } +// Reject implements the snowman.Block interface +func (cb *CommonBlock) conflicts(s ids.Set) bool { + if cb.Status() == choices.Accepted { + return false + } + return cb.parentBlock().conflicts(s) +} + // Parent returns this block's parent func (cb *CommonBlock) Parent() snowman.Block { parent := cb.parentBlock() diff --git a/vms/platformvm/create_chain_tx.go b/vms/platformvm/create_chain_tx.go index 74bd3f0..241f052 100644 --- a/vms/platformvm/create_chain_tx.go +++ b/vms/platformvm/create_chain_tx.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" - "github.com/ava-labs/gecko/chains" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" @@ -15,8 +14,9 @@ import ( ) var ( - errInvalidVMID = errors.New("invalid VM ID") - errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") + errInvalidVMID = errors.New("invalid VM ID") + errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") + errControlSigsNotSortedAndUnique = errors.New("control signatures must be sorted and unique") ) // UnsignedCreateChainTx is an unsigned CreateChainTx @@ -24,6 +24,9 @@ type UnsignedCreateChainTx struct { // ID of the network this blockchain exists on NetworkID uint32 `serialize:"true"` + // ID of the Subnet that validates this blockchain + SubnetID ids.ID `serialize:"true"` + // Next unused nonce of account paying the transaction fee for this transaction. // Currently unused, as there are no tx fees. 
Nonce uint64 `serialize:"true"` @@ -37,7 +40,7 @@ type UnsignedCreateChainTx struct { // IDs of the feature extensions running on the new chain FxIDs []ids.ID `serialize:"true"` - // Byte representation of state of the new chain + // Byte representation of genesis state of the new chain GenesisData []byte `serialize:"true"` } @@ -45,11 +48,19 @@ type UnsignedCreateChainTx struct { type CreateChainTx struct { UnsignedCreateChainTx `serialize:"true"` - Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + // Address of the account that provides the transaction fee + // Set in SemanticVerify + PayerAddress ids.ShortID + + // Signature of key whose account provides the transaction fee + PayerSig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + // Signatures from Subnet's control keys + // Should not empty slice, not nil, if there are no control sigs + ControlSigs [][crypto.SECP256K1RSigLen]byte `serialize:"true"` vm *VM id ids.ID - key crypto.PublicKey // public key of transaction signer bytes []byte } @@ -64,10 +75,6 @@ func (tx *CreateChainTx) initialize(vm *VM) error { // ID of this transaction func (tx *CreateChainTx) ID() ids.ID { return tx.id } -// Key returns the public key of the signer of this transaction -// Precondition: tx.Verify() has been called and returned nil -func (tx *CreateChainTx) Key() crypto.PublicKey { return tx.key } - // Bytes returns the byte representation of a CreateChainTx func (tx *CreateChainTx) Bytes() []byte { return tx.bytes } @@ -77,16 +84,20 @@ func (tx *CreateChainTx) SyntacticVerify() error { switch { case tx == nil: return errNilTx - case tx.key != nil: - return nil // Only verify the transaction once + case !tx.PayerAddress.IsZero(): // Only verify the transaction once + return nil case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network return errWrongNetworkID case tx.id.IsZero(): return errInvalidID case tx.VMID.IsZero(): return errInvalidVMID + case tx.SubnetID.Equals(DefaultSubnetID): + 
return errDSCantValidate case !ids.IsSortedAndUniqueIDs(tx.FxIDs): return errFxIDsNotSortedAndUnique + case !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs): + return errControlSigsNotSortedAndUnique } unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) @@ -95,11 +106,11 @@ func (tx *CreateChainTx) SyntacticVerify() error { return err } - key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) + payerKey, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.PayerSig[:]) if err != nil { return err } - tx.key = key + tx.PayerAddress = payerKey.Address() return nil } @@ -125,10 +136,12 @@ func (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) { } // Deduct tx fee from payer's account - account, err := tx.vm.getAccount(db, tx.Key().Address()) + account, err := tx.vm.getAccount(db, tx.PayerAddress) if err != nil { return nil, err } + // txFee is removed in account.Remove + // TODO: Consider changing Remove to be parameterized on total amount (inc. 
tx fee) to remove account, err = account.Remove(0, tx.Nonce) if err != nil { return nil, err @@ -137,20 +150,55 @@ func (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) { return nil, err } - // If this proposal is committed, create the new blockchain using the chain manager + // Verify that this transaction has sufficient control signatures + subnets, err := tx.vm.getSubnets(db) // all subnets that exist + if err != nil { + return nil, err + } + var subnet *CreateSubnetTx // the subnet that will validate the new chain + for _, sn := range subnets { + if sn.id.Equals(tx.SubnetID) { + subnet = sn + break + } + } + if subnet == nil { + return nil, fmt.Errorf("there is no subnet with ID %s", tx.SubnetID) + } + if len(tx.ControlSigs) != int(subnet.Threshold) { + return nil, fmt.Errorf("expected tx to have %d control sigs but has %d", subnet.Threshold, len(tx.ControlSigs)) + } + + unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte representation of the unsigned transaction + if err != nil { + return nil, err + } + unsignedBytesHash := hashing.ComputeHash256(unsignedBytes) + + // Each element is ID of key that signed this tx + controlIDs := make([]ids.ShortID, len(tx.ControlSigs)) + for i, sig := range tx.ControlSigs { + key, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, sig[:]) + if err != nil { + return nil, err + } + controlIDs[i] = key.Address() + } + + // Verify each control signature on this tx is from a control key + controlKeys := ids.ShortSet{} + controlKeys.Add(subnet.ControlKeys...) 
+ for _, controlID := range controlIDs { + if !controlKeys.Contains(controlID) { + return nil, errors.New("tx has control signature from key not in subnet's ControlKeys") + } + } + + // If this proposal is committed and this node is a member of the + // subnet that validates the blockchain, create the blockchain onAccept := func() { - chainParams := chains.ChainParameters{ - ID: tx.ID(), - GenesisData: tx.GenesisData, - VMAlias: tx.VMID.String(), - } - for _, fxID := range tx.FxIDs { - chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) - } - // TODO: Not sure how else to make this not nil pointer error during tests - if tx.vm.ChainManager != nil { - tx.vm.ChainManager.CreateChain(chainParams) - } + tx.vm.createChain(tx) } return onAccept, nil @@ -166,10 +214,14 @@ func (chains createChainList) Bytes() []byte { return bytes } -func (vm *VM) newCreateChainTx(nonce uint64, genesisData []byte, vmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { +func (vm *VM) newCreateChainTx(nonce uint64, subnetID ids.ID, genesisData []byte, + vmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32, + controlKeys []*crypto.PrivateKeySECP256K1R, + payerKey *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { tx := &CreateChainTx{ UnsignedCreateChainTx: UnsignedCreateChainTx{ NetworkID: networkID, + SubnetID: subnetID, Nonce: nonce, GenesisData: genesisData, VMID: vmID, @@ -178,17 +230,33 @@ func (vm *VM) newCreateChainTx(nonce uint64, genesisData []byte, vmID ids.ID, fx }, } + // Generate byte repr. of unsigned transaction unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) - unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. 
of unsigned transaction + unsignedBytes, err := Codec.Marshal(&unsignedIntf) if err != nil { return nil, err } + unsignedBytesHash := hashing.ComputeHash256(unsignedBytes) + + // Sign the tx with control keys + tx.ControlSigs = make([][crypto.SECP256K1RSigLen]byte, len(controlKeys)) + for i, key := range controlKeys { + sig, err := key.SignHash(unsignedBytesHash) + if err != nil { + return nil, err + } + copy(tx.ControlSigs[i][:], sig) + } - sig, err := key.Sign(unsignedBytes) + // Sort the control signatures + crypto.SortSECP2561RSigs(tx.ControlSigs) + + // Sign with the payer key + payerSig, err := payerKey.Sign(unsignedBytes) if err != nil { return nil, err } - copy(tx.Sig[:], sig) + copy(tx.PayerSig[:], payerSig) return tx, tx.initialize(vm) } diff --git a/vms/platformvm/create_chain_tx_test.go b/vms/platformvm/create_chain_tx_test.go index 8c555c6..f9b8476 100644 --- a/vms/platformvm/create_chain_tx_test.go +++ b/vms/platformvm/create_chain_tx_test.go @@ -6,8 +6,8 @@ package platformvm import ( "testing" - "github.com/ava-labs/gecko/database/versiondb" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/vms/avm" ) @@ -24,18 +24,19 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { // Case 2: network ID is wrong tx, err := vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID+1, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, ) if err != nil { t.Fatal(err) } err = tx.SyntacticVerify() - t.Log(err) if err == nil { t.Fatal("should've errored because network ID is wrong") } @@ -43,11 +44,13 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { // case 3: tx ID is empty tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, ) if err != nil { @@ -61,11 
+64,13 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { // Case 4: vm ID is empty tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, ) if err != nil { @@ -75,62 +80,189 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { if err := tx.SyntacticVerify(); err == nil { t.Fatal("should've errored because tx ID is empty") } -} -func TestSemanticVerify(t *testing.T) { - vm := defaultVM() - - // create a tx - tx, err := vm.newCreateChainTx( + // Case 5: Control sigs not sorted + tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + // Reverse signature order + tx.ControlSigs[0], tx.ControlSigs[1] = tx.ControlSigs[1], tx.ControlSigs[0] + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should've errored because control sigs not sorted") + } + + // Case 6: Control sigs not unique + tx, err = vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.ControlSigs[0] = tx.ControlSigs[1] + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should've errored because control sigs not unique") + } + + // Case 7: Valid tx passes syntactic verification + tx, err = vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatalf("should have passed verification but got %v", err) + } +} + +// Ensure SemanticVerify fails when 
there are not enough control sigs +func TestCreateChainTxInsufficientControlSigs(t *testing.T) { + vm := defaultVM() + + // Case 1: No control sigs (2 are needed) + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + nil, defaultKey, ) if err != nil { t.Fatal(err) } - newDB := versiondb.New(vm.DB) - - _, err = tx.SemanticVerify(newDB) - if err != nil { - t.Fatal(err) + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have errored because there are no control sigs") } - chains, err := vm.getChains(newDB) - if err != nil { - t.Fatal(err) - } - for _, c := range chains { - if c.ID().Equals(tx.ID()) { - return - } - } - t.Fatalf("Should have added the chain to the set of chains") -} - -func TestSemanticVerifyAlreadyExisting(t *testing.T) { - vm := defaultVM() - - // create a tx - tx, err := vm.newCreateChainTx( + // Case 2: 1 control sig (2 are needed) + tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0]}, defaultKey, ) if err != nil { t.Fatal(err) } - // put the chain in existing chain + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have errored because there are no control sigs") + } +} + +// Ensure SemanticVerify fails when an incorrect control signature is given +func TestCreateChainTxWrongControlSig(t *testing.T) { + vm := defaultVM() + + // Generate new, random key to sign tx with + factory := crypto.FactorySECP256K1R{} + key, err := factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], key.(*crypto.PrivateKeySECP256K1R)}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + 
t.Fatal("should have errored because incorrect control sig given") + } +} + +// Ensure SemanticVerify fails when the Subnet the blockchain specifies as +// its validator set doesn't exist +func TestCreateChainTxNoSuchSubnet(t *testing.T) { + vm := defaultVM() + + tx, err := vm.newCreateChainTx( + defaultNonce+1, + ids.NewID([32]byte{1, 9, 124, 11, 20}), // pick some random ID for subnet + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have errored because Subnet doesn't exist") + } +} + +func TestCreateChainTxAlreadyExists(t *testing.T) { + vm := defaultVM() + + // create a tx + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + // put the chain in existing chain list if err := vm.putChains(vm.DB, []*CreateChainTx{tx}); err != nil { t.Fatal(err) } @@ -140,3 +272,29 @@ func TestSemanticVerifyAlreadyExisting(t *testing.T) { t.Fatalf("should have failed because there is already a chain with ID %s", tx.id) } } + +// Ensure valid tx passes semanticVerify +func TestCreateChainTxValid(t *testing.T) { + vm := defaultVM() + + // create a valid tx + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + _, err = tx.SemanticVerify(vm.DB) + if err != nil { + t.Fatalf("expected tx to pass verification but got error: %v", err) + } +} diff --git a/vms/platformvm/create_subnet_tx.go b/vms/platformvm/create_subnet_tx.go index 
0d33ca7..16be8be 100644 --- a/vms/platformvm/create_subnet_tx.go +++ b/vms/platformvm/create_subnet_tx.go @@ -8,8 +8,8 @@ import ( "fmt" "github.com/ava-labs/gecko/database" - "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" ) @@ -17,18 +17,14 @@ import ( const maxThreshold = 25 var ( - errThresholdExceedsKeysLen = errors.New("threshold must be no more than number of control keys") - errThresholdTooHigh = fmt.Errorf("threshold can't be greater than %d", maxThreshold) + errThresholdExceedsKeysLen = errors.New("threshold must be no more than number of control keys") + errThresholdTooHigh = fmt.Errorf("threshold can't be greater than %d", maxThreshold) + errControlKeysNotSortedAndUnique = errors.New("control keys must be sorted and unique") + errUnneededKeys = errors.New("subnets shouldn't have keys if the threshold is 0") ) // UnsignedCreateSubnetTx is an unsigned proposal to create a new subnet type UnsignedCreateSubnetTx struct { - // The VM this tx exists within - vm *VM - - // ID is this transaction's ID - ID ids.ID - // NetworkID is the ID of the network this tx was issued on NetworkID uint32 `serialize:"true"` @@ -47,19 +43,28 @@ type UnsignedCreateSubnetTx struct { type CreateSubnetTx struct { UnsignedCreateSubnetTx `serialize:"true"` + // Signature on the UnsignedCreateSubnetTx's byte repr + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + // The public key that signed this transaction // The transaction fee will be paid from the corresponding account // (ie the account whose ID is [key].Address()) // [key] is non-nil iff this tx is valid key crypto.PublicKey - // Signature on the UnsignedCreateSubnetTx's byte repr - Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + // The VM this tx exists within + vm *VM + + // ID is this transaction's ID + id ids.ID // Byte representation of this transaction (including signature) bytes []byte } +// ID 
returns the ID of this transaction +func (tx *CreateSubnetTx) ID() ids.ID { return tx.id } + // SyntacticVerify nil iff [tx] is syntactically valid. // If [tx] is valid, this method sets [tx.key] func (tx *CreateSubnetTx) SyntacticVerify() error { @@ -68,12 +73,18 @@ func (tx *CreateSubnetTx) SyntacticVerify() error { return errNilTx case tx.key != nil: return nil // Only verify the transaction once - case tx.ID.IsZero(): + case tx.id.IsZero(): return errInvalidID case tx.NetworkID != tx.vm.Ctx.NetworkID: return errWrongNetworkID case tx.Threshold > uint16(len(tx.ControlKeys)): return errThresholdExceedsKeysLen + case tx.Threshold > maxThreshold: + return errThresholdTooHigh + case tx.Threshold == 0 && len(tx.ControlKeys) > 0: + return errUnneededKeys + case !ids.IsSortedAndUniqueShortIDs(tx.ControlKeys): + return errControlKeysNotSortedAndUnique } // Byte representation of the unsigned transaction @@ -104,12 +115,6 @@ func (tx *CreateSubnetTx) SemanticVerify(db database.Database) (func(), error) { if err != nil { return nil, err } - - for _, subnet := range subnets { - if subnet.ID.Equals(tx.ID) { - return nil, fmt.Errorf("there is already a subnet with ID %s", tx.ID) - } - } subnets = append(subnets, tx) // add new subnet if err := tx.vm.putSubnets(db, subnets); err != nil { return nil, err @@ -128,7 +133,12 @@ func (tx *CreateSubnetTx) SemanticVerify(db database.Database) (func(), error) { return nil, err } - return nil, nil + // Register new subnet in validator manager + onAccept := func() { + tx.vm.validators.PutValidatorSet(tx.id, validators.NewSet()) + } + + return onAccept, nil } // Bytes returns the byte representation of [tx] @@ -152,22 +162,31 @@ func (tx *CreateSubnetTx) initialize(vm *VM) error { return err } tx.bytes = txBytes - tx.ID = ids.NewID(hashing.ComputeHash256Array(txBytes)) + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) return nil } +// [controlKeys] must be unique. They will be sorted by this method. 
+// If [controlKeys] is nil, [tx.Controlkeys] will be an empty list. func (vm *VM) newCreateSubnetTx(networkID uint32, nonce uint64, controlKeys []ids.ShortID, threshold uint16, payerKey *crypto.PrivateKeySECP256K1R, ) (*CreateSubnetTx, error) { + tx := &CreateSubnetTx{UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ + NetworkID: networkID, + Nonce: nonce, + ControlKeys: controlKeys, + Threshold: threshold, + }} - tx := &CreateSubnetTx{ - UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ - vm: vm, - NetworkID: networkID, - Nonce: nonce, - ControlKeys: controlKeys, - Threshold: threshold, - }, + if threshold == 0 && len(tx.ControlKeys) > 0 { + return nil, errUnneededKeys + } + + // Sort control keys + ids.SortShortIDs(tx.ControlKeys) + // Ensure control keys are unique + if !ids.IsSortedAndUniqueShortIDs(tx.ControlKeys) { + return nil, errControlKeysNotSortedAndUnique } unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) diff --git a/vms/platformvm/event_heap_test.go b/vms/platformvm/event_heap_test.go index 1a045ad..01dbc44 100644 --- a/vms/platformvm/event_heap_test.go +++ b/vms/platformvm/event_heap_test.go @@ -18,7 +18,7 @@ func TestTxHeapStart(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{1}), // node ID + ids.NewShortID([20]byte{}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID @@ -33,7 +33,7 @@ func TestTxHeapStart(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID @@ -85,7 +85,7 @@ func TestTxHeapStop(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{1}), // node ID + ids.NewShortID([20]byte{}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID @@ -100,7 +100,7 @@ func 
TestTxHeapStop(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID diff --git a/vms/platformvm/export_tx.go b/vms/platformvm/export_tx.go new file mode 100644 index 0000000..6cdc021 --- /dev/null +++ b/vms/platformvm/export_tx.go @@ -0,0 +1,193 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" +) + +var ( + errNoExportOutputs = errors.New("no export outputs") + errOutputsNotSorted = errors.New("outputs not sorted") +) + +// UnsignedExportTx is an unsigned ExportTx +type UnsignedExportTx struct { + // ID of the network this blockchain exists on + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of account paying for this transaction. + Nonce uint64 `serialize:"true"` + + Outs []*ava.TransferableOutput `serialize:"true"` // The outputs of this transaction +} + +// ExportTx exports funds to the AVM +type ExportTx struct { + UnsignedExportTx `serialize:"true"` + + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + vm *VM + id ids.ID + key crypto.PublicKey // public key of transaction signer + bytes []byte +} + +func (tx *ExportTx) initialize(vm *VM) error { + tx.vm = vm + txBytes, err := Codec.Marshal(tx) // byte repr. 
of the signed tx + tx.bytes = txBytes + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) + return err +} + +// ID of this transaction +func (tx *ExportTx) ID() ids.ID { return tx.id } + +// Key returns the public key of the signer of this transaction +// Precondition: tx.Verify() has been called and returned nil +func (tx *ExportTx) Key() crypto.PublicKey { return tx.key } + +// Bytes returns the byte representation of an ExportTx +func (tx *ExportTx) Bytes() []byte { return tx.bytes } + +// InputUTXOs returns an empty set +func (tx *ExportTx) InputUTXOs() ids.Set { return ids.Set{} } + +// SyntacticVerify this transaction is well-formed +// Also populates [tx.Key] with the public key that signed this transaction +func (tx *ExportTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.key != nil: + return nil // Only verify the transaction once + case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network + return errWrongNetworkID + case tx.id.IsZero(): + return errInvalidID + case len(tx.Outs) == 0: + return errNoExportOutputs + } + + for _, out := range tx.Outs { + if err := out.Verify(); err != nil { + return err + } + if !out.AssetID().Equals(tx.vm.ava) { + return errUnknownAsset + } + } + if !ava.IsSortedTransferableOutputs(tx.Outs, Codec) { + return errOutputsNotSorted + } + + unsignedIntf := interface{}(&tx.UnsignedExportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx + if err != nil { + return err + } + + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) + if err != nil { + return err + } + + tx.key = key + return nil +} + +// SemanticVerify this transaction is valid. 
+func (tx *ExportTx) SemanticVerify(db database.Database) error { + if err := tx.SyntacticVerify(); err != nil { + return err + } + + amount := uint64(0) + for _, out := range tx.Outs { + newAmount, err := math.Add64(out.Out.Amount(), amount) + if err != nil { + return err + } + amount = newAmount + } + + accountID := tx.key.Address() + account, err := tx.vm.getAccount(db, accountID) + if err != nil { + return errDBAccount + } + + account, err = account.Remove(amount, tx.Nonce) + if err != nil { + return err + } + return tx.vm.putAccount(db, account) +} + +// Accept this transaction. +func (tx *ExportTx) Accept(batch database.Batch) error { + txID := tx.ID() + + smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm) + defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, Codec) + for i, out := range tx.Outs { + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: uint32(i), + }, + Asset: ava.Asset{ID: out.AssetID()}, + Out: out.Out, + } + if err := state.FundPlatformUTXO(utxo); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} + +func (vm *VM) newExportTx(nonce uint64, networkID uint32, outs []*ava.TransferableOutput, from *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { + ava.SortTransferableOutputs(outs, Codec) + + tx := &ExportTx{UnsignedExportTx: UnsignedExportTx{ + NetworkID: networkID, + Nonce: nonce, + Outs: outs, + }} + + unsignedIntf := interface{}(&tx.UnsignedExportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. 
of unsigned transaction + if err != nil { + return nil, err + } + + sig, err := from.Sign(unsignedBytes) + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index 25f9786..7cdd916 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -16,14 +16,20 @@ var ( // Factory can create new instances of the Platform Chain type Factory struct { - ChainManager chains.Manager - Validators validators.Manager + ChainManager chains.Manager + Validators validators.Manager + StakingEnabled bool + AVA ids.ID + AVM ids.ID } // New returns a new instance of the Platform Chain func (f *Factory) New() interface{} { return &VM{ - ChainManager: f.ChainManager, - Validators: f.Validators, + chainManager: f.ChainManager, + validators: f.Validators, + stakingEnabled: f.StakingEnabled, + ava: f.AVA, + avm: f.AVM, } } diff --git a/vms/platformvm/import_tx.go b/vms/platformvm/import_tx.go new file mode 100644 index 0000000..728a3f7 --- /dev/null +++ b/vms/platformvm/import_tx.go @@ -0,0 +1,268 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs") + errNoImportInputs = errors.New("no import inputs") + errInputsNotSortedUnique = errors.New("inputs not sorted and unique") + errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") + errUnknownAsset = errors.New("unknown asset ID") +) + +// UnsignedImportTx is an unsigned ImportTx +type UnsignedImportTx struct { + // ID of the network this blockchain exists on + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of account paying the transaction fee and receiving the + // inputs of this transaction. + Nonce uint64 `serialize:"true"` + + // Account that this transaction is being sent by. This is needed to ensure the Credentials are replay safe. + Account ids.ShortID `serialize:"true"` + + Ins []*ava.TransferableInput `serialize:"true"` // The inputs to this transaction +} + +// ImportTx imports funds from the AVM +type ImportTx struct { + UnsignedImportTx `serialize:"true"` + + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + Creds []verify.Verifiable `serialize:"true"` // The credentials of this transaction + + vm *VM + id ids.ID + key crypto.PublicKey // public key of transaction signer + unsignedBytes []byte + bytes []byte +} + +func (tx *ImportTx) initialize(vm *VM) error { + tx.vm = vm + txBytes, err := Codec.Marshal(tx) // byte repr. 
of the signed tx + tx.bytes = txBytes + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) + return err +} + +// ID of this transaction +func (tx *ImportTx) ID() ids.ID { return tx.id } + +// Key returns the public key of the signer of this transaction +// Precondition: tx.Verify() has been called and returned nil +func (tx *ImportTx) Key() crypto.PublicKey { return tx.key } + +// UnsignedBytes returns the unsigned byte representation of an ImportTx +func (tx *ImportTx) UnsignedBytes() []byte { return tx.unsignedBytes } + +// Bytes returns the byte representation of an ImportTx +func (tx *ImportTx) Bytes() []byte { return tx.bytes } + +// InputUTXOs returns an empty set +func (tx *ImportTx) InputUTXOs() ids.Set { + set := ids.Set{} + for _, in := range tx.Ins { + set.Add(in.InputID()) + } + return set +} + +// SyntacticVerify this transaction is well-formed +// Also populates [tx.Key] with the public key that signed this transaction +func (tx *ImportTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.key != nil: + return nil // Only verify the transaction once + case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network + return errWrongNetworkID + case tx.id.IsZero(): + return errInvalidID + case len(tx.Ins) == 0: + return errNoImportInputs + case len(tx.Ins) != len(tx.Creds): + return errWrongNumberOfCredentials + } + + for _, in := range tx.Ins { + if err := in.Verify(); err != nil { + return err + } + if !in.AssetID().Equals(tx.vm.ava) { + return errUnknownAsset + } + } + if !ava.IsSortedAndUniqueTransferableInputs(tx.Ins) { + return errInputsNotSortedUnique + } + + for _, cred := range tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + unsignedIntf := interface{}(&tx.UnsignedImportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx + if err != nil { + return err + } + + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, 
tx.Sig[:]) + if err != nil { + return err + } + + if !tx.Account.Equals(key.Address()) { + return errPublicKeySignatureMismatch + } + + tx.key = key + tx.unsignedBytes = unsignedBytes + return nil +} + +// SemanticVerify this transaction is valid. +func (tx *ImportTx) SemanticVerify(db database.Database) error { + if err := tx.SyntacticVerify(); err != nil { + return err + } + + amount := uint64(0) + for _, in := range tx.Ins { + newAmount, err := math.Add64(in.In.Amount(), amount) + if err != nil { + return err + } + amount = newAmount + } + + // Deduct tx fee from payer's account + account, err := tx.vm.getAccount(db, tx.Key().Address()) + if err != nil { + return err + } + account, err = account.Add(amount) + if err != nil { + return err + } + account, err = account.Remove(0, tx.Nonce) + if err != nil { + return err + } + if err := tx.vm.putAccount(db, account); err != nil { + return err + } + + smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm) + defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm) + + state := ava.NewPrefixedState(smDB, Codec) + + for i, in := range tx.Ins { + cred := tx.Creds[i] + + utxoID := in.UTXOID.InputID() + utxo, err := state.AVMUTXO(utxoID) + if err != nil { + return err + } + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + + if err := tx.vm.fx.VerifyTransfer(tx, in.In, cred, utxo.Out); err != nil { + return err + } + } + + return nil +} + +// Accept this transaction. 
+func (tx *ImportTx) Accept(batch database.Batch) error { + smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm) + defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, Codec) + for _, in := range tx.Ins { + utxoID := in.UTXOID.InputID() + if err := state.SpendAVMUTXO(utxoID); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} + +func (vm *VM) newImportTx(nonce uint64, networkID uint32, ins []*ava.TransferableInput, from [][]*crypto.PrivateKeySECP256K1R, to *crypto.PrivateKeySECP256K1R) (*ImportTx, error) { + ava.SortTransferableInputsWithSigners(ins, from) + + tx := &ImportTx{UnsignedImportTx: UnsignedImportTx{ + NetworkID: networkID, + Nonce: nonce, + Account: to.PublicKey().Address(), + Ins: ins, + }} + + unsignedIntf := interface{}(&tx.UnsignedImportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. 
of unsigned transaction + if err != nil { + return nil, err + } + + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range from { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return nil, fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + sig, err := to.SignHash(hash) + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 4842ff1..7ab0cad 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -8,46 +8,34 @@ import ( "errors" "fmt" "net/http" - "net/http/httptest" - - "github.com/gorilla/rpc/v2/json2" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/secp256k1fx" ) var ( - errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") - errParsingID = errors.New("error parsing ID") - errGetAccount = errors.New("error retrieving account information") - errGetAccounts = errors.New("error getting accounts controlled by specified user") - errGetUser = errors.New("error while getting user. 
Does user exist?") - errNoMethodWithGenesis = errors.New("no method was provided but genesis data was provided") - errCreatingTransaction = errors.New("problem while creating transaction") - errNoDestination = errors.New("call is missing field 'stakeDestination'") - errNoSource = errors.New("call is missing field 'stakeSource'") - errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'") + errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") + errParsingID = errors.New("error parsing ID") + errGetAccount = errors.New("error retrieving account information") + errGetAccounts = errors.New("error getting accounts controlled by specified user") + errGetUser = errors.New("error while getting user. Does user exist?") + errNoMethodWithGenesis = errors.New("no method was provided but genesis data was provided") + errCreatingTransaction = errors.New("problem while creating transaction") + errNoDestination = errors.New("call is missing field 'stakeDestination'") + errNoSource = errors.New("call is missing field 'stakeSource'") + errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'") + errNoBlockchainWithAlias = errors.New("there is no blockchain with the specified alias") + errDSCantValidate = errors.New("new blockchain can't be validated by default Subnet") ) -var key *crypto.PrivateKeySECP256K1R - -func init() { - cb58 := formatting.CB58{} - err := cb58.FromString("24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5") - if err != nil { - panic(err) - } - factory := crypto.FactorySECP256K1R{} - pk, err := factory.ToPrivateKey(cb58.Bytes) - if err != nil { - panic(err) - } - key = pk.(*crypto.PrivateKeySECP256K1R) -} - // Service defines the API calls that can be made to the platform chain type Service struct{ vm *VM } @@ -97,7 +85,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon response.Subnets = make([]APISubnet, len(subnets)) for i, subnet := 
range subnets { response.Subnets[i] = APISubnet{ - ID: subnet.ID, + ID: subnet.id, ControlKeys: subnet.ControlKeys, Threshold: json.Uint16(subnet.Threshold), } @@ -108,10 +96,10 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon idsSet := ids.Set{} idsSet.Add(args.IDs...) for _, subnet := range subnets { - if idsSet.Contains(subnet.ID) { + if idsSet.Contains(subnet.id) { response.Subnets = append(response.Subnets, APISubnet{ - ID: subnet.ID, + ID: subnet.id, ControlKeys: subnet.ControlKeys, Threshold: json.Uint16(subnet.Threshold), }, @@ -248,7 +236,7 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators args.SubnetID = DefaultSubnetID } - validators, ok := service.vm.Validators.GetValidatorSet(args.SubnetID) + validators, ok := service.vm.validators.GetValidatorSet(args.SubnetID) if !ok { return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID) } @@ -315,7 +303,7 @@ type ListAccountsReply struct { // ListAccounts lists all of the accounts controlled by [args.Username] func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, reply *ListAccountsReply) error { - service.vm.Ctx.Log.Debug("platform.listAccounts called for user '%s'", args.Username) + service.vm.Ctx.Log.Debug("listAccounts called for user '%s'", args.Username) // db holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -378,7 +366,7 @@ type CreateAccountReply struct { // The account's ID is [privKey].PublicKey().Address(), where [privKey] is a // private key controlled by the user. 
func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs, reply *CreateAccountReply) error { - service.vm.Ctx.Log.Debug("platform.createAccount called for user '%s'", args.Username) + service.vm.Ctx.Log.Debug("createAccount called for user '%s'", args.Username) // userDB holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -432,6 +420,11 @@ type genericTx struct { ****************************************************** */ +// CreateTxResponse is the response from calls to create a transaction +type CreateTxResponse struct { + UnsignedTx formatting.CB58 `json:"unsignedTx"` +} + // AddDefaultSubnetValidatorArgs are the arguments to AddDefaultSubnetValidator type AddDefaultSubnetValidatorArgs struct { APIDefaultSubnetValidator @@ -440,16 +433,10 @@ type AddDefaultSubnetValidatorArgs struct { PayerNonce json.Uint64 `json:"payerNonce"` } -// AddDefaultSubnetValidatorResponse is the response from a call to AddDefaultSubnetValidator -type AddDefaultSubnetValidatorResponse struct { - // The unsigned transaction - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - // AddDefaultSubnetValidator returns an unsigned transaction to add a validator to the default subnet // The returned unsigned transaction should be signed using Sign() -func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *AddDefaultSubnetValidatorResponse) error { - service.vm.Ctx.Log.Debug("platform.AddDefaultSubnetValidator called") +func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("AddDefaultSubnetValidator called") if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID args.ID = service.vm.Ctx.NodeID @@ -490,17 +477,11 @@ type AddDefaultSubnetDelegatorArgs struct { PayerNonce json.Uint64 `json:"payerNonce"` 
} -// AddDefaultSubnetDelegatorResponse is the response from a call to AddDefaultSubnetDelegator -type AddDefaultSubnetDelegatorResponse struct { - // The unsigned transaction - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - // AddDefaultSubnetDelegator returns an unsigned transaction to add a delegator // to the default subnet // The returned unsigned transaction should be signed using Sign() -func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *AddDefaultSubnetDelegatorResponse) error { - service.vm.Ctx.Log.Debug("platform.AddDefaultSubnetDelegator called") +func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("AddDefaultSubnetDelegator called") if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID args.ID = service.vm.Ctx.NodeID @@ -541,15 +522,9 @@ type AddNonDefaultSubnetValidatorArgs struct { PayerNonce json.Uint64 `json:"payerNonce"` } -// AddNonDefaultSubnetValidatorResponse is the response from a call to AddNonDefaultSubnetValidator -type AddNonDefaultSubnetValidatorResponse struct { - // The unsigned transaction - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - // AddNonDefaultSubnetValidator adds a validator to a subnet other than the default subnet // Returns the unsigned transaction, which must be signed using Sign -func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *AddNonDefaultSubnetValidatorResponse) error { +func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *CreateTxResponse) error { tx := addNonDefaultSubnetValidatorTx{ UnsignedAddNonDefaultSubnetValidatorTx: UnsignedAddNonDefaultSubnetValidatorTx{ SubnetValidator: SubnetValidator{ @@ -583,6 +558,83 @@ func (service *Service) AddNonDefaultSubnetValidator(_ 
*http.Request, args *AddN return nil } +// CreateSubnetArgs are the arguments to CreateSubnet +type CreateSubnetArgs struct { + // The ID member of APISubnet is ignored + APISubnet + + // Nonce of the account that pays the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` +} + +// CreateSubnet returns an unsigned transaction to create a new subnet. +// The unsigned transaction must be signed with the key of [args.Payer] +func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("platform.createSubnet called") + + // Create the transaction + tx := CreateSubnetTx{ + UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + ControlKeys: args.ControlKeys, + Threshold: uint16(args.Threshold), + }, + key: nil, + Sig: [65]byte{}, + bytes: nil, + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.UnsignedTx.Bytes = txBytes + return nil +} + +// CreateExportTxArgs are the arguments to CreateExportTx +type CreateExportTxArgs struct { + // ID of the address that will receive the exported funds + To ids.ShortID `json:"to"` + + // Nonce of the account that pays the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` + + Amount json.Uint64 `json:"amount"` +} + +// CreateExportTx returns an unsigned transaction to export funds. 
+// The unsigned transaction must be signed with the key of [args.Payer] +func (service *Service) CreateExportTx(_ *http.Request, args *CreateExportTxArgs, response *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("platform.createExportTx called") + + // Create the transaction + tx := ExportTx{UnsignedExportTx: UnsignedExportTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{args.To}, + }, + }, + }}, + }} + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.UnsignedTx.Bytes = txBytes + return nil +} + /* ****************************************************** **************** Sign/Issue Txs ********************** @@ -606,12 +658,12 @@ type SignArgs struct { // SignResponse is the response from Sign type SignResponse struct { // The signed bytes - Tx formatting.CB58 + Tx formatting.CB58 `json:"tx"` } // Sign [args.bytes] func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { - service.vm.Ctx.Log.Debug("platform.sign called") + service.vm.Ctx.Log.Debug("sign called") // Get the key of the Signer db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -642,8 +694,12 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons genTx.Tx, err = service.signAddNonDefaultSubnetValidatorTx(tx, key) case *CreateSubnetTx: genTx.Tx, err = service.signCreateSubnetTx(tx, key) + case *CreateChainTx: + genTx.Tx, err = service.signCreateChainTx(tx, key) + case *ExportTx: + genTx.Tx, err = service.signExportTx(tx, key) default: - err = errors.New("Could not parse given tx. 
Must be one of: addDefaultSubnetValidatorTx, addNonDefaultSubnetValidatorTx, createSubnetTx") + err = errors.New("Could not parse given tx") } if err != nil { return err @@ -655,7 +711,7 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) @@ -678,7 +734,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) @@ -701,7 +757,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele // Sign [xt] with [key] func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? 
unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) @@ -722,6 +778,29 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva return tx, nil } +// Sign [xt] with [key] +func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { + service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + + // TODO: Should we check if tx is already signed? + unsignedIntf := interface{}(&tx.UnsignedExportTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, fmt.Errorf("error serializing unsigned tx: %v", err) + } + + sig, err := key.Sign(unsignedTxBytes) + if err != nil { + return nil, errors.New("error while signing") + } + if len(sig) != crypto.SECP256K1RSigLen { + return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig)) + } + copy(tx.Sig[:], sig) + + return tx, nil +} + // Signs an unsigned or partially signed addNonDefaultSubnetValidatorTx with [key] // If [key] is a control key for the subnet and there is an empty spot in tx.ControlSigs, signs there // If [key] is a control key for the subnet and there is no empty spot in tx.ControlSigs, signs as payer @@ -729,7 +808,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddNonDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddNonDefaultSubnetValidatorTx called") // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) @@ -770,6 +849,196 @@ func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubn return nil, errors.New("no place for key to sign") } + crypto.SortSECP2561RSigs(tx.ControlSigs) + + return tx, nil +} + +// CreateImportTxArgs are the arguments to CreateImportTx +type CreateImportTxArgs struct { + // Addresses that can be used to sign the import + ImportAddresses []ids.ShortID `json:"importAddresses"` + + // ID of the account that will receive the imported funds, and pay the + // import fee + AccountID ids.ShortID `json:"accountID"` + + // Nonce of the account that pays the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` + + // User that controls the Addresses + Username string `json:"username"` + Password string `json:"password"` +} + +// CreateImportTx returns an unsigned transaction to import funds. +// The unsigned transaction must be signed with the key of [args.Payer] +func (service *Service) CreateImportTx(_ *http.Request, args *CreateImportTxArgs, response *SignResponse) error { + service.vm.Ctx.Log.Debug("platform.createImportTx called") + + // Get the key of the Signer + db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("couldn't get data for user '%s'. 
Does user exist?", args.Username) + } + user := user{db: db} + + kc := secp256k1fx.NewKeychain() + for _, addr := range args.ImportAddresses { + key, err := user.getKey(addr) + if err != nil { + return errDB + } + kc.Add(key) + } + + key, err := user.getKey(args.AccountID) + if err != nil { + return errDB + } + kc.Add(key) + + addrs := ids.Set{} + for _, addr := range args.ImportAddresses { + addrs.Add(ids.NewID(hashing.ComputeHash256Array(addr.Bytes()))) + } + + utxos, err := service.vm.GetAtomicUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) + } + + amount := uint64(0) + time := service.vm.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(service.vm.ava) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amount, input.Amount()) + if err != nil { + return err + } + amount = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: service.vm.ava}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + // Create the transaction + tx := ImportTx{UnsignedImportTx: UnsignedImportTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + Account: args.AccountID, + Ins: ins, + }} + + // TODO: Should we check if tx is already signed? 
+ unsignedIntf := interface{}(&tx.UnsignedImportTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return fmt.Errorf("error serializing unsigned tx: %w", err) + } + hash := hashing.ComputeHash256(unsignedTxBytes) + + sig, err := key.SignHash(hash) + if err != nil { + return errors.New("error while signing") + } + copy(tx.Sig[:], sig) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.Tx.Bytes = txBytes + return nil +} + +// Signs an unsigned or partially signed CreateChainTx with [key] +// If [key] is a control key for the subnet and there is an empty spot in tx.ControlSigs, signs there +// If [key] is a control key for the subnet and there is no empty spot in tx.ControlSigs, signs as payer +// If [key] is not a control key, sign as payer (account controlled by [key] pays the tx fee) +// Sorts tx.ControlSigs before returning +// Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes +func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { + service.vm.Ctx.Log.Debug("signCreateChainTx called") + + // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it + unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, fmt.Errorf("error serializing unsigned tx: %v", err) + } + sig, err := key.Sign(unsignedTxBytes) + if err != nil { + return nil, errors.New("error while signing") + } + if len(sig) != crypto.SECP256K1RSigLen { + return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig)) + } + + // Get information about the subnet + subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID) + if err != nil { + return nil, fmt.Errorf("problem getting subnet information: %v", err) + } + + // Find the location at which [key] should put its signature. + // If [key] is a control key for this subnet and there is an empty spot in tx.ControlSigs, sign there + // If [key] is a control key for this subnet and there is no empty spot in tx.ControlSigs, sign as payer + // If [key] is not a control key, sign as payer (account controlled by [key] pays the tx fee) + controlKeySet := ids.ShortSet{} + controlKeySet.Add(subnet.ControlKeys...) 
+ isControlKey := controlKeySet.Contains(key.PublicKey().Address()) + + payerSigEmpty := tx.PayerSig == [crypto.SECP256K1RSigLen]byte{} // true if no key has signed to pay the tx fee + + if isControlKey && len(tx.ControlSigs) != int(subnet.Threshold) { // Sign as controlSig + tx.ControlSigs = append(tx.ControlSigs, [crypto.SECP256K1RSigLen]byte{}) + copy(tx.ControlSigs[len(tx.ControlSigs)-1][:], sig) + } else if payerSigEmpty { // sign as payer + copy(tx.PayerSig[:], sig) + } else { + return nil, errors.New("no place for key to sign") + } + + crypto.SortSECP2561RSigs(tx.ControlSigs) + return tx, nil } @@ -787,6 +1056,8 @@ type IssueTxResponse struct { // IssueTx issues the transaction [args.Tx] to the network func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *IssueTxResponse) error { + service.vm.Ctx.Log.Debug("issueTx called") + genTx := genericTx{} if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { return err @@ -798,69 +1069,25 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is return fmt.Errorf("error initializing tx: %s", err) } service.vm.unissuedEvents.Push(tx) - defer service.vm.resetTimer() response.TxID = tx.ID() - return nil - case *CreateSubnetTx: + case DecisionTx: if err := tx.initialize(service.vm); err != nil { return fmt.Errorf("error initializing tx: %s", err) } service.vm.unissuedDecisionTxs = append(service.vm.unissuedDecisionTxs, tx) - defer service.vm.resetTimer() - response.TxID = tx.ID - return nil + response.TxID = tx.ID() + case AtomicTx: + if err := tx.initialize(service.vm); err != nil { + return fmt.Errorf("error initializing tx: %s", err) + } + service.vm.unissuedAtomicTxs = append(service.vm.unissuedAtomicTxs, tx) + response.TxID = tx.ID() default: - return errors.New("Could not parse given tx. 
Must be one of: addDefaultSubnetValidatorTx, addDefaultSubnetDelegatorTx, addNonDefaultSubnetValidatorTx, createSubnetTx") - } -} - -/* - ****************************************************** - **************** Create a Subnet ********************* - ****************************************************** - */ - -// CreateSubnetArgs are the arguments to CreateSubnet -type CreateSubnetArgs struct { - // The ID member of APISubnet is ignored - APISubnet - - // Nonce of the account that pays the transaction fee - PayerNonce json.Uint64 `json:"payerNonce"` -} - -// CreateSubnetResponse is the response from a call to CreateSubnet -type CreateSubnetResponse struct { - // Byte representation of the unsigned transaction to create a new subnet - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - -// CreateSubnet returns an unsigned transaction to create a new subnet. -// The unsigned transaction must be signed with the key of [args.Payer] -func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateSubnetResponse) error { - service.vm.Ctx.Log.Debug("platform.createSubnet called") - - // Create the transaction - tx := CreateSubnetTx{ - UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ - NetworkID: service.vm.Ctx.NetworkID, - Nonce: uint64(args.PayerNonce), - ControlKeys: args.ControlKeys, - Threshold: uint16(args.Threshold), - }, - key: nil, - Sig: [65]byte{}, - bytes: nil, + return errors.New("Could not parse given tx. 
Must be a TimedTx, DecisionTx, or AtomicTx") } - txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) - if err != nil { - return errCreatingTransaction - } - - response.UnsignedTx.Bytes = txBytes + service.vm.resetTimer() return nil - } /* @@ -871,6 +1098,9 @@ func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, re // CreateBlockchainArgs is the arguments for calling CreateBlockchain type CreateBlockchainArgs struct { + // ID of Subnet that validates the new blockchain + SubnetID ids.ID `json:"subnetID"` + // ID of the VM the new blockchain is running VMID string `json:"vmID"` @@ -880,81 +1110,68 @@ type CreateBlockchainArgs struct { // Human-readable name for the new blockchain, not necessarily unique Name string `json:"name"` - // To generate the byte representation of the genesis data for this blockchain, - // a POST request with body [GenesisData] is made to the API method whose name is [Method], whose - // endpoint is [Endpoint]. See Platform Chain documentation for more info and examples. 
- Method string `json:"method"` - Endpoint string `json:"endpoint"` - GenesisData interface{} `json:"genesisData"` + // Next unused nonce of the account paying the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` + + // Genesis state of the blockchain being created + GenesisData formatting.CB58 `json:"genesisData"` } -// CreateGenesisReply is the reply from a call to CreateGenesis -type CreateGenesisReply struct { - Bytes formatting.CB58 `json:"bytes"` -} +// CreateBlockchain returns an unsigned transaction to create a new blockchain +// Must be signed with the Subnet's control keys and with a key that pays the transaction fee before issuance +func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("createBlockchain called") -// CreateBlockchainReply is the reply from calling CreateBlockchain -type CreateBlockchainReply struct { - BlockchainID ids.ID `json:"blockchainID"` -} - -// CreateBlockchain issues a transaction to the network to create a new blockchain -func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, reply *CreateBlockchainReply) error { - vmID, err := service.vm.ChainManager.LookupVM(args.VMID) + vmID, err := service.vm.chainManager.LookupVM(args.VMID) if err != nil { return fmt.Errorf("no VM with ID '%s' found", args.VMID) } fxIDs := []ids.ID(nil) for _, fxIDStr := range args.FxIDs { - fxID, err := service.vm.ChainManager.LookupVM(fxIDStr) + fxID, err := service.vm.chainManager.LookupVM(fxIDStr) if err != nil { return fmt.Errorf("no FX with ID '%s' found", fxIDStr) } fxIDs = append(fxIDs, fxID) } - - genesisBytes := []byte(nil) - if args.Method != "" { - buf, err := json2.EncodeClientRequest(args.Method, args.GenesisData) - if err != nil { - return fmt.Errorf("problem building blockchain genesis state: %w", err) - } - - writer := httptest.NewRecorder() - service.vm.Ctx.HTTP.Call( - /*writer=*/ writer, - /*method=*/ 
"POST", - /*base=*/ args.VMID, - /*endpoint=*/ args.Endpoint, - /*body=*/ bytes.NewBuffer(buf), - /*headers=*/ map[string]string{ - "Content-Type": "application/json", - }, - ) - - result := CreateGenesisReply{} - if err := json2.DecodeClientResponse(writer.Body, &result); err != nil { - return fmt.Errorf("problem building blockchain genesis state: %w", err) - } - genesisBytes = result.Bytes.Bytes - } else if args.GenesisData != nil { - return errNoMethodWithGenesis + // If creating AVM instance, use secp256k1fx + // TODO: Document FXs and have user specify them in API call + fxIDsSet := ids.Set{} + fxIDsSet.Add(fxIDs...) + if vmID.Equals(avm.ID) && !fxIDsSet.Contains(secp256k1fx.ID) { + fxIDs = append(fxIDs, secp256k1fx.ID) } - // TODO: Should use the key store to sign this transaction. - // TODO: Nonce shouldn't always be 0 - tx, err := service.vm.newCreateChainTx(0, genesisBytes, vmID, fxIDs, args.Name, service.vm.Ctx.NetworkID, key) + if args.SubnetID.Equals(DefaultSubnetID) { + return errDSCantValidate + } + + tx := CreateChainTx{ + UnsignedCreateChainTx: UnsignedCreateChainTx{ + NetworkID: service.vm.Ctx.NetworkID, + SubnetID: args.SubnetID, + Nonce: uint64(args.PayerNonce), + ChainName: args.Name, + VMID: vmID, + FxIDs: fxIDs, + GenesisData: args.GenesisData.Bytes, + }, + PayerAddress: ids.ShortID{}, + PayerSig: [crypto.SECP256K1RSigLen]byte{}, + ControlSigs: nil, + vm: nil, + id: ids.ID{}, + bytes: nil, + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) if err != nil { - return fmt.Errorf("problem creating transaction: %w", err) + service.vm.Ctx.Log.Error("problem marshaling createChainTx: %v", err) + return errCreatingTransaction } - // Add this tx to the set of unissued txs - service.vm.unissuedDecisionTxs = append(service.vm.unissuedDecisionTxs, tx) - service.vm.resetTimer() - - reply.BlockchainID = tx.ID() - + response.UnsignedTx.Bytes = txBytes return nil } @@ -972,7 +1189,9 @@ type GetBlockchainStatusReply struct { // GetBlockchainStatus gets 
the status of a blockchain with the ID [args.BlockchainID]. func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - _, err := service.vm.ChainManager.Lookup(args.BlockchainID) + service.vm.Ctx.Log.Debug("getBlockchainStatus called") + + _, err := service.vm.chainManager.Lookup(args.BlockchainID) if err == nil { reply.Status = Validating return nil @@ -1026,3 +1245,100 @@ func (service *Service) chainExists(blockID ids.ID, chainID ids.ID) (bool, error return false, nil } + +// ValidatedByArgs is the arguments for calling ValidatedBy +type ValidatedByArgs struct { + // ValidatedBy returns the ID of the Subnet validating the blockchain with this ID + BlockchainID ids.ID `json:"blockchainID"` +} + +// ValidatedByResponse is the reply from calling ValidatedBy +type ValidatedByResponse struct { + // ID of the Subnet validating the specified blockchain + SubnetID ids.ID `json:"subnetID"` +} + +// ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] +func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { + service.vm.Ctx.Log.Debug("validatedBy called") + + chain, err := service.vm.getChain(service.vm.DB, args.BlockchainID) + if err != nil { + return err + } + response.SubnetID = chain.SubnetID + return nil +} + +// ValidatesArgs are the arguments to Validates +type ValidatesArgs struct { + SubnetID ids.ID `json:"subnetID"` +} + +// ValidatesResponse is the response from calling Validates +type ValidatesResponse struct { + BlockchainIDs []ids.ID `json:"blockchainIDs"` +} + +// Validates returns the IDs of the blockchains validated by [args.SubnetID] +func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { + service.vm.Ctx.Log.Debug("validates called") + + // Verify that the Subnet exists + if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != 
nil { + return err + } + // Get the chains that exist + chains, err := service.vm.getChains(service.vm.DB) + if err != nil { + return err + } + // Filter to get the chains validated by the specified Subnet + for _, chain := range chains { + if chain.SubnetID.Equals(args.SubnetID) { + response.BlockchainIDs = append(response.BlockchainIDs, chain.ID()) + } + } + return nil +} + +// APIBlockchain is the representation of a blockchain used in API calls +type APIBlockchain struct { + // Blockchain's ID + ID ids.ID `json:"id"` + + // Blockchain's (non-unique) human-readable name + Name string `json:"name"` + + // Subnet that validates the blockchain + SubnetID ids.ID `json:"subnetID"` + + // Virtual Machine the blockchain runs + VMID ids.ID `json:"vmID"` +} + +// GetBlockchainsResponse is the response from a call to GetBlockchains +type GetBlockchainsResponse struct { + // blockchains that exist + Blockchains []APIBlockchain `json:"blockchains"` +} + +// GetBlockchains returns all of the blockchains that exist +func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response *GetBlockchainsResponse) error { + service.vm.Ctx.Log.Debug("getBlockchains called") + + chains, err := service.vm.getChains(service.vm.DB) + if err != nil { + return fmt.Errorf("couldn't retrieve blockchains: %v", err) + } + + for _, chain := range chains { + response.Blockchains = append(response.Blockchains, APIBlockchain{ + ID: chain.ID(), + Name: chain.ChainName, + SubnetID: chain.SubnetID, + VMID: chain.VMID, + }) + } + return nil +} diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index e1ece30..6efb8d6 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -22,7 +22,7 @@ func TestAddDefaultSubnetValidator(t *testing.T) { } func TestCreateBlockchainArgsParsing(t *testing.T) { - jsonString := `{"vmID":"lol","chainName":"awesome","genesisData":{"key":"value"}}` + jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], 
"name":"awesome", "payerNonce":5, "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` args := CreateBlockchainArgs{} err := json.Unmarshal([]byte(jsonString), &args) if err != nil { diff --git a/vms/platformvm/standard_block.go b/vms/platformvm/standard_block.go index 847f5c9..5f7e300 100644 --- a/vms/platformvm/standard_block.go +++ b/vms/platformvm/standard_block.go @@ -12,6 +12,8 @@ import ( // DecisionTx is an operation that can be decided without being proposed type DecisionTx interface { + ID() ids.ID + initialize(vm *VM) error // Attempt to verify this transaction with the provided state. The provided @@ -47,9 +49,10 @@ func (sb *StandardBlock) initialize(vm *VM, bytes []byte) error { // // This function also sets onAcceptDB database if the verification passes. func (sb *StandardBlock) Verify() error { + parentBlock := sb.parentBlock() // StandardBlock is not a modifier on a proposal block, so its parent must // be a decision. - parent, ok := sb.parentBlock().(decision) + parent, ok := parentBlock.(decision) if !ok { return errInvalidBlockType } diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index 9febf25..9f6d637 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -17,6 +17,7 @@ import ( var ( errEmptyAccountAddress = errors.New("account has empty address") + errNoSuchBlockchain = errors.New("there is no blockchain with the specified ID") ) // TODO: Cache prefixed IDs or use different way of keying into database @@ -146,7 +147,7 @@ func (vm *VM) putAccount(db database.Database, account Account) error { return nil } -// get the blockchains that exist +// get all the blockchains that exist func (vm *VM) getChains(db database.Database) ([]*CreateChainTx, error) { chainsInterface, err := vm.State.Get(db, chainsTypeID, chainsKey) if err != nil { @@ -154,12 +155,26 @@ func (vm *VM) getChains(db database.Database) ([]*CreateChainTx, error) { } chains, ok := chainsInterface.([]*CreateChainTx) if !ok { - 
vm.Ctx.Log.Warn("expected to retrieve []*CreateChainTx from database but got different type") + vm.Ctx.Log.Error("expected to retrieve []*CreateChainTx from database but got different type") return nil, errDBChains } return chains, nil } +// get a blockchain by its ID +func (vm *VM) getChain(db database.Database, ID ids.ID) (*CreateChainTx, error) { + chains, err := vm.getChains(db) + if err != nil { + return nil, err + } + for _, chain := range chains { + if chain.ID().Equals(ID) { + return chain, nil + } + } + return nil, errNoSuchBlockchain +} + // put the list of blockchains that exist to database func (vm *VM) putChains(db database.Database, chains createChainList) error { if err := vm.State.Put(db, chainsTypeID, chainsKey, chains); err != nil { @@ -211,18 +226,18 @@ func (vm *VM) getSubnets(db database.Database) ([]*CreateSubnetTx, error) { } // get the subnet with the specified ID -func (vm *VM) getSubnet(db database.Database, ID ids.ID) (*CreateSubnetTx, error) { +func (vm *VM) getSubnet(db database.Database, id ids.ID) (*CreateSubnetTx, error) { subnets, err := vm.getSubnets(db) if err != nil { return nil, err } for _, subnet := range subnets { - if subnet.ID.Equals(ID) { + if subnet.id.Equals(id) { return subnet, nil } } - return nil, fmt.Errorf("couldn't find subnet with ID %s", ID) + return nil, fmt.Errorf("couldn't find subnet with ID %s", id) } // register each type that we'll be storing in the database diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index cdfd1d7..80b66d2 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -9,6 +9,7 @@ import ( "net/http" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/json" ) @@ -74,11 +75,13 @@ type APIDefaultSubnetValidator struct { // [VMID] is the ID of the VM this chain runs. // [FxIDs] are the IDs of the Fxs the chain supports. 
// [Name] is a human-readable, non-unique name for the chain. +// [SubnetID] is the ID of the subnet that validates the chain type APIChain struct { GenesisData formatting.CB58 `json:"genesisData"` VMID ids.ID `json:"vmID"` FxIDs []ids.ID `json:"fxIDs"` Name string `json:"name"` + SubnetID ids.ID `json:"subnetID"` } // BuildGenesisArgs are the arguments used to create @@ -182,12 +185,15 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl tx := &CreateChainTx{ UnsignedCreateChainTx: UnsignedCreateChainTx{ NetworkID: uint32(args.NetworkID), + SubnetID: chain.SubnetID, Nonce: 0, ChainName: chain.Name, VMID: chain.VMID, FxIDs: chain.FxIDs, GenesisData: chain.GenesisData.Bytes, }, + ControlSigs: [][crypto.SECP256K1RSigLen]byte{}, + PayerSig: [crypto.SECP256K1RSigLen]byte{}, } if err := tx.initialize(nil); err != nil { return err diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go index d1bdc4e..04433ff 100644 --- a/vms/platformvm/static_service_test.go +++ b/vms/platformvm/static_service_test.go @@ -4,114 +4,12 @@ package platformvm import ( - "bytes" "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/json" ) -func TestBuildGenesis(t *testing.T) { - expected := []byte{ - 0x00, 0x00, 0x00, 0x01, 0x01, 0x5c, 0xce, 0x6c, - 0x55, 0xd6, 0xb5, 0x09, 0x84, 0x5c, 0x8c, 0x4e, - 0x30, 0xbe, 0xd9, 0x8d, 0x39, 0x1a, 0xe7, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x05, 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, - 0x09, 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, - 0x8d, 0x39, 0x1a, 0xe7, 0xf0, 0x00, 0x00, 0x00, - 0x00, 0x3a, 0xde, 0x68, 0xb1, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x5c, 
0xce, 0x6c, 0x55, 0xd6, 0xb5, - 0x09, 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, - 0x8d, 0x39, 0x1a, 0xe7, 0xf0, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x13, 0x4d, 0x79, 0x20, 0x46, - 0x61, 0x76, 0x6f, 0x72, 0x69, 0x74, 0x65, 0x20, - 0x45, 0x70, 0x69, 0x73, 0x6f, 0x64, 0x65, 0x53, - 0x6f, 0x75, 0x74, 0x68, 0x20, 0x50, 0x61, 0x72, - 0x6b, 0x20, 0x65, 0x70, 0x69, 0x73, 0x6f, 0x64, - 0x65, 0x20, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x53, - 0x63, 0x6f, 0x74, 0x74, 0x20, 0x54, 0x65, 0x6e, - 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x20, 0x6d, 0x75, - 0x73, 0x74, 0x20, 0x64, 0x69, 0x65, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - } - - addr, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") - genesisData := formatting.CB58{} - genesisData.FromString("CGgRrQ3nws7RRMGyDV59cetJBAwmsmDyCSgku") - vmID, _ := ids.FromString("dkFD29iYU9e9jah2nrnksTWJUy2VVpg5Lnqd7nQqvCJgR26H4") - - account := APIAccount{ - Address: addr, - Balance: 123456789, - } - weight := json.Uint64(987654321) - validator 
:= APIDefaultSubnetValidator{ - APIValidator: APIValidator{ - EndTime: 15, - Weight: &weight, - ID: addr, - }, - Destination: addr, - } - chains := APIChain{ - GenesisData: genesisData, - VMID: vmID, - Name: "My Favorite Episode", - } - - args := BuildGenesisArgs{ - Accounts: []APIAccount{ - account, - }, - Validators: []APIDefaultSubnetValidator{ - validator, - }, - Chains: []APIChain{ - chains, - }, - Time: 5, - } - reply := BuildGenesisReply{} - - ss := StaticService{} - if err := ss.BuildGenesis(nil, &args, &reply); err != nil { - t.Fatal(err) - } - - if !bytes.Equal(reply.Bytes.Bytes, expected) { - t.Fatalf("StaticService.BuildGenesis:\nReturned:\n%s\nExpected:\n%s", - formatting.DumpBytes{Bytes: reply.Bytes.Bytes}, - formatting.DumpBytes{Bytes: expected}) - } -} - func TestBuildGenesisInvalidAccountBalance(t *testing.T) { id, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") account := APIAccount{ diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 20eed9d..70bd903 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -20,12 +20,15 @@ import ( "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/math" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/core" + "github.com/ava-labs/gecko/vms/secp256k1fx" ) const ( @@ -108,6 +111,13 @@ func init() { Codec.RegisterType(&Abort{}), Codec.RegisterType(&Commit{}), Codec.RegisterType(&StandardBlock{}), + Codec.RegisterType(&AtomicBlock{}), + + Codec.RegisterType(&secp256k1fx.TransferInput{}), + Codec.RegisterType(&secp256k1fx.MintOutput{}), + Codec.RegisterType(&secp256k1fx.TransferOutput{}), + 
Codec.RegisterType(&secp256k1fx.MintOperation{}), + Codec.RegisterType(&secp256k1fx.Credential{}), Codec.RegisterType(&UnsignedAddDefaultSubnetValidatorTx{}), Codec.RegisterType(&addDefaultSubnetValidatorTx{}), @@ -124,6 +134,12 @@ func init() { Codec.RegisterType(&UnsignedCreateSubnetTx{}), Codec.RegisterType(&CreateSubnetTx{}), + Codec.RegisterType(&UnsignedImportTx{}), + Codec.RegisterType(&ImportTx{}), + + Codec.RegisterType(&UnsignedExportTx{}), + Codec.RegisterType(&ExportTx{}), + Codec.RegisterType(&advanceTimeTx{}), Codec.RegisterType(&rewardValidatorTx{}), ) @@ -136,10 +152,24 @@ func init() { type VM struct { *core.SnowmanVM - Validators validators.Manager + // Node's validator manager + // Maps Subnets --> nodes in the Subnet HEAD + validators validators.Manager + + // true if the node is being run with staking enabled + stakingEnabled bool // The node's chain manager - ChainManager chains.Manager + chainManager chains.Manager + + // AVA asset ID + ava ids.ID + + // AVM is the ID of the ava virtual machine + avm ids.ID + + fx secp256k1fx.Fx + codec codec.Codec // Used to create and use keys. 
factory crypto.FactorySECP256K1R @@ -154,6 +184,7 @@ type VM struct { // Transactions that have not been put into blocks yet unissuedEvents *EventHeap unissuedDecisionTxs []DecisionTx + unissuedAtomicTxs []AtomicTx // This timer goes off when it is time for the next validator to add/leave the validator set // When it goes off resetTimer() is called, triggering creation of a new block @@ -181,6 +212,12 @@ func (vm *VM) Initialize( return err } + vm.codec = codec.NewDefault() + if err := vm.fx.Initialize(vm); err != nil { + return err + } + vm.codec = Codec + // Register this VM's types with the database so we can get/put structs to/from it vm.registerDBTypes() @@ -265,8 +302,8 @@ func (vm *VM) Initialize( }) go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) - if err := vm.updateValidators(DefaultSubnetID); err != nil { - ctx.Log.Error("failed to initialize the current validator set: %s", err) + if err := vm.initSubnets(); err != nil { + ctx.Log.Error("failed to initialize Subnets: %s", err) return err } @@ -282,27 +319,67 @@ func (vm *VM) Initialize( return nil } -// Create all of the chains that the database says should exist +// Create all chains that exist that this node validates +// Can only be called after initSubnets() func (vm *VM) initBlockchains() error { - vm.Ctx.Log.Verbo("platform chain initializing existing blockchains") - existingChains, err := vm.getChains(vm.DB) + vm.Ctx.Log.Info("initializing blockchains") + blockchains, err := vm.getChains(vm.DB) // get blockchains that exist if err != nil { return err } - for _, chain := range existingChains { // Create each blockchain - chainParams := chains.ChainParameters{ - ID: chain.ID(), - GenesisData: chain.GenesisData, - VMAlias: chain.VMID.String(), - } - for _, fxID := range chain.FxIDs { - chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) - } - vm.ChainManager.CreateChain(chainParams) + + for _, chain := range blockchains { + vm.createChain(chain) } return nil } +// Set the node's 
validator manager to be up to date +func (vm *VM) initSubnets() error { + vm.Ctx.Log.Info("initializing Subnets") + subnets, err := vm.getSubnets(vm.DB) + if err != nil { + return err + } + + if err := vm.updateValidators(DefaultSubnetID); err != nil { + return err + } + + for _, subnet := range subnets { + if err := vm.updateValidators(subnet.id); err != nil { + return err + } + } + + return nil +} + +// Create the blockchain described in [tx], but only if this node is a member of +// the Subnet that validates the chain +func (vm *VM) createChain(tx *CreateChainTx) { + // The validators that compose the Subnet that validates this chain + validators, subnetExists := vm.validators.GetValidatorSet(tx.SubnetID) + if !subnetExists { + vm.Ctx.Log.Error("blockchain %s validated by Subnet %s but couldn't get that Subnet. Blockchain not created") + return + } + if vm.stakingEnabled && !DefaultSubnetID.Equals(tx.SubnetID) && !validators.Contains(vm.Ctx.NodeID) { // This node doesn't validate this blockchain + return + } + + chainParams := chains.ChainParameters{ + ID: tx.ID(), + SubnetID: tx.SubnetID, + GenesisData: tx.GenesisData, + VMAlias: tx.VMID.String(), + } + for _, fxID := range tx.FxIDs { + chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) + } + vm.chainManager.CreateChain(chainParams) +} + // Shutdown this blockchain func (vm *VM) Shutdown() { vm.timer.Stop() @@ -338,6 +415,24 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return blk, vm.DB.Commit() } + // If there is a pending atomic tx, build a block with it + if len(vm.unissuedAtomicTxs) > 0 { + tx := vm.unissuedAtomicTxs[0] + vm.unissuedAtomicTxs = vm.unissuedAtomicTxs[1:] + blk, err := vm.newAtomicBlock(preferredID, tx) + if err != nil { + return nil, err + } + if err := blk.Verify(); err != nil { + vm.resetTimer() + return nil, err + } + if err := vm.State.PutBlock(vm.DB, blk); err != nil { + return nil, err + } + return blk, vm.DB.Commit() + } + // Get the preferred block (which 
we want to build off) preferred, err := vm.getBlock(preferredID) vm.Ctx.Log.AssertNoError(err) @@ -502,9 +597,9 @@ func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { // Check if there is a block ready to be added to consensus // If so, notify the consensus engine func (vm *VM) resetTimer() { - // If there is a pending CreateChainTx, trigger building of a block - // with that transaction - if len(vm.unissuedDecisionTxs) > 0 { + // If there is a pending transaction, trigger building of a block with that + // transaction + if len(vm.unissuedDecisionTxs) > 0 || len(vm.unissuedAtomicTxs) > 0 { vm.SnowmanVM.NotifyBlockReady() return } @@ -584,7 +679,7 @@ func (vm *VM) nextValidatorChangeTime(db database.Database, start bool) time.Tim return earliest } for _, subnet := range subnets { - t := vm.nextSubnetValidatorChangeTime(db, subnet.ID, start) + t := vm.nextSubnetValidatorChangeTime(db, subnet.id, start) if t.Before(earliest) { earliest = t } @@ -614,13 +709,16 @@ func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.I // Returns: // 1) The validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] // 2) The pending validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] +// 3) The IDs of the validators that start validating [subnetID] between now and [timestamp] +// 4) The IDs of the validators that stop validating [subnetID] between now and [timestamp] // Note that this method will not remove validators from the current validator set of the default subnet. // That happens in reward blocks. 
-func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, pending *EventHeap, err error) { +func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, + pending *EventHeap, started, stopped ids.ShortSet, err error) { // remove validators whose end time <= [timestamp] current, err = vm.getCurrentValidators(db, subnetID) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err } if !subnetID.Equals(DefaultSubnetID) { // validators of default subnet removed in rewardValidatorTxs, not here for current.Len() > 0 { @@ -629,11 +727,12 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub break } current.Remove() + stopped.Add(next.Vdr().ID()) } } pending, err = vm.getPendingValidators(db, subnetID) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err } for pending.Len() > 0 { nextTx := pending.Peek() // pending staker with earliest start time @@ -642,8 +741,9 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub } heap.Push(current, nextTx) heap.Pop(pending) + started.Add(nextTx.Vdr().ID()) } - return current, pending, nil + return current, pending, started, stopped, nil } func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { @@ -671,10 +771,12 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { return vdrList } +// update the node's validator manager to contain the current validator set of the given Subnet func (vm *VM) updateValidators(subnetID ids.ID) error { - validatorSet, ok := vm.Validators.GetValidatorSet(subnetID) - if !ok { - return fmt.Errorf("couldn't get the validator sampler of the %s subnet", subnetID) + validatorSet, subnetInitialized := vm.validators.GetValidatorSet(subnetID) + if !subnetInitialized { // validator manager doesn't know about this subnet yet + validatorSet = validators.NewSet() + 
vm.validators.PutValidatorSet(subnetID, validatorSet) } currentValidators, err := vm.getCurrentValidators(vm.DB, subnetID) @@ -686,3 +788,37 @@ func (vm *VM) updateValidators(subnetID ids.ID) error { validatorSet.Set(validators) return nil } + +// Codec ... +func (vm *VM) Codec() codec.Codec { return vm.codec } + +// Clock ... +func (vm *VM) Clock() *timer.Clock { return &vm.clock } + +// Logger ... +func (vm *VM) Logger() logging.Logger { return vm.Ctx.Log } + +// GetAtomicUTXOs returns the utxos that at least one of the provided addresses is +// referenced in. +func (vm *VM) GetAtomicUTXOs(addrs ids.Set) ([]*ava.UTXO, error) { + smDB := vm.Ctx.SharedMemory.GetDatabase(vm.avm) + defer vm.Ctx.SharedMemory.ReleaseDatabase(vm.avm) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxoIDs := ids.Set{} + for _, addr := range addrs.List() { + utxos, _ := state.AVMFunds(addr) + utxoIDs.Add(utxos...) + } + + utxos := []*ava.UTXO{} + for _, utxoID := range utxoIDs.List() { + utxo, err := state.AVMUTXO(utxoID) + if err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + return utxos, nil +} diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 67c0084..e0af19f 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database/memdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" @@ -18,7 +20,10 @@ import ( "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/core" + "github.com/ava-labs/gecko/vms/secp256k1fx" "github.com/ava-labs/gecko/vms/timestampvm" ) @@ -35,16 +40,17 @@ var ( // each key corresponds to an account that has $AVA and a genesis 
validator keys []*crypto.PrivateKeySECP256K1R - // amount all genesis validators stake + // amount all genesis validators stake in defaultVM defaultStakeAmount uint64 - // balance of accounts that exist at genesis + // balance of accounts that exist at genesis in defaultVM defaultBalance = 100 * MinimumStakeAmount // At genesis this account has AVA and is validating the default subnet defaultKey *crypto.PrivateKeySECP256K1R - // non-default subnet that exists at genesis in defaultVM + // non-default Subnet that exists at genesis in defaultVM + // Its controlKeys are keys[0], keys[1], keys[2] testSubnet1 *CreateSubnetTx testSubnet1ControlKeys []*crypto.PrivateKeySECP256K1R ) @@ -112,12 +118,13 @@ func defaultVM() *VM { } vm := &VM{ - SnowmanVM: &core.SnowmanVM{}, + SnowmanVM: &core.SnowmanVM{}, + chainManager: chains.MockManager{}, } defaultSubnet := validators.NewSet() - vm.Validators = validators.NewManager() - vm.Validators.PutValidatorSet(DefaultSubnetID, defaultSubnet) + vm.validators = validators.NewManager() + vm.validators.PutValidatorSet(DefaultSubnetID, defaultSubnet) vm.clock.Set(defaultGenesisTime) db := memdb.New() @@ -132,7 +139,7 @@ func defaultVM() *VM { testNetworkID, 0, []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, // control keys are keys[0], keys[1], keys[2] - 2, // 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet + 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet keys[0], ) if err != nil { @@ -149,7 +156,7 @@ func defaultVM() *VM { &EventHeap{ SortByStartTime: false, }, - tx.ID, + tx.id, ) if err != nil { panic(err) @@ -159,7 +166,7 @@ func defaultVM() *VM { &EventHeap{ SortByStartTime: true, }, - tx.ID, + tx.id, ) if err != nil { panic(err) @@ -433,7 +440,7 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), keys[0].PublicKey().Address(), - 
testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0], @@ -478,7 +485,7 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { commit.Accept() // accept the proposal // Verify that new validator is in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID) + pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.id) if err != nil { t.Fatal(err) } @@ -506,7 +513,7 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), keys[0].PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, keys[0], @@ -551,7 +558,7 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { abort.Accept() // reject the proposal // Verify that new validator NOT in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID) + pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.id) if err != nil { t.Fatal(err) } @@ -761,11 +768,13 @@ func TestCreateChain(t *testing.T) { tx, err := vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, timestampvm.ID, nil, - "name ", + "name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0], ) if err != nil { @@ -802,7 +811,7 @@ func TestCreateChain(t *testing.T) { } // Verify tx fee was deducted - account, err := vm.getAccount(vm.DB, tx.Key().Address()) + account, err := vm.getAccount(vm.DB, tx.PayerAddress) if err != nil { t.Fatal(err) } @@ -881,7 +890,7 @@ func TestCreateSubnet(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), keys[0].PublicKey().Address(), - createSubnetTx.ID, + createSubnetTx.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{keys[0]}, keys[0], @@ -931,7 +940,7 @@ func TestCreateSubnet(t 
// TestAtomicImport exercises the full atomic-import flow: an import tx must be
// rejected while the referenced AVM UTXO is absent from shared memory, and
// accepted (consuming the UTXO) once the UTXO has been funded.
func TestAtomicImport(t *testing.T) {
	vm := defaultVM()

	// Fixture identifiers for the fake AVM chain and the UTXO to import.
	avmID := ids.Empty.Prefix(0)
	utxoID := ava.UTXOID{
		TxID:        ids.Empty.Prefix(1),
		OutputIndex: 1,
	}
	assetID := ids.Empty.Prefix(2)
	amount := uint64(50000)
	key := keys[0]

	// Stand up a fresh shared memory instance backed by an in-memory DB and
	// give this VM its blockchain-scoped view of it.
	sm := &atomic.SharedMemory{}
	sm.Initialize(logging.NoLog{}, memdb.New())

	vm.Ctx.SharedMemory = sm.NewBlockchainSharedMemory(vm.Ctx.ChainID)

	// Build an import tx that spends the (not yet existing) shared-memory UTXO.
	tx, err := vm.newImportTx(
		defaultNonce+1,
		testNetworkID,
		[]*ava.TransferableInput{&ava.TransferableInput{
			UTXOID: utxoID,
			Asset:  ava.Asset{ID: assetID},
			In: &secp256k1fx.TransferInput{
				Amt:   amount,
				Input: secp256k1fx.Input{SigIndices: []uint32{0}},
			},
		}},
		[][]*crypto.PrivateKeySECP256K1R{[]*crypto.PrivateKeySECP256K1R{key}},
		key,
	)
	if err != nil {
		t.Fatal(err)
	}

	vm.Ctx.Lock.Lock()
	defer vm.Ctx.Lock.Unlock()

	vm.ava = assetID
	vm.avm = avmID

	// With no funded UTXO in shared memory, building a block from the queued
	// atomic tx must fail.
	vm.unissuedAtomicTxs = append(vm.unissuedAtomicTxs, tx)
	if _, err := vm.BuildBlock(); err == nil {
		t.Fatalf("should have errored due to missing utxos")
	}

	// Provide the avm UTXO:

	smDB := vm.Ctx.SharedMemory.GetDatabase(avmID)

	utxo := &ava.UTXO{
		UTXOID: utxoID,
		Asset:  ava.Asset{ID: assetID},
		Out: &secp256k1fx.TransferOutput{
			Amt: amount,
			OutputOwners: secp256k1fx.OutputOwners{
				Threshold: 1,
				Addrs:     []ids.ShortID{key.PublicKey().Address()},
			},
		},
	}

	state := ava.NewPrefixedState(smDB, Codec)
	if err := state.FundAVMUTXO(utxo); err != nil {
		t.Fatal(err)
	}

	vm.Ctx.SharedMemory.ReleaseDatabase(avmID)

	// Re-queue the same tx; this time block building and verification succeed.
	vm.unissuedAtomicTxs = append(vm.unissuedAtomicTxs, tx)
	blk, err := vm.BuildBlock()
	if err != nil {
		t.Fatal(err)
	}

	if err := blk.Verify(); err != nil {
		t.Fatal(err)
	}

	blk.Accept()

	// Accepting the block must consume the UTXO from the AVM's shared memory.
	smDB = vm.Ctx.SharedMemory.GetDatabase(avmID)
	defer vm.Ctx.SharedMemory.ReleaseDatabase(avmID)

	state = ava.NewPrefixedState(smDB, vm.codec)
	if _, err := state.AVMUTXO(utxoID.InputID()); err == nil {
		t.Fatalf("shouldn't have been able to read the utxo")
	}
}
// BurnOperation consumes (burns) a property UTXO. It embeds a secp256k1fx
// input, so its Verify comes from the embedded Input.
type BurnOperation struct {
	secp256k1fx.Input `serialize:"true"`
}

// Outs returns nil: burning an output produces no new outputs.
func (op *BurnOperation) Outs() []verify.Verifiable { return nil }
+func (f *Factory) New() interface{} { return &Fx{} } diff --git a/vms/propertyfx/factory_test.go b/vms/propertyfx/factory_test.go new file mode 100644 index 0000000..a22fdd2 --- /dev/null +++ b/vms/propertyfx/factory_test.go @@ -0,0 +1,12 @@ +package propertyfx + +import ( + "testing" +) + +func TestFactory(t *testing.T) { + factory := Factory{} + if fx := factory.New(); fx == nil { + t.Fatalf("Factory.New returned nil") + } +} diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go new file mode 100644 index 0000000..41cd225 --- /dev/null +++ b/vms/propertyfx/fx.go @@ -0,0 +1,109 @@ +package propertyfx + +import ( + "errors" + + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errWrongTxType = errors.New("wrong tx type") + errWrongUTXOType = errors.New("wrong utxo type") + errWrongOperationType = errors.New("wrong operation type") + errWrongCredentialType = errors.New("wrong credential type") + + errNoUTXOs = errors.New("an operation must consume at least one UTXO") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") + + errWrongMintOutput = errors.New("wrong mint output provided") + + errCantTransfer = errors.New("cant transfer with this fx") +) + +// Fx ... +type Fx struct{ secp256k1fx.Fx } + +// Initialize ... +func (fx *Fx) Initialize(vmIntf interface{}) error { + if err := fx.InitializeVM(vmIntf); err != nil { + return err + } + + log := fx.VM.Logger() + log.Debug("Initializing nft fx") + + c := fx.VM.Codec() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&MintOutput{}), + c.RegisterType(&OwnedOutput{}), + c.RegisterType(&MintOperation{}), + c.RegisterType(&BurnOperation{}), + c.RegisterType(&Credential{}), + ) + return errs.Err +} + +// VerifyOperation ... 
+func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { + tx, ok := txIntf.(secp256k1fx.Tx) + switch { + case !ok: + return errWrongTxType + case len(utxosIntf) != 1: + return errWrongNumberOfUTXOs + } + + cred, ok := credIntf.(*Credential) + if !ok { + return errWrongCredentialType + } + + switch op := opIntf.(type) { + case *MintOperation: + return fx.VerifyMintOperation(tx, op, cred, utxosIntf[0]) + case *BurnOperation: + return fx.VerifyTransferOperation(tx, op, cred, utxosIntf[0]) + default: + return errWrongOperationType + } +} + +// VerifyMintOperation ... +func (fx *Fx) VerifyMintOperation(tx secp256k1fx.Tx, op *MintOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*MintOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + switch { + case !out.OutputOwners.Equals(&op.MintOutput.OutputOwners): + return errWrongMintOutput + default: + return fx.Fx.VerifyCredentials(tx, &op.MintInput, &cred.Credential, &out.OutputOwners) + } +} + +// VerifyTransferOperation ... +func (fx *Fx) VerifyTransferOperation(tx secp256k1fx.Tx, op *BurnOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*OwnedOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + return fx.VerifyCredentials(tx, &op.Input, &cred.Credential, &out.OutputOwners) +} + +// VerifyTransfer ... 
// TestFxVerifyMintOperation checks the happy path: a mint operation whose
// recreated mint output has the same owners as the consumed UTXO, with a
// matching credential, passes VerifyOperation.
func TestFxVerifyMintOperation(t *testing.T) {
	vm := secp256k1fx.TestVM{
		CLK:  new(timer.Clock),
		Code: codec.NewDefault(),
		Log:  logging.NoLog{},
	}
	// Fixed clock so signature/timelock checks are deterministic.
	date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC)
	vm.CLK.Set(date)

	fx := Fx{}
	if err := fx.Initialize(&vm); err != nil {
		t.Fatal(err)
	}
	tx := &secp256k1fx.TestTx{
		Bytes: txBytes,
	}
	// sigBytes is a valid signature of txBytes by the owner of addrBytes.
	cred := &Credential{Credential: secp256k1fx.Credential{
		Sigs: [][crypto.SECP256K1RSigLen]byte{
			sigBytes,
		},
	}}
	utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{
		Threshold: 1,
		Addrs: []ids.ShortID{
			ids.NewShortID(addrBytes),
		},
	}}
	// The operation recreates the mint output with identical owners, as the
	// fx requires.
	op := &MintOperation{
		MintInput: secp256k1fx.Input{
			SigIndices: []uint32{0},
		},
		MintOutput: MintOutput{OutputOwners: secp256k1fx.OutputOwners{
			Threshold: 1,
			Addrs: []ids.ShortID{
				ids.NewShortID(addrBytes),
			},
		}},
	}

	utxos := []interface{}{utxo}
	if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil {
		t.Fatal(err)
	}
}
secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to not enough utxos") + } +} + +func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to a bad credential") + } +} + +func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyMintOperationFailingVerification(t 
// TestFxVerifyMintOperationInvalidGroupID checks that a mint operation whose
// recreated mint output does not match the consumed UTXO's owners is rejected.
// NOTE(review): the "GroupID" in the name looks like a leftover from nftfx —
// propertyfx has no group IDs; the failure here is the mismatched mint output.
func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) {
	vm := secp256k1fx.TestVM{
		CLK:  new(timer.Clock),
		Code: codec.NewDefault(),
		Log:  logging.NoLog{},
	}
	// Fixed clock so the check is deterministic.
	date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC)
	vm.CLK.Set(date)

	fx := Fx{}
	if err := fx.Initialize(&vm); err != nil {
		t.Fatal(err)
	}
	tx := &secp256k1fx.TestTx{
		Bytes: txBytes,
	}
	cred := &Credential{Credential: secp256k1fx.Credential{
		Sigs: [][crypto.SECP256K1RSigLen]byte{
			sigBytes,
		},
	}}
	utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{
		Threshold: 1,
		Addrs: []ids.ShortID{
			ids.NewShortID(addrBytes),
		},
	}}
	// op.MintOutput is left zero-valued, so its owners cannot match utxo's.
	op := &MintOperation{
		MintInput: secp256k1fx.Input{
			SigIndices: []uint32{0},
		},
	}

	utxos := []interface{}{utxo}
	if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil {
		t.Fatalf("VerifyOperation should have errored due to an invalid mint output")
	}
}
// TestFxVerifyTransferOperationWrongUTXO checks that a burn operation applied
// to a nil UTXO (i.e. not an *OwnedOutput) is rejected.
func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) {
	vm := secp256k1fx.TestVM{
		CLK:  new(timer.Clock),
		Code: codec.NewDefault(),
		Log:  logging.NoLog{},
	}
	// Fixed clock so the check is deterministic.
	date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC)
	vm.CLK.Set(date)

	fx := Fx{}
	if err := fx.Initialize(&vm); err != nil {
		t.Fatal(err)
	}
	tx := &secp256k1fx.TestTx{
		Bytes: txBytes,
	}
	cred := &Credential{Credential: secp256k1fx.Credential{
		Sigs: [][crypto.SECP256K1RSigLen]byte{
			sigBytes,
		},
	}}
	op := &BurnOperation{Input: secp256k1fx.Input{
		SigIndices: []uint32{0},
	}}

	// A nil entry in the utxo slice must fail the *OwnedOutput type assertion.
	utxos := []interface{}{nil}
	if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil {
		t.Fatalf("VerifyOperation should have errored due to an invalid utxo")
	}
}
// TestFxVerifyTransfer checks that this fx unconditionally rejects transfers:
// VerifyTransfer must error regardless of its arguments.
func TestFxVerifyTransfer(t *testing.T) {
	vm := secp256k1fx.TestVM{
		CLK:  new(timer.Clock),
		Code: codec.NewDefault(),
		Log:  logging.NoLog{},
	}
	// Fixed clock so the check is deterministic.
	date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC)
	vm.CLK.Set(date)

	fx := Fx{}
	if err := fx.Initialize(&vm); err != nil {
		t.Fatal(err)
	}
	if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil {
		t.Fatalf("this Fx doesn't support transfers")
	}
}
( + errNilMintOperation = errors.New("nil mint operation") +) + +// MintOperation ... +type MintOperation struct { + MintInput secp256k1fx.Input `serialize:"true" json:"mintInput"` + MintOutput MintOutput `serialize:"true" json:"mintOutput"` + OwnedOutput OwnedOutput `serialize:"true" json:"ownedOutput"` +} + +// Outs ... +func (op *MintOperation) Outs() []verify.Verifiable { + return []verify.Verifiable{ + &op.MintOutput, + &op.OwnedOutput, + } +} + +// Verify ... +func (op *MintOperation) Verify() error { + switch { + case op == nil: + return errNilMintOperation + default: + return verify.All(&op.MintInput, &op.MintOutput, &op.OwnedOutput) + } +} diff --git a/vms/propertyfx/mint_operation_test.go b/vms/propertyfx/mint_operation_test.go new file mode 100644 index 0000000..dc2f350 --- /dev/null +++ b/vms/propertyfx/mint_operation_test.go @@ -0,0 +1,34 @@ +package propertyfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestMintOperationVerifyNil(t *testing.T) { + op := (*MintOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("nil operation should have failed verification") + } +} + +func TestMintOperationVerifyInvalidOutput(t *testing.T) { + op := MintOperation{ + OwnedOutput: OwnedOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + }, + } + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestMintOperationOuts(t *testing.T) { + op := MintOperation{} + if outs := op.Outs(); len(outs) != 2 { + t.Fatalf("Wrong number of outputs returned") + } +} diff --git a/vms/propertyfx/mint_output.go b/vms/propertyfx/mint_output.go new file mode 100644 index 0000000..46042da --- /dev/null +++ b/vms/propertyfx/mint_output.go @@ -0,0 +1,10 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// MintOutput ... 
+type MintOutput struct { + secp256k1fx.OutputOwners `serialize:"true"` +} diff --git a/vms/propertyfx/owned_output.go b/vms/propertyfx/owned_output.go new file mode 100644 index 0000000..2ddb81c --- /dev/null +++ b/vms/propertyfx/owned_output.go @@ -0,0 +1,10 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// OwnedOutput ... +type OwnedOutput struct { + secp256k1fx.OutputOwners `serialize:"true"` +} diff --git a/vms/secp256k1fx/credential.go b/vms/secp256k1fx/credential.go index 2b1cfc7..6420836 100644 --- a/vms/secp256k1fx/credential.go +++ b/vms/secp256k1fx/credential.go @@ -15,7 +15,7 @@ var ( // Credential ... type Credential struct { - Sigs [][crypto.SECP256K1RSigLen]byte `serialize:"true"` + Sigs [][crypto.SECP256K1RSigLen]byte `serialize:"true" json:"signatures"` } // Verify ... diff --git a/vms/secp256k1fx/factory_test.go b/vms/secp256k1fx/factory_test.go new file mode 100644 index 0000000..54bc901 --- /dev/null +++ b/vms/secp256k1fx/factory_test.go @@ -0,0 +1,15 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
// TestFactory checks that the secp256k1fx Factory produces a non-nil fx
// instance.
func TestFactory(t *testing.T) {
	factory := Factory{}
	if fx := factory.New(); fx == nil {
		t.Fatalf("Factory.New returned nil")
	}
}
func (fx *Fx) Initialize(vmIntf interface{}) error { + if err := fx.InitializeVM(vmIntf); err != nil { + return err + } + + log := fx.VM.Logger() + log.Debug("Initializing secp561k1 fx") + + c := fx.VM.Codec() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&TransferInput{}), + c.RegisterType(&MintOutput{}), + c.RegisterType(&TransferOutput{}), + c.RegisterType(&MintOperation{}), + c.RegisterType(&Credential{}), + ) + return errs.Err +} + +// InitializeVM ... +func (fx *Fx) InitializeVM(vmIntf interface{}) error { vm, ok := vmIntf.(VM) if !ok { return errWrongVMType } - - c := vm.Codec() - c.RegisterType(&MintOutput{}) - c.RegisterType(&TransferOutput{}) - c.RegisterType(&MintInput{}) - c.RegisterType(&TransferInput{}) - c.RegisterType(&Credential{}) - - fx.vm = vm + fx.VM = vm return nil } // VerifyOperation ... -func (fx *Fx) VerifyOperation(txIntf interface{}, utxosIntf, insIntf, credsIntf, outsIntf []interface{}) error { +func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { tx, ok := txIntf.(Tx) if !ok { return errWrongTxType } - - if len(outsIntf) != 2 { - return errWrongNumberOfOutputs - } - if len(utxosIntf) != 1 || len(insIntf) != 1 { - return errWrongNumberOfInputs - } - if len(credsIntf) != 1 { - return errWrongNumberOfCredentials - } - - utxo, ok := utxosIntf[0].(*MintOutput) + op, ok := opIntf.(*MintOperation) if !ok { - return errWrongUTXOType + return errWrongOpType } - in, ok := insIntf[0].(*MintInput) - if !ok { - return errWrongInputType - } - cred, ok := credsIntf[0].(*Credential) + cred, ok := credIntf.(*Credential) if !ok { return errWrongCredentialType } - newMint, ok := outsIntf[0].(*MintOutput) - if !ok { - return errWrongOutputType + if len(utxosIntf) != 1 { + return errWrongNumberOfUTXOs } - newOutput, ok := outsIntf[1].(*TransferOutput) + out, ok := utxosIntf[0].(*MintOutput) if !ok { - return errWrongOutputType + return errWrongUTXOType } - - return fx.verifyOperation(tx, utxo, in, 
cred, newMint, newOutput) + return fx.verifyOperation(tx, op, cred, out) } -func (fx *Fx) verifyOperation(tx Tx, utxo *MintOutput, in *MintInput, cred *Credential, newMint *MintOutput, newOutput *TransferOutput) error { - if err := verify.All(utxo, in, cred, newMint, newOutput); err != nil { +func (fx *Fx) verifyOperation(tx Tx, op *MintOperation, cred *Credential, utxo *MintOutput) error { + if err := verify.All(op, cred, utxo); err != nil { return err } - if !utxo.Equals(&newMint.OutputOwners) { + if !utxo.Equals(&op.MintOutput.OutputOwners) { return errWrongMintCreated } - return fx.verifyCredentials(tx, &utxo.OutputOwners, &in.Input, cred) + return fx.VerifyCredentials(tx, &op.MintInput, cred, &utxo.OutputOwners) } // VerifyTransfer ... -func (fx *Fx) VerifyTransfer(txIntf, utxoIntf, inIntf, credIntf interface{}) error { +func (fx *Fx) VerifyTransfer(txIntf, inIntf, credIntf, utxoIntf interface{}) error { tx, ok := txIntf.(Tx) if !ok { return errWrongTxType } - utxo, ok := utxoIntf.(*TransferOutput) - if !ok { - return errWrongUTXOType - } in, ok := inIntf.(*TransferInput) if !ok { return errWrongInputType @@ -128,15 +119,20 @@ func (fx *Fx) VerifyTransfer(txIntf, utxoIntf, inIntf, credIntf interface{}) err if !ok { return errWrongCredentialType } - return fx.verifyTransfer(tx, utxo, in, cred) + out, ok := utxoIntf.(*TransferOutput) + if !ok { + return errWrongUTXOType + } + return fx.VerifySpend(tx, in, cred, out) } -func (fx *Fx) verifyTransfer(tx Tx, utxo *TransferOutput, in *TransferInput, cred *Credential) error { +// VerifySpend ensures that the utxo can be sent to any address +func (fx *Fx) VerifySpend(tx Tx, in *TransferInput, cred *Credential, utxo *TransferOutput) error { if err := verify.All(utxo, in, cred); err != nil { return err } - clock := fx.vm.Clock() + clock := fx.VM.Clock() switch { case utxo.Amt != in.Amt: return errWrongAmounts @@ -144,10 +140,12 @@ func (fx *Fx) verifyTransfer(tx Tx, utxo *TransferOutput, in *TransferInput, cre return 
errTimelocked } - return fx.verifyCredentials(tx, &utxo.OutputOwners, &in.Input, cred) + return fx.VerifyCredentials(tx, &in.Input, cred, &utxo.OutputOwners) } -func (fx *Fx) verifyCredentials(tx Tx, out *OutputOwners, in *Input, cred *Credential) error { +// VerifyCredentials ensures that the output can be spent by the input with the +// credential. A nil return values means the output can be spent. +func (fx *Fx) VerifyCredentials(tx Tx, in *Input, cred *Credential, out *OutputOwners) error { numSigs := len(in.SigIndices) switch { case out.Threshold < uint32(numSigs): @@ -164,7 +162,7 @@ func (fx *Fx) verifyCredentials(tx Tx, out *OutputOwners, in *Input, cred *Crede for i, index := range in.SigIndices { sig := cred.Sigs[i] - pk, err := fx.secpFactory.RecoverHashPublicKey(txHash, sig[:]) + pk, err := fx.SECPFactory.RecoverHashPublicKey(txHash, sig[:]) if err != nil { return err } diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index 0d0e9d0..18cd7aa 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/components/codec" ) @@ -40,6 +41,8 @@ func (vm *testVM) Codec() codec.Codec { return codec.NewDefault() } func (vm *testVM) Clock() *timer.Clock { return &vm.clock } +func (vm *testVM) Logger() logging.Logger { return logging.NoLog{} } + type testCodec struct{} func (c *testCodec) RegisterStruct(interface{}) {} @@ -98,8 +101,7 @@ func TestFxVerifyTransfer(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err != nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err != nil { t.Fatal(err) } } @@ -134,8 +136,7 @@ func TestFxVerifyTransferNilTx(t *testing.T) { }, } - err := fx.VerifyTransfer(nil, out, in, cred) - if err == nil { + if err := 
fx.VerifyTransfer(nil, in, cred, out); err == nil { t.Fatalf("Should have failed verification due to a nil tx") } } @@ -163,8 +164,7 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, nil, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, nil); err == nil { t.Fatalf("Should have failed verification due to a nil output") } } @@ -196,8 +196,7 @@ func TestFxVerifyTransferNilInput(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, nil, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, nil, cred, out); err == nil { t.Fatalf("Should have failed verification due to a nil input") } } @@ -230,8 +229,7 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, nil) - if err == nil { + if err := fx.VerifyTransfer(tx, in, nil, out); err == nil { t.Fatalf("Should have failed verification due to a nil credential") } } @@ -269,8 +267,7 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to an invalid output") } } @@ -308,8 +305,7 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to different amounts") } } @@ -347,8 +343,7 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to a timelocked output") } } @@ -387,8 +382,7 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to too many 
signers") } } @@ -424,8 +418,7 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { Sigs: [][crypto.SECP256K1RSigLen]byte{}, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to too few signers") } } @@ -464,8 +457,7 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to too mismatched signers") } } @@ -503,8 +495,7 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to an invalid signature") } } @@ -542,8 +533,7 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to a wrong signer") } } @@ -567,40 +557,37 @@ func TestFxVerifyOperation(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - 
ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + err := fx.VerifyOperation(tx, op, cred, utxos) if err != nil { t.Fatal(err) } @@ -622,94 +609,43 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(nil, utxos, ins, creds, outs) + err := fx.VerifyOperation(nil, op, cred, utxos) if err == nil { t.Fatalf("Should have errored due to an invalid tx type") } } -func TestFxVerifyOperationWrongNumberOfOutputs(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - 
SigIndices: []uint32{0}, - }, - } - cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - sigBytes, - }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to a wrong number of outputs") - } -} - -func TestFxVerifyOperationWrongNumberOfInputs(t *testing.T) { +func TestFxVerifyOperationUnknownOperation(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -733,35 +669,15 @@ func TestFxVerifyOperationWrongNumberOfInputs(t *testing.T) { sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, nil, creds, outs) + err := fx.VerifyOperation(tx, nil, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong number of inputs") + t.Fatalf("Should have errored due to an invalid operation type") } } -func TestFxVerifyOperationWrongNumberOfCredentials(t *testing.T) { +func TestFxVerifyOperationUnknownCredential(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -780,40 +696,38 @@ func TestFxVerifyOperationWrongNumberOfCredentials(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, - } - 
mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, } utxos := []interface{}{utxo} - ins := []interface{}{in} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, nil, outs) + err := fx.VerifyOperation(tx, op, nil, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong number of credentials") + t.Fatalf("Should have errored due to an invalid credential type") } } -func TestFxVerifyOperationWrongUTXOType(t *testing.T) { +func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -824,9 +738,7 @@ func TestFxVerifyOperationWrongUTXOType(t *testing.T) { tx := &testTx{ bytes: txBytes, } - utxo := &TransferOutput{ - Amt: 1, - Locktime: 0, + utxo := &MintOutput{ OutputOwners: OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ @@ -834,46 +746,43 @@ func TestFxVerifyOperationWrongUTXOType(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: 
[][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + utxos := []interface{}{utxo, utxo} + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong utxo type") + t.Fatalf("Should have errored due to a wrong number of utxos") } } -func TestFxVerifyOperationWrongInputType(t *testing.T) { +func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -884,18 +793,27 @@ func TestFxVerifyOperationWrongInputType(t *testing.T) { tx := &testTx{ bytes: txBytes, } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + op := &MintOperation{ + MintInput: Input{ + SigIndices: []uint32{0}, + }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, - } - in := &TransferInput{ - Amt: 1, - Input: Input{ - SigIndices: []uint32{0}, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, }, } cred := &Credential{ @@ -903,36 +821,15 @@ func TestFxVerifyOperationWrongInputType(t *testing.T) { sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := 
&TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + utxos := []interface{}{nil} + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong input type") + t.Fatalf("Should have errored due to an invalid utxo type") } } -func TestFxVerifyOperationWrongCredentialType(t *testing.T) { +func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -951,101 +848,40 @@ func TestFxVerifyOperationWrongCredentialType(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, }, }, } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{nil} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to a wrong credential type") - } -} - -func TestFxVerifyOperationWrongMintType(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - 
vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong output type") + t.Fatalf("Should have errored due to a failed verify") } } -func TestFxVerifyOperationWrongTransferType(t *testing.T) { +func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -1064,152 +900,33 @@ func TestFxVerifyOperationWrongTransferType(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{}, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - 
ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong output type") - } -} - -func TestFxVerifyOperationInvalid(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } - cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - sigBytes, - }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 0, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to an invalid output") - } -} - -func TestFxVerifyOperationMismatchedMintOutput(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - 
OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } - cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - sigBytes, - }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Addrs: []ids.ShortID{}, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to a mismatched mint output") + t.Fatalf("Should have errored due to the wrong MintOutput being created") } } diff --git a/vms/secp256k1fx/input.go b/vms/secp256k1fx/input.go index 0a6ce66..1727c2d 100644 --- a/vms/secp256k1fx/input.go +++ b/vms/secp256k1fx/input.go @@ -16,7 +16,7 @@ var ( // Input ... type Input struct { - SigIndices []uint32 `serialize:"true"` + SigIndices []uint32 `serialize:"true" json:"signatureIndices"` } // Verify this input is syntactically valid diff --git a/vms/secp256k1fx/mint_operation.go b/vms/secp256k1fx/mint_operation.go new file mode 100644 index 0000000..2f612f6 --- /dev/null +++ b/vms/secp256k1fx/mint_operation.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errNilMintOperation = errors.New("nil mint operation") +) + +// MintOperation ... +type MintOperation struct { + MintInput Input `serialize:"true" json:"mintInput"` + MintOutput MintOutput `serialize:"true" json:"mintOutput"` + TransferOutput TransferOutput `serialize:"true" json:"transferOutput"` +} + +// Outs ... 
+func (op *MintOperation) Outs() []verify.Verifiable { + return []verify.Verifiable{&op.MintOutput, &op.TransferOutput} +} + +// Verify ... +func (op *MintOperation) Verify() error { + switch { + case op == nil: + return errNilMintOperation + default: + return verify.All(&op.MintInput, &op.MintOutput, &op.TransferOutput) + } +} diff --git a/vms/secp256k1fx/mint_operation_test.go b/vms/secp256k1fx/mint_operation_test.go new file mode 100644 index 0000000..a0a1f20 --- /dev/null +++ b/vms/secp256k1fx/mint_operation_test.go @@ -0,0 +1,45 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestMintOperationVerifyNil(t *testing.T) { + op := (*MintOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("MintOperation.Verify should have returned an error due to an nil operation") + } +} + +func TestMintOperationOuts(t *testing.T) { + op := &MintOperation{ + MintInput: Input{ + SigIndices: []uint32{0}, + }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + }, + }, + } + + outs := op.Outs() + if len(outs) != 2 { + t.Fatalf("Wrong number of outputs") + } +} diff --git a/vms/secp256k1fx/output_owners.go b/vms/secp256k1fx/output_owners.go index 104a7a4..9cefecb 100644 --- a/vms/secp256k1fx/output_owners.go +++ b/vms/secp256k1fx/output_owners.go @@ -18,8 +18,8 @@ var ( // OutputOwners ... 
type OutputOwners struct { - Threshold uint32 `serialize:"true"` - Addrs []ids.ShortID `serialize:"true"` + Threshold uint32 `serialize:"true" json:"threshold"` + Addrs []ids.ShortID `serialize:"true" json:"addresses"` } // Addresses returns the addresses that manage this output diff --git a/vms/secp256k1fx/transfer_input.go b/vms/secp256k1fx/transfer_input.go index 5e44f76..0c2a104 100644 --- a/vms/secp256k1fx/transfer_input.go +++ b/vms/secp256k1fx/transfer_input.go @@ -13,7 +13,7 @@ var ( // TransferInput ... type TransferInput struct { - Amt uint64 `serialize:"true"` + Amt uint64 `serialize:"true" json:"amount"` Input `serialize:"true"` } diff --git a/vms/secp256k1fx/transfer_output.go b/vms/secp256k1fx/transfer_output.go index 69f2f20..6e31ce0 100644 --- a/vms/secp256k1fx/transfer_output.go +++ b/vms/secp256k1fx/transfer_output.go @@ -13,8 +13,8 @@ var ( // TransferOutput ... type TransferOutput struct { - Amt uint64 `serialize:"true"` - Locktime uint64 `serialize:"true"` + Amt uint64 `serialize:"true" json:"amount"` + Locktime uint64 `serialize:"true" json:"locktime"` OutputOwners `serialize:"true"` } diff --git a/vms/secp256k1fx/tx.go b/vms/secp256k1fx/tx.go index e2ac0f7..7ee304b 100644 --- a/vms/secp256k1fx/tx.go +++ b/vms/secp256k1fx/tx.go @@ -7,3 +7,9 @@ package secp256k1fx type Tx interface { UnsignedBytes() []byte } + +// TestTx is a minimal implementation of a Tx +type TestTx struct{ Bytes []byte } + +// UnsignedBytes returns Bytes +func (tx *TestTx) UnsignedBytes() []byte { return tx.Bytes } diff --git a/vms/secp256k1fx/vm.go b/vms/secp256k1fx/vm.go index 1083af7..bb59166 100644 --- a/vms/secp256k1fx/vm.go +++ b/vms/secp256k1fx/vm.go @@ -4,6 +4,7 @@ package secp256k1fx import ( + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/components/codec" ) @@ -12,4 +13,21 @@ import ( type VM interface { Codec() codec.Codec Clock() *timer.Clock + Logger() logging.Logger } + +// TestVM is a minimal 
implementation of a VM +type TestVM struct { + CLK *timer.Clock + Code codec.Codec + Log logging.Logger +} + +// Clock returns CLK +func (vm *TestVM) Clock() *timer.Clock { return vm.CLK } + +// Codec returns Code +func (vm *TestVM) Codec() codec.Codec { return vm.Code } + +// Logger returns Log +func (vm *TestVM) Logger() logging.Logger { return vm.Log } diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go index 08c63ab..721fa44 100644 --- a/vms/spchainvm/consensus_benchmark_test.go +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -86,7 +86,7 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { Context: ctx, Validators: vdrs, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: uint64(beacons.Len()/2 + 1), Sender: &sender, }, Blocked: blocked, @@ -217,7 +217,7 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { Context: ctx, Validators: vdrs, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: uint64(beacons.Len()/2 + 1), Sender: &sender, }, Blocked: blocked, diff --git a/vms/spchainvm/key_chain.go b/vms/spchainvm/keychain.go similarity index 77% rename from vms/spchainvm/key_chain.go rename to vms/spchainvm/keychain.go index 1984d8f..00eee64 100644 --- a/vms/spchainvm/key_chain.go +++ b/vms/spchainvm/keychain.go @@ -20,31 +20,42 @@ var ( // Keychain is a collection of keys that can be used to spend utxos type Keychain struct { + factory crypto.FactorySECP256K1R networkID uint32 chainID ids.ID - // This can be used to iterate over. However, it should not be modified externally. + + // Key: The id of a private key (namely, [privKey].PublicKey().Address().Key()) + // Value: The index in Keys of that private key keyMap map[[20]byte]int - Addrs ids.ShortSet - Keys []*crypto.PrivateKeySECP256K1R + + // Each element is an address controlled by a key in [Keys] + // This can be used to iterate over. It should not be modified externally. 
+ Addrs ids.ShortSet + + // List of keys this keychain manages + // This can be used to iterate over. It should not be modified externally. + Keys []*crypto.PrivateKeySECP256K1R } // NewKeychain creates a new keychain for a chain func NewKeychain(networkID uint32, chainID ids.ID) *Keychain { return &Keychain{ - chainID: chainID, - keyMap: make(map[[20]byte]int), + networkID: networkID, + chainID: chainID, + keyMap: make(map[[20]byte]int), } } // New returns a newly generated private key -func (kc *Keychain) New() *crypto.PrivateKeySECP256K1R { - factory := &crypto.FactorySECP256K1R{} - - skGen, _ := factory.NewPrivateKey() +func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { + skGen, err := kc.factory.NewPrivateKey() + if err != nil { + return nil, err + } sk := skGen.(*crypto.PrivateKeySECP256K1R) kc.Add(sk) - return sk + return sk, nil } // Add a new key to the key chain diff --git a/vms/spdagvm/keychain.go b/vms/spdagvm/keychain.go index 1c4b8a2..7142a9d 100644 --- a/vms/spdagvm/keychain.go +++ b/vms/spdagvm/keychain.go @@ -20,29 +20,35 @@ var ( // Keychain is a collection of keys that can be used to spend utxos type Keychain struct { - // This can be used to iterate over. However, it should not be modified externally. + factory crypto.FactorySECP256K1R + networkID uint32 + chainID ids.ID + // Key: The id of a private key (namely, [privKey].PublicKey().Address().Key()) // Value: The index in Keys of that private key keyMap map[[20]byte]int // Each element is an address controlled by a key in [Keys] + // This can be used to iterate over. It should not be modified externally. Addrs ids.ShortSet // List of keys this keychain manages + // This can be used to iterate over. It should not be modified externally. 
Keys []*crypto.PrivateKeySECP256K1R } -func (kc *Keychain) init() { - if kc.keyMap == nil { - kc.keyMap = make(map[[20]byte]int) +// NewKeychain creates a new keychain for a chain +func NewKeychain(networkID uint32, chainID ids.ID) *Keychain { + return &Keychain{ + networkID: networkID, + chainID: chainID, + keyMap: make(map[[20]byte]int), } } // Add a new key to the key chain. // If [key] is already in the keychain, does nothing. func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { - kc.init() - addr := key.PublicKey().Address() // The address controlled by [key] addrHash := addr.Key() if _, ok := kc.keyMap[addrHash]; !ok { @@ -53,9 +59,7 @@ func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { } // Get a key from the keychain. If the key is unknown, the second return value is false. -func (kc Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { - kc.init() - +func (kc *Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { if i, ok := kc.keyMap[id.Key()]; ok { return kc.Keys[i], true } @@ -63,15 +67,13 @@ func (kc Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { } // Addresses returns a list of addresses this keychain manages -func (kc Keychain) Addresses() ids.ShortSet { return kc.Addrs } +func (kc *Keychain) Addresses() ids.ShortSet { return kc.Addrs } // New returns a newly generated private key. 
// The key and the address it controls are added to // [kc.Keys] and [kc.Addrs], respectively func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { - factory := crypto.FactorySECP256K1R{} - - skGen, err := factory.NewPrivateKey() + skGen, err := kc.factory.NewPrivateKey() if err != nil { return nil, err } @@ -84,8 +86,8 @@ func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { // Spend attempts to create an input func (kc *Keychain) Spend(utxo *UTXO, time uint64) (Input, *InputSigner, error) { builder := Builder{ - NetworkID: 0, - ChainID: ids.Empty, + NetworkID: kc.networkID, + ChainID: kc.chainID, } switch out := utxo.Out().(type) { @@ -148,8 +150,8 @@ func (kc *Keychain) GetSigsAndKeys(addresses []ids.ShortID, threshold int) ([]*S sigs := []*Sig{} keys := []*crypto.PrivateKeySECP256K1R{} builder := Builder{ - NetworkID: 0, - ChainID: ids.Empty, + NetworkID: kc.networkID, + ChainID: kc.chainID, } for i := uint32(0); i < uint32(len(addresses)) && len(keys) < threshold; i++ { if key, exists := kc.Get(addresses[i]); exists { diff --git a/vms/spdagvm/vm.go b/vms/spdagvm/vm.go index f5cd820..b873185 100644 --- a/vms/spdagvm/vm.go +++ b/vms/spdagvm/vm.go @@ -315,7 +315,7 @@ func (vm *VM) Send(amount uint64, assetID, toAddrStr string, fromPKs []string) ( } // Add all of the keys in [fromPKs] to a keychain - keychain := Keychain{} + keychain := NewKeychain(vm.ctx.NetworkID, vm.ctx.ChainID) factory := crypto.FactorySECP256K1R{} cb58 := formatting.CB58{} for _, fpk := range fromPKs { @@ -359,7 +359,7 @@ func (vm *VM) Send(amount uint64, assetID, toAddrStr string, fromPKs []string) ( ChainID: vm.ctx.ChainID, } currentTime := vm.clock.Unix() - tx, err := builder.NewTxFromUTXOs(&keychain, utxos, amount, vm.TxFee, 0, 1, toAddrs, outAddr, currentTime) + tx, err := builder.NewTxFromUTXOs(keychain, utxos, amount, vm.TxFee, 0, 1, toAddrs, outAddr, currentTime) if err != nil { return "", err } diff --git a/vms/spdagvm/vm_test.go b/vms/spdagvm/vm_test.go index 
03a3b8e..75e8d1c 100644 --- a/vms/spdagvm/vm_test.go +++ b/vms/spdagvm/vm_test.go @@ -4,7 +4,6 @@ package spdagvm import ( - "math" "testing" "github.com/ava-labs/gecko/database/memdb" @@ -724,87 +723,3 @@ func TestIssuePendingDependency(t *testing.T) { ctx.Lock.Unlock() } - -// Ensure that an error is returned if an address will have more than -// math.MaxUint64 NanoAva -func TestTxOutputOverflow(t *testing.T) { - // Modify the genesis tx so the address controlled by [keys[0]] - // has math.MaxUint64 NanoAva - initBalances := map[string]uint64{ - keys[0].PublicKey().Address().String(): math.MaxUint64, - keys[1].PublicKey().Address().String(): defaultInitBalance, - keys[2].PublicKey().Address().String(): defaultInitBalance, - } - genesisTx := GenesisTx(initBalances) - - // Initialize vm - vmDB := memdb.New() - msgChan := make(chan common.Message, 1) - ctx.Lock.Lock() - vm := &VM{} - vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) - vm.batchTimeout = 0 - - // Create a new private key - testPK, err := vm.CreateKey() - if err != nil { - t.Fatalf("CreateKey(): %s", err) - } - // Get the address controlled by the new private key - testAddr, err := vm.GetAddress(testPK) - if err != nil { - t.Fatalf("GetAddress(%q): %s", testPK, err) - } - - // Get string repr. 
of keys[0] - cb58 := formatting.CB58{Bytes: keys[0].Bytes()} - privKey0 := cb58.String() - - // Send [math.MaxUint64 - txFeeTest] NanoAva from [privKey0] to [testAddr] - _, err = vm.Send(math.MaxUint64-txFeeTest, "", testAddr, []string{privKey0}) - if err != nil { - t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", uint64(math.MaxUint64-txFeeTest), "", testAddr, []string{privKey0}, err) - } - ctx.Lock.Unlock() - - if msg := <-msgChan; msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - // Accept the transaction - ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 1 { - t.Fatalf("PendingTxs(): returned wrong number of transactions - expected: %d; returned: %d", 1, len(txs)) - } else { - txs[0].Accept() - } - if txs := vm.PendingTxs(); len(txs) != 0 { - t.Fatalf("PendingTxs(): there should not have been any pending transactions") - } - - // Ensure that [testAddr] has balance [math.MaxUint64 - txFeeTest] - if testbal, err := vm.GetBalance(testAddr, ""); err != nil { - t.Fatalf("GetBalance(%q): %s", testAddr, err) - } else if testbal != math.MaxUint64-txFeeTest { - t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", testAddr, "", uint64(math.MaxUint64-txFeeTest), testbal) - } - - // Ensure that the address controlled by [keys[0]] has balance 0 - if testbal, err := vm.GetBalance(keys[0].PublicKey().Address().String(), ""); err != nil { - t.Fatalf("GetBalance(%q): %s", keys[0].PublicKey().Address().String(), err) - } else if testbal != 0 { - // Balance of new address should be 0 - t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", keys[0].PublicKey().Address().String(), "", 0, testbal) - } - - cb58.Bytes = keys[1].Bytes() - privKey1 := cb58.String() - - // Send [2*txFeeTest+1] NanoAva from [key1Str] to [testAddr] - // Should overflow [testAddr] by 1 - _, err = vm.Send(2*txFeeTest+1, "", testAddr, []string{privKey1}) - if err == errOutputOverflow { - t.Fatalf("Expected output to overflow but it did not") - } - ctx.Lock.Unlock() -} diff --git 
a/xputtest/README.md b/xputtest/README.md new file mode 100644 index 0000000..3b42682 --- /dev/null +++ b/xputtest/README.md @@ -0,0 +1,17 @@ +# Throughput testing + +A throughput test is run in two parts. First a network must be running with at least one of the nodes running a throughput server. To start a throughput server when running a node the `--xput-server-enabled=true` flag should be passed. + +An example single node network can be started with: + +```sh +./build/ava --public-ip=127.0.0.1 --xput-server-port=9652 --xput-server-enabled=true --db-enabled=false --staking-tls-enabled=false --snow-sample-size=1 --snow-quorum-size=1 +``` + +The throughput node can be started with: + +```sh +./build/xputtest --ip=127.0.0.1 --port=9652 --sp-chain +``` + +The above example will run a throughput test on the simple payment chain. Tests can be run with `--sp-dag` to run throughput tests on the simple payment dag. Tests can be run with `--avm` to run throughput tests on the AVA virtual machine. 
diff --git a/xputtest/avm.go b/xputtest/avm.go index 4da2c88..16d0315 100644 --- a/xputtest/avm.go +++ b/xputtest/avm.go @@ -8,30 +8,23 @@ import ( "github.com/ava-labs/salticidae-go" - "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/networking" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/avm" "github.com/ava-labs/gecko/vms/platformvm" "github.com/ava-labs/gecko/xputtest/avmwallet" ) -func (n *network) benchmarkAVM(genesisState *platformvm.Genesis) { - avmChain := genesisState.Chains[0] - n.log.AssertTrue(avmChain.ChainName == "AVM", "wrong chain name") - genesisBytes := avmChain.GenesisData - - wallet, err := avmwallet.NewWallet(n.networkID, avmChain.ID(), config.AvaTxFee) +// benchmark an instance of the avm +func (n *network) benchmarkAVM(chain *platformvm.CreateChainTx) { + genesisBytes := chain.GenesisData + wallet, err := avmwallet.NewWallet(n.log, n.networkID, chain.ID(), config.AvaTxFee) n.log.AssertNoError(err) - cb58 := formatting.CB58{} - keyStr := genesis.Keys[config.Key] - n.log.AssertNoError(cb58.FromString(keyStr)) factory := crypto.FactorySECP256K1R{} - sk, err := factory.ToPrivateKey(cb58.Bytes) + sk, err := factory.ToPrivateKey(config.Key) n.log.AssertNoError(err) wallet.ImportKey(sk.(*crypto.PrivateKeySECP256K1R)) @@ -56,9 +49,10 @@ func (n *network) benchmarkAVM(genesisState *platformvm.Genesis) { n.log.AssertNoError(wallet.GenerateTxs(config.NumTxs, assetID)) - go n.log.RecoverAndPanic(func() { n.IssueAVM(avmChain.ID(), assetID, wallet) }) + go n.log.RecoverAndPanic(func() { n.IssueAVM(chain.ID(), assetID, wallet) }) } +// issue transactions to the instance of the avm funded by the provided wallet func (n *network) IssueAVM(chainID ids.ID, assetID ids.ID, wallet *avmwallet.Wallet) { n.log.Debug("Issuing with %d", wallet.Balance(assetID)) numAccepted := 0 @@ -66,11 +60,15 @@ func (n 
*network) IssueAVM(chainID ids.ID, assetID ids.ID, wallet *avmwallet.Wal n.decided <- ids.ID{} + // track the last second of transactions meter := timer.TimedMeter{Duration: time.Second} for d := range n.decided { + // display the TPS every 1000 txs if numAccepted%1000 == 0 { n.log.Info("TPS: %d", meter.Ticks()) } + + // d is the ID of the tx that was accepted if !d.IsZero() { meter.Tick() n.log.Debug("Finalized %s", d) @@ -78,10 +76,12 @@ func (n *network) IssueAVM(chainID ids.ID, assetID ids.ID, wallet *avmwallet.Wal numPending-- } + // Issue all the txs that we can right now for numPending < config.MaxOutstandingTxs && wallet.Balance(assetID) > 0 && numAccepted+numPending < config.NumTxs { tx := wallet.NextTx() n.log.AssertTrue(tx != nil, "Tx creation failed") + // send the IssueTx message it, err := n.build.IssueTx(chainID, tx.Bytes()) n.log.AssertNoError(err) ds := it.DataStream() @@ -97,8 +97,11 @@ func (n *network) IssueAVM(chainID ids.ID, assetID ids.ID, wallet *avmwallet.Wal numPending++ n.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted) } + + // If we are done issuing txs, return from the function if numAccepted+numPending >= config.NumTxs { n.log.Info("done with test") + net.ec.Stop() return } } diff --git a/xputtest/avmwallet/utxo_set.go b/xputtest/avmwallet/utxo_set.go index 2e3e49e..c346969 100644 --- a/xputtest/avmwallet/utxo_set.go +++ b/xputtest/avmwallet/utxo_set.go @@ -8,18 +8,22 @@ import ( "strings" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/components/ava" ) // UTXOSet ... type UTXOSet struct { - // This can be used to iterate over. However, it should not be modified externally. + // Key: The id of a UTXO + // Value: The index in UTXOs of that UTXO utxoMap map[[32]byte]int - UTXOs []*avm.UTXO + + // List of UTXOs in this set + // This can be used to iterate over. It should not be modified externally. + UTXOs []*ava.UTXO } // Put ... 
-func (us *UTXOSet) Put(utxo *avm.UTXO) { +func (us *UTXOSet) Put(utxo *ava.UTXO) { if us.utxoMap == nil { us.utxoMap = make(map[[32]byte]int) } @@ -32,7 +36,7 @@ func (us *UTXOSet) Put(utxo *avm.UTXO) { } // Get ... -func (us *UTXOSet) Get(id ids.ID) *avm.UTXO { +func (us *UTXOSet) Get(id ids.ID) *ava.UTXO { if us.utxoMap == nil { return nil } @@ -44,7 +48,7 @@ func (us *UTXOSet) Get(id ids.ID) *avm.UTXO { } // Remove ... -func (us *UTXOSet) Remove(id ids.ID) *avm.UTXO { +func (us *UTXOSet) Remove(id ids.ID) *ava.UTXO { i, ok := us.utxoMap[id.Key()] if !ok { return nil diff --git a/xputtest/avmwallet/wallet.go b/xputtest/avmwallet/wallet.go index 226765b..ef01eb0 100644 --- a/xputtest/avmwallet/wallet.go +++ b/xputtest/avmwallet/wallet.go @@ -13,10 +13,12 @@ import ( "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/math" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -25,35 +27,42 @@ import ( type Wallet struct { networkID uint32 chainID ids.ID - clock timer.Clock - codec codec.Codec - keychain *secp256k1fx.Keychain // Mapping from public address to the SigningKeys - utxoSet *UTXOSet // Mapping from utxoIDs to UTXOs - balance map[[32]byte]uint64 - txFee uint64 + + clock timer.Clock + codec codec.Codec + log logging.Logger + + keychain *secp256k1fx.Keychain // Mapping from public address to the SigningKeys + utxoSet *UTXOSet // Mapping from utxoIDs to UTXOs + + balance map[[32]byte]uint64 + txFee uint64 txsSent int32 txs []*avm.Tx } // NewWallet returns a new Wallet -func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) (*Wallet, error) { +func NewWallet(log logging.Logger, networkID uint32, chainID 
ids.ID, txFee uint64) (*Wallet, error) { c := codec.NewDefault() errs := wrappers.Errs{} errs.Add( c.RegisterType(&avm.BaseTx{}), c.RegisterType(&avm.CreateAssetTx{}), c.RegisterType(&avm.OperationTx{}), + c.RegisterType(&avm.ImportTx{}), + c.RegisterType(&avm.ExportTx{}), + c.RegisterType(&secp256k1fx.TransferInput{}), c.RegisterType(&secp256k1fx.MintOutput{}), c.RegisterType(&secp256k1fx.TransferOutput{}), - c.RegisterType(&secp256k1fx.MintInput{}), - c.RegisterType(&secp256k1fx.TransferInput{}), + c.RegisterType(&secp256k1fx.MintOperation{}), c.RegisterType(&secp256k1fx.Credential{}), ) return &Wallet{ networkID: networkID, chainID: chainID, codec: c, + log: log, keychain: secp256k1fx.NewKeychain(), utxoSet: &UTXOSet{}, balance: make(map[[32]byte]uint64), @@ -86,8 +95,8 @@ func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keychain.Add(sk) // AddUTXO adds a new UTXO to this wallet if this wallet may spend it // The UTXO's output must be an OutputPayment -func (w *Wallet) AddUTXO(utxo *avm.UTXO) { - out, ok := utxo.Out.(avm.FxTransferable) +func (w *Wallet) AddUTXO(utxo *ava.UTXO) { + out, ok := utxo.Out.(ava.Transferable) if !ok { return } @@ -107,7 +116,7 @@ func (w *Wallet) RemoveUTXO(utxoID ids.ID) { assetID := utxo.AssetID() assetKey := assetID.Key() - newBalance := w.balance[assetKey] - utxo.Out.(avm.FxTransferable).Amount() + newBalance := w.balance[assetKey] - utxo.Out.(ava.Transferable).Amount() if newBalance == 0 { delete(w.balance, assetKey) } else { @@ -129,7 +138,7 @@ func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, destAddr ids.ShortID) ( amountSpent := uint64(0) time := w.clock.Unix() - ins := []*avm.TransferableInput{} + ins := []*ava.TransferableInput{} keys := [][]*crypto.PrivateKeySECP256K1R{} for _, utxo := range w.utxoSet.UTXOs { if !utxo.AssetID().Equals(assetID) { @@ -139,7 +148,7 @@ func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, destAddr ids.ShortID) ( if err != nil { continue } - input, ok := 
inputIntf.(avm.FxTransferable) + input, ok := inputIntf.(ava.Transferable) if !ok { continue } @@ -149,9 +158,9 @@ func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, destAddr ids.ShortID) ( } amountSpent = spent - in := &avm.TransferableInput{ + in := &ava.TransferableInput{ UTXOID: utxo.UTXOID, - Asset: avm.Asset{ID: assetID}, + Asset: ava.Asset{ID: assetID}, In: input, } @@ -167,43 +176,39 @@ func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, destAddr ids.ShortID) ( return nil, errors.New("insufficient funds") } - avm.SortTransferableInputsWithSigners(ins, keys) + ava.SortTransferableInputsWithSigners(ins, keys) - outs := []*avm.TransferableOutput{ - &avm.TransferableOutput{ - Asset: avm.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amount, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{destAddr}, - }, + outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{destAddr}, }, }, - } + }} if amountSpent > amount { changeAddr, err := w.GetAddress() if err != nil { return nil, err } - outs = append(outs, - &avm.TransferableOutput{ - Asset: avm.Asset{ID: assetID}, - Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - amount, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{changeAddr}, - }, + outs = append(outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - amount, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, }, }, - ) + }) } - avm.SortTransferableOutputs(outs, w.codec) + ava.SortTransferableOutputs(outs, w.codec) tx := &avm.Tx{ UnsignedTx: &avm.BaseTx{ @@ -232,7 +237,7 @@ func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, 
destAddr ids.ShortID) ( cred.Sigs = append(cred.Sigs, fixedSig) } - tx.Creds = append(tx.Creds, &avm.Credential{Cred: cred}) + tx.Creds = append(tx.Creds, cred) } b, err := w.codec.Marshal(tx) @@ -249,10 +254,17 @@ func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, destAddr ids.ShortID) ( // Generate them all on test initialization so tx generation is not bottleneck // in testing func (w *Wallet) GenerateTxs(numTxs int, assetID ids.ID) error { + w.log.Info("Generating %d transactions", numTxs) + ctx := snow.DefaultContextTest() ctx.NetworkID = w.networkID ctx.ChainID = w.chainID + frequency := numTxs / 50 + if frequency > 1000 { + frequency = 1000 + } + w.txs = make([]*avm.Tx, numTxs) for i := 0; i < numTxs; i++ { addr, err := w.CreateAddress() @@ -271,8 +283,14 @@ func (w *Wallet) GenerateTxs(numTxs int, assetID ids.ID) error { w.AddUTXO(utxo) } + if numGenerated := i + 1; numGenerated%frequency == 0 { + w.log.Info("Generated %d out of %d transactions", numGenerated, numTxs) + } + w.txs[i] = tx } + + w.log.Info("Finished generating %d transactions", numTxs) return nil } diff --git a/xputtest/avmwallet/wallet_test.go b/xputtest/avmwallet/wallet_test.go index b237a04..ddd8bc3 100644 --- a/xputtest/avmwallet/wallet_test.go +++ b/xputtest/avmwallet/wallet_test.go @@ -6,20 +6,16 @@ package avmwallet import ( "testing" - "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/utils/formatting" - "github.com/ava-labs/gecko/utils/units" - "github.com/ava-labs/gecko/vms/avm" - "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/secp256k1fx" ) func TestNewWallet(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if 
err != nil { t.Fatal(err) } @@ -30,7 +26,7 @@ func TestNewWallet(t *testing.T) { func TestWalletGetAddress(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } @@ -46,7 +42,7 @@ func TestWalletGetAddress(t *testing.T) { func TestWalletGetMultipleAddresses(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } @@ -66,7 +62,7 @@ func TestWalletGetMultipleAddresses(t *testing.T) { func TestWalletEmptyBalance(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } @@ -78,14 +74,14 @@ func TestWalletEmptyBalance(t *testing.T) { func TestWalletAddUTXO(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } - utxo := &avm.UTXO{ - UTXOID: avm.UTXOID{TxID: ids.Empty.Prefix(0)}, - Asset: avm.Asset{ID: ids.Empty.Prefix(1)}, + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{TxID: ids.Empty.Prefix(0)}, + Asset: ava.Asset{ID: ids.Empty.Prefix(1)}, Out: &secp256k1fx.TransferOutput{ Amt: 1000, }, @@ -100,14 +96,14 @@ func TestWalletAddUTXO(t *testing.T) { func TestWalletAddInvalidUTXO(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } - utxo := &avm.UTXO{ - UTXOID: avm.UTXOID{TxID: ids.Empty.Prefix(0)}, - Asset: avm.Asset{ID: ids.Empty.Prefix(1)}, + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{TxID: ids.Empty.Prefix(0)}, + Asset: 
ava.Asset{ID: ids.Empty.Prefix(1)}, } w.AddUTXO(utxo) @@ -119,7 +115,7 @@ func TestWalletAddInvalidUTXO(t *testing.T) { func TestWalletCreateTx(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } @@ -130,9 +126,9 @@ func TestWalletCreateTx(t *testing.T) { if err != nil { t.Fatal(err) } - utxo := &avm.UTXO{ - UTXOID: avm.UTXOID{TxID: ids.Empty.Prefix(1)}, - Asset: avm.Asset{ID: assetID}, + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{TxID: ids.Empty.Prefix(1)}, + Asset: ava.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ Amt: 1000, OutputOwners: secp256k1fx.OutputOwners{ @@ -169,7 +165,7 @@ func TestWalletCreateTx(t *testing.T) { func TestWalletImportKey(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } @@ -194,7 +190,7 @@ func TestWalletImportKey(t *testing.T) { func TestWalletString(t *testing.T) { chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - w, err := NewWallet(12345, chainID, 0) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) if err != nil { t.Fatal(err) } @@ -220,96 +216,3 @@ func TestWalletString(t *testing.T) { t.Fatalf("got:\n%s\n\nexpected:\n%s", str, expected) } } - -func TestWalletWithGenesis(t *testing.T) { - ctx := snow.DefaultContextTest() - ctx.NetworkID = 12345 - ctx.ChainID = ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) - - w, err := NewWallet(ctx.NetworkID, ctx.ChainID, 0) - if err != nil { - t.Fatal(err) - } - - b58 := formatting.CB58{} - factory := crypto.FactorySECP256K1R{} - for _, key := range genesis.Keys { - if err := b58.FromString(key); err != nil { - t.Fatal(err) - } - - sk, err := factory.ToPrivateKey(b58.Bytes) - if err != nil { - t.Fatal(err) - } - 
w.ImportKey(sk.(*crypto.PrivateKeySECP256K1R)) - } - - platformGenesisBytes := genesis.Genesis(genesis.LocalID) - genesisState := &platformvm.Genesis{} - err = platformvm.Codec.Unmarshal(platformGenesisBytes, genesisState) - if err != nil { - t.Fatal(err) - } - if err := genesisState.Initialize(); err != nil { - t.Fatal(err) - } - - avmChain := genesisState.Chains[0] - if name := avmChain.ChainName; name != "AVM" { - t.Fatalf("wrong chain name") - } - genesisBytes := avmChain.GenesisData - - genesis := avm.Genesis{} - if err := w.codec.Unmarshal(genesisBytes, &genesis); err != nil { - t.Fatal(err) - } - - genesisTx := genesis.Txs[0] - tx := avm.Tx{ - UnsignedTx: &genesisTx.CreateAssetTx, - } - txBytes, err := w.codec.Marshal(&tx) - if err != nil { - t.Fatal(err) - } - tx.Initialize(txBytes) - - for _, utxo := range tx.UTXOs() { - w.AddUTXO(utxo) - } - - assetID := genesisTx.ID() - - if balance := w.Balance(assetID); balance != 45*units.MegaAva { - t.Fatalf("balance of %d was expected but got %d", 45*units.MegaAva, balance) - } - - for i := 1; i <= 1000; i++ { - addr, err := w.CreateAddress() - if err != nil { - t.Fatal(err) - } - - tx, err := w.CreateTx(assetID, uint64(i), addr) - if err != nil { - t.Fatal(err) - } - - if err := tx.SyntacticVerify(ctx, w.codec, 1); err != nil { - t.Fatal(err) - } - - for _, utxoID := range tx.InputUTXOs() { - w.RemoveUTXO(utxoID.InputID()) - } - for _, utxo := range tx.UTXOs() { - w.AddUTXO(utxo) - } - - if balance := w.Balance(assetID); balance != 45*units.MegaAva { - t.Fatalf("balance of %d was expected but got %d", 45*units.MegaAva, balance) - } - } -} diff --git a/xputtest/chainwallet/wallet.go b/xputtest/chainwallet/wallet.go index f52f34f..c96e115 100644 --- a/xputtest/chainwallet/wallet.go +++ b/xputtest/chainwallet/wallet.go @@ -10,13 +10,17 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" 
"github.com/ava-labs/gecko/vms/spchainvm" ) // Wallet is a holder for keys and UTXOs. type Wallet struct { - networkID uint32 - chainID ids.ID + networkID uint32 + chainID ids.ID + + log logging.Logger + keychain *spchainvm.Keychain // Mapping from public address to the SigningKeys accountSet map[[20]byte]spchainvm.Account // Mapping from addresses to accounts balance uint64 @@ -25,17 +29,24 @@ type Wallet struct { } // NewWallet ... -func NewWallet(networkID uint32, chainID ids.ID) Wallet { - return Wallet{ +func NewWallet(log logging.Logger, networkID uint32, chainID ids.ID) *Wallet { + return &Wallet{ networkID: networkID, chainID: chainID, + log: log, keychain: spchainvm.NewKeychain(networkID, chainID), accountSet: make(map[[20]byte]spchainvm.Account), } } // CreateAddress returns a brand new address! Ready to receive funds! -func (w *Wallet) CreateAddress() ids.ShortID { return w.keychain.New().PublicKey().Address() } +func (w *Wallet) CreateAddress() (ids.ShortID, error) { + sk, err := w.keychain.New() + if err != nil { + return ids.ShortID{}, err + } + return sk.PublicKey().Address(), nil +} // ImportKey imports a private key into this wallet func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keychain.Add(sk) } @@ -56,65 +67,36 @@ func (w *Wallet) Balance() uint64 { return w.balance } // Generate them all on test initialization so tx generation is not bottleneck // in testing func (w *Wallet) GenerateTxs(numTxs int) error { + w.log.Info("Generating %d transactions", numTxs) + ctx := snow.DefaultContextTest() ctx.NetworkID = w.networkID ctx.ChainID = w.chainID + frequency := numTxs / 50 + if frequency > 1000 { + frequency = 1000 + } + w.txs = make([]*spchainvm.Tx, numTxs) - for i := 0; i < numTxs; { - for _, account := range w.accountSet { - if i >= numTxs { - break - } - - accountID := account.ID() - key, exists := w.keychain.Get(accountID) - if !exists { - return errors.New("missing account") - } - - amount := uint64(1) - tx, sendAccount, err 
:= account.CreateTx(amount, accountID, ctx, key) - if err != nil { - return err - } - - newAccount, err := sendAccount.Receive(tx, ctx) - if err != nil { - return err - } - w.accountSet[accountID.Key()] = newAccount - w.txs[i] = tx - i++ + for i := range w.txs { + tx, err := w.MakeTx() + if err != nil { + return err } + + if numGenerated := i + 1; numGenerated%frequency == 0 { + w.log.Info("Generated %d out of %d transactions", numGenerated, numTxs) + } + + w.txs[i] = tx } + + w.log.Info("Finished generating %d transactions", numTxs) + return nil } -/* -// Send a new transaction -func (w *Wallet) Send() *spchainvm.Tx { - ctx := snow.DefaultContextTest() - ctx.NetworkID = w.networkID - ctx.ChainID = w.chainID - - for _, account := range w.accountSet { - accountID := account.ID() - if key, exists := w.keychain.Get(accountID); exists { - amount := uint64(1) - if tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key); err == nil { - newAccount, err := sendAccount.Receive(tx, ctx) - if err == nil { - w.accountSet[accountID.Key()] = newAccount - return tx - } - } - } - } - return nil -} -*/ - // NextTx returns the next tx to be sent as part of xput test func (w *Wallet) NextTx() *spchainvm.Tx { if len(w.txs) == 0 { @@ -125,6 +107,35 @@ func (w *Wallet) NextTx() *spchainvm.Tx { return tx } +// MakeTx creates a new transaction and update the state to after the tx is accepted +func (w *Wallet) MakeTx() (*spchainvm.Tx, error) { + ctx := snow.DefaultContextTest() + ctx.NetworkID = w.networkID + ctx.ChainID = w.chainID + + for _, account := range w.accountSet { + accountID := account.ID() + key, exists := w.keychain.Get(accountID) + if !exists { + return nil, errors.New("missing account") + } + + amount := uint64(1) + tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key) + if err != nil { + continue + } + + newAccount, err := sendAccount.Receive(tx, ctx) + if err != nil { + return nil, err + } + w.accountSet[accountID.Key()] = newAccount + return 
tx, nil + } + return nil, errors.New("empty") +} + func (w Wallet) String() string { return fmt.Sprintf( "Keychain:\n"+ diff --git a/xputtest/config.go b/xputtest/config.go index f84a324..0efe70e 100644 --- a/xputtest/config.go +++ b/xputtest/config.go @@ -23,8 +23,10 @@ type Config struct { LoggingConfig logging.Config // Key describes which key to use to issue transactions + Key []byte + // NumTxs describes the number of transactions to issue // MaxOutstandingTxs describes how many txs to pipeline - Key, NumTxs, MaxOutstandingTxs int - Chain ChainType + NumTxs, MaxOutstandingTxs int + Chain ChainType } diff --git a/xputtest/dagwallet/utxo_set.go b/xputtest/dagwallet/utxo_set.go index ef222f2..142a47c 100644 --- a/xputtest/dagwallet/utxo_set.go +++ b/xputtest/dagwallet/utxo_set.go @@ -13,9 +13,13 @@ import ( // UTXOSet ... type UTXOSet struct { - // This can be used to iterate over. However, it should not be modified externally. + // Key: The id of a UTXO + // Value: The index in UTXOs of that UTXO utxoMap map[[32]byte]int - UTXOs []*spdagvm.UTXO + + // List of UTXOs in this set + // This can be used to iterate over. It should not be modified externally. + UTXOs []*spdagvm.UTXO } // Put ... 
diff --git a/xputtest/dagwallet/wallet.go b/xputtest/dagwallet/wallet.go index edc2434..a10be8f 100644 --- a/xputtest/dagwallet/wallet.go +++ b/xputtest/dagwallet/wallet.go @@ -25,11 +25,11 @@ type Wallet struct { } // NewWallet returns a new Wallet -func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) Wallet { - return Wallet{ +func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) *Wallet { + return &Wallet{ networkID: networkID, chainID: chainID, - keychain: &spdagvm.Keychain{}, + keychain: spdagvm.NewKeychain(networkID, chainID), utxoSet: &UTXOSet{}, txFee: txFee, } diff --git a/xputtest/main.go b/xputtest/main.go index 28e7544..3ed3236 100644 --- a/xputtest/main.go +++ b/xputtest/main.go @@ -16,7 +16,9 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" ) func main() { @@ -24,6 +26,7 @@ func main() { fmt.Printf("Failed to parse arguments: %s\n", err) } + // set up logging config.LoggingConfig.Directory = path.Join(config.LoggingConfig.Directory, "client") log, err := logging.New(config.LoggingConfig) if err != nil { @@ -33,20 +36,18 @@ func main() { defer log.Stop() + // initialize state based on CLI args net.log = log crypto.EnableCrypto = config.EnableCrypto net.decided = make(chan ids.ID, config.MaxOutstandingTxs) - if config.Key >= len(genesis.Keys) || config.Key < 0 { - log.Fatal("Unknown key specified") - return - } - + // Init the network log.AssertNoError(net.Initialize()) net.net.Start() defer net.net.Stop() + // connect to the node serr := salticidae.NewError() remoteIP := salticidae.NewNetAddrFromIPPortString(config.RemoteIP.String(), true, &serr) if code := serr.GetCode(); code != 0 { @@ -60,6 +61,7 @@ func main() { return } + // start a cpu profile file, gErr := 
os.Create("cpu_client.profile") log.AssertNoError(gErr) gErr = pprof.StartCPUProfile(file) @@ -71,22 +73,25 @@ func main() { net.networkID = config.NetworkID - platformGenesisBytes := genesis.Genesis(net.networkID) - genesisState := &platformvm.Genesis{} - log.AssertNoError(platformvm.Codec.Unmarshal(platformGenesisBytes, genesisState)) - log.AssertNoError(genesisState.Initialize()) - + // start the benchmark we want to run switch config.Chain { case spChain: - net.benchmarkSPChain(genesisState) + tx, err := genesis.VMGenesis(config.NetworkID, spchainvm.ID) + log.AssertNoError(err) + net.benchmarkSPChain(tx) case spDAG: - net.benchmarkSPDAG(genesisState) + tx, err := genesis.VMGenesis(config.NetworkID, spdagvm.ID) + log.AssertNoError(err) + net.benchmarkSPDAG(tx) case avmDAG: - net.benchmarkAVM(genesisState) + tx, err := genesis.VMGenesis(config.NetworkID, avm.ID) + log.AssertNoError(err) + net.benchmarkAVM(tx) default: log.Fatal("did not specify whether to test dag or chain. Exiting") return } + // start processing network messages net.ec.Dispatch() } diff --git a/xputtest/params.go b/xputtest/params.go index f3017dc..2b107a9 100644 --- a/xputtest/params.go +++ b/xputtest/params.go @@ -6,11 +6,13 @@ package main import ( "flag" "fmt" + "os" stdnet "net" "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/wrappers" ) @@ -28,43 +30,51 @@ func init() { loggingConfig, err := logging.DefaultConfig() errs.Add(err) + fs := flag.NewFlagSet("xputtest", flag.ContinueOnError) + // NetworkID: - networkName := flag.String("network-id", genesis.LocalName, "Network ID this node will connect to") + networkName := fs.String("network-id", genesis.LocalName, "Network ID this node will connect to") // Ava fees: - flag.Uint64Var(&config.AvaTxFee, "ava-tx-fee", 
0, "Ava transaction fee, in $nAva") // Assertions: - flag.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") + fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") // Crypto: - flag.BoolVar(&config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") + fs.BoolVar(&config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") // Remote Server: - ip := flag.String("ip", "127.0.0.1", "IP address of the remote server socket") - port := flag.Uint("port", 9652, "Port of the remote server socket") + ip := fs.String("ip", "127.0.0.1", "IP address of the remote server socket") + port := fs.Uint("port", 9652, "Port of the remote server socket") // Logging: - logsDir := flag.String("log-dir", "", "Logging directory for Ava") - logLevel := flag.String("log-level", "info", "The log level. Should be one of {all, debug, info, warn, error, fatal, off}") + logsDir := fs.String("log-dir", "", "Logging directory for Ava") + logLevel := fs.String("log-level", "info", "The log level. 
Should be one of {all, debug, info, warn, error, fatal, off}") // Test Variables: - spchain := flag.Bool("sp-chain", false, "Execute simple payment chain transactions") - spdag := flag.Bool("sp-dag", false, "Execute simple payment dag transactions") - avm := flag.Bool("avm", false, "Execute avm transactions") - flag.IntVar(&config.Key, "key", 0, "Index of the genesis key list to use") - flag.IntVar(&config.NumTxs, "num-txs", 25000, "Total number of transaction to issue") - flag.IntVar(&config.MaxOutstandingTxs, "max-outstanding", 1000, "Maximum number of transactions to leave outstanding") + spchain := fs.Bool("sp-chain", false, "Execute simple payment chain transactions") + spdag := fs.Bool("sp-dag", false, "Execute simple payment dag transactions") + avm := fs.Bool("avm", false, "Execute avm transactions") + key := fs.String("key", "", "Funded key in the genesis key to use to issue transactions") + fs.IntVar(&config.NumTxs, "num-txs", 25000, "Total number of transaction to issue") + fs.IntVar(&config.MaxOutstandingTxs, "max-outstanding", 1000, "Maximum number of transactions to leave outstanding") - flag.Parse() + ferr := fs.Parse(os.Args[1:]) + + if ferr == flag.ErrHelp { + // display usage/help text and exit successfully + os.Exit(0) + } + + if ferr != nil { + // other type of error occurred when parsing args + os.Exit(2) + } networkID, err := genesis.NetworkID(*networkName) errs.Add(err) - if networkID != genesis.LocalID { - errs.Add(fmt.Errorf("the only supported networkID is: %s", genesis.LocalName)) - } - config.NetworkID = networkID // Remote: @@ -77,6 +87,10 @@ func init() { Port: uint16(*port), } + cb58 := formatting.CB58{} + errs.Add(cb58.FromString(*key)) + config.Key = cb58.Bytes + // Logging: if *logsDir != "" { loggingConfig.Directory = *logsDir diff --git a/xputtest/spchain.go b/xputtest/spchain.go index 2b92353..816f893 100644 --- a/xputtest/spchain.go +++ b/xputtest/spchain.go @@ -8,37 +8,29 @@ import ( "github.com/ava-labs/salticidae-go" - 
"github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/networking" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/platformvm" "github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/xputtest/chainwallet" ) -func (n *network) benchmarkSPChain(genesisState *platformvm.Genesis) { - spchainChain := genesisState.Chains[3] - n.log.AssertTrue(spchainChain.ChainName == "Simple Chain Payments", "wrong chain name") - genesisBytes := spchainChain.GenesisData - - wallet := chainwallet.NewWallet(n.networkID, spchainChain.ID()) +// benchmark an instance of the sp chain +func (n *network) benchmarkSPChain(chain *platformvm.CreateChainTx) { + genesisBytes := chain.GenesisData + wallet := chainwallet.NewWallet(n.log, n.networkID, chain.ID()) codec := spchainvm.Codec{} accounts, err := codec.UnmarshalGenesis(genesisBytes) n.log.AssertNoError(err) - cb58 := formatting.CB58{} factory := crypto.FactorySECP256K1R{} - for _, keyStr := range genesis.Keys { - n.log.AssertNoError(cb58.FromString(keyStr)) - skGen, err := factory.ToPrivateKey(cb58.Bytes) - n.log.AssertNoError(err) - sk := skGen.(*crypto.PrivateKeySECP256K1R) - wallet.ImportKey(sk) - } + skGen, err := factory.ToPrivateKey(config.Key) + n.log.AssertNoError(err) + sk := skGen.(*crypto.PrivateKeySECP256K1R) + wallet.ImportKey(sk) for _, account := range accounts { wallet.AddAccount(account) @@ -47,10 +39,10 @@ func (n *network) benchmarkSPChain(genesisState *platformvm.Genesis) { n.log.AssertNoError(wallet.GenerateTxs(config.NumTxs)) - go n.log.RecoverAndPanic(func() { n.IssueSPChain(spchainChain.ID(), wallet) }) + go n.log.RecoverAndPanic(func() { n.IssueSPChain(chain.ID(), wallet) }) } -func (n *network) IssueSPChain(chainID ids.ID, wallet chainwallet.Wallet) { +func (n *network) IssueSPChain(chainID ids.ID, wallet *chainwallet.Wallet) { n.log.Debug("Issuing with 
%d", wallet.Balance()) numAccepted := 0 numPending := 0 @@ -90,6 +82,7 @@ func (n *network) IssueSPChain(chainID ids.ID, wallet chainwallet.Wallet) { } if numAccepted+numPending >= config.NumTxs { n.log.Info("done with test") + net.ec.Stop() return } } diff --git a/xputtest/spdag.go b/xputtest/spdag.go index db96b8b..a0196c4 100644 --- a/xputtest/spdag.go +++ b/xputtest/spdag.go @@ -8,33 +8,26 @@ import ( "github.com/ava-labs/salticidae-go" - "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/networking" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/platformvm" "github.com/ava-labs/gecko/vms/spdagvm" "github.com/ava-labs/gecko/xputtest/dagwallet" ) -func (n *network) benchmarkSPDAG(genesisState *platformvm.Genesis) { - spDAGChain := genesisState.Chains[2] - n.log.AssertTrue(spDAGChain.ChainName == "Simple DAG Payments", "wrong chain name") - genesisBytes := spDAGChain.GenesisData - - wallet := dagwallet.NewWallet(n.networkID, spDAGChain.ID(), config.AvaTxFee) +// benchmark an instance of the sp dag +func (n *network) benchmarkSPDAG(chain *platformvm.CreateChainTx) { + genesisBytes := chain.GenesisData + wallet := dagwallet.NewWallet(n.networkID, chain.ID(), config.AvaTxFee) codec := spdagvm.Codec{} tx, err := codec.UnmarshalTx(genesisBytes) n.log.AssertNoError(err) - cb58 := formatting.CB58{} - keyStr := genesis.Keys[config.Key] - n.log.AssertNoError(cb58.FromString(keyStr)) factory := crypto.FactorySECP256K1R{} - skGen, err := factory.ToPrivateKey(cb58.Bytes) + skGen, err := factory.ToPrivateKey(config.Key) n.log.AssertNoError(err) sk := skGen.(*crypto.PrivateKeySECP256K1R) wallet.ImportKey(sk) @@ -43,10 +36,11 @@ func (n *network) benchmarkSPDAG(genesisState *platformvm.Genesis) { wallet.AddUTXO(utxo) } - go n.log.RecoverAndPanic(func() { n.IssueSPDAG(spDAGChain.ID(), wallet) }) + go 
n.log.RecoverAndPanic(func() { n.IssueSPDAG(chain.ID(), wallet) }) } -func (n *network) IssueSPDAG(chainID ids.ID, wallet dagwallet.Wallet) { +// issue transactions to the instance of the spdag funded by the provided wallet +func (n *network) IssueSPDAG(chainID ids.ID, wallet *dagwallet.Wallet) { n.log.Info("starting avalanche benchmark") pending := make(map[[32]byte]*spdagvm.Tx) canAdd := []*spdagvm.Tx{}