diff --git a/.gitignore b/.gitignore index b2daa0e..f8a87c9 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,9 @@ awscpu # Output of the go coverage tool, specifically when used with LiteIDE *.out +# ignore GoLand metafiles directory +.idea/ + *logs/ .vscode* @@ -26,6 +29,7 @@ awscpu *.ava db* + *cpu[0-9]* *mem[0-9]* *lock[0-9]* @@ -42,4 +46,9 @@ db* bin/ build/ -*/mykey/staker.* \ No newline at end of file +keys/staker.* + +!*.go +!*.proto + +plugins/ \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index b24a23f..13ef1e7 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,10 +6,6 @@ RUN apt-get update && apt-get install -y libssl-dev libuv1-dev curl cmake RUN mkdir -p /go/src/github.com/ava-labs -# Because downloading ethereum takes long it is done separately, so that the docker -# layer, when cached can be re-used -RUN go get -t -v github.com/ava-labs/go-ethereum - WORKDIR $GOPATH/src/github.com/ava-labs/ COPY . gecko diff --git a/README.md b/README.md index 128a7be..645dbaf 100644 --- a/README.md +++ b/README.md @@ -51,7 +51,7 @@ The Gecko binary, named `ava`, is in the `build` directory. - Build the docker image of latest gecko branch by `scripts/build_image.sh`. - Check the built image by `docker image ls`, you should see some image tagged `gecko-xxxxxxxx`, where `xxxxxxxx` is the commit id of the Gecko source it was built from. -- Test Gecko by `docker run -ti -p 9651:9651 gecko-xxxxxxxx /gecko/build/ava +- Test Gecko by `docker run -ti -p 9650:9650 -p 9651:9651 gecko-xxxxxxxx /gecko/build/ava --public-ip=127.0.0.1 --snow-sample-size=1 --snow-quorum-size=1 --staking-tls-enabled=false`. (For a production deployment, you may want to extend the docker image with required credentials for staking and TLS.) 
diff --git a/api/keystore/service.go b/api/keystore/service.go index 604ec5d..7ca34b5 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -11,6 +11,7 @@ import ( "github.com/gorilla/rpc/v2" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/encdb" "github.com/ava-labs/gecko/database/prefixdb" @@ -21,10 +22,30 @@ import ( "github.com/ava-labs/gecko/vms/components/codec" jsoncodec "github.com/ava-labs/gecko/utils/json" + zxcvbn "github.com/nbutton23/zxcvbn-go" +) + +const ( + // maxUserPassLen is the maximum length of the username or password allowed + maxUserPassLen = 1024 + + // requiredPassScore defines the score a password must achieve to be accepted + // as a password with strong characteristics by the zxcvbn package + // + // The scoring mechanism defined is as follows; + // + // 0 # too guessable: risky password. (guesses < 10^3) + // 1 # very guessable: protection from throttled online attacks. (guesses < 10^6) + // 2 # somewhat guessable: protection from unthrottled online attacks. (guesses < 10^8) + // 3 # safely unguessable: moderate protection from offline slow-hash scenario. (guesses < 10^10) + // 4 # very unguessable: strong protection from offline slow-hash scenario. (guesses >= 10^10) + requiredPassScore = 2 ) var ( - errEmptyUsername = errors.New("username can't be the empty string") + errEmptyUsername = errors.New("username can't be the empty string") + errUserPassMaxLength = fmt.Errorf("CreateUser call rejected due to username or password exceeding maximum length of %d chars", maxUserPassLen) + errWeakPassword = errors.New("Failed to create user as the given password is too weak. A stronger password is one of 8 or more characters containing attributes of upper and lowercase letters, numbers, and/or special characters") ) // KeyValuePair ... 
@@ -114,7 +135,11 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("CreateUser called with %s", args.Username) + ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username) + + if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen { + return errUserPassMaxLength + } if args.Username == "" { return errEmptyUsername @@ -123,6 +148,10 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre return fmt.Errorf("user already exists: %s", args.Username) } + if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore { + return errWeakPassword + } + usr := &User{} if err := usr.Initialize(args.Password); err != nil { return err @@ -174,7 +203,7 @@ type ExportUserArgs struct { // ExportUserReply is the reply from ExportUser type ExportUserReply struct { - User string `json:"user"` + User formatting.CB58 `json:"user"` } // ExportUser exports a serialized encoding of a user's information complete with encrypted database values @@ -214,16 +243,15 @@ func (ks *Keystore) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Exp if err != nil { return err } - cb58 := formatting.CB58{Bytes: b} - reply.User = cb58.String() + reply.User.Bytes = b return nil } // ImportUserArgs are arguments for ImportUser type ImportUserArgs struct { - Username string `json:"username"` - Password string `json:"password"` - User string `json:"user"` + Username string `json:"username"` + Password string `json:"password"` + User formatting.CB58 `json:"user"` } // ImportUserReply is the response for ImportUser @@ -242,13 +270,8 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp return fmt.Errorf("user already exists: %s", args.Username) } - cb58 := formatting.CB58{} - if err := cb58.FromString(args.User); err != nil { - return err - } - userData := UserDB{} - if err := ks.codec.Unmarshal(cb58.Bytes, 
&userData); err != nil { + if err := ks.codec.Unmarshal(args.User.Bytes, &userData); err != nil { return err } @@ -257,22 +280,25 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp return err } - // TODO: Should add batching to prevent creating a user without importing - // the account - if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil { + userBatch := ks.userDB.NewBatch() + if err := userBatch.Put([]byte(args.Username), usrBytes); err != nil { return err } - ks.users[args.Username] = &userData.User - - userDB := prefixdb.New([]byte(args.Username), ks.bcDB) - batch := userDB.NewBatch() + userDataDB := prefixdb.New([]byte(args.Username), ks.bcDB) + dataBatch := userDataDB.NewBatch() for _, kvp := range userData.Data { - batch.Put(kvp.Key, kvp.Value) + dataBatch.Put(kvp.Key, kvp.Value) } + if err := atomic.WriteAll(dataBatch, userBatch); err != nil { + return err + } + + ks.users[args.Username] = &userData.User + reply.Success = true - return batch.Write() + return nil } // NewBlockchainKeyStore ... 
diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index ab2a096..0868a29 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -5,6 +5,8 @@ package keystore import ( "bytes" + "fmt" + "math/rand" "testing" "github.com/ava-labs/gecko/database/memdb" @@ -12,6 +14,12 @@ import ( "github.com/ava-labs/gecko/utils/logging" ) +var ( + // strongPassword defines a password used for the following tests that + // scores high enough to pass the password strength scoring system + strongPassword = "N_+=_jJ;^(<;{4,:*m6CET}'&N;83FYK.wtNpwp-Jt" +) + func TestServiceListNoUsers(t *testing.T) { ks := Keystore{} ks.Initialize(logging.NoLog{}, memdb.New()) @@ -33,7 +41,7 @@ func TestServiceCreateUser(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -56,6 +64,78 @@ func TestServiceCreateUser(t *testing.T) { } } +// genStr returns a string of given length +func genStr(n int) string { + b := make([]byte, n) + rand.Read(b) + return fmt.Sprintf("%x", b)[:n] +} + +// TestServiceCreateUserArgsCheck generates excessively long usernames or +// passwords to ensure the sanity checks on string length are enforced +func TestServiceCreateUserArgsCheck(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + err := ks.CreateUser(nil, &CreateUserArgs{ + Username: genStr(maxUserPassLen + 1), + Password: strongPassword, + }, &reply) + + if reply.Success || err != errUserPassMaxLength { + t.Fatal("User was created when it should have been rejected due to too long a Username, err =", err) + } + } + + { + reply := CreateUserReply{} + err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "shortuser", + Password: genStr(maxUserPassLen + 1), + }, &reply) + + if reply.Success || err != errUserPassMaxLength { + t.Fatal("User was created when it 
should have been rejected due to too long a Password, err =", err) + } + } + + { + reply := ListUsersReply{} + if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil { + t.Fatal(err) + } + + if len(reply.Users) > 0 { + t.Fatalf("A user exists when there should be none") + } + } +} + +// TestServiceCreateUserWeakPassword tests creating a new user with a weak +// password to ensure the password strength check is working +func TestServiceCreateUserWeakPassword(t *testing.T) { + ks := Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + + { + reply := CreateUserReply{} + err := ks.CreateUser(nil, &CreateUserArgs{ + Username: "bob", + Password: "weak", + }, &reply) + + if err != errWeakPassword { + t.Error("Unexpected error occurred when testing weak password:", err) + } + + if reply.Success { + t.Fatal("User was created when it should have been rejected due to weak password") + } + } +} + func TestServiceCreateDuplicate(t *testing.T) { ks := Keystore{} ks.Initialize(logging.NoLog{}, memdb.New()) @@ -64,7 +144,7 @@ func TestServiceCreateDuplicate(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -77,7 +157,7 @@ func TestServiceCreateDuplicate(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch!", + Password: strongPassword, }, &reply); err == nil { t.Fatalf("Should have errored due to the username already existing") } @@ -90,7 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ - Password: "launch", + Password: strongPassword, }, &reply); err == nil { t.Fatalf("Shouldn't have allowed empty username") } @@ -104,7 +184,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: 
"bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -114,7 +194,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { } { - db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } @@ -124,7 +204,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { } { - db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } @@ -144,7 +224,7 @@ func TestServiceExportImport(t *testing.T) { reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &reply); err != nil { t.Fatal(err) } @@ -154,7 +234,7 @@ func TestServiceExportImport(t *testing.T) { } { - db, err := ks.GetDatabase(ids.Empty, "bob", "launch") + db, err := ks.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } @@ -166,7 +246,7 @@ func TestServiceExportImport(t *testing.T) { exportReply := ExportUserReply{} if err := ks.ExportUser(nil, &ExportUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, }, &exportReply); err != nil { t.Fatal(err) } @@ -178,7 +258,7 @@ func TestServiceExportImport(t *testing.T) { reply := ImportUserReply{} if err := newKS.ImportUser(nil, &ImportUserArgs{ Username: "bob", - Password: "launch", + Password: strongPassword, User: exportReply.User, }, &reply); err != nil { t.Fatal(err) @@ -189,7 +269,7 @@ func TestServiceExportImport(t *testing.T) { } { - db, err := newKS.GetDatabase(ids.Empty, "bob", "launch") + db, err := newKS.GetDatabase(ids.Empty, "bob", strongPassword) if err != nil { t.Fatal(err) } diff --git a/chains/atomic/blockchain_memory.go b/chains/atomic/blockchain_memory.go new file mode 100644 index 0000000..a02a85a --- /dev/null +++ b/chains/atomic/blockchain_memory.go @@ -0,0 +1,28 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" +) + +// BlockchainSharedMemory provides the API for a blockchain to interact with +// shared memory of another blockchain +type BlockchainSharedMemory struct { + blockchainID ids.ID + sm *SharedMemory +} + +// GetDatabase returns and locks the provided DB +func (bsm *BlockchainSharedMemory) GetDatabase(id ids.ID) database.Database { + sharedID := bsm.sm.sharedID(id, bsm.blockchainID) + return bsm.sm.GetDatabase(sharedID) +} + +// ReleaseDatabase unlocks the provided DB +func (bsm *BlockchainSharedMemory) ReleaseDatabase(id ids.ID) { + sharedID := bsm.sm.sharedID(id, bsm.blockchainID) + bsm.sm.ReleaseDatabase(sharedID) +} diff --git a/chains/atomic/blockchain_memory_test.go b/chains/atomic/blockchain_memory_test.go new file mode 100644 index 0000000..318ae0d --- /dev/null +++ b/chains/atomic/blockchain_memory_test.go @@ -0,0 +1,34 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/utils/logging" +) + +func TestBlockchainSharedMemory(t *testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + bsm0 := sm.NewBlockchainSharedMemory(blockchainID0) + bsm1 := sm.NewBlockchainSharedMemory(blockchainID1) + + sharedDB0 := bsm0.GetDatabase(blockchainID1) + if err := sharedDB0.Put([]byte{1}, []byte{2}); err != nil { + t.Fatal(err) + } + bsm0.ReleaseDatabase(blockchainID1) + + sharedDB1 := bsm1.GetDatabase(blockchainID0) + if value, err := sharedDB1.Get([]byte{1}); err != nil { + t.Fatal(err) + } else if !bytes.Equal(value, []byte{2}) { + t.Fatalf("database.Get Returned: 0x%x ; Expected: 0x%x", value, []byte{2}) + } + bsm1.ReleaseDatabase(blockchainID0) +} diff --git a/chains/atomic/memory.go b/chains/atomic/memory.go new file mode 100644 index 0000000..448e6c9 --- /dev/null +++ b/chains/atomic/memory.go @@ -0,0 +1,105 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "bytes" + "sync" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/codec" +) + +type rcLock struct { + lock sync.Mutex + count int +} + +// SharedMemory manages the shared memory inside a subnet +type SharedMemory struct { + lock sync.Mutex + log logging.Logger + codec codec.Codec + locks map[[32]byte]*rcLock + db database.Database +} + +// Initialize the SharedMemory +func (sm *SharedMemory) Initialize(log logging.Logger, db database.Database) { + sm.log = log + sm.codec = codec.NewDefault() + sm.locks = make(map[[32]byte]*rcLock) + sm.db = db +} + +// NewBlockchainSharedMemory returns a new BlockchainSharedMemory +func (sm *SharedMemory) NewBlockchainSharedMemory(id ids.ID) *BlockchainSharedMemory { + return &BlockchainSharedMemory{ + blockchainID: id, + sm: sm, + } +} + +// GetDatabase returns and locks the provided DB +func (sm *SharedMemory) GetDatabase(id ids.ID) database.Database { + lock := sm.makeLock(id) + lock.Lock() + + return prefixdb.New(id.Bytes(), sm.db) +} + +// ReleaseDatabase unlocks the provided DB +func (sm *SharedMemory) ReleaseDatabase(id ids.ID) { + lock := sm.releaseLock(id) + lock.Unlock() +} + +func (sm *SharedMemory) makeLock(id ids.ID) *sync.Mutex { + sm.lock.Lock() + defer sm.lock.Unlock() + + key := id.Key() + rc, exists := sm.locks[key] + if !exists { + rc = &rcLock{} + sm.locks[key] = rc + } + rc.count++ + return &rc.lock +} + +func (sm *SharedMemory) releaseLock(id ids.ID) *sync.Mutex { + sm.lock.Lock() + defer sm.lock.Unlock() + + key := id.Key() + rc, exists := sm.locks[key] + if !exists { + panic("Attempting to free an unknown lock") + } + rc.count-- + if rc.count == 0 { + delete(sm.locks, key) + } + return &rc.lock +} + +// sharedID calculates the ID of the shared memory space +func (sm 
*SharedMemory) sharedID(id1, id2 ids.ID) ids.ID { + idKey1 := id1.Key() + idKey2 := id2.Key() + + if bytes.Compare(idKey1[:], idKey2[:]) == 1 { + idKey1, idKey2 = idKey2, idKey1 + } + + combinedBytes, err := sm.codec.Marshal([2][32]byte{idKey1, idKey2}) + sm.log.AssertNoError(err) + + return ids.NewID(hashing.ComputeHash256Array(combinedBytes)) +} diff --git a/chains/atomic/memory_test.go b/chains/atomic/memory_test.go new file mode 100644 index 0000000..f1cf020 --- /dev/null +++ b/chains/atomic/memory_test.go @@ -0,0 +1,69 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +var ( + blockchainID0 = ids.Empty.Prefix(0) + blockchainID1 = ids.Empty.Prefix(1) +) + +func TestSharedMemorySharedID(t *testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + sharedID0 := sm.sharedID(blockchainID0, blockchainID1) + sharedID1 := sm.sharedID(blockchainID1, blockchainID0) + + if !sharedID0.Equals(sharedID1) { + t.Fatalf("SharedMemory.sharedID should be commutative") + } +} + +func TestSharedMemoryMakeReleaseLock(t *testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + sharedID := sm.sharedID(blockchainID0, blockchainID1) + + lock0 := sm.makeLock(sharedID) + + if lock1 := sm.makeLock(sharedID); lock0 != lock1 { + t.Fatalf("SharedMemory.makeLock should have returned the same lock") + } + sm.releaseLock(sharedID) + + if lock2 := sm.makeLock(sharedID); lock0 != lock2 { + t.Fatalf("SharedMemory.makeLock should have returned the same lock") + } + sm.releaseLock(sharedID) + sm.releaseLock(sharedID) + + if lock3 := sm.makeLock(sharedID); lock0 == lock3 { + t.Fatalf("SharedMemory.releaseLock should have freed the lock") + } + sm.releaseLock(sharedID) +} + +func TestSharedMemoryUnknownFree(t 
*testing.T) { + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + sharedID := sm.sharedID(blockchainID0, blockchainID1) + + defer func() { + if recover() == nil { + t.Fatalf("Should have panicked due to an unknown free") + } + }() + + sm.releaseLock(sharedID) +} diff --git a/chains/atomic/writer.go b/chains/atomic/writer.go new file mode 100644 index 0000000..bacabab --- /dev/null +++ b/chains/atomic/writer.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package atomic + +import ( + "github.com/ava-labs/gecko/database" +) + +// WriteAll assumes all batches have the same underlying database. Batches +// should not be modified after being passed to this function. +func WriteAll(baseBatch database.Batch, batches ...database.Batch) error { + baseBatch = baseBatch.Inner() + for _, batch := range batches { + batch = batch.Inner() + if err := batch.Replay(baseBatch); err != nil { + return err + } + } + return baseBatch.Write() +} diff --git a/chains/atomic/writer_test.go b/chains/atomic/writer_test.go new file mode 100644 index 0000000..8c79519 --- /dev/null +++ b/chains/atomic/writer_test.go @@ -0,0 +1,61 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package atomic + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/prefixdb" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/utils/logging" +) + +func TestWriteAll(t *testing.T) { + baseDB := memdb.New() + prefixedDBChain := prefixdb.New([]byte{0}, baseDB) + prefixedDBSharedMemory := prefixdb.New([]byte{1}, baseDB) + + sm := SharedMemory{} + sm.Initialize(logging.NoLog{}, prefixedDBSharedMemory) + + sharedID := sm.sharedID(blockchainID0, blockchainID1) + + sharedDB := sm.GetDatabase(sharedID) + + writeDB0 := versiondb.New(prefixedDBChain) + writeDB1 := versiondb.New(sharedDB) + defer sm.ReleaseDatabase(sharedID) + + if err := writeDB0.Put([]byte{1}, []byte{2}); err != nil { + t.Fatal(err) + } + if err := writeDB1.Put([]byte{2}, []byte{3}); err != nil { + t.Fatal(err) + } + + batch0, err := writeDB0.CommitBatch() + if err != nil { + t.Fatal(err) + } + batch1, err := writeDB1.CommitBatch() + if err != nil { + t.Fatal(err) + } + + if err := WriteAll(batch0, batch1); err != nil { + t.Fatal(err) + } + + if value, err := prefixedDBChain.Get([]byte{1}); err != nil { + t.Fatal(err) + } else if !bytes.Equal(value, []byte{2}) { + t.Fatalf("database.Get Returned: 0x%x ; Expected: 0x%x", value, []byte{2}) + } else if value, err := sharedDB.Get([]byte{2}); err != nil { + t.Fatal(err) + } else if !bytes.Equal(value, []byte{3}) { + t.Fatalf("database.Get Returned: 0x%x ; Expected: 0x%x", value, []byte{3}) + } +} diff --git a/chains/manager.go b/chains/manager.go index efb4372..3f7d48d 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/gecko/api" "github.com/ava-labs/gecko/api/keystore" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" @@ -26,6 +27,7 @@ import ( "github.com/ava-labs/gecko/snow/triggers" 
"github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/math" "github.com/ava-labs/gecko/vms" avacon "github.com/ava-labs/gecko/snow/consensus/avalanche" @@ -92,6 +94,7 @@ type manager struct { // That is, [chainID].String() is an alias for the chain, too ids.Aliaser + stakingEnabled bool // True iff the network has staking enabled log logging.Logger logFactory logging.Factory vmManager vms.Manager // Manage mappings from vm ID --> vm @@ -109,6 +112,7 @@ type manager struct { awaiter Awaiter // Waits for required connections before running bootstrapping server *api.Server // Handles HTTP API calls keystore *keystore.Keystore + sharedMemory *atomic.SharedMemory unblocked bool blockedChains []ChainParameters @@ -120,6 +124,7 @@ type manager struct { // validate this chain // TODO: Make this function take less arguments func New( + stakingEnabled bool, log logging.Logger, logFactory logging.Factory, vmManager vms.Manager, @@ -135,6 +140,7 @@ func New( awaiter Awaiter, server *api.Server, keystore *keystore.Keystore, + sharedMemory *atomic.SharedMemory, ) Manager { timeoutManager := timeout.Manager{} timeoutManager.Initialize(requestTimeout) @@ -143,6 +149,7 @@ func New( router.Initialize(log, &timeoutManager) m := &manager{ + stakingEnabled: stakingEnabled, log: log, logFactory: logFactory, vmManager: vmManager, @@ -159,6 +166,7 @@ func New( awaiter: awaiter, server: server, keystore: keystore, + sharedMemory: sharedMemory, } m.Initialize() return m @@ -206,7 +214,12 @@ func (m *manager) ForceCreateChain(chain ChainParameters) { } // Create the chain - vm := vmFactory.New() + vm, err := vmFactory.New() + if err != nil { + m.log.Error("error while creating vm: %s", err) + return + } + // TODO: Shutdown VM if an error occurs fxs := make([]*common.Fx, len(chain.FxAliases)) for i, fxAlias := range chain.FxAliases { @@ -223,10 +236,16 @@ func (m *manager) ForceCreateChain(chain ChainParameters) { return } + 
fx, err := fxFactory.New() + if err != nil { + m.log.Error("error while creating fx: %s", err) + return + } + // Create the fx fxs[i] = &common.Fx{ ID: fxID, - Fx: fxFactory.New(), + Fx: fx, } } @@ -246,6 +265,7 @@ func (m *manager) ForceCreateChain(chain ChainParameters) { NodeID: m.nodeID, HTTP: m.server, Keystore: m.keystore.NewBlockchainKeyStore(chain.ID), + SharedMemory: m.sharedMemory.NewBlockchainSharedMemory(chain.ID), BCLookup: m, } consensusParams := m.consensusParams @@ -256,7 +276,13 @@ func (m *manager) ForceCreateChain(chain ChainParameters) { } // The validators of this blockchain - validators, ok := m.validators.GetValidatorSet(ids.Empty) // TODO: Change argument to chain.SubnetID + var validators validators.Set // Validators validating this blockchain + var ok bool + if m.stakingEnabled { + validators, ok = m.validators.GetValidatorSet(chain.SubnetID) + } else { // Staking is disabled. Every peer validates every subnet. + validators, ok = m.validators.GetValidatorSet(ids.Empty) // ids.Empty is the default subnet ID. TODO: Move to const package so we can use it here. + } if !ok { m.log.Error("couldn't get validator set of subnet with ID %s. 
The subnet may not exist", chain.SubnetID) return @@ -353,7 +379,7 @@ func (m *manager) createAvalancheChain( msgChan := make(chan common.Message, defaultChannelSize) if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, fxs); err != nil { - return err + return fmt.Errorf("error during vm's Initialize: %w", err) } // Handles serialization/deserialization of vertices and also the @@ -376,13 +402,22 @@ func (m *manager) createAvalancheChain( }, } + bootstrapWeight := uint64(0) + for _, beacon := range beacons.List() { + newWeight, err := math.Add64(bootstrapWeight, beacon.Weight()) + if err != nil { + return err + } + bootstrapWeight = newWeight + } + engine.Initialize(avaeng.Config{ BootstrapConfig: avaeng.BootstrapConfig{ Config: common.Config{ Context: ctx, Validators: validators, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: bootstrapWeight/2 + 1, // must be > 50% Sender: &sender, }, VtxBlocked: vtxBlocker, @@ -403,6 +438,8 @@ func (m *manager) createAvalancheChain( go ctx.Log.RecoverAndPanic(handler.Dispatch) awaiting := &networking.AwaitingConnections{ + Requested: beacons, + WeightRequired: (3*bootstrapWeight + 3) / 4, // 75% must be connected to Finish: func() { ctx.Lock.Lock() defer ctx.Lock.Unlock() @@ -410,10 +447,6 @@ func (m *manager) createAvalancheChain( engine.Startup() }, } - for _, vdr := range beacons.List() { - awaiting.Requested.Add(vdr.ID()) - } - awaiting.NumRequired = (3*awaiting.Requested.Len() + 3) / 4 // 75% must be connected to m.awaiter.AwaitConnections(awaiting) return nil @@ -454,6 +487,15 @@ func (m *manager) createSnowmanChain( sender := sender.Sender{} sender.Initialize(ctx, m.sender, m.chainRouter, m.timeoutManager) + bootstrapWeight := uint64(0) + for _, beacon := range beacons.List() { + newWeight, err := math.Add64(bootstrapWeight, beacon.Weight()) + if err != nil { + return err + } + bootstrapWeight = newWeight + } + // The engine handles consensus engine := smeng.Transitive{} engine.Initialize(smeng.Config{ @@ 
-462,7 +504,7 @@ func (m *manager) createSnowmanChain( Context: ctx, Validators: validators, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: bootstrapWeight/2 + 1, // must be > 50% Sender: &sender, }, Blocked: blocked, @@ -482,6 +524,8 @@ func (m *manager) createSnowmanChain( go ctx.Log.RecoverAndPanic(handler.Dispatch) awaiting := &networking.AwaitingConnections{ + Requested: beacons, + WeightRequired: (3*bootstrapWeight + 3) / 4, // 75% must be connected to Finish: func() { ctx.Lock.Lock() defer ctx.Lock.Unlock() @@ -489,10 +533,6 @@ func (m *manager) createSnowmanChain( engine.Startup() }, } - for _, vdr := range beacons.List() { - awaiting.Requested.Add(vdr.ID()) - } - awaiting.NumRequired = (3*awaiting.Requested.Len() + 3) / 4 // 75% must be connected to m.awaiter.AwaitConnections(awaiting) return nil } diff --git a/chains/mock_manager.go b/chains/mock_manager.go new file mode 100644 index 0000000..7c0f86b --- /dev/null +++ b/chains/mock_manager.go @@ -0,0 +1,37 @@ +package chains + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/networking/router" +) + +// MockManager implements Manager but does nothing. Always returns nil error. +// To be used only in tests (namely in package platformvm) +type MockManager struct{} + +// Router ... +func (mm MockManager) Router() router.Router { return nil } + +// CreateChain ... +func (mm MockManager) CreateChain(ChainParameters) {} + +// ForceCreateChain ... +func (mm MockManager) ForceCreateChain(ChainParameters) {} + +// AddRegistrant ... +func (mm MockManager) AddRegistrant(Registrant) {} + +// Lookup ... +func (mm MockManager) Lookup(string) (ids.ID, error) { return ids.ID{}, nil } + +// LookupVM ... +func (mm MockManager) LookupVM(string) (ids.ID, error) { return ids.ID{}, nil } + +// Aliases ... +func (mm MockManager) Aliases(ids.ID) []string { return nil } + +// Alias ... +func (mm MockManager) Alias(ids.ID, string) error { return nil } + +// Shutdown ... 
+func (mm MockManager) Shutdown() {} diff --git a/database/batch.go b/database/batch.go index 443fd67..53ce3e5 100644 --- a/database/batch.go +++ b/database/batch.go @@ -23,6 +23,11 @@ type Batch interface { // Replay replays the batch contents. Replay(w KeyValueWriter) error + + // Inner returns a Batch writing to the inner database, if one exists. If + // this batch is already writing to the base DB, then itself should be + // returned. + Inner() Batch } // Batcher wraps the NewBatch method of a backing data store. diff --git a/database/encdb/encdb.go b/database/encdb/db.go similarity index 100% rename from database/encdb/encdb.go rename to database/encdb/db.go diff --git a/database/encdb/encdb_test.go b/database/encdb/db_test.go similarity index 100% rename from database/encdb/encdb_test.go rename to database/encdb/db_test.go diff --git a/database/leveldb/leveldb.go b/database/leveldb/db.go similarity index 98% rename from database/leveldb/leveldb.go rename to database/leveldb/db.go index ef5e89c..a763829 100644 --- a/database/leveldb/leveldb.go +++ b/database/leveldb/db.go @@ -184,6 +184,9 @@ func (b *batch) Replay(w database.KeyValueWriter) error { return updateError(replay.err) } +// Inner returns itself +func (b *batch) Inner() database.Batch { return b } + type replayer struct { writer database.KeyValueWriter err error diff --git a/database/leveldb/leveldb_test.go b/database/leveldb/db_test.go similarity index 100% rename from database/leveldb/leveldb_test.go rename to database/leveldb/db_test.go diff --git a/database/memdb/memdb.go b/database/memdb/db.go similarity index 98% rename from database/memdb/memdb.go rename to database/memdb/db.go index 9f6ba58..24b5104 100644 --- a/database/memdb/memdb.go +++ b/database/memdb/db.go @@ -208,6 +208,9 @@ func (b *batch) Replay(w database.KeyValueWriter) error { return nil } +// Inner returns itself +func (b *batch) Inner() database.Batch { return b } + type iterator struct { initialized bool keys []string diff 
--git a/database/memdb/memdb_test.go b/database/memdb/db_test.go similarity index 100% rename from database/memdb/memdb_test.go rename to database/memdb/db_test.go diff --git a/database/mockdb/mockdb.go b/database/mockdb/db.go similarity index 100% rename from database/mockdb/mockdb.go rename to database/mockdb/db.go diff --git a/database/mockdb/mockdb_test.go b/database/mockdb/db_test.go similarity index 100% rename from database/mockdb/mockdb_test.go rename to database/mockdb/db_test.go diff --git a/database/nodb/nodb.go b/database/nodb/db.go similarity index 97% rename from database/nodb/nodb.go rename to database/nodb/db.go index 96b5ef0..3f1bceb 100644 --- a/database/nodb/nodb.go +++ b/database/nodb/db.go @@ -69,6 +69,9 @@ func (*Batch) Reset() {} // Replay does nothing func (*Batch) Replay(database.KeyValueWriter) error { return database.ErrClosed } +// Inner returns itself +func (b *Batch) Inner() database.Batch { return b } + // Iterator does nothing type Iterator struct{ Err error } diff --git a/database/prefixdb/prefixdb.go b/database/prefixdb/db.go similarity index 100% rename from database/prefixdb/prefixdb.go rename to database/prefixdb/db.go diff --git a/database/prefixdb/prefixdb_test.go b/database/prefixdb/db_test.go similarity index 100% rename from database/prefixdb/prefixdb_test.go rename to database/prefixdb/db_test.go diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go new file mode 100644 index 0000000..8d23049 --- /dev/null +++ b/database/rpcdb/db_client.go @@ -0,0 +1,268 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package rpcdb + +import ( + "fmt" + + "golang.org/x/net/context" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/nodb" + "github.com/ava-labs/gecko/database/rpcdb/proto" +) + +var ( + errClosed = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrClosed) + errNotFound = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrNotFound) +) + +// DatabaseClient is an implementation of database that talks over RPC. +type DatabaseClient struct{ client proto.DatabaseClient } + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client proto.DatabaseClient) *DatabaseClient { + return &DatabaseClient{client: client} +} + +// Has returns false, nil +func (db *DatabaseClient) Has(key []byte) (bool, error) { + resp, err := db.client.Has(context.Background(), &proto.HasRequest{ + Key: key, + }) + if err != nil { + return false, updateError(err) + } + return resp.Has, nil +} + +// Get returns nil, error +func (db *DatabaseClient) Get(key []byte) ([]byte, error) { + resp, err := db.client.Get(context.Background(), &proto.GetRequest{ + Key: key, + }) + if err != nil { + return nil, updateError(err) + } + return resp.Value, nil +} + +// Put returns nil +func (db *DatabaseClient) Put(key, value []byte) error { + _, err := db.client.Put(context.Background(), &proto.PutRequest{ + Key: key, + Value: value, + }) + return updateError(err) +} + +// Delete returns nil +func (db *DatabaseClient) Delete(key []byte) error { + _, err := db.client.Delete(context.Background(), &proto.DeleteRequest{ + Key: key, + }) + return updateError(err) +} + +// NewBatch returns a new batch +func (db *DatabaseClient) NewBatch() database.Batch { return &batch{db: db} } + +// NewIterator implements the Database interface +func (db *DatabaseClient) NewIterator() database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, nil) +} + +// NewIteratorWithStart implements the Database interface +func (db 
*DatabaseClient) NewIteratorWithStart(start []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(start, nil) +} + +// NewIteratorWithPrefix implements the Database interface +func (db *DatabaseClient) NewIteratorWithPrefix(prefix []byte) database.Iterator { + return db.NewIteratorWithStartAndPrefix(nil, prefix) +} + +// NewIteratorWithStartAndPrefix returns a new empty iterator +func (db *DatabaseClient) NewIteratorWithStartAndPrefix(start, prefix []byte) database.Iterator { + resp, err := db.client.NewIteratorWithStartAndPrefix(context.Background(), &proto.NewIteratorWithStartAndPrefixRequest{ + Start: start, + Prefix: prefix, + }) + if err != nil { + return &nodb.Iterator{Err: updateError(err)} + } + return &iterator{ + db: db, + id: resp.Id, + } +} + +// Stat returns an error +func (db *DatabaseClient) Stat(property string) (string, error) { + resp, err := db.client.Stat(context.Background(), &proto.StatRequest{ + Property: property, + }) + if err != nil { + return "", updateError(err) + } + return resp.Stat, nil +} + +// Compact returns nil +func (db *DatabaseClient) Compact(start, limit []byte) error { + _, err := db.client.Compact(context.Background(), &proto.CompactRequest{ + Start: start, + Limit: limit, + }) + return updateError(err) +} + +// Close returns nil +func (db *DatabaseClient) Close() error { + _, err := db.client.Close(context.Background(), &proto.CloseRequest{}) + return updateError(err) +} + +type keyValue struct { + key []byte + value []byte + delete bool +} + +type batch struct { + db *DatabaseClient + writes []keyValue + size int +} + +func (b *batch) Put(key, value []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), copyBytes(value), false}) + b.size += len(value) + return nil +} + +func (b *batch) Delete(key []byte) error { + b.writes = append(b.writes, keyValue{copyBytes(key), nil, true}) + b.size++ + return nil +} + +func (b *batch) ValueSize() int { return b.size } + +func (b *batch) Write() error { 
+ request := &proto.WriteBatchRequest{} + + keySet := make(map[string]struct{}, len(b.writes)) + for i := len(b.writes) - 1; i >= 0; i-- { + kv := b.writes[i] + key := string(kv.key) + if _, overwritten := keySet[key]; overwritten { + continue + } + keySet[key] = struct{}{} + + if kv.delete { + request.Deletes = append(request.Deletes, &proto.DeleteRequest{ + Key: kv.key, + }) + } else { + request.Puts = append(request.Puts, &proto.PutRequest{ + Key: kv.key, + Value: kv.value, + }) + } + } + + _, err := b.db.client.WriteBatch(context.Background(), request) + return updateError(err) +} + +func (b *batch) Reset() { + b.writes = b.writes[:0] + b.size = 0 +} + +func (b *batch) Replay(w database.KeyValueWriter) error { + for _, keyvalue := range b.writes { + if keyvalue.delete { + if err := w.Delete(keyvalue.key); err != nil { + return err + } + } else if err := w.Put(keyvalue.key, keyvalue.value); err != nil { + return err + } + } + return nil +} + +func (b *batch) Inner() database.Batch { return b } + +type iterator struct { + db *DatabaseClient + id uint64 + key []byte + value []byte + err error +} + +// Next returns false +func (it *iterator) Next() bool { + resp, err := it.db.client.IteratorNext(context.Background(), &proto.IteratorNextRequest{ + Id: it.id, + }) + if err != nil { + it.err = err + return false + } + it.key = resp.Key + it.value = resp.Value + return resp.FoundNext +} + +// Error returns any errors +func (it *iterator) Error() error { + if it.err != nil { + return it.err + } + + _, err := it.db.client.IteratorError(context.Background(), &proto.IteratorErrorRequest{ + Id: it.id, + }) + it.err = updateError(err) + return it.err +} + +// Key returns nil +func (it *iterator) Key() []byte { return it.key } + +// Value returns nil +func (it *iterator) Value() []byte { return it.value } + +// Release does nothing +func (it *iterator) Release() { + it.db.client.IteratorRelease(context.Background(), &proto.IteratorReleaseRequest{ + Id: it.id, + }) +} + +func 
copyBytes(bytes []byte) []byte { + copiedBytes := make([]byte, len(bytes)) + copy(copiedBytes, bytes) + return copiedBytes +} + +func updateError(err error) error { + if err == nil { + return nil + } + + switch err.Error() { + case errClosed: + return database.ErrClosed + case errNotFound: + return database.ErrNotFound + default: + return err + } +} diff --git a/database/rpcdb/db_server.go b/database/rpcdb/db_server.go new file mode 100644 index 0000000..5abc3be --- /dev/null +++ b/database/rpcdb/db_server.go @@ -0,0 +1,143 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package rpcdb + +import ( + "errors" + + "golang.org/x/net/context" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/rpcdb/proto" +) + +var ( + errUnknownIterator = errors.New("unknown iterator") +) + +// DatabaseServer is a database that is managed over RPC. +type DatabaseServer struct { + db database.Database + batch database.Batch + + nextIteratorID uint64 + iterators map[uint64]database.Iterator +} + +// NewServer returns a database instance that is managed remotely +func NewServer(db database.Database) *DatabaseServer { + return &DatabaseServer{ + db: db, + batch: db.NewBatch(), + iterators: make(map[uint64]database.Iterator), + } +} + +// Has ... +func (db *DatabaseServer) Has(_ context.Context, req *proto.HasRequest) (*proto.HasResponse, error) { + has, err := db.db.Has(req.Key) + if err != nil { + return nil, err + } + return &proto.HasResponse{Has: has}, nil +} + +// Get ... +func (db *DatabaseServer) Get(_ context.Context, req *proto.GetRequest) (*proto.GetResponse, error) { + value, err := db.db.Get(req.Key) + if err != nil { + return nil, err + } + return &proto.GetResponse{Value: value}, nil +} + +// Put ... +func (db *DatabaseServer) Put(_ context.Context, req *proto.PutRequest) (*proto.PutResponse, error) { + return &proto.PutResponse{}, db.db.Put(req.Key, req.Value) +} + +// Delete ... 
+func (db *DatabaseServer) Delete(_ context.Context, req *proto.DeleteRequest) (*proto.DeleteResponse, error) { + return &proto.DeleteResponse{}, db.db.Delete(req.Key) +} + +// Stat ... +func (db *DatabaseServer) Stat(_ context.Context, req *proto.StatRequest) (*proto.StatResponse, error) { + stat, err := db.db.Stat(req.Property) + if err != nil { + return nil, err + } + return &proto.StatResponse{Stat: stat}, nil +} + +// Compact ... +func (db *DatabaseServer) Compact(_ context.Context, req *proto.CompactRequest) (*proto.CompactResponse, error) { + return &proto.CompactResponse{}, db.db.Compact(req.Start, req.Limit) +} + +// Close ... +func (db *DatabaseServer) Close(_ context.Context, _ *proto.CloseRequest) (*proto.CloseResponse, error) { + return &proto.CloseResponse{}, db.db.Close() +} + +// WriteBatch ... +func (db *DatabaseServer) WriteBatch(_ context.Context, req *proto.WriteBatchRequest) (*proto.WriteBatchResponse, error) { + db.batch.Reset() + + for _, put := range req.Puts { + if err := db.batch.Put(put.Key, put.Value); err != nil { + return nil, err + } + } + + for _, del := range req.Deletes { + if err := db.batch.Delete(del.Key); err != nil { + return nil, err + } + } + + return &proto.WriteBatchResponse{}, db.batch.Write() +} + +// NewIteratorWithStartAndPrefix ... +func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req *proto.NewIteratorWithStartAndPrefixRequest) (*proto.NewIteratorWithStartAndPrefixResponse, error) { + id := db.nextIteratorID + it := db.db.NewIteratorWithStartAndPrefix(req.Start, req.Prefix) + db.iterators[id] = it + + db.nextIteratorID++ + return &proto.NewIteratorWithStartAndPrefixResponse{Id: id}, nil +} + +// IteratorNext ... 
+func (db *DatabaseServer) IteratorNext(_ context.Context, req *proto.IteratorNextRequest) (*proto.IteratorNextResponse, error) { + it, exists := db.iterators[req.Id] + if !exists { + return nil, errUnknownIterator + } + return &proto.IteratorNextResponse{ + FoundNext: it.Next(), + Key: it.Key(), + Value: it.Value(), + }, nil +} + +// IteratorError ... +func (db *DatabaseServer) IteratorError(_ context.Context, req *proto.IteratorErrorRequest) (*proto.IteratorErrorResponse, error) { + it, exists := db.iterators[req.Id] + if !exists { + return nil, errUnknownIterator + } + return &proto.IteratorErrorResponse{}, it.Error() +} + +// IteratorRelease ... +func (db *DatabaseServer) IteratorRelease(_ context.Context, req *proto.IteratorReleaseRequest) (*proto.IteratorReleaseResponse, error) { + it, exists := db.iterators[req.Id] + if exists { + delete(db.iterators, req.Id) + it.Release() + } + return &proto.IteratorReleaseResponse{}, nil +} diff --git a/database/rpcdb/db_test.go b/database/rpcdb/db_test.go new file mode 100644 index 0000000..ae833d5 --- /dev/null +++ b/database/rpcdb/db_test.go @@ -0,0 +1,51 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package rpcdb + +import ( + "log" + "net" + "testing" + + "golang.org/x/net/context" + + "google.golang.org/grpc" + "google.golang.org/grpc/test/bufconn" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/rpcdb/proto" +) + +const ( + bufSize = 1 << 20 +) + +func TestInterface(t *testing.T) { + for _, test := range database.Tests { + listener := bufconn.Listen(bufSize) + server := grpc.NewServer() + proto.RegisterDatabaseServer(server, NewServer(memdb.New())) + go func() { + if err := server.Serve(listener); err != nil { + log.Fatalf("Server exited with error: %v", err) + } + }() + + dialer := grpc.WithContextDialer( + func(context.Context, string) (net.Conn, error) { + return listener.Dial() + }) + + ctx := context.Background() + conn, err := grpc.DialContext(ctx, "", dialer, grpc.WithInsecure()) + if err != nil { + t.Fatalf("Failed to dial: %s", err) + } + + db := NewClient(proto.NewDatabaseClient(conn)) + test(t, db) + conn.Close() + } +} diff --git a/database/rpcdb/proto/db.pb.go b/database/rpcdb/proto/db.pb.go new file mode 100644 index 0000000..452bad7 --- /dev/null +++ b/database/rpcdb/proto/db.pb.go @@ -0,0 +1,1526 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: db.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type HasRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HasRequest) Reset() { *m = HasRequest{} } +func (m *HasRequest) String() string { return proto.CompactTextString(m) } +func (*HasRequest) ProtoMessage() {} +func (*HasRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{0} +} + +func (m *HasRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HasRequest.Unmarshal(m, b) +} +func (m *HasRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HasRequest.Marshal(b, m, deterministic) +} +func (m *HasRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HasRequest.Merge(m, src) +} +func (m *HasRequest) XXX_Size() int { + return xxx_messageInfo_HasRequest.Size(m) +} +func (m *HasRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HasRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HasRequest proto.InternalMessageInfo + +func (m *HasRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type HasResponse struct { + Has bool `protobuf:"varint,1,opt,name=has,proto3" json:"has,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HasResponse) Reset() { *m = HasResponse{} } +func (m *HasResponse) String() string { return proto.CompactTextString(m) } +func (*HasResponse) ProtoMessage() {} +func (*HasResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{1} +} + +func (m *HasResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HasResponse.Unmarshal(m, b) +} +func (m *HasResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return 
xxx_messageInfo_HasResponse.Marshal(b, m, deterministic) +} +func (m *HasResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HasResponse.Merge(m, src) +} +func (m *HasResponse) XXX_Size() int { + return xxx_messageInfo_HasResponse.Size(m) +} +func (m *HasResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HasResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HasResponse proto.InternalMessageInfo + +func (m *HasResponse) GetHas() bool { + if m != nil { + return m.Has + } + return false +} + +type GetRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetRequest) Reset() { *m = GetRequest{} } +func (m *GetRequest) String() string { return proto.CompactTextString(m) } +func (*GetRequest) ProtoMessage() {} +func (*GetRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{2} +} + +func (m *GetRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetRequest.Unmarshal(m, b) +} +func (m *GetRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetRequest.Marshal(b, m, deterministic) +} +func (m *GetRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetRequest.Merge(m, src) +} +func (m *GetRequest) XXX_Size() int { + return xxx_messageInfo_GetRequest.Size(m) +} +func (m *GetRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetRequest proto.InternalMessageInfo + +func (m *GetRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type GetResponse struct { + Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetResponse) Reset() { *m = GetResponse{} } +func (m *GetResponse) 
String() string { return proto.CompactTextString(m) } +func (*GetResponse) ProtoMessage() {} +func (*GetResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{3} +} + +func (m *GetResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetResponse.Unmarshal(m, b) +} +func (m *GetResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetResponse.Marshal(b, m, deterministic) +} +func (m *GetResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetResponse.Merge(m, src) +} +func (m *GetResponse) XXX_Size() int { + return xxx_messageInfo_GetResponse.Size(m) +} +func (m *GetResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetResponse proto.InternalMessageInfo + +func (m *GetResponse) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type PutRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutRequest) Reset() { *m = PutRequest{} } +func (m *PutRequest) String() string { return proto.CompactTextString(m) } +func (*PutRequest) ProtoMessage() {} +func (*PutRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{4} +} + +func (m *PutRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PutRequest.Unmarshal(m, b) +} +func (m *PutRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PutRequest.Marshal(b, m, deterministic) +} +func (m *PutRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutRequest.Merge(m, src) +} +func (m *PutRequest) XXX_Size() int { + return xxx_messageInfo_PutRequest.Size(m) +} +func (m *PutRequest) XXX_DiscardUnknown() { + 
xxx_messageInfo_PutRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_PutRequest proto.InternalMessageInfo + +func (m *PutRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *PutRequest) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type PutResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *PutResponse) Reset() { *m = PutResponse{} } +func (m *PutResponse) String() string { return proto.CompactTextString(m) } +func (*PutResponse) ProtoMessage() {} +func (*PutResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{5} +} + +func (m *PutResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_PutResponse.Unmarshal(m, b) +} +func (m *PutResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_PutResponse.Marshal(b, m, deterministic) +} +func (m *PutResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_PutResponse.Merge(m, src) +} +func (m *PutResponse) XXX_Size() int { + return xxx_messageInfo_PutResponse.Size(m) +} +func (m *PutResponse) XXX_DiscardUnknown() { + xxx_messageInfo_PutResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_PutResponse proto.InternalMessageInfo + +type DeleteRequest struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteRequest) Reset() { *m = DeleteRequest{} } +func (m *DeleteRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteRequest) ProtoMessage() {} +func (*DeleteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{6} +} + +func (m *DeleteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteRequest.Unmarshal(m, b) +} +func (m *DeleteRequest) XXX_Marshal(b 
[]byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteRequest.Marshal(b, m, deterministic) +} +func (m *DeleteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteRequest.Merge(m, src) +} +func (m *DeleteRequest) XXX_Size() int { + return xxx_messageInfo_DeleteRequest.Size(m) +} +func (m *DeleteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteRequest proto.InternalMessageInfo + +func (m *DeleteRequest) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +type DeleteResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *DeleteResponse) Reset() { *m = DeleteResponse{} } +func (m *DeleteResponse) String() string { return proto.CompactTextString(m) } +func (*DeleteResponse) ProtoMessage() {} +func (*DeleteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{7} +} + +func (m *DeleteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_DeleteResponse.Unmarshal(m, b) +} +func (m *DeleteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_DeleteResponse.Marshal(b, m, deterministic) +} +func (m *DeleteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_DeleteResponse.Merge(m, src) +} +func (m *DeleteResponse) XXX_Size() int { + return xxx_messageInfo_DeleteResponse.Size(m) +} +func (m *DeleteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_DeleteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_DeleteResponse proto.InternalMessageInfo + +type StatRequest struct { + Property string `protobuf:"bytes,1,opt,name=property,proto3" json:"property,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatRequest) Reset() { *m = StatRequest{} } +func (m *StatRequest) String() string { return 
proto.CompactTextString(m) } +func (*StatRequest) ProtoMessage() {} +func (*StatRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{8} +} + +func (m *StatRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatRequest.Unmarshal(m, b) +} +func (m *StatRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatRequest.Marshal(b, m, deterministic) +} +func (m *StatRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatRequest.Merge(m, src) +} +func (m *StatRequest) XXX_Size() int { + return xxx_messageInfo_StatRequest.Size(m) +} +func (m *StatRequest) XXX_DiscardUnknown() { + xxx_messageInfo_StatRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_StatRequest proto.InternalMessageInfo + +func (m *StatRequest) GetProperty() string { + if m != nil { + return m.Property + } + return "" +} + +type StatResponse struct { + Stat string `protobuf:"bytes,1,opt,name=stat,proto3" json:"stat,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StatResponse) Reset() { *m = StatResponse{} } +func (m *StatResponse) String() string { return proto.CompactTextString(m) } +func (*StatResponse) ProtoMessage() {} +func (*StatResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{9} +} + +func (m *StatResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StatResponse.Unmarshal(m, b) +} +func (m *StatResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StatResponse.Marshal(b, m, deterministic) +} +func (m *StatResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_StatResponse.Merge(m, src) +} +func (m *StatResponse) XXX_Size() int { + return xxx_messageInfo_StatResponse.Size(m) +} +func (m *StatResponse) XXX_DiscardUnknown() { + xxx_messageInfo_StatResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_StatResponse 
proto.InternalMessageInfo + +func (m *StatResponse) GetStat() string { + if m != nil { + return m.Stat + } + return "" +} + +type CompactRequest struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + Limit []byte `protobuf:"bytes,2,opt,name=limit,proto3" json:"limit,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactRequest) Reset() { *m = CompactRequest{} } +func (m *CompactRequest) String() string { return proto.CompactTextString(m) } +func (*CompactRequest) ProtoMessage() {} +func (*CompactRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{10} +} + +func (m *CompactRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompactRequest.Unmarshal(m, b) +} +func (m *CompactRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompactRequest.Marshal(b, m, deterministic) +} +func (m *CompactRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompactRequest.Merge(m, src) +} +func (m *CompactRequest) XXX_Size() int { + return xxx_messageInfo_CompactRequest.Size(m) +} +func (m *CompactRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CompactRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CompactRequest proto.InternalMessageInfo + +func (m *CompactRequest) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *CompactRequest) GetLimit() []byte { + if m != nil { + return m.Limit + } + return nil +} + +type CompactResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CompactResponse) Reset() { *m = CompactResponse{} } +func (m *CompactResponse) String() string { return proto.CompactTextString(m) } +func (*CompactResponse) ProtoMessage() {} +func (*CompactResponse) Descriptor() ([]byte, []int) { + return 
fileDescriptor_8817812184a13374, []int{11} +} + +func (m *CompactResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CompactResponse.Unmarshal(m, b) +} +func (m *CompactResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CompactResponse.Marshal(b, m, deterministic) +} +func (m *CompactResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CompactResponse.Merge(m, src) +} +func (m *CompactResponse) XXX_Size() int { + return xxx_messageInfo_CompactResponse.Size(m) +} +func (m *CompactResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CompactResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CompactResponse proto.InternalMessageInfo + +type CloseRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{12} +} + +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (m *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(m, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +type CloseResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseResponse) Reset() { *m = CloseResponse{} } +func (m *CloseResponse) String() 
string { return proto.CompactTextString(m) } +func (*CloseResponse) ProtoMessage() {} +func (*CloseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{13} +} + +func (m *CloseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseResponse.Unmarshal(m, b) +} +func (m *CloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseResponse.Marshal(b, m, deterministic) +} +func (m *CloseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseResponse.Merge(m, src) +} +func (m *CloseResponse) XXX_Size() int { + return xxx_messageInfo_CloseResponse.Size(m) +} +func (m *CloseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CloseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseResponse proto.InternalMessageInfo + +type WriteBatchRequest struct { + Puts []*PutRequest `protobuf:"bytes,1,rep,name=puts,proto3" json:"puts,omitempty"` + Deletes []*DeleteRequest `protobuf:"bytes,2,rep,name=deletes,proto3" json:"deletes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteBatchRequest) Reset() { *m = WriteBatchRequest{} } +func (m *WriteBatchRequest) String() string { return proto.CompactTextString(m) } +func (*WriteBatchRequest) ProtoMessage() {} +func (*WriteBatchRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{14} +} + +func (m *WriteBatchRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteBatchRequest.Unmarshal(m, b) +} +func (m *WriteBatchRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteBatchRequest.Marshal(b, m, deterministic) +} +func (m *WriteBatchRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteBatchRequest.Merge(m, src) +} +func (m *WriteBatchRequest) XXX_Size() int { + return xxx_messageInfo_WriteBatchRequest.Size(m) +} +func (m 
*WriteBatchRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteBatchRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteBatchRequest proto.InternalMessageInfo + +func (m *WriteBatchRequest) GetPuts() []*PutRequest { + if m != nil { + return m.Puts + } + return nil +} + +func (m *WriteBatchRequest) GetDeletes() []*DeleteRequest { + if m != nil { + return m.Deletes + } + return nil +} + +type WriteBatchResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteBatchResponse) Reset() { *m = WriteBatchResponse{} } +func (m *WriteBatchResponse) String() string { return proto.CompactTextString(m) } +func (*WriteBatchResponse) ProtoMessage() {} +func (*WriteBatchResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{15} +} + +func (m *WriteBatchResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteBatchResponse.Unmarshal(m, b) +} +func (m *WriteBatchResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteBatchResponse.Marshal(b, m, deterministic) +} +func (m *WriteBatchResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteBatchResponse.Merge(m, src) +} +func (m *WriteBatchResponse) XXX_Size() int { + return xxx_messageInfo_WriteBatchResponse.Size(m) +} +func (m *WriteBatchResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteBatchResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteBatchResponse proto.InternalMessageInfo + +type NewIteratorRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewIteratorRequest) Reset() { *m = NewIteratorRequest{} } +func (m *NewIteratorRequest) String() string { return proto.CompactTextString(m) } +func (*NewIteratorRequest) ProtoMessage() {} +func (*NewIteratorRequest) Descriptor() ([]byte, []int) { + return 
fileDescriptor_8817812184a13374, []int{16} +} + +func (m *NewIteratorRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewIteratorRequest.Unmarshal(m, b) +} +func (m *NewIteratorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewIteratorRequest.Marshal(b, m, deterministic) +} +func (m *NewIteratorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewIteratorRequest.Merge(m, src) +} +func (m *NewIteratorRequest) XXX_Size() int { + return xxx_messageInfo_NewIteratorRequest.Size(m) +} +func (m *NewIteratorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NewIteratorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NewIteratorRequest proto.InternalMessageInfo + +type NewIteratorWithStartAndPrefixRequest struct { + Start []byte `protobuf:"bytes,1,opt,name=start,proto3" json:"start,omitempty"` + Prefix []byte `protobuf:"bytes,2,opt,name=prefix,proto3" json:"prefix,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewIteratorWithStartAndPrefixRequest) Reset() { *m = NewIteratorWithStartAndPrefixRequest{} } +func (m *NewIteratorWithStartAndPrefixRequest) String() string { return proto.CompactTextString(m) } +func (*NewIteratorWithStartAndPrefixRequest) ProtoMessage() {} +func (*NewIteratorWithStartAndPrefixRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{17} +} + +func (m *NewIteratorWithStartAndPrefixRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewIteratorWithStartAndPrefixRequest.Unmarshal(m, b) +} +func (m *NewIteratorWithStartAndPrefixRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewIteratorWithStartAndPrefixRequest.Marshal(b, m, deterministic) +} +func (m *NewIteratorWithStartAndPrefixRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewIteratorWithStartAndPrefixRequest.Merge(m, src) +} +func (m 
*NewIteratorWithStartAndPrefixRequest) XXX_Size() int { + return xxx_messageInfo_NewIteratorWithStartAndPrefixRequest.Size(m) +} +func (m *NewIteratorWithStartAndPrefixRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NewIteratorWithStartAndPrefixRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NewIteratorWithStartAndPrefixRequest proto.InternalMessageInfo + +func (m *NewIteratorWithStartAndPrefixRequest) GetStart() []byte { + if m != nil { + return m.Start + } + return nil +} + +func (m *NewIteratorWithStartAndPrefixRequest) GetPrefix() []byte { + if m != nil { + return m.Prefix + } + return nil +} + +type NewIteratorWithStartAndPrefixResponse struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NewIteratorWithStartAndPrefixResponse) Reset() { *m = NewIteratorWithStartAndPrefixResponse{} } +func (m *NewIteratorWithStartAndPrefixResponse) String() string { return proto.CompactTextString(m) } +func (*NewIteratorWithStartAndPrefixResponse) ProtoMessage() {} +func (*NewIteratorWithStartAndPrefixResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{18} +} + +func (m *NewIteratorWithStartAndPrefixResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NewIteratorWithStartAndPrefixResponse.Unmarshal(m, b) +} +func (m *NewIteratorWithStartAndPrefixResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NewIteratorWithStartAndPrefixResponse.Marshal(b, m, deterministic) +} +func (m *NewIteratorWithStartAndPrefixResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NewIteratorWithStartAndPrefixResponse.Merge(m, src) +} +func (m *NewIteratorWithStartAndPrefixResponse) XXX_Size() int { + return xxx_messageInfo_NewIteratorWithStartAndPrefixResponse.Size(m) +} +func (m *NewIteratorWithStartAndPrefixResponse) XXX_DiscardUnknown() { + 
xxx_messageInfo_NewIteratorWithStartAndPrefixResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NewIteratorWithStartAndPrefixResponse proto.InternalMessageInfo + +func (m *NewIteratorWithStartAndPrefixResponse) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +type IteratorNextRequest struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IteratorNextRequest) Reset() { *m = IteratorNextRequest{} } +func (m *IteratorNextRequest) String() string { return proto.CompactTextString(m) } +func (*IteratorNextRequest) ProtoMessage() {} +func (*IteratorNextRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{19} +} + +func (m *IteratorNextRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IteratorNextRequest.Unmarshal(m, b) +} +func (m *IteratorNextRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IteratorNextRequest.Marshal(b, m, deterministic) +} +func (m *IteratorNextRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IteratorNextRequest.Merge(m, src) +} +func (m *IteratorNextRequest) XXX_Size() int { + return xxx_messageInfo_IteratorNextRequest.Size(m) +} +func (m *IteratorNextRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IteratorNextRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IteratorNextRequest proto.InternalMessageInfo + +func (m *IteratorNextRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +type IteratorNextResponse struct { + FoundNext bool `protobuf:"varint,1,opt,name=foundNext,proto3" json:"foundNext,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,3,opt,name=value,proto3" json:"value,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *IteratorNextResponse) Reset() { *m = IteratorNextResponse{} } +func (m *IteratorNextResponse) String() string { return proto.CompactTextString(m) } +func (*IteratorNextResponse) ProtoMessage() {} +func (*IteratorNextResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{20} +} + +func (m *IteratorNextResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IteratorNextResponse.Unmarshal(m, b) +} +func (m *IteratorNextResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IteratorNextResponse.Marshal(b, m, deterministic) +} +func (m *IteratorNextResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IteratorNextResponse.Merge(m, src) +} +func (m *IteratorNextResponse) XXX_Size() int { + return xxx_messageInfo_IteratorNextResponse.Size(m) +} +func (m *IteratorNextResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IteratorNextResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IteratorNextResponse proto.InternalMessageInfo + +func (m *IteratorNextResponse) GetFoundNext() bool { + if m != nil { + return m.FoundNext + } + return false +} + +func (m *IteratorNextResponse) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *IteratorNextResponse) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +type IteratorErrorRequest struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IteratorErrorRequest) Reset() { *m = IteratorErrorRequest{} } +func (m *IteratorErrorRequest) String() string { return proto.CompactTextString(m) } +func (*IteratorErrorRequest) ProtoMessage() {} +func (*IteratorErrorRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{21} +} + +func (m *IteratorErrorRequest) XXX_Unmarshal(b 
[]byte) error { + return xxx_messageInfo_IteratorErrorRequest.Unmarshal(m, b) +} +func (m *IteratorErrorRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IteratorErrorRequest.Marshal(b, m, deterministic) +} +func (m *IteratorErrorRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IteratorErrorRequest.Merge(m, src) +} +func (m *IteratorErrorRequest) XXX_Size() int { + return xxx_messageInfo_IteratorErrorRequest.Size(m) +} +func (m *IteratorErrorRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IteratorErrorRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IteratorErrorRequest proto.InternalMessageInfo + +func (m *IteratorErrorRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +type IteratorErrorResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IteratorErrorResponse) Reset() { *m = IteratorErrorResponse{} } +func (m *IteratorErrorResponse) String() string { return proto.CompactTextString(m) } +func (*IteratorErrorResponse) ProtoMessage() {} +func (*IteratorErrorResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{22} +} + +func (m *IteratorErrorResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IteratorErrorResponse.Unmarshal(m, b) +} +func (m *IteratorErrorResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IteratorErrorResponse.Marshal(b, m, deterministic) +} +func (m *IteratorErrorResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IteratorErrorResponse.Merge(m, src) +} +func (m *IteratorErrorResponse) XXX_Size() int { + return xxx_messageInfo_IteratorErrorResponse.Size(m) +} +func (m *IteratorErrorResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IteratorErrorResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IteratorErrorResponse proto.InternalMessageInfo + +type IteratorReleaseRequest 
struct { + Id uint64 `protobuf:"varint,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IteratorReleaseRequest) Reset() { *m = IteratorReleaseRequest{} } +func (m *IteratorReleaseRequest) String() string { return proto.CompactTextString(m) } +func (*IteratorReleaseRequest) ProtoMessage() {} +func (*IteratorReleaseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{23} +} + +func (m *IteratorReleaseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IteratorReleaseRequest.Unmarshal(m, b) +} +func (m *IteratorReleaseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IteratorReleaseRequest.Marshal(b, m, deterministic) +} +func (m *IteratorReleaseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_IteratorReleaseRequest.Merge(m, src) +} +func (m *IteratorReleaseRequest) XXX_Size() int { + return xxx_messageInfo_IteratorReleaseRequest.Size(m) +} +func (m *IteratorReleaseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_IteratorReleaseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_IteratorReleaseRequest proto.InternalMessageInfo + +func (m *IteratorReleaseRequest) GetId() uint64 { + if m != nil { + return m.Id + } + return 0 +} + +type IteratorReleaseResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *IteratorReleaseResponse) Reset() { *m = IteratorReleaseResponse{} } +func (m *IteratorReleaseResponse) String() string { return proto.CompactTextString(m) } +func (*IteratorReleaseResponse) ProtoMessage() {} +func (*IteratorReleaseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_8817812184a13374, []int{24} +} + +func (m *IteratorReleaseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_IteratorReleaseResponse.Unmarshal(m, b) +} 
+func (m *IteratorReleaseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_IteratorReleaseResponse.Marshal(b, m, deterministic) +} +func (m *IteratorReleaseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_IteratorReleaseResponse.Merge(m, src) +} +func (m *IteratorReleaseResponse) XXX_Size() int { + return xxx_messageInfo_IteratorReleaseResponse.Size(m) +} +func (m *IteratorReleaseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_IteratorReleaseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_IteratorReleaseResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*HasRequest)(nil), "proto.HasRequest") + proto.RegisterType((*HasResponse)(nil), "proto.HasResponse") + proto.RegisterType((*GetRequest)(nil), "proto.GetRequest") + proto.RegisterType((*GetResponse)(nil), "proto.GetResponse") + proto.RegisterType((*PutRequest)(nil), "proto.PutRequest") + proto.RegisterType((*PutResponse)(nil), "proto.PutResponse") + proto.RegisterType((*DeleteRequest)(nil), "proto.DeleteRequest") + proto.RegisterType((*DeleteResponse)(nil), "proto.DeleteResponse") + proto.RegisterType((*StatRequest)(nil), "proto.StatRequest") + proto.RegisterType((*StatResponse)(nil), "proto.StatResponse") + proto.RegisterType((*CompactRequest)(nil), "proto.CompactRequest") + proto.RegisterType((*CompactResponse)(nil), "proto.CompactResponse") + proto.RegisterType((*CloseRequest)(nil), "proto.CloseRequest") + proto.RegisterType((*CloseResponse)(nil), "proto.CloseResponse") + proto.RegisterType((*WriteBatchRequest)(nil), "proto.WriteBatchRequest") + proto.RegisterType((*WriteBatchResponse)(nil), "proto.WriteBatchResponse") + proto.RegisterType((*NewIteratorRequest)(nil), "proto.NewIteratorRequest") + proto.RegisterType((*NewIteratorWithStartAndPrefixRequest)(nil), "proto.NewIteratorWithStartAndPrefixRequest") + proto.RegisterType((*NewIteratorWithStartAndPrefixResponse)(nil), "proto.NewIteratorWithStartAndPrefixResponse") + 
proto.RegisterType((*IteratorNextRequest)(nil), "proto.IteratorNextRequest") + proto.RegisterType((*IteratorNextResponse)(nil), "proto.IteratorNextResponse") + proto.RegisterType((*IteratorErrorRequest)(nil), "proto.IteratorErrorRequest") + proto.RegisterType((*IteratorErrorResponse)(nil), "proto.IteratorErrorResponse") + proto.RegisterType((*IteratorReleaseRequest)(nil), "proto.IteratorReleaseRequest") + proto.RegisterType((*IteratorReleaseResponse)(nil), "proto.IteratorReleaseResponse") +} + +func init() { proto.RegisterFile("db.proto", fileDescriptor_8817812184a13374) } + +var fileDescriptor_8817812184a13374 = []byte{ + // 639 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x54, 0x5b, 0x4f, 0x13, 0x41, + 0x18, 0x0d, 0xbd, 0x51, 0x4e, 0x2f, 0xc0, 0xb0, 0xdc, 0x46, 0x40, 0x1d, 0xc5, 0x60, 0x6c, 0x30, + 0x41, 0x8d, 0x3e, 0xf8, 0x82, 0x60, 0x40, 0x1f, 0x48, 0xb3, 0x98, 0xf0, 0xe2, 0xcb, 0x40, 0x87, + 0xb0, 0x5a, 0xd8, 0x75, 0x77, 0xaa, 0xe8, 0xaf, 0xf0, 0x27, 0x9b, 0x9d, 0xfd, 0x66, 0x6f, 0x6d, + 0x91, 0xa7, 0xee, 0x7c, 0xdf, 0x39, 0x67, 0x66, 0xbe, 0x39, 0xa7, 0x68, 0x0e, 0xce, 0x77, 0x83, + 0xd0, 0xd7, 0x3e, 0xab, 0x9b, 0x1f, 0xb1, 0x05, 0x1c, 0xcb, 0xc8, 0x55, 0x3f, 0x46, 0x2a, 0xd2, + 0x6c, 0x01, 0xd5, 0xef, 0xea, 0xf7, 0xda, 0xcc, 0xa3, 0x99, 0x9d, 0xb6, 0x1b, 0x7f, 0x8a, 0x87, + 0x68, 0x99, 0x7e, 0x14, 0xf8, 0x37, 0x91, 0x8a, 0x01, 0x57, 0x32, 0x32, 0x80, 0xa6, 0x1b, 0x7f, + 0xc6, 0x02, 0x47, 0x4a, 0x4f, 0x17, 0x78, 0x82, 0x96, 0xe9, 0x93, 0x80, 0x83, 0xfa, 0x4f, 0x39, + 0x1c, 0x29, 0x82, 0x24, 0x0b, 0xf1, 0x1a, 0xe8, 0x8f, 0xa6, 0x8b, 0x64, 0xac, 0x4a, 0x9e, 0xd5, + 0x41, 0xcb, 0xb0, 0x12, 0x69, 0xf1, 0x18, 0x9d, 0x43, 0x35, 0x54, 0x5a, 0x4d, 0x3f, 0xcc, 0x02, + 0xba, 0x16, 0x42, 0xa4, 0xe7, 0x68, 0x9d, 0x6a, 0x99, 0x6e, 0xcd, 0xd1, 0x0c, 0x42, 0x3f, 0x50, + 0xa1, 0x4e, 0x78, 0x73, 0x6e, 0xba, 0x16, 0x02, 0xed, 0x04, 0x4a, 0x57, 0x61, 0xa8, 0x45, 0x5a, + 0x6a, 0xc2, 0x99, 0x6f, 0xf1, 0x1e, 0xdd, 
0x03, 0xff, 0x3a, 0x90, 0x17, 0xa9, 0xa2, 0x83, 0x7a, + 0xa4, 0x65, 0xa8, 0xed, 0x85, 0xcd, 0x22, 0xae, 0x0e, 0xbd, 0x6b, 0x4f, 0xdb, 0x0b, 0x99, 0x85, + 0x58, 0xc4, 0x7c, 0xca, 0xa6, 0xf3, 0x75, 0xd1, 0x3e, 0x18, 0xfa, 0x91, 0xbd, 0x93, 0x98, 0x47, + 0x87, 0xd6, 0x04, 0xf8, 0x86, 0xc5, 0xb3, 0xd0, 0xd3, 0xea, 0x83, 0xd4, 0x17, 0x57, 0x76, 0xd3, + 0x6d, 0xd4, 0x82, 0x91, 0x8e, 0xdf, 0xa9, 0xba, 0xd3, 0xda, 0x5b, 0x4c, 0x9e, 0x7c, 0x37, 0x1b, + 0xb1, 0x6b, 0xda, 0x6c, 0x17, 0xb3, 0x03, 0x33, 0x8e, 0x68, 0xad, 0x62, 0x90, 0x0e, 0x21, 0x0b, + 0x73, 0x74, 0x2d, 0x48, 0x38, 0x60, 0xf9, 0xbd, 0xe8, 0x04, 0x0e, 0xd8, 0x89, 0xfa, 0xf5, 0x49, + 0xab, 0x50, 0x6a, 0x3f, 0xb4, 0x07, 0xfd, 0x82, 0xa7, 0xb9, 0xea, 0x99, 0xa7, 0xaf, 0x4e, 0xe3, + 0x9b, 0xef, 0xdf, 0x0c, 0xfa, 0xa1, 0xba, 0xf4, 0x6e, 0xef, 0x9e, 0xcf, 0x0a, 0x1a, 0x81, 0x81, + 0xd1, 0x80, 0x68, 0x25, 0xde, 0x62, 0xfb, 0x3f, 0xaa, 0xf4, 0x38, 0x5d, 0x54, 0xbc, 0x81, 0xd1, + 0xac, 0xb9, 0x15, 0x6f, 0x20, 0xb6, 0xb1, 0x64, 0x59, 0x27, 0xea, 0x36, 0x7d, 0x9d, 0x32, 0xec, + 0x2b, 0x9c, 0x22, 0x8c, 0xe4, 0x36, 0x30, 0x77, 0xe9, 0x8f, 0x6e, 0x06, 0x71, 0x91, 0xdc, 0x9f, + 0x15, 0xac, 0xd1, 0x2a, 0x13, 0x0c, 0x5b, 0xcd, 0x1b, 0xf6, 0x59, 0xa6, 0xfe, 0x31, 0x0c, 0xd3, + 0x59, 0x8d, 0x9d, 0x62, 0x15, 0xcb, 0x25, 0x1c, 0x8d, 0x7a, 0x07, 0x2b, 0xd9, 0x9c, 0x87, 0x4a, + 0xa6, 0xbe, 0x18, 0x93, 0x58, 0xc7, 0xea, 0x18, 0x32, 0x11, 0xd9, 0xfb, 0xdb, 0x40, 0xf3, 0x50, + 0x6a, 0x79, 0x2e, 0x23, 0xc5, 0x7a, 0xa8, 0x1e, 0xcb, 0x88, 0x59, 0x8b, 0x64, 0xff, 0x05, 0x9c, + 0xe5, 0x4b, 0x34, 0x86, 0x1e, 0xaa, 0x47, 0x4a, 0xa7, 0xe8, 0x2c, 0xf8, 0x29, 0x3a, 0x9f, 0xf5, + 0x1e, 0xaa, 0xfd, 0x51, 0x86, 0xce, 0xec, 0x97, 0xa2, 0x73, 0xf1, 0x65, 0x6f, 0xd0, 0x48, 0x6c, + 0xc7, 0x26, 0xba, 0x90, 0x2f, 0x97, 0xaa, 0x44, 0x7b, 0x89, 0x5a, 0x9c, 0x4a, 0x66, 0x25, 0x73, + 0x69, 0xe6, 0x4b, 0x85, 0x1a, 0x11, 0xde, 0x61, 0x96, 0x42, 0xc6, 0xac, 0x64, 0x31, 0xb2, 0x7c, + 0xa5, 0x5c, 0x26, 0xe6, 0x1e, 0xea, 0x26, 0x7b, 0xcc, 0xea, 0xe6, 0x93, 0xc9, 
0x9d, 0x62, 0x91, + 0x38, 0xfb, 0x40, 0x16, 0x19, 0xb6, 0x46, 0x98, 0xb1, 0xc4, 0xf2, 0xf5, 0x09, 0x1d, 0x92, 0xf8, + 0x83, 0xcd, 0x3b, 0x3d, 0xcf, 0x5e, 0x10, 0xf7, 0x3e, 0x79, 0xe3, 0xbd, 0xfb, 0x81, 0x69, 0xef, + 0x23, 0xb4, 0xf3, 0x79, 0x60, 0x9c, 0xd8, 0x13, 0xb2, 0xc4, 0x1f, 0x4c, 0xec, 0x91, 0xd0, 0x67, + 0x74, 0x0a, 0x96, 0x66, 0x65, 0x74, 0x3e, 0x10, 0x7c, 0x63, 0x72, 0x93, 0xb4, 0xfa, 0x98, 0x2f, + 0x79, 0x9b, 0x6d, 0x96, 0x08, 0xc5, 0x74, 0xf0, 0xad, 0x69, 0xed, 0x44, 0xf1, 0xbc, 0x61, 0xda, + 0xaf, 0xfe, 0x05, 0x00, 0x00, 0xff, 0xff, 0x84, 0x99, 0x7f, 0xf3, 0x1f, 0x07, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// DatabaseClient is the client API for Database service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type DatabaseClient interface { + Has(ctx context.Context, in *HasRequest, opts ...grpc.CallOption) (*HasResponse, error) + Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) + Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) + Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) + Stat(ctx context.Context, in *StatRequest, opts ...grpc.CallOption) (*StatResponse, error) + Compact(ctx context.Context, in *CompactRequest, opts ...grpc.CallOption) (*CompactResponse, error) + Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) + WriteBatch(ctx context.Context, in *WriteBatchRequest, opts ...grpc.CallOption) (*WriteBatchResponse, error) + NewIteratorWithStartAndPrefix(ctx context.Context, in *NewIteratorWithStartAndPrefixRequest, opts ...grpc.CallOption) (*NewIteratorWithStartAndPrefixResponse, error) + IteratorNext(ctx context.Context, in *IteratorNextRequest, opts ...grpc.CallOption) (*IteratorNextResponse, error) + IteratorError(ctx context.Context, in *IteratorErrorRequest, opts ...grpc.CallOption) (*IteratorErrorResponse, error) + IteratorRelease(ctx context.Context, in *IteratorReleaseRequest, opts ...grpc.CallOption) (*IteratorReleaseResponse, error) +} + +type databaseClient struct { + cc grpc.ClientConnInterface +} + +func NewDatabaseClient(cc grpc.ClientConnInterface) DatabaseClient { + return &databaseClient{cc} +} + +func (c *databaseClient) Has(ctx context.Context, in *HasRequest, opts ...grpc.CallOption) (*HasResponse, error) { + out := new(HasResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Has", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*GetResponse, error) { + out := new(GetResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Get", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Put(ctx context.Context, in *PutRequest, opts ...grpc.CallOption) (*PutResponse, error) { + out := new(PutResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Put", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Delete(ctx context.Context, in *DeleteRequest, opts ...grpc.CallOption) (*DeleteResponse, error) { + out := new(DeleteResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Delete", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Stat(ctx context.Context, in *StatRequest, opts ...grpc.CallOption) (*StatResponse, error) { + out := new(StatResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Stat", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Compact(ctx context.Context, in *CompactRequest, opts ...grpc.CallOption) (*CompactResponse, error) { + out := new(CompactResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Compact", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) { + out := new(CloseResponse) + err := c.cc.Invoke(ctx, "/proto.Database/Close", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) WriteBatch(ctx context.Context, in *WriteBatchRequest, opts ...grpc.CallOption) (*WriteBatchResponse, error) { + out := new(WriteBatchResponse) + err := c.cc.Invoke(ctx, "/proto.Database/WriteBatch", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) NewIteratorWithStartAndPrefix(ctx context.Context, in *NewIteratorWithStartAndPrefixRequest, opts ...grpc.CallOption) (*NewIteratorWithStartAndPrefixResponse, error) { + out := new(NewIteratorWithStartAndPrefixResponse) + err := c.cc.Invoke(ctx, "/proto.Database/NewIteratorWithStartAndPrefix", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) IteratorNext(ctx context.Context, in *IteratorNextRequest, opts ...grpc.CallOption) (*IteratorNextResponse, error) { + out := new(IteratorNextResponse) + err := c.cc.Invoke(ctx, "/proto.Database/IteratorNext", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) IteratorError(ctx context.Context, in *IteratorErrorRequest, opts ...grpc.CallOption) (*IteratorErrorResponse, error) { + out := new(IteratorErrorResponse) + err := c.cc.Invoke(ctx, "/proto.Database/IteratorError", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *databaseClient) IteratorRelease(ctx context.Context, in *IteratorReleaseRequest, opts ...grpc.CallOption) (*IteratorReleaseResponse, error) { + out := new(IteratorReleaseResponse) + err := c.cc.Invoke(ctx, "/proto.Database/IteratorRelease", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// DatabaseServer is the server API for Database service. 
+type DatabaseServer interface { + Has(context.Context, *HasRequest) (*HasResponse, error) + Get(context.Context, *GetRequest) (*GetResponse, error) + Put(context.Context, *PutRequest) (*PutResponse, error) + Delete(context.Context, *DeleteRequest) (*DeleteResponse, error) + Stat(context.Context, *StatRequest) (*StatResponse, error) + Compact(context.Context, *CompactRequest) (*CompactResponse, error) + Close(context.Context, *CloseRequest) (*CloseResponse, error) + WriteBatch(context.Context, *WriteBatchRequest) (*WriteBatchResponse, error) + NewIteratorWithStartAndPrefix(context.Context, *NewIteratorWithStartAndPrefixRequest) (*NewIteratorWithStartAndPrefixResponse, error) + IteratorNext(context.Context, *IteratorNextRequest) (*IteratorNextResponse, error) + IteratorError(context.Context, *IteratorErrorRequest) (*IteratorErrorResponse, error) + IteratorRelease(context.Context, *IteratorReleaseRequest) (*IteratorReleaseResponse, error) +} + +// UnimplementedDatabaseServer can be embedded to have forward compatible implementations. 
+type UnimplementedDatabaseServer struct { +} + +func (*UnimplementedDatabaseServer) Has(ctx context.Context, req *HasRequest) (*HasResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Has not implemented") +} +func (*UnimplementedDatabaseServer) Get(ctx context.Context, req *GetRequest) (*GetResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Get not implemented") +} +func (*UnimplementedDatabaseServer) Put(ctx context.Context, req *PutRequest) (*PutResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Put not implemented") +} +func (*UnimplementedDatabaseServer) Delete(ctx context.Context, req *DeleteRequest) (*DeleteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Delete not implemented") +} +func (*UnimplementedDatabaseServer) Stat(ctx context.Context, req *StatRequest) (*StatResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Stat not implemented") +} +func (*UnimplementedDatabaseServer) Compact(ctx context.Context, req *CompactRequest) (*CompactResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Compact not implemented") +} +func (*UnimplementedDatabaseServer) Close(ctx context.Context, req *CloseRequest) (*CloseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") +} +func (*UnimplementedDatabaseServer) WriteBatch(ctx context.Context, req *WriteBatchRequest) (*WriteBatchResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WriteBatch not implemented") +} +func (*UnimplementedDatabaseServer) NewIteratorWithStartAndPrefix(ctx context.Context, req *NewIteratorWithStartAndPrefixRequest) (*NewIteratorWithStartAndPrefixResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method NewIteratorWithStartAndPrefix not implemented") +} +func (*UnimplementedDatabaseServer) IteratorNext(ctx context.Context, req *IteratorNextRequest) 
(*IteratorNextResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IteratorNext not implemented") +} +func (*UnimplementedDatabaseServer) IteratorError(ctx context.Context, req *IteratorErrorRequest) (*IteratorErrorResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IteratorError not implemented") +} +func (*UnimplementedDatabaseServer) IteratorRelease(ctx context.Context, req *IteratorReleaseRequest) (*IteratorReleaseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method IteratorRelease not implemented") +} + +func RegisterDatabaseServer(s *grpc.Server, srv DatabaseServer) { + s.RegisterService(&_Database_serviceDesc, srv) +} + +func _Database_Has_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HasRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Has(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Has", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Has(ctx, req.(*HasRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Get(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Get", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Get(ctx, req.(*GetRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Put_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(PutRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Put(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Put", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Put(ctx, req.(*PutRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Delete_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Delete(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Delete", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Delete(ctx, req.(*DeleteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Stat_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(StatRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Stat(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Stat", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Stat(ctx, req.(*StatRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Compact_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CompactRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return 
srv.(DatabaseServer).Compact(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Compact", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Compact(ctx, req.(*CompactRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).Close(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/Close", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).Close(ctx, req.(*CloseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_WriteBatch_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteBatchRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).WriteBatch(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/WriteBatch", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).WriteBatch(ctx, req.(*WriteBatchRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_NewIteratorWithStartAndPrefix_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NewIteratorWithStartAndPrefixRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).NewIteratorWithStartAndPrefix(ctx, in) + } + info := &grpc.UnaryServerInfo{ + 
Server: srv, + FullMethod: "/proto.Database/NewIteratorWithStartAndPrefix", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).NewIteratorWithStartAndPrefix(ctx, req.(*NewIteratorWithStartAndPrefixRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_IteratorNext_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IteratorNextRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).IteratorNext(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/IteratorNext", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).IteratorNext(ctx, req.(*IteratorNextRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_IteratorError_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IteratorErrorRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).IteratorError(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/IteratorError", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).IteratorError(ctx, req.(*IteratorErrorRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Database_IteratorRelease_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(IteratorReleaseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DatabaseServer).IteratorRelease(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Database/IteratorRelease", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DatabaseServer).IteratorRelease(ctx, req.(*IteratorReleaseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Database_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Database", + HandlerType: (*DatabaseServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Has", + Handler: _Database_Has_Handler, + }, + { + MethodName: "Get", + Handler: _Database_Get_Handler, + }, + { + MethodName: "Put", + Handler: _Database_Put_Handler, + }, + { + MethodName: "Delete", + Handler: _Database_Delete_Handler, + }, + { + MethodName: "Stat", + Handler: _Database_Stat_Handler, + }, + { + MethodName: "Compact", + Handler: _Database_Compact_Handler, + }, + { + MethodName: "Close", + Handler: _Database_Close_Handler, + }, + { + MethodName: "WriteBatch", + Handler: _Database_WriteBatch_Handler, + }, + { + MethodName: "NewIteratorWithStartAndPrefix", + Handler: _Database_NewIteratorWithStartAndPrefix_Handler, + }, + { + MethodName: "IteratorNext", + Handler: _Database_IteratorNext_Handler, + }, + { + MethodName: "IteratorError", + Handler: _Database_IteratorError_Handler, + }, + { + MethodName: "IteratorRelease", + Handler: _Database_IteratorRelease_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "db.proto", +} diff --git a/database/rpcdb/proto/db.proto b/database/rpcdb/proto/db.proto new file mode 100644 index 0000000..1f6f60c --- /dev/null +++ b/database/rpcdb/proto/db.proto @@ -0,0 +1,108 @@ +syntax = "proto3"; +package proto; + +message HasRequest { + bytes key = 1; +} + +message HasResponse { + bool has = 1; +} + +message GetRequest { + bytes key = 1; +} + +message GetResponse { + bytes value = 1; +} + +message PutRequest { + bytes key = 1; + bytes value = 2; +} + +message PutResponse {} + +message DeleteRequest { + bytes key = 1; +} + +message DeleteResponse 
{} + +message StatRequest { + string property = 1; +} + +message StatResponse { + string stat = 1; +} + +message CompactRequest { + bytes start = 1; + bytes limit = 2; +} + +message CompactResponse {} + +message CloseRequest {} + +message CloseResponse {} + +message WriteBatchRequest { + repeated PutRequest puts = 1; + repeated DeleteRequest deletes = 2; +} + +message WriteBatchResponse {} + +message NewIteratorRequest {} + +message NewIteratorWithStartAndPrefixRequest { + bytes start = 1; + bytes prefix = 2; +} + +message NewIteratorWithStartAndPrefixResponse { + uint64 id = 1; +} + +message IteratorNextRequest { + uint64 id = 1; +} + +message IteratorNextResponse { + bool foundNext = 1; + bytes key = 2; + bytes value = 3; +} + +message IteratorErrorRequest { + uint64 id = 1; +} + +message IteratorErrorResponse {} + +message IteratorReleaseRequest { + uint64 id = 1; +} + +message IteratorReleaseResponse {} + +service Database { + rpc Has(HasRequest) returns (HasResponse); + rpc Get(GetRequest) returns (GetResponse); + rpc Put(PutRequest) returns (PutResponse); + rpc Delete(DeleteRequest) returns (DeleteResponse); + rpc Stat(StatRequest) returns (StatResponse); + rpc Compact(CompactRequest) returns (CompactResponse); + rpc Close(CloseRequest) returns (CloseResponse); + + rpc WriteBatch(WriteBatchRequest) returns (WriteBatchResponse); + + rpc NewIteratorWithStartAndPrefix(NewIteratorWithStartAndPrefixRequest) returns (NewIteratorWithStartAndPrefixResponse); + + rpc IteratorNext(IteratorNextRequest) returns (IteratorNextResponse); + rpc IteratorError(IteratorErrorRequest) returns (IteratorErrorResponse); + rpc IteratorRelease(IteratorReleaseRequest) returns (IteratorReleaseResponse); +} \ No newline at end of file diff --git a/database/test_database.go b/database/test_database.go index f255d8a..2e33b25 100644 --- a/database/test_database.go +++ b/database/test_database.go @@ -16,7 +16,10 @@ var ( TestBatchPut, TestBatchDelete, TestBatchReset, + TestBatchReuse, + 
TestBatchRewrite, TestBatchReplay, + TestBatchInner, TestIterator, TestIteratorStart, TestIteratorPrefix, @@ -235,6 +238,105 @@ func TestBatchReset(t *testing.T, db Database) { } } +// TestBatchReuse ... +func TestBatchReuse(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + batch := db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if err := db.Delete(key1); err != nil { + t.Fatalf("Unexpected error on database.Delete: %s", err) + } + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key1) + } + + batch.Reset() + + if err := batch.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key1) + } else if has, err := db.Has(key2); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key2) + } else if v, err := db.Get(key2); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value2, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value2) + } +} + +// TestBatchRewrite ... 
+func TestBatchRewrite(t *testing.T, db Database) { + key := []byte("hello1") + value := []byte("world1") + + batch := db.NewBatch() + if batch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := batch.Put(key, value); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if err := db.Delete(key); err != nil { + t.Fatalf("Unexpected error on database.Delete: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has unexpectedly returned true on key %s", key) + } + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key) + } else if v, err := db.Get(key); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value) + } +} + // TestBatchReplay ... func TestBatchReplay(t *testing.T, db Database) { key1 := []byte("hello1") @@ -299,6 +401,62 @@ func TestBatchReplay(t *testing.T, db Database) { } } +// TestBatchInner ... 
+func TestBatchInner(t *testing.T, db Database) { + key1 := []byte("hello1") + value1 := []byte("world1") + + key2 := []byte("hello2") + value2 := []byte("world2") + + firstBatch := db.NewBatch() + if firstBatch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := firstBatch.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + secondBatch := db.NewBatch() + if secondBatch == nil { + t.Fatalf("db.NewBatch returned nil") + } + + if err := secondBatch.Put(key2, value2); err != nil { + t.Fatalf("Unexpected error on batch.Put: %s", err) + } + + innerFirstBatch := firstBatch.Inner() + innerSecondBatch := secondBatch.Inner() + + if err := innerFirstBatch.Replay(innerSecondBatch); err != nil { + t.Fatalf("Unexpected error on batch.Replay: %s", err) + } + + if err := innerSecondBatch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key1) + } else if v, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value1, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value1) + } else if has, err := db.Has(key2); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if !has { + t.Fatalf("db.Has unexpectedly returned false on key %s", key2) + } else if v, err := db.Get(key2); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value2, v) { + t.Fatalf("db.Get: Returned: 0x%x ; Expected: 0x%x", v, value2) + } +} + // TestIterator ... 
func TestIterator(t *testing.T, db Database) { key1 := []byte("hello1") diff --git a/database/versiondb/versiondb.go b/database/versiondb/db.go similarity index 91% rename from database/versiondb/versiondb.go rename to database/versiondb/db.go index a475e76..6f42131 100644 --- a/database/versiondb/versiondb.go +++ b/database/versiondb/db.go @@ -184,29 +184,55 @@ func (db *Database) Commit() error { db.lock.Lock() defer db.lock.Unlock() - if db.mem == nil { - return database.ErrClosed + batch, err := db.commitBatch() + if err != nil { + return err } - if len(db.mem) == 0 { - return nil + if err := batch.Write(); err != nil { + return err + } + db.abort() + return nil +} + +// Abort all changes to the underlying database +func (db *Database) Abort() { + db.lock.Lock() + defer db.lock.Unlock() + + db.abort() +} + +func (db *Database) abort() { db.mem = make(map[string]valueDelete, memdb.DefaultSize) } + +// CommitBatch returns a batch that will commit all pending writes to the underlying database +func (db *Database) CommitBatch() (database.Batch, error) { + db.lock.Lock() + defer db.lock.Unlock() + + return db.commitBatch() +} + +func (db *Database) commitBatch() (database.Batch, error) { + if db.mem == nil { + return nil, database.ErrClosed } batch := db.db.NewBatch() for key, value := range db.mem { if value.delete { if err := batch.Delete([]byte(key)); err != nil { - return err + return nil, err } } else if err := batch.Put([]byte(key), value.value); err != nil { - return err + return nil, err } } if err := batch.Write(); err != nil { - return err + return nil, err } - db.mem = make(map[string]valueDelete, memdb.DefaultSize) - return nil + return batch, nil } // Close implements the database.Database interface @@ -289,6 +315,9 @@ func (b *batch) Replay(w database.KeyValueWriter) error { return nil } +// Inner returns itself +func (b *batch) Inner() database.Batch { return b } + // iterator walks over both the in memory database and the underlying database // at 
the same time. type iterator struct { diff --git a/database/versiondb/versiondb_test.go b/database/versiondb/db_test.go similarity index 84% rename from database/versiondb/versiondb_test.go rename to database/versiondb/db_test.go index ab3a9bb..70cf8ff 100644 --- a/database/versiondb/versiondb_test.go +++ b/database/versiondb/db_test.go @@ -256,6 +256,72 @@ func TestCommitClosedDelete(t *testing.T) { } } +func TestAbort(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + if value, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } + + db.Abort() + + if has, err := db.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } else if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("db.Has Returned: %v ; Expected: %v", has, false) + } +} + +func TestCommitBatch(t *testing.T) { + baseDB := memdb.New() + db := New(baseDB) + + key1 := []byte("hello1") + value1 := []byte("world1") + + if err := db.Put(key1, value1); err != nil { + t.Fatalf("Unexpected error on db.Put: %s", err) + } + + batch, err := db.CommitBatch() + if err != nil { + t.Fatalf("Unexpected error on db.CommitBatch: %s", err) + } + db.Abort() + + if err := batch.Write(); err != nil { + t.Fatalf("Unexpected error on batch.Write: %s", err) + } + + if value, err := db.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } 
else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } else if value, err := baseDB.Get(key1); err != nil { + t.Fatalf("Unexpected error on db.Get: %s", err) + } else if !bytes.Equal(value, value1) { + t.Fatalf("db.Get Returned: 0x%x ; Expected: 0x%x", value, value1) + } +} + func TestSetDatabase(t *testing.T) { baseDB := memdb.New() newDB := memdb.New() diff --git a/genesis/aliases.go b/genesis/aliases.go new file mode 100644 index 0000000..80e5dcd --- /dev/null +++ b/genesis/aliases.go @@ -0,0 +1,77 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package genesis + +import ( + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/nftfx" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/propertyfx" + "github.com/ava-labs/gecko/vms/secp256k1fx" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/vms/spdagvm" + "github.com/ava-labs/gecko/vms/timestampvm" +) + +// Aliases returns the default aliases based on the network ID +func Aliases(networkID uint32) (map[string][]string, map[[32]byte][]string, map[[32]byte][]string, error) { + generalAliases := map[string][]string{ + "vm/" + platformvm.ID.String(): []string{"vm/platform"}, + "vm/" + avm.ID.String(): []string{"vm/avm"}, + "vm/" + EVMID.String(): []string{"vm/evm"}, + "vm/" + spdagvm.ID.String(): []string{"vm/spdag"}, + "vm/" + spchainvm.ID.String(): []string{"vm/spchain"}, + "vm/" + timestampvm.ID.String(): []string{"vm/timestamp"}, + "bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"}, + } + chainAliases := map[[32]byte][]string{ + ids.Empty.Key(): []string{"P", "platform"}, + } + vmAliases := map[[32]byte][]string{ + platformvm.ID.Key(): []string{"platform"}, + avm.ID.Key(): []string{"avm"}, + EVMID.Key(): []string{"evm"}, + spdagvm.ID.Key(): []string{"spdag"}, + 
spchainvm.ID.Key(): []string{"spchain"}, + timestampvm.ID.Key(): []string{"timestamp"}, + secp256k1fx.ID.Key(): []string{"secp256k1fx"}, + nftfx.ID.Key(): []string{"nftfx"}, + propertyfx.ID.Key(): []string{"propertyfx"}, + } + + genesisBytes, err := Genesis(networkID) + if err != nil { + return nil, nil, nil, err + } + + genesis := &platformvm.Genesis{} // TODO let's not re-create genesis to do aliasing + if err := platformvm.Codec.Unmarshal(genesisBytes, genesis); err != nil { + return nil, nil, nil, err + } + if err := genesis.Initialize(); err != nil { + return nil, nil, nil, err + } + + for _, chain := range genesis.Chains { + switch { + case avm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"X", "avm", "bc/X", "bc/avm"} + chainAliases[chain.ID().Key()] = []string{"X", "avm"} + case EVMID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"C", "evm", "bc/C", "bc/evm"} + chainAliases[chain.ID().Key()] = []string{"C", "evm"} + case spdagvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/spdag"} + chainAliases[chain.ID().Key()] = []string{"spdag"} + case spchainvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/spchain"} + chainAliases[chain.ID().Key()] = []string{"spchain"} + case timestampvm.ID.Equals(chain.VMID): + generalAliases["bc/"+chain.ID().String()] = []string{"bc/timestamp"} + chainAliases[chain.ID().Key()] = []string{"timestamp"} + } + } + return generalAliases, chainAliases, vmAliases, nil +} diff --git a/genesis/config.go b/genesis/config.go new file mode 100644 index 0000000..ceab4d5 --- /dev/null +++ b/genesis/config.go @@ -0,0 +1,285 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package genesis + +import ( + "github.com/ava-labs/gecko/ids" +) + +// Note that since an AVA network has exactly one Platform Chain, +// and the Platform Chain defines the genesis state of the network +// (who is staking, which chains exist, etc.), defining the genesis +// state of the Platform Chain is the same as defining the genesis +// state of the network. + +// Config contains the genesis addresses used to construct a genesis +type Config struct { + MintAddresses, FundedAddresses, StakerIDs []string + ParsedMintAddresses, ParsedFundedAddresses, ParsedStakerIDs []ids.ShortID + EVMBytes []byte +} + +func (c *Config) init() error { + c.ParsedMintAddresses = nil + for _, addrStr := range c.MintAddresses { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return err + } + c.ParsedMintAddresses = append(c.ParsedMintAddresses, addr) + } + c.ParsedFundedAddresses = nil + for _, addrStr := range c.FundedAddresses { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return err + } + c.ParsedFundedAddresses = append(c.ParsedFundedAddresses, addr) + } + c.ParsedStakerIDs = nil + for _, addrStr := range c.StakerIDs { + addr, err := ids.ShortFromString(addrStr) + if err != nil { + return err + } + c.ParsedStakerIDs = append(c.ParsedStakerIDs, addr) + } + return nil +} + +// Hard coded genesis constants +var ( + CascadeConfig = Config{ + MintAddresses: []string{ + "95YUFjhDG892VePMzpwKF9JzewGKvGRi3", + }, + FundedAddresses: []string{ + "9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17", + "JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn", + "7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM", + "77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6", + "4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt", + "CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi", + "4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa", + "DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9", + "ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h", + "6cesTteH62Y5mLoDBUASaBvCXuL2AthL", + }, + StakerIDs: []string{ + "NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co", + "CMsa8cMw4eib1Hb8GG4xiUKAq5eE1BwUX", + 
"DsMP6jLhi1MkDVc3qx9xx9AAZWx8e87Jd", + "N86eodVZja3GEyZJTo3DFUPGpxEEvjGHs", + "EkKeGSLUbHrrtuayBtbwgWDRUiAziC3ao", + }, + EVMBytes: []byte{ + 0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31, + 0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65, + 0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, + 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, + 0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38, + 0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62, + 0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32, + 0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31, + 0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35, + 0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34, + 0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66, + 0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36, + 0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22, + 0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75, + 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, + 0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, + 0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22, + 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69, + 0x6d, 
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, + 0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, + 0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78, + 0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22, + 0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78, + 0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x22, 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e, + 0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f, + 0x63, 0x22, 0x3a, 0x7b, 0x22, 0x35, 0x37, 0x32, + 0x66, 0x34, 0x64, 0x38, 0x30, 0x66, 0x31, 0x30, + 0x66, 0x36, 0x36, 0x33, 0x62, 0x35, 0x30, 0x34, + 0x39, 0x66, 0x37, 0x38, 0x39, 0x35, 0x34, 0x36, + 0x66, 0x32, 0x35, 0x66, 0x37, 0x30, 0x62, 0x62, + 0x36, 0x32, 0x61, 0x37, 0x66, 0x22, 0x3a, 0x7b, + 0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62, + 0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30, + 0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c, + 0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x67, 
0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, + 0x7d, + }, + } + DefaultConfig = Config{ + MintAddresses: []string{}, + FundedAddresses: []string{ + // Private key: ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN + "6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV", + }, + StakerIDs: []string{ + "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", + "MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", + "NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN", + "GWPcbFJZFfZreETSoWjPimr846mXEKCtu", + "P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5", + }, + EVMBytes: []byte{ + 0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31, + 0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65, + 0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, + 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, + 0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38, + 0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62, + 0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32, + 0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31, + 0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35, + 
0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34, + 0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66, + 0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36, + 0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22, + 0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75, + 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, + 0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, + 0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22, + 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, + 0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, + 0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78, + 0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22, + 0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78, + 0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x22, 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e, + 0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f, + 0x63, 0x22, 0x3a, 0x7b, 0x22, 0x37, 0x35, 0x31, + 0x61, 0x30, 0x62, 0x39, 0x36, 0x65, 0x31, 0x30, + 0x34, 0x32, 0x62, 0x65, 0x65, 0x37, 0x38, 0x39, + 0x34, 0x35, 0x32, 0x65, 0x63, 0x62, 0x32, 0x30, + 0x32, 0x35, 0x33, 0x66, 0x62, 0x61, 0x34, 0x30, + 0x64, 0x62, 0x65, 0x38, 0x35, 0x22, 0x3a, 0x7b, + 0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62, + 0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30, + 0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c, + 0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, + 0x7d, + }, + } +) + +// GetConfig ... 
+func GetConfig(networkID uint32) *Config { + switch networkID { + case CascadeID: + return &CascadeConfig + default: + return &DefaultConfig + } +} diff --git a/genesis/genesis.go b/genesis/genesis.go index fa34a75..4cad047 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -3,511 +3,270 @@ package genesis -// TODO: Move this to a separate repo and leave only a byte array - import ( + "errors" "fmt" - "math" - "regexp" - "strconv" - "strings" + "time" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/avm" - "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/nftfx" "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/propertyfx" + "github.com/ava-labs/gecko/vms/secp256k1fx" "github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/vms/spdagvm" "github.com/ava-labs/gecko/vms/timestampvm" ) -// Note that since an AVA network has exactly one Platform Chain, -// and the Platform Chain defines the genesis state of the network -// (who is staking, which chains exist, etc.), defining the genesis -// state of the Platform Chain is the same as defining the genesis -// state of the network. 
- -// Hardcoded network IDs -const ( - MainnetID uint32 = 1 - TestnetID uint32 = 2 - BorealisID uint32 = 2 - LocalID uint32 = 12345 - - MainnetName = "mainnet" - TestnetName = "testnet" - BorealisName = "borealis" - LocalName = "local" -) - +// ID of the EVM VM var ( - validNetworkName = regexp.MustCompile(`network-[0-9]+`) + EVMID = ids.NewID([32]byte{'e', 'v', 'm'}) ) -// Hard coded genesis constants -var ( - // Give special names to the mainnet and testnet - NetworkIDToNetworkName = map[uint32]string{ - MainnetID: MainnetName, - TestnetID: BorealisName, - LocalID: LocalName, - } - NetworkNameToNetworkID = map[string]uint32{ - MainnetName: MainnetID, - TestnetName: TestnetID, - BorealisName: BorealisID, - LocalName: LocalID, - } - Keys = []string{ - "ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN", - } - Addresses = []string{ - "6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV", - } - ParsedAddresses = []ids.ShortID{} - StakerIDs = []string{ - "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg", - "MFrZFVCXPv5iCn6M9K6XduxGTYp891xXZ", - "NFBbbJ4qCmNaCzeW7sxErhvWqvEQMnYcN", - "GWPcbFJZFfZreETSoWjPimr846mXEKCtu", - "P7oB2McjBGgW2NXXWVYjV8JEDFoW9xDE5", - } - ParsedStakerIDs = []ids.ShortID{} -) - -func init() { - for _, addrStr := range Addresses { - addr, err := ids.ShortFromString(addrStr) - if err != nil { - panic(err) - } - ParsedAddresses = append(ParsedAddresses, addr) - } - for _, stakerIDStr := range StakerIDs { - stakerID, err := ids.ShortFromString(stakerIDStr) - if err != nil { - panic(err) - } - ParsedStakerIDs = append(ParsedStakerIDs, stakerID) - } -} - -// NetworkName returns a human readable name for the network with -// ID [networkID] -func NetworkName(networkID uint32) string { - if name, exists := NetworkIDToNetworkName[networkID]; exists { - return name - } - return fmt.Sprintf("network-%d", networkID) -} - -// NetworkID returns the ID of the network with name [networkName] -func NetworkID(networkName string) (uint32, error) { - networkName = strings.ToLower(networkName) - 
if id, exists := NetworkNameToNetworkID[networkName]; exists { - return id, nil - } - - if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { - if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) - } - return uint32(id), nil - } - if validNetworkName.MatchString(networkName) { - if id, err := strconv.Atoi(networkName[8:]); err == nil { - if id > math.MaxUint32 { - return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) - } - return uint32(id), nil - } - } - - return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) -} - -// Aliases returns the default aliases based on the network ID -func Aliases(networkID uint32) (generalAliases map[string][]string, chainAliases map[[32]byte][]string, vmAliases map[[32]byte][]string) { - generalAliases = map[string][]string{ - "vm/" + platformvm.ID.String(): []string{"vm/platform"}, - "vm/" + avm.ID.String(): []string{"vm/avm"}, - "vm/" + evm.ID.String(): []string{"vm/evm"}, - "vm/" + spdagvm.ID.String(): []string{"vm/spdag"}, - "vm/" + spchainvm.ID.String(): []string{"vm/spchain"}, - "vm/" + timestampvm.ID.String(): []string{"vm/timestamp"}, - "bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"}, - } - chainAliases = map[[32]byte][]string{ - ids.Empty.Key(): []string{"P", "platform"}, - } - vmAliases = map[[32]byte][]string{ - platformvm.ID.Key(): []string{"platform"}, - avm.ID.Key(): []string{"avm"}, - evm.ID.Key(): []string{"evm"}, - spdagvm.ID.Key(): []string{"spdag"}, - spchainvm.ID.Key(): []string{"spchain"}, - timestampvm.ID.Key(): []string{"timestamp"}, - } - - genesisBytes := Genesis(networkID) - genesis := &platformvm.Genesis{} // TODO let's not re-create genesis to do aliasing - platformvm.Codec.Unmarshal(genesisBytes, genesis) // TODO check for error - genesis.Initialize() - - for _, chain := range genesis.Chains { - switch { - case avm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = 
[]string{"X", "avm", "bc/X", "bc/avm"} - chainAliases[chain.ID().Key()] = []string{"X", "avm"} - case evm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"C", "evm", "bc/C", "bc/evm"} - chainAliases[chain.ID().Key()] = []string{"C", "evm"} - case spdagvm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"bc/spdag"} - chainAliases[chain.ID().Key()] = []string{"spdag"} - case spchainvm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"bc/spchain"} - chainAliases[chain.ID().Key()] = []string{"spchain"} - case timestampvm.ID.Equals(chain.VMID): - generalAliases["bc/"+chain.ID().String()] = []string{"bc/timestamp"} - chainAliases[chain.ID().Key()] = []string{"timestamp"} - } - } - return -} - // Genesis returns the genesis data of the Platform Chain. -// Since the Platform Chain causes the creation of all other -// chains, this function returns the genesis data of the entire network. +// Since an AVA network has exactly one Platform Chain, and the Platform Chain +// defines the genesis state of the network (who is staking, which chains exist, +// etc.), defining the genesis state of the Platform Chain is the same as +// defining the genesis state of the network. // The ID of the new network is [networkID]. -func Genesis(networkID uint32) []byte { - if networkID != LocalID { - panic("unknown network ID provided") + +// FromConfig ... 
+func FromConfig(networkID uint32, config *Config) ([]byte, error) { + if err := config.init(); err != nil { + return nil, err } - return []byte{ - 0x00, 0x00, 0x00, 0x01, 0x3c, 0xb7, 0xd3, 0x84, - 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, - 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x05, 0xde, 0x31, 0xb4, 0xd8, 0xb2, 0x29, 0x91, - 0xd5, 0x1a, 0xa6, 0xaa, 0x1f, 0xc7, 0x33, 0xf2, - 0x3a, 0x85, 0x1a, 0x8c, 0x94, 0x00, 0x00, 0x12, - 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x5d, 0xbb, 0x75, 0x80, 0x00, 0x00, 0x00, - 0x00, 0x5f, 0x9c, 0xa9, 0x00, 0x00, 0x00, 0x30, - 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, - 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, - 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, 0xaa, 0x18, - 0xd3, 0x99, 0x1c, 0xf6, 0x37, 0xaa, 0x6c, 0x16, - 0x2f, 0x5e, 0x95, 0xcf, 0x16, 0x3f, 0x69, 0xcd, - 0x82, 0x91, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, - 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbb, - 0x75, 0x80, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9c, - 0xa9, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb7, - 0xd3, 0x84, 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, - 0x09, 0xf1, 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, - 0xb2, 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x05, 0xe9, 0x09, 0x4f, 0x73, 0x69, - 0x80, 0x02, 0xfd, 0x52, 0xc9, 0x08, 0x19, 0xb4, - 0x57, 0xb9, 0xfb, 0xc8, 0x66, 0xab, 0x80, 0x00, - 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x5d, 0xbb, 0x75, 0x80, 0x00, - 0x00, 0x00, 0x00, 0x5f, 0x9c, 0xa9, 0x00, 0x00, - 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, - 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, - 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x47, 0x9f, 0x66, 0xc8, 0xbe, 0x89, 0x58, 0x30, - 0x54, 0x7e, 0x70, 0xb4, 0xb2, 0x98, 0xca, 0xfd, - 0x43, 0x3d, 0xba, 0x6e, 0x00, 0x00, 0x12, 0x30, - 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x5d, 0xbb, 0x75, 0x80, 0x00, 0x00, 0x00, 0x00, - 0x5f, 0x9c, 0xa9, 0x00, 0x00, 0x00, 0x30, 0x39, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, 0x6a, - 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, 0x68, - 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0xf2, 0x9b, 0xce, - 0x5f, 0x34, 0xa7, 0x43, 0x01, 0xeb, 0x0d, 0xe7, - 0x16, 0xd5, 0x19, 0x4e, 0x4a, 0x4a, 0xea, 0x5d, - 0x7a, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x5d, 0xbb, 0x75, - 0x80, 0x00, 0x00, 0x00, 0x00, 0x5f, 0x9c, 0xa9, - 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x3c, 0xb7, 0xd3, - 0x84, 0x2e, 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, - 0xf1, 0xfe, 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, - 0x9c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x05, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, - 0x41, 0x56, 0x4d, 0x61, 0x76, 0x6d, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x73, - 0x65, 0x63, 0x70, 0x32, 0x35, 0x36, 0x6b, 0x31, - 0x66, 0x78, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x7c, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x03, 0x41, 0x56, 0x41, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x03, 0x41, 0x56, 0x41, 0x00, 0x03, 0x41, - 0x56, 0x41, 0x09, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x04, 0x00, 0x9f, 0xdf, 0x42, 0xf6, - 0xe4, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, - 0x00, 0x00, 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, - 0x8c, 0xee, 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, - 0x88, 0x4f, 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x41, 0x74, - 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x65, 0x76, - 0x6d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x02, 0xc9, 0x7b, 0x22, - 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x22, 0x3a, - 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x49, - 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31, 0x31, 0x30, - 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65, 0x73, 0x74, - 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, 0x61, 0x6f, - 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, 0x61, - 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53, 0x75, 0x70, - 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, 0x74, 0x72, - 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69, 0x70, 0x31, - 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, - 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, 0x70, 0x31, - 0x35, 0x30, 0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, - 0x22, 0x30, 0x78, 0x32, 0x30, 0x38, 0x36, 0x37, - 
0x39, 0x39, 0x61, 0x65, 0x65, 0x62, 0x65, 0x61, - 0x65, 0x31, 0x33, 0x35, 0x63, 0x32, 0x34, 0x36, - 0x63, 0x36, 0x35, 0x30, 0x32, 0x31, 0x63, 0x38, - 0x32, 0x62, 0x34, 0x65, 0x31, 0x35, 0x61, 0x32, - 0x63, 0x34, 0x35, 0x31, 0x33, 0x34, 0x30, 0x39, - 0x39, 0x33, 0x61, 0x61, 0x63, 0x66, 0x64, 0x32, - 0x37, 0x35, 0x31, 0x38, 0x38, 0x36, 0x35, 0x31, - 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x69, - 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, - 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c, 0x6f, 0x63, - 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x62, 0x79, - 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75, 0x6d, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, - 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, - 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c, 0x65, 0x42, - 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, - 0x22, 0x70, 0x65, 0x74, 0x65, 0x72, 0x73, 0x62, - 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f, 0x63, 0x6b, - 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22, 0x6e, 0x6f, - 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22, 0x30, 0x78, - 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3a, 0x22, - 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x65, 0x78, - 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x22, - 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x22, 0x2c, - 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, - 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x35, 0x66, - 0x35, 0x65, 0x31, 0x30, 0x30, 0x22, 0x2c, 0x22, - 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, 0x75, 0x6c, - 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, - 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78, 0x48, 0x61, - 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, - 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, - 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, - 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f, 0x63, 0x22, - 0x3a, 0x7b, 0x22, 0x37, 0x35, 0x31, 0x61, 0x30, - 0x62, 0x39, 0x36, 0x65, 0x31, 0x30, 0x34, 0x32, - 0x62, 0x65, 0x65, 0x37, 0x38, 0x39, 0x34, 0x35, - 0x32, 0x65, 0x63, 0x62, 0x32, 0x30, 0x32, 0x35, - 0x33, 0x66, 0x62, 0x61, 0x34, 0x30, 0x64, 0x62, - 0x65, 0x38, 0x35, 0x22, 0x3a, 0x7b, 0x22, 0x62, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x22, 0x3a, - 0x22, 0x30, 0x78, 0x33, 0x33, 0x62, 0x32, 0x65, - 0x33, 0x63, 0x39, 0x66, 0x64, 0x30, 0x38, 0x30, - 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c, 0x22, 0x6e, - 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, 0x3a, 0x22, - 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x67, 0x61, - 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, 0x3a, 0x22, - 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x70, 0x61, - 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, - 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, - 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x13, 0x53, 0x69, - 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x44, 0x41, 0x47, - 0x20, 0x50, 0x61, 0x79, 0x6d, 0x65, 0x6e, 0x74, - 0x73, 0x73, 0x70, 0x64, 0x61, 0x67, 0x76, 0x6d, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x60, 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, - 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, - 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x15, - 0x53, 0x69, 0x6d, 0x70, 0x6c, 0x65, 0x20, 0x43, - 0x68, 0x61, 0x69, 0x6e, 0x20, 0x50, 0x61, 0x79, - 0x6d, 0x65, 0x6e, 0x74, 0x73, 0x73, 0x70, 0x63, - 0x68, 0x61, 0x69, 0x6e, 0x76, 0x6d, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 
0x00, 0x00, 0x00, 0x00, 0x28, 0x00, 0x00, 0x00, - 0x01, 0x3c, 0xb7, 0xd3, 0x84, 0x2e, 0x8c, 0xee, - 0x6a, 0x0e, 0xbd, 0x09, 0xf1, 0xfe, 0x88, 0x4f, - 0x68, 0x61, 0xe1, 0xb2, 0x9c, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x12, - 0x30, 0x9c, 0xe5, 0x40, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x17, 0x53, 0x69, 0x6d, 0x70, - 0x6c, 0x65, 0x20, 0x54, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x20, 0x53, 0x65, 0x72, - 0x76, 0x65, 0x72, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x5d, 0xbb, 0x75, 0x80, - } -} + // Specify the genesis state of the AVM + avmArgs := avm.BuildGenesisArgs{} + { + ava := avm.AssetDefinition{ + Name: "AVA", + Symbol: "AVA", + Denomination: 9, + InitialState: map[string][]interface{}{}, + } -// VMGenesis ... 
-func VMGenesis(networkID uint32, vmID ids.ID) *platformvm.CreateChainTx { - genesisBytes := Genesis(networkID) - genesis := platformvm.Genesis{} - platformvm.Codec.Unmarshal(genesisBytes, &genesis) - for _, chain := range genesis.Chains { - if chain.VMID.Equals(vmID) { - return chain + if len(config.MintAddresses) > 0 { + ava.InitialState["variableCap"] = []interface{}{avm.Owners{ + Threshold: 1, + Minters: config.MintAddresses, + }} + } + for _, addr := range config.FundedAddresses { + ava.InitialState["fixedCap"] = append(ava.InitialState["fixedCap"], avm.Holder{ + Amount: json.Uint64(45 * units.MegaAva), + Address: addr, + }) + } + + avmArgs.GenesisData = map[string]avm.AssetDefinition{ + // The AVM starts out with one asset, $AVA + "AVA": ava, } } - return nil + avmReply := avm.BuildGenesisReply{} + + avmSS := avm.StaticService{} + err := avmSS.BuildGenesis(nil, &avmArgs, &avmReply) + if err != nil { + return nil, err + } + + // Specify the genesis state of the simple payments DAG + spdagvmArgs := spdagvm.BuildGenesisArgs{} + for _, addr := range config.ParsedFundedAddresses { + spdagvmArgs.Outputs = append(spdagvmArgs.Outputs, + spdagvm.APIOutput{ + Amount: json.Uint64(20 * units.KiloAva), + Threshold: 1, + Addresses: []ids.ShortID{addr}, + }, + ) + } + + spdagvmReply := spdagvm.BuildGenesisReply{} + spdagvmSS := spdagvm.StaticService{} + if err := spdagvmSS.BuildGenesis(nil, &spdagvmArgs, &spdagvmReply); err != nil { + return nil, fmt.Errorf("problem creating simple payments DAG: %w", err) + } + + // Specify the genesis state of the simple payments chain + spchainvmArgs := spchainvm.BuildGenesisArgs{} + for _, addr := range config.ParsedFundedAddresses { + spchainvmArgs.Accounts = append(spchainvmArgs.Accounts, + spchainvm.APIAccount{ + Address: addr, + Balance: json.Uint64(20 * units.KiloAva), + }, + ) + } + spchainvmReply := spchainvm.BuildGenesisReply{} + + spchainvmSS := spchainvm.StaticService{} + if err := spchainvmSS.BuildGenesis(nil, &spchainvmArgs, 
&spchainvmReply); err != nil { + return nil, fmt.Errorf("problem creating simple payments chain: %w", err) + } + + // Specify the initial state of the Platform Chain + platformvmArgs := platformvm.BuildGenesisArgs{ + NetworkID: json.Uint32(networkID), + } + for _, addr := range config.ParsedFundedAddresses { + platformvmArgs.Accounts = append(platformvmArgs.Accounts, + platformvm.APIAccount{ + Address: addr, + Balance: json.Uint64(20 * units.KiloAva), + }, + ) + } + + genesisTime := time.Date( + /*year=*/ 2019, + /*month=*/ time.November, + /*day=*/ 1, + /*hour=*/ 0, + /*minute=*/ 0, + /*second=*/ 0, + /*nano-second=*/ 0, + /*location=*/ time.UTC, + ) + stakingDuration := 365 * 24 * time.Hour // ~ 1 year + endStakingTime := genesisTime.Add(stakingDuration) + + for i, validatorID := range config.ParsedStakerIDs { + weight := json.Uint64(20 * units.KiloAva) + platformvmArgs.Validators = append(platformvmArgs.Validators, + platformvm.APIDefaultSubnetValidator{ + APIValidator: platformvm.APIValidator{ + StartTime: json.Uint64(genesisTime.Unix()), + EndTime: json.Uint64(endStakingTime.Unix()), + Weight: &weight, + ID: validatorID, + }, + Destination: config.ParsedFundedAddresses[i%len(config.ParsedFundedAddresses)], + }, + ) + } + + // Specify the chains that exist upon this network's creation + platformvmArgs.Chains = []platformvm.APIChain{ + platformvm.APIChain{ + GenesisData: avmReply.Bytes, + SubnetID: platformvm.DefaultSubnetID, + VMID: avm.ID, + FxIDs: []ids.ID{ + secp256k1fx.ID, + nftfx.ID, + propertyfx.ID, + }, + Name: "X-Chain", + }, + platformvm.APIChain{ + GenesisData: formatting.CB58{Bytes: config.EVMBytes}, + SubnetID: platformvm.DefaultSubnetID, + VMID: EVMID, + Name: "C-Chain", + }, + platformvm.APIChain{ + GenesisData: spdagvmReply.Bytes, + SubnetID: platformvm.DefaultSubnetID, + VMID: spdagvm.ID, + Name: "Simple DAG Payments", + }, + platformvm.APIChain{ + GenesisData: spchainvmReply.Bytes, + SubnetID: platformvm.DefaultSubnetID, + VMID: spchainvm.ID, + 
Name: "Simple Chain Payments", + }, + platformvm.APIChain{ + GenesisData: formatting.CB58{Bytes: []byte{}}, // There is no genesis data + SubnetID: platformvm.DefaultSubnetID, + VMID: timestampvm.ID, + Name: "Simple Timestamp Server", + }, + } + + platformvmArgs.Time = json.Uint64(genesisTime.Unix()) + platformvmReply := platformvm.BuildGenesisReply{} + + platformvmSS := platformvm.StaticService{} + if err := platformvmSS.BuildGenesis(nil, &platformvmArgs, &platformvmReply); err != nil { + return nil, fmt.Errorf("problem while building platform chain's genesis state: %w", err) + } + + return platformvmReply.Bytes.Bytes, nil +} + +// Genesis ... +func Genesis(networkID uint32) ([]byte, error) { return FromConfig(networkID, GetConfig(networkID)) } + +// VMGenesis ... +func VMGenesis(networkID uint32, vmID ids.ID) (*platformvm.CreateChainTx, error) { + genesisBytes, err := Genesis(networkID) + if err != nil { + return nil, err + } + genesis := platformvm.Genesis{} + platformvm.Codec.Unmarshal(genesisBytes, &genesis) + if err := genesis.Initialize(); err != nil { + return nil, err + } + for _, chain := range genesis.Chains { + if chain.VMID.Equals(vmID) { + return chain, nil + } + } + return nil, fmt.Errorf("couldn't find subnet with VM ID %s", vmID) +} + +// AVAAssetID ... 
+func AVAAssetID(networkID uint32) (ids.ID, error) { + createAVM, err := VMGenesis(networkID, avm.ID) + if err != nil { + return ids.ID{}, err + } + + c := codec.NewDefault() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&avm.BaseTx{}), + c.RegisterType(&avm.CreateAssetTx{}), + c.RegisterType(&avm.OperationTx{}), + c.RegisterType(&avm.ImportTx{}), + c.RegisterType(&avm.ExportTx{}), + c.RegisterType(&secp256k1fx.TransferInput{}), + c.RegisterType(&secp256k1fx.MintOutput{}), + c.RegisterType(&secp256k1fx.TransferOutput{}), + c.RegisterType(&secp256k1fx.MintOperation{}), + c.RegisterType(&secp256k1fx.Credential{}), + ) + if errs.Errored() { + return ids.ID{}, errs.Err + } + + genesis := avm.Genesis{} + if err := c.Unmarshal(createAVM.GenesisData, &genesis); err != nil { + return ids.ID{}, err + } + + if len(genesis.Txs) == 0 { + return ids.ID{}, errors.New("genesis creates no transactions") + } + genesisTx := genesis.Txs[0] + + tx := avm.Tx{UnsignedTx: &genesisTx.CreateAssetTx} + txBytes, err := c.Marshal(&tx) + if err != nil { + return ids.ID{}, err + } + tx.Initialize(txBytes) + + return tx.ID(), nil } diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index 7a6c6eb..825d07b 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -6,8 +6,8 @@ package genesis import ( "testing" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/vms/avm" - "github.com/ava-labs/gecko/vms/evm" "github.com/ava-labs/gecko/vms/platformvm" "github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/vms/spdagvm" @@ -17,11 +17,11 @@ func TestNetworkName(t *testing.T) { if name := NetworkName(MainnetID); name != MainnetName { t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, MainnetName) } - if name := NetworkName(TestnetID); name != BorealisName { - t.Fatalf("NetworkID was incorrectly named. 
Result: %s ; Expected: %s", name, BorealisName) + if name := NetworkName(TestnetID); name != CascadeName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, CascadeName) } - if name := NetworkName(BorealisID); name != BorealisName { - t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, BorealisName) + if name := NetworkName(CascadeID); name != CascadeName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, CascadeName) } if name := NetworkName(4294967295); name != "network-4294967295" { t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, "network-4294967295") @@ -45,7 +45,7 @@ func TestNetworkID(t *testing.T) { t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id) } - id, err = NetworkID(BorealisName) + id, err = NetworkID(CascadeName) if err != nil { t.Fatal(err) } @@ -53,7 +53,7 @@ func TestNetworkID(t *testing.T) { t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id) } - id, err = NetworkID("bOrEaLiS") + id, err = NetworkID("cAsCaDe") if err != nil { t.Fatal(err) } @@ -91,12 +91,12 @@ func TestNetworkID(t *testing.T) { } func TestAliases(t *testing.T) { - generalAliases, _, _ := Aliases(LocalID) + generalAliases, _, _, _ := Aliases(LocalID) if _, exists := generalAliases["vm/"+platformvm.ID.String()]; !exists { t.Fatalf("Should have a custom alias from the vm") } else if _, exists := generalAliases["vm/"+avm.ID.String()]; !exists { t.Fatalf("Should have a custom alias from the vm") - } else if _, exists := generalAliases["vm/"+evm.ID.String()]; !exists { + } else if _, exists := generalAliases["vm/"+EVMID.String()]; !exists { t.Fatalf("Should have a custom alias from the vm") } else if _, exists := generalAliases["vm/"+spdagvm.ID.String()]; !exists { t.Fatalf("Should have a custom alias from the vm") @@ -106,9 +106,84 @@ func TestAliases(t *testing.T) { } func TestGenesis(t *testing.T) { - genesisBytes := 
Genesis(LocalID) + genesisBytes, err := Genesis(LocalID) + if err != nil { + t.Fatal(err) + } genesis := platformvm.Genesis{} if err := platformvm.Codec.Unmarshal(genesisBytes, &genesis); err != nil { t.Fatal(err) } } + +func TestVMGenesis(t *testing.T) { + tests := []struct { + networkID uint32 + vmID ids.ID + expectedID string + }{ + { + networkID: CascadeID, + vmID: avm.ID, + expectedID: "4ktRjsAKxgMr2aEzv9SWmrU7Xk5FniHUrVCX4P1TZSfTLZWFM", + }, + { + networkID: LocalID, + vmID: avm.ID, + expectedID: "4R5p2RXDGLqaifZE4hHWH9owe34pfoBULn1DrQTWivjg8o4aH", + }, + { + networkID: CascadeID, + vmID: EVMID, + expectedID: "2mUYSXfLrDtigwbzj1LxKVsHwELghc5sisoXrzJwLqAAQHF4i", + }, + { + networkID: LocalID, + vmID: EVMID, + expectedID: "tZGm6RCkeGpVETUTp11DW3UYFZmm69zfqxchpHrSF7wgy8rmw", + }, + } + + for _, test := range tests { + genesisTx, err := VMGenesis(test.networkID, test.vmID) + if err != nil { + t.Fatal(err) + } + if result := genesisTx.ID().String(); test.expectedID != result { + t.Fatalf("%s genesisID with networkID %d was expected to be %s but was %s", + test.vmID, + test.networkID, + test.expectedID, + result) + } + } +} + +func TestAVAAssetID(t *testing.T) { + tests := []struct { + networkID uint32 + expectedID string + }{ + { + networkID: CascadeID, + expectedID: "21d7KVtPrubc5fHr6CGNcgbUb4seUjmZKr35ZX7BZb5iP8pXWA", + }, + { + networkID: LocalID, + expectedID: "n8XH5JY1EX5VYqDeAhB4Zd4GKxi9UNQy6oPpMsCAj1Q6xkiiL", + }, + } + + for _, test := range tests { + avaID, err := AVAAssetID(test.networkID) + if err != nil { + t.Fatal(err) + } + if result := avaID.String(); test.expectedID != result { + t.Fatalf("AVA assetID with networkID %d was expected to be %s but was %s", + test.networkID, + test.expectedID, + result) + } + } +} diff --git a/genesis/network_id.go b/genesis/network_id.go new file mode 100644 index 0000000..5e5c0dd --- /dev/null +++ b/genesis/network_id.go @@ -0,0 +1,73 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package genesis + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" +) + +// Hardcoded network IDs +var ( + MainnetID uint32 = 1 + TestnetID uint32 = 2 + CascadeID uint32 = 2 + LocalID uint32 = 12345 + + MainnetName = "mainnet" + TestnetName = "testnet" + CascadeName = "cascade" + LocalName = "local" + + NetworkIDToNetworkName = map[uint32]string{ + MainnetID: MainnetName, + TestnetID: CascadeName, + LocalID: LocalName, + } + NetworkNameToNetworkID = map[string]uint32{ + MainnetName: MainnetID, + TestnetName: TestnetID, + CascadeName: CascadeID, + LocalName: LocalID, + } + + validNetworkName = regexp.MustCompile(`network-[0-9]+`) +) + +// NetworkName returns a human readable name for the network with +// ID [networkID] +func NetworkName(networkID uint32) string { + if name, exists := NetworkIDToNetworkName[networkID]; exists { + return name + } + return fmt.Sprintf("network-%d", networkID) +} + +// NetworkID returns the ID of the network with name [networkName] +func NetworkID(networkName string) (uint32, error) { + networkName = strings.ToLower(networkName) + if id, exists := NetworkNameToNetworkID[networkName]; exists { + return id, nil + } + + if id, err := strconv.ParseUint(networkName, 10, 0); err == nil { + if id > math.MaxUint32 { + return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + } + return uint32(id), nil + } + if validNetworkName.MatchString(networkName) { + if id, err := strconv.Atoi(networkName[8:]); err == nil { + if id > math.MaxUint32 { + return 0, fmt.Errorf("NetworkID %s not in [0, 2^32)", networkName) + } + return uint32(id), nil + } + } + + return 0, fmt.Errorf("Failed to parse %s as a network name", networkName) +} diff --git a/ids/aliases_test.go b/ids/aliases_test.go new file mode 100644 index 0000000..8f8cb31 --- /dev/null +++ b/ids/aliases_test.go @@ -0,0 +1,113 @@ +// (c) 2020, Alex Willmer. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ids + +import ( + "reflect" + "testing" +) + +func TestAliaserLookupError(t *testing.T) { + emptyAliaser := Aliaser{} + emptyAliaser.Initialize() + tests := []struct { + label string + aliaser Aliaser + alias string + res ID + }{ + {"Unitialized", Aliaser{}, "Batwoman", ID{}}, + {"Empty", emptyAliaser, "Batman", ID{}}, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + res, err := tt.aliaser.Lookup(tt.alias) + if !tt.res.Equals(res) { + t.Errorf("Got %v, expected %v", res, tt.res) + } + if err == nil { + t.Error("Expected an error due to missing alias") + } + }) + } +} + +func TestAliaserLookup(t *testing.T) { + id := NewID([32]byte{'K', 'a', 't', 'e', ' ', 'K', 'a', 'n', 'e'}) + aliaser := Aliaser{} + aliaser.Initialize() + aliaser.Alias(id, "Batwoman") + + res, err := aliaser.Lookup("Batwoman") + if err != nil { + t.Fatalf("Unexpected error %q", err) + } + if !id.Equals(res) { + t.Fatalf("Got %v, expected %v", res, id) + } +} + +func TestAliaserAliasesEmpty(t *testing.T) { + id := NewID([32]byte{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'}) + aliaser := Aliaser{} + aliaser.Initialize() + + aliases := aliaser.Aliases(id) + if len(aliases) != 0 { + t.Fatalf("Unexpected aliases %#v", aliases) + } +} + +func TestAliaserAliases(t *testing.T) { + id := NewID([32]byte{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'}) + aliaser := Aliaser{} + aliaser.Initialize() + aliaser.Alias(id, "Batman") + aliaser.Alias(id, "Dark Knight") + + aliases := aliaser.Aliases(id) + expected := []string{"Batman", "Dark Knight"} + if !reflect.DeepEqual(aliases, expected) { + t.Fatalf("Got %v, expected %v", aliases, expected) + } +} + +func TestAliaserPrimaryAlias(t *testing.T) { + id1 := NewID([32]byte{'J', 'a', 'm', 'e', 's', ' ', 'G', 'o', 'r', 'd', 'o', 'n'}) + id2 := NewID([32]byte{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'}) + aliaser := Aliaser{} + aliaser.Initialize() + aliaser.Alias(id2, "Batman") + aliaser.Alias(id2, 
"Dark Knight") + + res, err := aliaser.PrimaryAlias(id1) + if res != "" { + t.Fatalf("Unexpected alias for %v", id1) + } + if err == nil { + t.Fatal("Expected an error given an id with no aliases") + } + + res, err = aliaser.PrimaryAlias(id2) + expected := "Batman" + if res != expected { + t.Fatalf("Got %v, expected %v", res, expected) + } + if err != nil { + t.Fatalf("Unexpected error %v", err) + } +} + +func TestAliaserAliasClash(t *testing.T) { + id1 := NewID([32]byte{'B', 'r', 'u', 'c', 'e', ' ', 'W', 'a', 'y', 'n', 'e'}) + id2 := NewID([32]byte{'D', 'i', 'c', 'k', ' ', 'G', 'r', 'a', 'y', 's', 'o', 'n'}) + aliaser := Aliaser{} + aliaser.Initialize() + aliaser.Alias(id1, "Batman") + + err := aliaser.Alias(id2, "Batman") + if err == nil { + t.Fatalf("Expected an error, due to an existing alias") + } +} diff --git a/ids/bag.go b/ids/bag.go index 1022489..de0af46 100644 --- a/ids/bag.go +++ b/ids/bag.go @@ -86,6 +86,19 @@ func (b *Bag) List() []ID { return idList } +// Equals returns true if the bags contain the same elements +func (b *Bag) Equals(oIDs Bag) bool { + if b.Len() != oIDs.Len() { + return false + } + for key, value := range b.counts { + if value != oIDs.counts[key] { + return false + } + } + return true +} + // Mode returns the id that has been seen the most and the number of times it // has been seen. Ties are broken by the first id to be seen the reported number // of times. @@ -95,7 +108,7 @@ func (b *Bag) Mode() (ID, int) { return b.mode, b.modeFreq } func (b *Bag) Threshold() Set { return b.metThreshold } // Filter returns the bag of ids with the same counts as this bag, except all -// the ids in the returned bag must have the same bits in the range [start, end] +// the ids in the returned bag must have the same bits in the range [start, end) // as id. 
func (b *Bag) Filter(start, end int, id ID) Bag { newBag := Bag{} diff --git a/ids/id_test.go b/ids/id_test.go index af3efa4..b541ed5 100644 --- a/ids/id_test.go +++ b/ids/id_test.go @@ -5,6 +5,7 @@ package ids import ( "bytes" + "reflect" "testing" ) @@ -29,10 +30,6 @@ func TestID(t *testing.T) { if b := id.Bytes(); !bytes.Equal(hash[:], b) { t.Fatalf("ID.Bytes returned wrong bytes") } - - if str := id.String(); str != "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt" { - t.Fatalf("ID.String returned wrong string: %s", str) - } } func TestIDBit(t *testing.T) { @@ -79,3 +76,143 @@ func TestFromString(t *testing.T) { t.Fatal("Expected FromString to be inverse of String but it wasn't") } } + +func TestIDFromStringError(t *testing.T) { + tests := []struct { + in string + }{ + {""}, + {"foo"}, + {"foobar"}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + _, err := FromString(tt.in) + if err == nil { + t.Error("Unexpected success") + } + }) + } +} + +func TestIDMarshalJSON(t *testing.T) { + tests := []struct { + label string + in ID + out []byte + err error + }{ + {"ID{}", ID{}, []byte("null"), nil}, + {"ID(\"ava labs\")", + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + nil, + }, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + out, err := tt.in.MarshalJSON() + if err != tt.err { + t.Errorf("Expected err %s, got error %v", tt.err, err) + } else if !bytes.Equal(out, tt.out) { + t.Errorf("got %q, expected %q", out, tt.out) + } + }) + } +} + +func TestIDUnmarshalJSON(t *testing.T) { + tests := []struct { + label string + in []byte + out ID + err error + }{ + {"ID{}", []byte("null"), ID{}, nil}, + {"ID(\"ava labs\")", + []byte("\"jvYi6Tn9idMi7BaymUVi9zWjg5tpmW7trfKG1AYJLKZJ2fsU7\""), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + nil, + }, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + foo := ID{} + err := 
foo.UnmarshalJSON(tt.in) + if err != tt.err { + t.Errorf("Expected err %s, got error %v", tt.err, err) + } else if foo.ID != nil && foo.Key() != tt.out.Key() { + t.Errorf("got %q, expected %q", foo.Key(), tt.out.Key()) + } + }) + } +} + +func TestIDHex(t *testing.T) { + id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + expected := "617661206c61627300000000000000000000000000000000000000000000000000" + actual := id.Hex() + if actual != actual { + t.Fatalf("got %s, expected %s", actual, expected) + } +} + +func TestIDString(t *testing.T) { + tests := []struct { + label string + id ID + expected string + }{ + {"ID{}", ID{}, "nil"}, + {"ID{[32]byte{24}}", NewID([32]byte{24}), "Ba3mm8Ra8JYYebeZ9p7zw1ayorDbeD1euwxhgzSLsncKqGoNt"}, + } + for _, tt := range tests { + t.Run(tt.label, func(t *testing.T) { + result := tt.id.String() + if result != tt.expected { + t.Errorf("got %q, expected %q", result, tt.expected) + } + }) + } +} + +func TestSortIDs(t *testing.T) { + ids := []ID{ + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + SortIDs(ids) + expected := []ID{ + NewID([32]byte{'W', 'a', 'l', 'l', 'e', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if !reflect.DeepEqual(ids, expected) { + t.Fatal("[]ID was not sorted lexographically") + } +} + +func TestIsSortedAndUnique(t *testing.T) { + unsorted := []ID{ + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if IsSortedAndUniqueIDs(unsorted) { + t.Fatal("Wrongly accepted unsorted IDs") + } + duplicated := []ID{ + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if IsSortedAndUniqueIDs(duplicated) { + t.Fatal("Wrongly accepted 
duplicated IDs") + } + sorted := []ID{ + NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}), + } + if !IsSortedAndUniqueIDs(sorted) { + t.Fatal("Wrongly rejected sorted, unique IDs") + } +} diff --git a/ids/queue_test.go b/ids/queue_test.go new file mode 100644 index 0000000..ab0dd4e --- /dev/null +++ b/ids/queue_test.go @@ -0,0 +1,70 @@ +package ids + +import ( + "reflect" + "testing" +) + +func TestQueueSetinit(t *testing.T) { + qs := QueueSet{} + qs.init() + if qs.idList == nil { + t.Fatal("Failed to initialize") + } + list := qs.idList + qs.init() + if list != qs.idList { + t.Fatal("Mutated an already intialized queue") + } +} + +func TestQueueSetSetHead(t *testing.T) { + qs := QueueSet{} + id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.SetHead(id) + if qs.idList == nil || id != qs.idList.Front().Value.(ID) { + t.Fatal("Failed to set head of unintilised queue") + } + + qs.SetHead(id) + if qs.idList.Len() != 1 || id != qs.idList.Front().Value.(ID) { + t.Fatal("Mutated a queue which already had the desired head") + } + + id2 := NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.SetHead(id2) + if qs.idList.Len() != 1 || id2 != qs.idList.Front().Value.(ID) { + t.Fatal("Didn't replace the existing head") + } +} + +func TestQueueSetAppend(t *testing.T) { + qs := QueueSet{} + id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.Append(id) + if qs.idList == nil || id != qs.idList.Front().Value.(ID) { + t.Fatal("Failed to append to an uninitialised queue") + } + + id2 := NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.Append(id2) + if qs.idList.Len() != 2 || id2 != qs.idList.Back().Value.(ID) { + t.Fatal("Failed to append to the back of the queue") + } +} + +func TestQueueGetTail(t *testing.T) { + qs := QueueSet{} + tail := qs.GetTail() + if !reflect.DeepEqual(tail, ID{}) { + t.Fatalf("Empty queue returned %v, expected empty ID %v", tail, Empty) + } + 
+ qs.Append(NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'})) + id2 := NewID([32]byte{'e', 'v', 'a', ' ', 'l', 'a', 'b', 's'}) + qs.Append(id2) + tail = qs.GetTail() + if tail != id2 { + t.Fatalf("Populated queue returned %v, expected %v", tail, id2) + } +} diff --git a/ids/slice.go b/ids/slice.go new file mode 100644 index 0000000..68b38a8 --- /dev/null +++ b/ids/slice.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ids + +// Equals returns true if the arrays are equal +func Equals(a, b []ID) bool { + if len(a) != len(b) { + return false + } + + for i, aID := range a { + if !aID.Equals(b[i]) { + return false + } + } + return true +} + +// UnsortedEquals returns true if the have the same number of each ID +func UnsortedEquals(a, b []ID) bool { + if len(a) != len(b) { + return false + } + + aBag := Bag{} + aBag.Add(a...) + + bBag := Bag{} + bBag.Add(b...) + + return aBag.Equals(bBag) +} diff --git a/keys/keys1/genCA.sh b/keys/genCA.sh similarity index 100% rename from keys/keys1/genCA.sh rename to keys/genCA.sh diff --git a/keys/mykey/genStaker.sh b/keys/genStaker.sh similarity index 90% rename from keys/mykey/genStaker.sh rename to keys/genStaker.sh index d955767..34f6889 100755 --- a/keys/mykey/genStaker.sh +++ b/keys/genStaker.sh @@ -1,7 +1,7 @@ #!/bin/sh set -ex -keypath=$GOPATH/src/github.com/ava-labs/gecko/keys/mykey +keypath=$GOPATH/src/github.com/ava-labs/gecko/keys if test -f "$keypath/staker.key" || test -f "$keypath/staker.crt"; then echo "staker.key or staker.crt already exists. Not generating new key/certificiate." 
diff --git a/keys/keys1/genStaker.sh b/keys/keys1/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys1/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys1/rootCA.srl b/keys/keys1/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys1/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys1/staker.csr b/keys/keys1/staker.csr deleted file mode 100644 index a4d8227..0000000 --- a/keys/keys1/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDKYSRw/W0YpYH/MTQhiFrR0m89l6yTuzLpDtjudr/5RnhIPvtqk7YI -Gm/m9l29xwR4J5r7SZGs+70yBetkbS+h7PwJ2rmWDwbrdyJKvVBhqf8kSn+VU2Le -PSIcJj193LDyWhV1H4lqNkUkcAR76Fh9qjMvA2p0vJ66+eDLXlph/RYapQx9HgOj -/0BmAKMrYCyo5BhRih+Ougg8aK4G9PQTIA5G2wTWW2QkHxM/QppFjZd/XwQeJ2H6 -ubWMFc5fttf6AzpJvFIDBu/JDCKWiCu5m8t4GL8w2OrIx8Js19lF4YYE2eojCreq -gPi64S3ocqwKsDoySTw6/5iKQ5BUYwUXX3z7EXOqD8SMHefUKeczj4WvAaZLzR27 -qXm55EgRYQAIX4fhmY7NfSop3Wh0Eo62+JHoM/1g+UgOXlbnWpY95Mgd7/fwDSWL -u4IxE0/uq8VufIbfC4yrY8qlTVfAffI1ldRdvJjPJBPiQ0CNrOl60LVptpkGc9sh -H7wZ2bP0bEnYKTgLAfOzD8Ut71O2AOIa80A1GNFl4Yle/MSNJOcQOSpgtWdREzIU -oenAjfuzM4OeTr4cRg4+VYTAo9KHKriN1DuewNzGd8WjKAVHmcIMjqISLTlzMhds -dm+OmfQ6OvyX7v0GTOBbhP09NGcww5A0gCzXN18FS5oxnxe6OG9D0wIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAE7VplAZTEGHpYwXZvhlVg0qDsb/7IQj77eNteSU -33Dq6u7QLgS+Ea04Xv5BHnhnBoWRtrNR8WLTw64cuj6p/sqXiQsSNDgxNDPuPG+g -1FFi6wjgtoIJnx/QrITuUyO/MRy1awKLHlGfbY6yXSdLCC9bqLSIRm0tx+E+jo5C 
-0r5+ZOcLK8ZXWq9uHjmekX0hoN4qzsbQ0J5IeMh9ag+698aqzBSEDljLHg614yiK -FxtpD+23O0XfAdgqFgXRLLg3tt8AkVuys7r/uwHoz9du+nwW2U5nsMIYBXLV2mq3 -1KbpXDTlVwaSoA2LP8dpmvbyTgNbXsjPdS91Rrzd7fcsammcSV0aWPiXmIbTLtn8 -61ZRR0uj+jB68cRjSvegnheifsGyq6alr8OSUMdeWVyiPy2O7N6fUVj+Fmyzl5Ph -fl9UPZTmt/zOZrcSBoWjtZfmQVfw29SfMYwlNKALN4eOT6XwBLDK4uu4UXSoXwi+ -V8evUUfBWcrcXHMTIFhoZbW/b7gjhnv148XWYI0ta8pjt/akzlPLtf4ETPqfECNN -4+p2w9+R5ktzCLeceXQc8eN+ZwjIt31zG48J7Sl1wJB13VR0jPy6zDsyUIswIVfe -7gp7GHg8R0lzDpEYCvU+R7RUWK6xcpjt7+mTHM70csnnOg7uPqnXqOdtVplr0y+R -pmqJ ------END CERTIFICATE REQUEST----- diff --git a/keys/keys2/genCA.sh b/keys/keys2/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys2/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys2/genStaker.sh b/keys/keys2/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys2/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys2/rootCA.crt b/keys/keys2/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys2/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM 
-Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys2/rootCA.key b/keys/keys2/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys2/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- 
-MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl 
-KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/keys2/rootCA.srl b/keys/keys2/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys2/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys2/staker.csr b/keys/keys2/staker.csr deleted file mode 100644 index 8b7f8c9..0000000 --- a/keys/keys2/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDdToR60na6NuR9iSAUMyzPXJNMWVQbLyT5/iZCiJ3BB4YWMBhfxpJW -JiWXcM+znDgpJuyCEeh5Dp6ZY3Fe7k6Hht6FmFpDjwnjpQmdkEKUg00G+ElPTp/U -smsPL+JAswPqBZWpMBS3dsXQNunMMtMGlrf5S0l6XX4y7kc/GTxYgveWZ9JtR/m2 
-KNer+wjgBHqJ4rPqnHB30sDYPZg91Cz1Ak8Bb2w2I108zQVgKK6eIqNKXJJ/4piz -SZdU4920wMxYBpnfDAchnxei9U/v3QbT7eKUI2fGr+hOWTIWU80+VeOBt8a6P4sS -9AQh5/6G8qwmAqO3YQ9dxN82iu/H3+N+GGa/M0r5rEWrzwIuFhwKvyQcpPRBm2yQ -nBnhL9G5kN6n4OBM0KsgZ3CYlHZSg4eWcNgBt1WCFsQc7vfUFaJnr8QP3pF4V/4B -ok7wTO5HN0A1EYEVYuX53NGnrKVe+Fg9+xMOgXPWkUNqdvpI9ZbV3Z0S5866qF3/ -vBZrhgCrKc5E/vMexBRe8Ki4wKqONVhi9WGUcRHvFEikc+7VrPj0YaG6zVLd+uOA -JN81fKOPYo4X4sZrMyPYl3OjGtMhfV4KvCaLEr1duOklqO6cCvGQ8iAlLVy3VJyW -5GJ0D0KyiAir4VNdAJKo1ZgiGivJLWulTfjUifCN9o115AiqJxiqwwIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAM2IHKsQsebxTD50QQXtSNbyRzG/GpMZuZXn/QYO -QGW0ThJwtcmx6cqQvuyBovH5WhB9QUBFjiKkR7Qef7HUsgxU1cJA75gBfb2GMUru -Q+T37xOxtr6S2TcKOq/LvdJaTYmAHmW9V7vwEcrMRa9lWVTEmJIKTuxiUubpXtup -8OB8WLIvDikVtKtegvl6VCaTApCkUfuLhf7DERQ6sGLXWz6dVQcfvbfcXK2fn1Ik -Koxqy1SSz/rPb4u9NEk1yqvJQdpgnbTM3drTPHiIHCA7F6SjMu5tekHtVQkFOd6c -B0geEwyxY97zqnFv5YXiukXEaAnCRAlOuIZXRqtK6GFthTWo33YpB2KaRUtJ7IuP -og4Q/zjDs8DEc/qbbUbhyulExz6uoyRKO4j/gG3ESC6j09j7Eungt1LDhyt8p3wD -pytIIPkTseykO0CcEpEcGbES6d3u4PrFJ75XWxMkNZVK8mC3faxx2kJLfS1+4Fg8 -A0zbcN6qwm1ezGq2vGQcyVKyFVWJQAEAFuSO8sjW6dk3ClfE+MNGUvxTQMe96V14 -jGRICCp9aJrJXA3u0iQaUX0cXmlhegAYk7Ho/Ef3k/PcP8DzZ8Ck839oRHBw4pPv -tKbyiKnOcet7AFGwsiM2t5VLrj4jovhRLEiaXrCaxNe6j4xs63TEb+8uTCzKyktC -4BFq ------END CERTIFICATE REQUEST----- diff --git a/keys/keys3/genCA.sh b/keys/keys3/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys3/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys3/genStaker.sh b/keys/keys3/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys3/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname 
"$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys3/rootCA.crt b/keys/keys3/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys3/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv 
-Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys3/rootCA.key b/keys/keys3/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys3/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF 
-2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE 
KEY----- diff --git a/keys/keys3/rootCA.srl b/keys/keys3/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys3/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys3/staker.csr b/keys/keys3/staker.csr deleted file mode 100644 index 87bcd8d..0000000 --- a/keys/keys3/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQC8mVDToHbkUF2gRdVfpydZLNKeQ38d6HZFkUM3U1dWLZFSZNvagN8h -lQvY/tQu3A40p19WgKbzWZre3tg1Akw8Jztdz9gl4RMn142IIO3CiwIptkE0Jopb -ZhmG5fAC2n/MXQtfieI3hzeR04LW4JgLKzf3Nn8xZdlBgJfBmL5qUUnE7O7IbJGG -ma6gSD3ewetE6KQZtNtf0xRIv08doZKYwTl6ItkdGK76ufqq098GVwWvA1wSune4 -+MFgs9N4eFJj6Jyt85fiK/cwPx7KRdgYgBzrZQ4EPshRnwWrBTieOOaJvAA2RMxM -EYzKRrJAAsYI1zxtNyqIUaBTcxmaz+NXUGW+wHwITic0Gp/XQm2Lwr/lxIV6OnAl -L3CgbSXirSnoG+eHQ+vDzBAcRDkTAgv/GUIzlfqT2StTK02uIBgJYzvFTG4plHit -ccRfy8wxsh5Z8xG99lmPQQtLsnlQAV+Li06Cb8CH4hUVoiWiVs5QAahqWmv5fpoX -0Es26RyUHXGbjE202pyMMA7jUerUVKMijOoGZtcH6zB4p/dJ0TtToRwOgrA7NCI9 -AYVtqVXrXG/udj8ur2r1bTVwIbHsOeTEP3gY0mHRWm2E/bLjt9vbYIRUxR8xWnLk -beBziNTwg+36jdDF+6gu3cUz/nbSn8YY+Y1jjXuM3lqF8iMaAobhuwIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAEWU13T1alCni4R36J65TrGfIljW8LGhmWRo5xoV -YW7HxxZ/WTAFSwAv0yHCGq+H/tebRZhvua+c+jP16YBDoAp5neGWW57gLDg+35H5 -guLo73p/qM6hyaUGSfyO9D1nS1QX8R0r70TQYbIrVB4uQeTM2pEYR6NYO7bjPEWr -WwC6RnbtnsNGTeoH+LwiM+uY//VB/tUe1u2y6U8HkIXJo7j4+NqUL1xXmYmC6Rph -PNI3MAZUL40z1VX7fn/Vp7+rc0CBUsFMOLfLmSgL8jsQeKuyVAQKA4xzWQ2qeuGV -Bv24rHbnSxYSu8tMs31LZPn+fsvNWB9iU7MEiTUr+8nAPEAANNaBwaD1EUkzC1WC -OcCUpMgkhVuzfHq+eXWnw3cGVvEZ8A4DzOyl1ZFofxBX7IOOv0lmpDQSeEQlmKPF -LdWI2JJM76BjeXI7l5HbOmRJv1kYFwBq/biDxCSmmNX8uHvAj1EgDNXvr/pRw7rT -6yKOLtk1GSCCrrYQijCXRx2u276+j8MtC5i6FVcUoaSYD+nx2+ApOnZlYR7xsJYw -5ECaeDagjHP472FY/fuhy/UwAIFm5gCcVFs3A2M/Iyn+vsAW5WEdh+fwGiWxfw49 -Y5KRT8u7BD0R5T5UYxYwzYekEzxsoD3bvQGx/4tboSUxkOd7pVymbuGzIsQ18heI -78pG ------END 
CERTIFICATE REQUEST----- diff --git a/keys/keys4/genCA.sh b/keys/keys4/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys4/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys4/genStaker.sh b/keys/keys4/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys4/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys4/rootCA.crt b/keys/keys4/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys4/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW 
-cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys4/rootCA.key b/keys/keys4/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys4/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV 
-Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor 
-Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/keys4/rootCA.srl b/keys/keys4/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys4/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys4/staker.csr b/keys/keys4/staker.csr deleted file mode 100644 index 9d94e57..0000000 --- a/keys/keys4/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw -ggIKAoICAQDZnDoDHE2nj82xDjH0Tb7OXMqQDHz+zbLidt6MSI1XB3vOAIEiPqrt -enGnqRbVFcm5GZxvxh4YQD8CjKSk1qgZJczs0DPSiGQ8Efl4PGO4xnEbllgL3PUR -PWp7mEV3oh6fxICgQKTBlT671EnFzB5lyJWpumRzvA1vyhBMsY8aO+xdq5LUFltY -zBdvpgLXVaDwHZQ2PQEWtF0d0JO2N0WFFDGNmx6n8pKSeIAVDsTwZCZK+FCeeEyo -GfXsINsc0yCMQslawkfOMqA9yBV3Ji6QmFYKyGYt65MWGNqPA4XrIyliKwCCXwz9 -mjaWyN7rAyw9cWlLMODNmDORWzGRZ5290MEAEIZsqjYHVitRTM/RnNIadToZGO0y -5uAkM14cmTvnsK1CP92qtfSisq75W/I91drThoEtTK78UGOl/5Q1YBR08F+tSUWZ -WyHeI6UOBUCGC2bCtmzKMl7vU25lG6mbCR1JuQi6RYpnfMjXH36lV4S7fTvSwwuR -03h2F3H1eFkWNG2lbFrW0dzDCPg3lXwmFQ65hUcQhctznoBz5C1lF2eW03wuVgxi -nnuVlJHjy/GrqmWsASn1PDuVs4k7k6DJfwyHAiA0uxXrGfxYvp7H8j4+2YOmWiWl -5xYgrEDjur5n8Zx46PHQer2Avq3sbEGEe1MCtXJlj3drd5Him3m+NQIDAQABoAAw 
-DQYJKoZIhvcNAQELBQADggIBAMdZKzx/Qz07D/ISgEe10+XofO5It86g12YJBgGN -4UEnKNk1quJIs0PAwcDNp7G4BpEMuP5xjyf4q976gzAkTg2kcB+LK85eGGSxkxAt -uFQPlFvk85qn4k7wLSx2zkqs47ItvqK5Ev8lLCZ/HfIy+7y57BKqDTvzhXarE3lq -bEZketwQvDcQPN7Ho9gxDMMQDeE2NeDyYhQtCMlX8PnmBRhWZ4CExODMdm8TrTJJ -5HDoj+fXCaSSbXPN25LKYSKOEM4wtzHa91hQK7JGoeHuSS0zFxDwXNKi3sLLtKTH -jsYL/E9bH2NxKPRoHwCJMS0N2jUqnHFyhQUp8VtJlxN0IsPLZGXFapVo4fk2hDpF -OapX0kysLV37KEklVHucQclU5SeTpzoS7gYXqvOJ3Q/IFR6CFAkCHaDggWpB/sVm -OPG6Pt6XXbGNCav9+Am+0q4UD5O1Sbobx+0XJu3VtnuZdn4Mt0uBSL1DZdG9ceig -mGz4bx1kvnzhL1LOAPmxOYqrLCwqJRkRCa+25uRNqBAqWcU48pwoxC3RLyWvy2UN -8Or+TsBzPUldq8yWn3s0/zE8yui6pxzpGUD2TfbUT78O0HJKn5nQjrjVdQZhaA4t -KnrZCz7lIrHRXf2Hbsg/9QgHhcpkknc98z0trNQHncp/kxUvrBJyJGrUh1DEkOSe -f9p0 ------END CERTIFICATE REQUEST----- diff --git a/keys/keys5/genCA.sh b/keys/keys5/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/keys5/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/keys5/genStaker.sh b/keys/keys5/genStaker.sh deleted file mode 100755 index 0a4b836..0000000 --- a/keys/keys5/genStaker.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/staker.key 4096 -openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr -openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256 diff --git a/keys/keys5/rootCA.crt b/keys/keys5/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/keys5/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB 
-dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw -DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/keys5/rootCA.key 
b/keys/keys5/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/keys5/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ -mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl 
-jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C -cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/keys5/rootCA.srl b/keys/keys5/rootCA.srl deleted file mode 100644 index 473c41d..0000000 --- a/keys/keys5/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D14A diff --git a/keys/keys5/staker.csr b/keys/keys5/staker.csr deleted file mode 100644 index 418de02..0000000 --- a/keys/keys5/staker.csr +++ /dev/null @@ -1,27 +0,0 @@ ------BEGIN CERTIFICATE REQUEST----- -MIIEfzCCAmcCAQAwOjELMAkGA1UEBhMCVVMxCzAJBgNVBAgMAk5ZMRAwDgYDVQQK -DAdBdmFsYWJzMQwwCgYDVQQDDANhdmEwggIiMA0GCSqGSIb3DQEBAQUAA4ICDwAw 
-ggIKAoICAQDgK5r5vdHtJFEgw7hGE/lzKaHcvwzr32armq0k9tYchJXfT3k1j1lX -tBAdcUN3gSRKjgzH/vjbn0ea3AiDCUd2Mck/n0KcJZ43S5I7ZjP7rbav296bKCZ1 -Hr7r5gXYFhk+3aUsVfDUqAPBwyP8KeV31ARVA/s+WPeWqs69QXTdyJuBYE5pr40v -1Sf+ebUInZ37uGY3kiO0Ex/JgcoQsGJzrWD/ztbRCFIvrdNJZd0pGvMlmTKp7XsM -R3cpvqk770//MLCdyGW/1IArTSuD1vd7mBX1JyVXKycYN0vIOtbgxPOFutUyqDOe -P7o51q4iPS3dCRgfmn/hWLwy+CtJe0BGKsb4tk0tKxo0se8v9JA8mUtnmzmMt4Y9 -jijOrCOB7XwWKmJYEm8N5Ubcy6cp2oL8vQVtzz3PXrkFt+3cFt1jrjdpQYgH4jyk -kWDeOjEfy1FCwzsNRudLTvLhfLn86/ZT4cLZ9JI7/WW0IPC8Fc7lhznJ+bIQUeEn -daGdgVkxuEg0MxdrMr0jU0IFoXySRXNRzcDWZShEjBTv7tnFxLmoNU+uJb/KpMH6 -sRYi3zs85ecaMKNyG+LDmBahUlHx5hKAH49O8855+AMhsg91ONZJldjQX0oZrIKz -K5BpsqeTl4c2Yt/fALiZaeFk1pBEsvVeMOBCIuWE+b4UIEaLAOhxfwIDAQABoAAw -DQYJKoZIhvcNAQELBQADggIBAMWzSdk6C53ijuLYOt77BAYpxnxRvKA1tsxJd9K5 -+R+ZGiuTLXWArgClhhwx65OTeqrwkVlh2CZoVTBeEMzvxn6zHO4S20KcWJ1lWU76 -OIrBZrAecBVXB5LmttUkvlMgVlWLQuVpVJiEn1jW6KeABqWKCLz7Au8TzHfr1HQ4 -1ukndu3SsKVwSIy0ZHFpQaXvzA8f0V93ap9R2RVw9BXqCJDe+BtQPvlCwOrvQ7d3 -gg+3aymbqUx3hrscEvd1ETad7LyFw3QfPcr1j1FwPH+K1/UDrWxIzxmO+HM5Lh8f -269aYceysgv/xa/KpANTxVAM7j1SE1CjjI5e5CQJVZ+gtAqTIv3lLkk0dWQksObN -Z1tTtJkFAUNbGsMadtVeTmx2eBcRi4LEv0DIPyyWUQTWwTYtaMFi8I0bYPk1T/fV -9umR6jqZ0l1qdiuLYOSYUx4iI5SAmCrA/kEINOj0u2gqqkxdOgUVsuKqer4w9Iyt -qOhhOHwctRo+cIhpVwcF2ouJeNrFqoBzOgHKQxBvcDWJM8ra5GCNIvD3MP4Q63hy -b4fkBcYwb1B2ETH9nSDtfW+JLjt70rvf6IxAiXRRiOv4fPzaUlK49NRVgjzx5Iu+ -8Zq4+I+S6qZOROWsOVSpJu44VvNZd5bMB9dEHnkoGxkPjo8pkC/o0uZbxsnZScSL -WGxS ------END CERTIFICATE REQUEST----- diff --git a/keys/keys1/staker.crt b/keys/local/staker1.crt similarity index 100% rename from keys/keys1/staker.crt rename to keys/local/staker1.crt diff --git a/keys/keys1/staker.key b/keys/local/staker1.key similarity index 100% rename from keys/keys1/staker.key rename to keys/local/staker1.key diff --git a/keys/keys2/staker.crt b/keys/local/staker2.crt similarity index 100% rename from keys/keys2/staker.crt rename to keys/local/staker2.crt diff --git a/keys/keys2/staker.key b/keys/local/staker2.key 
similarity index 100% rename from keys/keys2/staker.key rename to keys/local/staker2.key diff --git a/keys/keys3/staker.crt b/keys/local/staker3.crt similarity index 100% rename from keys/keys3/staker.crt rename to keys/local/staker3.crt diff --git a/keys/keys3/staker.key b/keys/local/staker3.key similarity index 100% rename from keys/keys3/staker.key rename to keys/local/staker3.key diff --git a/keys/keys4/staker.crt b/keys/local/staker4.crt similarity index 100% rename from keys/keys4/staker.crt rename to keys/local/staker4.crt diff --git a/keys/keys4/staker.key b/keys/local/staker4.key similarity index 100% rename from keys/keys4/staker.key rename to keys/local/staker4.key diff --git a/keys/keys5/staker.crt b/keys/local/staker5.crt similarity index 100% rename from keys/keys5/staker.crt rename to keys/local/staker5.crt diff --git a/keys/keys5/staker.key b/keys/local/staker5.key similarity index 100% rename from keys/keys5/staker.key rename to keys/local/staker5.key diff --git a/keys/mykey/genCA.sh b/keys/mykey/genCA.sh deleted file mode 100755 index 14a0f4c..0000000 --- a/keys/mykey/genCA.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh -set -ex - -openssl genrsa -out `dirname "$0"`/rootCA.key 4096 -openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt diff --git a/keys/mykey/rootCA.crt b/keys/mykey/rootCA.crt deleted file mode 100644 index da6320a..0000000 --- a/keys/mykey/rootCA.crt +++ /dev/null @@ -1,34 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV -BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB -dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN -AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw -MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM -Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV -BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw 
-DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg -jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93 -QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU -m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0 -lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB -KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW -cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44 -RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH -bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW -T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB -J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU -KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei -73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E -BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj -FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG -XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY -omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv -Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC -XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0 -gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn -3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N -W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s -scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU -kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD -DB8IRfWqBx2nWw== ------END CERTIFICATE----- diff --git a/keys/mykey/rootCA.key b/keys/mykey/rootCA.key deleted file mode 100644 index fe23a96..0000000 --- a/keys/mykey/rootCA.key +++ /dev/null @@ -1,51 +0,0 @@ ------BEGIN RSA PRIVATE KEY----- -MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1 -6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ 
-mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h -Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL -AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk -tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd -CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu -TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV -Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll -JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt -RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA -AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg -O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6 -WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc -fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o -WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y -243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM -Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv -/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF -2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3 -wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R -WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1 -POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC -T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW -jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc -23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK -XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl -jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+ -/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P -rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl -KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD -E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C 
-cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE -r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu -GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy -7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr -RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF -SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor -Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY -KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t -Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM -/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6 -YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt -I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy -+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f -UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER -KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW -MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe -f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA== ------END RSA PRIVATE KEY----- diff --git a/keys/mykey/rootCA.srl b/keys/mykey/rootCA.srl deleted file mode 100644 index de0f603..0000000 --- a/keys/mykey/rootCA.srl +++ /dev/null @@ -1 +0,0 @@ -BAF3B5C5C6D0D164 diff --git a/keys/keys1/rootCA.crt b/keys/rootCA.crt similarity index 100% rename from keys/keys1/rootCA.crt rename to keys/rootCA.crt diff --git a/keys/keys1/rootCA.key b/keys/rootCA.key similarity index 100% rename from keys/keys1/rootCA.key rename to keys/rootCA.key diff --git a/keys/rootCA.srl b/keys/rootCA.srl new file mode 100644 index 0000000..617b916 --- /dev/null +++ b/keys/rootCA.srl @@ -0,0 +1 @@ +BAF3B5C5C6D0D166 diff --git a/main/main.go b/main/main.go index db293b5..fc7ae99 100644 --- a/main/main.go +++ b/main/main.go @@ -7,10 +7,10 @@ import ( "fmt" "path" + "github.com/ava-labs/gecko/nat" "github.com/ava-labs/gecko/node" 
"github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/go-ethereum/p2p/nat" ) // main is the primary entry point to Ava. This can either create a CLI to an @@ -61,26 +61,11 @@ func main() { log.Warn("assertions are enabled. This may slow down execution") } - natChan := make(chan struct{}) - defer close(natChan) + mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko") + defer mapper.UnmapAllPorts() - go nat.Map( - /*nat=*/ Config.Nat, - /*closeChannel=*/ natChan, - /*protocol=*/ "TCP", - /*internetPort=*/ int(Config.StakingIP.Port), - /*localPort=*/ int(Config.StakingIP.Port), - /*name=*/ "Gecko Staking Server", - ) - - go nat.Map( - /*nat=*/ Config.Nat, - /*closeChannel=*/ natChan, - /*protocol=*/ "TCP", - /*internetPort=*/ int(Config.HTTPPort), - /*localPort=*/ int(Config.HTTPPort), - /*name=*/ "Gecko HTTP Server", - ) + mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port) + mapper.MapPort(Config.HTTPPort, Config.HTTPPort) log.Debug("initializing node state") // MainNode is a global variable in the node.go file diff --git a/main/params.go b/main/params.go index 88a3722..b17b5cd 100644 --- a/main/params.go +++ b/main/params.go @@ -8,15 +8,15 @@ import ( "flag" "fmt" "net" + "os" "path" "strings" - "github.com/ava-labs/go-ethereum/p2p/nat" - "github.com/ava-labs/gecko/database/leveldb" "github.com/ava-labs/gecko/database/memdb" "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/nat" "github.com/ava-labs/gecko/node" "github.com/ava-labs/gecko/snow/networking/router" "github.com/ava-labs/gecko/utils" @@ -32,6 +32,14 @@ var ( Err error ) +// GetIPs returns the default IPs for each network +func GetIPs(networkID uint32) []string { + switch networkID { + default: + return nil + } +} + var ( errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs") ) @@ -44,72 +52,84 @@ func init() { loggingConfig, err := logging.DefaultConfig() 
errs.Add(err) + fs := flag.NewFlagSet("gecko", flag.ContinueOnError) + // NetworkID: - networkName := flag.String("network-id", genesis.LocalName, "Network ID this node will connect to") + networkName := fs.String("network-id", genesis.LocalName, "Network ID this node will connect to") // Ava fees: - flag.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") + fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") // Assertions: - flag.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") + fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") // Crypto: - flag.BoolVar(&Config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") + fs.BoolVar(&Config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") // Database: - db := flag.Bool("db-enabled", true, "Turn on persistent storage") - dbDir := flag.String("db-dir", "db", "Database directory for Ava state") + db := fs.Bool("db-enabled", true, "Turn on persistent storage") + dbDir := fs.String("db-dir", "db", "Database directory for Ava state") // IP: - consensusIP := flag.String("public-ip", "", "Public IP of this node") + consensusIP := fs.String("public-ip", "", "Public IP of this node") // HTTP Server: - httpPort := flag.Uint("http-port", 9650, "Port of the HTTP server") - flag.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs") - flag.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server") - flag.StringVar(&Config.HTTPSCertFile, "http-tls-cert-file", "", "TLS certificate file for the HTTPs server") + httpPort := fs.Uint("http-port", 9650, "Port of the HTTP server") + fs.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs") + fs.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS 
private key file for the HTTPs server") + fs.StringVar(&Config.HTTPSCertFile, "http-tls-cert-file", "", "TLS certificate file for the HTTPs server") // Bootstrapping: - bootstrapIPs := flag.String("bootstrap-ips", "", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") - bootstrapIDs := flag.String("bootstrap-ids", "", "Comma separated list of bootstrap peer ids to connect to. Example: JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") + bootstrapIPs := fs.String("bootstrap-ips", "default", "Comma separated list of bootstrap peer ips to connect to. Example: 127.0.0.1:9630,127.0.0.1:9631") + bootstrapIDs := fs.String("bootstrap-ids", "default", "Comma separated list of bootstrap peer ids to connect to. Example: JR4dVmy6ffUGAKCBDkyCbeZbyHQBeDsET,8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") // Staking: - consensusPort := flag.Uint("staking-port", 9651, "Port of the consensus server") - flag.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections") - flag.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "", "TLS private key file for staking connections") - flag.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "", "TLS certificate file for staking connections") + consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server") + fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections") + fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "keys/staker.key", "TLS private key file for staking connections") + fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "keys/staker.crt", "TLS certificate file for staking connections") + + // Plugins: + fs.StringVar(&Config.PluginDir, "plugin-dir", "./build/plugins", "Plugin directory for Ava VMs") // Logging: - logsDir := flag.String("log-dir", "", "Logging directory for Ava") - logLevel := flag.String("log-level", 
"info", "The log level. Should be one of {verbo, debug, info, warn, error, fatal, off}") - logDisplayLevel := flag.String("log-display-level", "", "The log display level. If left blank, will inherit the value of log-level. Otherwise, should be one of {verbo, debug, info, warn, error, fatal, off}") + logsDir := fs.String("log-dir", "", "Logging directory for Ava") + logLevel := fs.String("log-level", "info", "The log level. Should be one of {verbo, debug, info, warn, error, fatal, off}") + logDisplayLevel := fs.String("log-display-level", "", "The log display level. If left blank, will inherit the value of log-level. Otherwise, should be one of {verbo, debug, info, warn, error, fatal, off}") - flag.IntVar(&Config.ConsensusParams.K, "snow-sample-size", 20, "Number of nodes to query for each network poll") - flag.IntVar(&Config.ConsensusParams.Alpha, "snow-quorum-size", 18, "Alpha value to use for required number positive results") - flag.IntVar(&Config.ConsensusParams.BetaVirtuous, "snow-virtuous-commit-threshold", 20, "Beta value to use for virtuous transactions") - flag.IntVar(&Config.ConsensusParams.BetaRogue, "snow-rogue-commit-threshold", 30, "Beta value to use for rogue transactions") - flag.IntVar(&Config.ConsensusParams.Parents, "snow-avalanche-num-parents", 5, "Number of vertexes for reference from each new vertex") - flag.IntVar(&Config.ConsensusParams.BatchSize, "snow-avalanche-batch-size", 30, "Number of operations to batch in each new vertex") + fs.IntVar(&Config.ConsensusParams.K, "snow-sample-size", 5, "Number of nodes to query for each network poll") + fs.IntVar(&Config.ConsensusParams.Alpha, "snow-quorum-size", 4, "Alpha value to use for required number positive results") + fs.IntVar(&Config.ConsensusParams.BetaVirtuous, "snow-virtuous-commit-threshold", 20, "Beta value to use for virtuous transactions") + fs.IntVar(&Config.ConsensusParams.BetaRogue, "snow-rogue-commit-threshold", 30, "Beta value to use for rogue transactions") + 
fs.IntVar(&Config.ConsensusParams.Parents, "snow-avalanche-num-parents", 5, "Number of vertexes for reference from each new vertex") + fs.IntVar(&Config.ConsensusParams.BatchSize, "snow-avalanche-batch-size", 30, "Number of operations to batch in each new vertex") + fs.IntVar(&Config.ConsensusParams.ConcurrentRepolls, "snow-concurrent-repolls", 1, "Minimum number of concurrent polls for finalizing consensus") // Enable/Disable APIs: - flag.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") - flag.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") - flag.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") - flag.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") + fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") + fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") + fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") + fs.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") // Throughput Server - throughputPort := flag.Uint("xput-server-port", 9652, "Port of the deprecated throughput test server") - flag.BoolVar(&Config.ThroughputServerEnabled, "xput-server-enabled", false, "If true, throughput test server is created") + throughputPort := fs.Uint("xput-server-port", 9652, "Port of the deprecated throughput test server") + fs.BoolVar(&Config.ThroughputServerEnabled, "xput-server-enabled", false, "If true, throughput test server is created") - flag.Parse() + ferr := fs.Parse(os.Args[1:]) + + if ferr == flag.ErrHelp { + // display usage/help text and exit successfully + os.Exit(0) + } + + if ferr != nil { + // other type of error occurred when parsing 
args + os.Exit(2) + } networkID, err := genesis.NetworkID(*networkName) errs.Add(err) - if networkID != genesis.LocalID { - errs.Add(fmt.Errorf("the only supported networkID is: %s", genesis.LocalName)) - } - Config.NetworkID = networkID // DB: @@ -123,13 +143,17 @@ func init() { Config.DB = memdb.New() } - Config.Nat = nat.Any() + Config.Nat = nat.NewRouter() var ip net.IP // If public IP is not specified, get it using shell command dig if *consensusIP == "" { - ip, err = Config.Nat.ExternalIP() - errs.Add(fmt.Errorf("%s\nIf you are trying to create a local network, try adding --public-ip=127.0.0.1", err)) + ip, err = Config.Nat.IP() + errs.Add(fmt.Errorf( + "%s\n"+ + "If you are trying to create a local network, try adding --public-ip=127.0.0.1\n"+ + "If you are attempting to connect to a public network, you may need to manually report your IP and perform port forwarding", + err)) } else { ip = net.ParseIP(*consensusIP) } @@ -143,6 +167,9 @@ func init() { } // Bootstrapping: + if *bootstrapIPs == "default" { + *bootstrapIPs = strings.Join(GetIPs(networkID), ",") + } for _, ip := range strings.Split(*bootstrapIPs, ",") { if ip != "" { addr, err := utils.ToIPDesc(ip) @@ -152,6 +179,14 @@ func init() { }) } } + + if *bootstrapIDs == "default" { + if *bootstrapIPs == "" { + *bootstrapIDs = "" + } else { + *bootstrapIDs = strings.Join(genesis.GetConfig(networkID).StakerIDs, ",") + } + } if Config.EnableStaking { i := 0 cb58 := formatting.CB58{} diff --git a/nat/mapper.go b/nat/mapper.go new file mode 100644 index 0000000..3beaedd --- /dev/null +++ b/nat/mapper.go @@ -0,0 +1,143 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package nat + +import ( + "sync" + "time" + + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" +) + +const ( + defaultMappingTimeout = 30 * time.Minute + defaultMappingUpdateInterval = 3 * defaultMappingTimeout / 4 +) + +// Mapper maps port +type Mapper interface { + MapPort(newInternalPort, newExternalPort uint16) error + UnmapAllPorts() error +} + +type mapper struct { + log logging.Logger + router Router + networkProtocol NetworkProtocol + mappingNames string + mappingTimeout time.Duration + mappingUpdateInterval time.Duration + + closer chan struct{} + wg sync.WaitGroup + errLock sync.Mutex + errs wrappers.Errs +} + +// NewMapper returns a new mapper that can map ports on a router +func NewMapper( + log logging.Logger, + router Router, + networkProtocol NetworkProtocol, + mappingNames string, + mappingTimeout time.Duration, + mappingUpdateInterval time.Duration, +) Mapper { + return &mapper{ + log: log, + router: router, + networkProtocol: networkProtocol, + mappingNames: mappingNames, + mappingTimeout: mappingTimeout, + mappingUpdateInterval: mappingUpdateInterval, + closer: make(chan struct{}), + } +} + +// NewDefaultMapper returns a new mapper that can map ports on a router with +// default settings +func NewDefaultMapper( + log logging.Logger, + router Router, + networkProtocol NetworkProtocol, + mappingNames string, +) Mapper { + return NewMapper( + log, + router, + networkProtocol, + mappingNames, + defaultMappingTimeout, // uses the default value + defaultMappingUpdateInterval, // uses the default value + ) +} + +// MapPort maps a local port to a port on the router until UnmapAllPorts is +// called. 
+func (m *mapper) MapPort(newInternalPort, newExternalPort uint16) error { + m.wg.Add(1) + go m.mapPort(newInternalPort, newExternalPort) + return nil +} + +func (m *mapper) mapPort(newInternalPort, newExternalPort uint16) { + // duration is set to 0 here so that the select case will execute + // immediately + updateTimer := time.NewTimer(0) + defer func() { + updateTimer.Stop() + + m.errLock.Lock() + m.errs.Add(m.router.UnmapPort( + m.networkProtocol, + newInternalPort, + newExternalPort)) + m.errLock.Unlock() + + m.log.Debug("Unmapped external port %d to internal port %d", + newExternalPort, + newInternalPort) + + m.wg.Done() + }() + + for { + select { + case <-updateTimer.C: + err := m.router.MapPort( + m.networkProtocol, + newInternalPort, + newExternalPort, + m.mappingNames, + m.mappingTimeout) + + if err != nil { + m.errLock.Lock() + m.errs.Add(err) + m.errLock.Unlock() + + m.log.Debug("Failed to add mapping from external port %d to internal port %d due to %s", + newExternalPort, + newInternalPort, + err) + } else { + m.log.Debug("Mapped external port %d to internal port %d", + newExternalPort, + newInternalPort) + } + + // remap the port in m.mappingUpdateInterval + updateTimer.Reset(m.mappingUpdateInterval) + case _, _ = <-m.closer: + return // only return when all ports are unmapped + } + } +} + +func (m *mapper) UnmapAllPorts() error { + close(m.closer) + m.wg.Wait() + return m.errs.Err +} diff --git a/nat/no_router.go b/nat/no_router.go new file mode 100644 index 0000000..edb86b6 --- /dev/null +++ b/nat/no_router.go @@ -0,0 +1,28 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package nat + +import ( + "errors" + "net" + "time" +) + +var ( + errNoRouter = errors.New("no nat enabled router was discovered") +) + +type noRouter struct{} + +func (noRouter) MapPort(_ NetworkProtocol, _, _ uint16, _ string, _ time.Duration) error { + return errNoRouter +} + +func (noRouter) UnmapPort(_ NetworkProtocol, _, _ uint16) error { + return errNoRouter +} + +func (noRouter) IP() (net.IP, error) { + return nil, errNoRouter +} diff --git a/nat/pmp.go b/nat/pmp.go new file mode 100644 index 0000000..311375d --- /dev/null +++ b/nat/pmp.go @@ -0,0 +1,71 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package nat + +import ( + "net" + "time" + + "github.com/jackpal/gateway" + "github.com/jackpal/go-nat-pmp" +) + +var ( + pmpClientTimeout = 500 * time.Millisecond +) + +// natPMPClient adapts the NAT-PMP protocol implementation so it conforms to +// the common interface. +type pmpClient struct { + client *natpmp.Client +} + +func (pmp *pmpClient) MapPort( + networkProtocol NetworkProtocol, + newInternalPort uint16, + newExternalPort uint16, + mappingName string, + mappingDuration time.Duration) error { + protocol := string(networkProtocol) + internalPort := int(newInternalPort) + externalPort := int(newExternalPort) + // go-nat-pmp uses seconds to denote their lifetime + lifetime := int(mappingDuration / time.Second) + + _, err := pmp.client.AddPortMapping(protocol, internalPort, externalPort, lifetime) + return err +} + +func (pmp *pmpClient) UnmapPort( + networkProtocol NetworkProtocol, + internalPort uint16, + _ uint16) error { + protocol := string(networkProtocol) + internalPortInt := int(internalPort) + + _, err := pmp.client.AddPortMapping(protocol, internalPortInt, 0, 0) + return err +} + +func (pmp *pmpClient) IP() (net.IP, error) { + response, err := pmp.client.GetExternalAddress() + if err != nil { + return nil, err + } + return response.ExternalIPAddress[:], nil +} + +func getPMPRouter() 
Router {
+	gatewayIP, err := gateway.DiscoverGateway()
+	if err != nil {
+		return nil
+	}
+
+	pmp := &pmpClient{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)}
+	if _, err := pmp.IP(); err != nil {
+		return nil
+	}
+
+	return pmp
+}
diff --git a/nat/router.go b/nat/router.go
new file mode 100644
index 0000000..11b58f9
--- /dev/null
+++ b/nat/router.go
@@ -0,0 +1,65 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+// Package nat performs network address translation and provides helpers for
+// routing ports.
+package nat
+
+import (
+	"net"
+	"time"
+)
+
+// NetworkProtocol is a protocol that will be used through a port
+type NetworkProtocol string
+
+// Available protocols
+const (
+	TCP NetworkProtocol = "TCP"
+	UDP NetworkProtocol = "UDP"
+)
+
+// Router provides standard NAT router functions. Specifically, allowing the
+// fetching of public IPs and port forwarding to this computer.
+type Router interface {
+	// MapPort creates a mapping between a port on the local computer to an
+	// external port on the router.
+	//
+	// The mappingName is something displayed on the router, so it is included
+	// for completeness.
+	MapPort(
+		networkProtocol NetworkProtocol,
+		newInternalPort uint16,
+		newExternalPort uint16,
+		mappingName string,
+		mappingDuration time.Duration) error
+
+	// UnmapPort clears a mapping that was previously made by a call to MapPort
+	UnmapPort(
+		networkProtocol NetworkProtocol,
+		internalPort uint16,
+		externalPort uint16) error
+
+	// Returns the router's IP address on the network the router considers
+	// external
+	IP() (net.IP, error)
+}
+
+// NewRouter returns a new router discovered on the local network
+func NewRouter() Router {
+	routers := make(chan Router)
+	// Because getting a router can take a noticeable amount of time to error,
+	// we run these requests in parallel
+	go func() {
+		routers <- getUPnPRouter()
+	}()
+	go func() {
+		routers <- getPMPRouter()
+	}()
+	for i := 0; i < 2; i++ {
+		if router := <-routers; router != nil {
+			return router
+		}
+	}
+	return noRouter{}
+}
diff --git a/nat/upnp.go b/nat/upnp.go
new file mode 100644
index 0000000..e60cd6e
--- /dev/null
+++ b/nat/upnp.go
@@ -0,0 +1,253 @@
+// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package nat
+
+import (
+	"errors"
+	"fmt"
+	"net"
+	"time"
+
+	"github.com/huin/goupnp"
+	"github.com/huin/goupnp/dcps/internetgateway1"
+	"github.com/huin/goupnp/dcps/internetgateway2"
+)
+
+const (
+	soapTimeout = time.Second
+)
+
+var (
+	errNoGateway = errors.New("Failed to connect to any avaliable gateways")
+)
+
+// upnpClient is the interface used by goupnp for their client implementations
+type upnpClient interface {
+	// attempts to map connection using the provided protocol from the external
+	// port to the internal port for the lease duration.
+	AddPortMapping(
+		newRemoteHost string,
+		newExternalPort uint16,
+		newProtocol string,
+		newInternalPort uint16,
+		newInternalClient string,
+		newEnabled bool,
+		newPortMappingDescription string,
+		newLeaseDuration uint32) error
+
+	// attempt to remove any mapping from the external port.
+ DeletePortMapping( + newRemoteHost string, + newExternalPort uint16, + newProtocol string) error + + // attempts to return the external IP address, formatted as a string. + GetExternalIPAddress() (ip string, err error) + + // returns if there is rsip available, nat enabled, or an unexpected error. + GetNATRSIPStatus() (newRSIPAvailable bool, natEnabled bool, err error) +} + +type upnpRouter struct { + root *goupnp.RootDevice + client upnpClient +} + +func (n *upnpRouter) MapPort( + networkProtocol NetworkProtocol, + newInternalPort uint16, + newExternalPort uint16, + mappingName string, + mappingDuration time.Duration, +) error { + ip, err := n.localAddress() + if err != nil { + return err + } + + protocol := string(networkProtocol) + // goupnp uses seconds to denote their lifetime + lifetime := uint32(mappingDuration / time.Second) + + // UnmapPort's error is intentionally dropped, because the mapping may not + // exist. + n.UnmapPort(networkProtocol, newInternalPort, newExternalPort) + + return n.client.AddPortMapping( + "", // newRemoteHost isn't used to limit the mapping to a host + newExternalPort, + protocol, + newInternalPort, + ip.String(), // newInternalClient is the client traffic should be sent to + true, // newEnabled enables port mappings + mappingName, + lifetime, + ) +} + +func (n *upnpRouter) UnmapPort(networkProtocol NetworkProtocol, _, externalPort uint16) error { + protocol := string(networkProtocol) + return n.client.DeletePortMapping( + "", // newRemoteHost isn't used to limit the mapping to a host + externalPort, + protocol) +} + +func (n *upnpRouter) IP() (net.IP, error) { + ipStr, err := n.client.GetExternalIPAddress() + if err != nil { + return nil, err + } + + ip := net.ParseIP(ipStr) + if ip == nil { + return nil, fmt.Errorf("invalid IP %s", ipStr) + } + return ip, nil +} + +func (n *upnpRouter) localAddress() (net.IP, error) { + // attempt to get an address on the router + deviceAddr, err := net.ResolveUDPAddr("udp4", 
n.root.URLBase.Host)
+	if err != nil {
+		return nil, err
+	}
+	deviceIP := deviceAddr.IP
+
+	netInterfaces, err := net.Interfaces()
+	if err != nil {
+		return nil, err
+	}
+
+	// attempt to find one of my ips that the router would know about
+	for _, netInterface := range netInterfaces {
+		addrs, err := netInterface.Addrs()
+		if err != nil {
+			continue
+		}
+
+		for _, addr := range addrs {
+			// this is pretty janky, but it seems to be the best way to get the
+			// ip mask and properly check if the ip references the device we are
+			// connected to
+			ipNet, ok := addr.(*net.IPNet)
+			if !ok {
+				continue
+			}
+
+			if ipNet.Contains(deviceIP) {
+				return ipNet.IP, nil
+			}
+		}
+	}
+	return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP)
+}
+
+// getUPnPRouter searches for all Gateway Devices that have available
+// connections in the goupnp library and returns the first connection it can
+// find.
+func getUPnPRouter() Router {
+	routers := make(chan *upnpRouter)
+	// Because DiscoverDevices takes a noticeable amount of time to error, we
+	// run these requests in parallel
+	go func() {
+		routers <- connectToGateway(internetgateway1.URN_WANConnectionDevice_1, gateway1)
+	}()
+	go func() {
+		routers <- connectToGateway(internetgateway2.URN_WANConnectionDevice_2, gateway2)
+	}()
+	for i := 0; i < 2; i++ {
+		if router := <-routers; router != nil {
+			return router
+		}
+	}
+	return nil
+}
+
+func gateway1(client goupnp.ServiceClient) upnpClient {
+	switch client.Service.ServiceType {
+	case internetgateway1.URN_WANIPConnection_1:
+		return &internetgateway1.WANIPConnection1{ServiceClient: client}
+	case internetgateway1.URN_WANPPPConnection_1:
+		return &internetgateway1.WANPPPConnection1{ServiceClient: client}
+	default:
+		return nil
+	}
+}
+
+func gateway2(client goupnp.ServiceClient) upnpClient {
+	switch client.Service.ServiceType {
+	case internetgateway2.URN_WANIPConnection_1:
+		return &internetgateway2.WANIPConnection1{ServiceClient:
client}
+	case internetgateway2.URN_WANIPConnection_2:
+		return &internetgateway2.WANIPConnection2{ServiceClient: client}
+	case internetgateway2.URN_WANPPPConnection_1:
+		return &internetgateway2.WANPPPConnection1{ServiceClient: client}
+	default:
+		return nil
+	}
+}
+
+func connectToGateway(deviceType string, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter {
+	devs, err := goupnp.DiscoverDevices(deviceType)
+	if err != nil {
+		return nil
+	}
+	// we are iterating over all the network devices, acting as possible roots
+	for i := range devs {
+		dev := &devs[i]
+		if dev.Root == nil {
+			continue
+		}
+
+		// the root device may be a router, so attempt to connect to that
+		rootDevice := &dev.Root.Device
+		if upnp := getRouter(dev, rootDevice, toClient); upnp != nil {
+			return upnp
+		}
+
+		// attempt to connect to any sub devices
+		devices := rootDevice.Devices
+		for i := range devices {
+			if upnp := getRouter(dev, &devices[i], toClient); upnp != nil {
+				return upnp
+			}
+		}
+	}
+	return nil
+}
+
+func getRouter(rootDevice *goupnp.MaybeRootDevice, device *goupnp.Device, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter {
+	for i := range device.Services {
+		service := &device.Services[i]
+
+		soapClient := service.NewSOAPClient()
+		// make sure the client times out if needed
+		soapClient.HTTPClient.Timeout = soapTimeout
+
+		// attempt to create a client connection
+		serviceClient := goupnp.ServiceClient{
+			SOAPClient: soapClient,
+			RootDevice: rootDevice.Root,
+			Location:   rootDevice.Location,
+			Service:    service,
+		}
+		client := toClient(serviceClient)
+		if client == nil {
+			continue
+		}
+
+		// check whether port mapping is enabled
+		if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat {
+			continue
+		}
+
+		// we found a router!
+ return &upnpRouter{ + root: rootDevice.Root, + client: client, + } + } + return nil +} diff --git a/networking/handshake_handlers.go b/networking/handshake_handlers.go index 83d1348..e037781 100644 --- a/networking/handshake_handlers.go +++ b/networking/handshake_handlers.go @@ -54,6 +54,13 @@ Periodically gossip peerlists. stakers should be in the set). */ +/* +Attempt reconnections + - If a non-staker disconnects, delete the connection + - If a staker disconnects, attempt to reconnect to the node for awhile. If the + node isn't connected to after awhile delete the connection. +*/ + const ( // CurrentVersion this avalanche instance is executing. CurrentVersion = "avalanche/0.0.1" @@ -70,6 +77,9 @@ const ( // GetVersionTimeout is the amount of time to wait before sending a // getVersion message to a partially connected peer GetVersionTimeout = 2 * time.Second + // ReconnectTimeout is the amount of time to wait to reconnect to a staker + // before giving up + ReconnectTimeout = 1 * time.Minute ) // Manager is the struct that will be accessed on event calls @@ -100,6 +110,7 @@ type Handshake struct { connections AddrCert // Connections that I think are connected versionTimeout timer.TimeoutManager + reconnectTimeout timer.TimeoutManager peerListGossiper *timer.Repeater awaitingLock sync.Mutex @@ -143,6 +154,10 @@ func (nm *Handshake) Initialize( nm.versionTimeout.Initialize(GetVersionTimeout) go nm.log.RecoverAndPanic(nm.versionTimeout.Dispatch) + + nm.reconnectTimeout.Initialize(ReconnectTimeout) + go nm.log.RecoverAndPanic(nm.reconnectTimeout.Dispatch) + nm.peerListGossiper = timer.NewRepeater(nm.gossipPeerList, PeerListGossipSpacing) go nm.log.RecoverAndPanic(nm.peerListGossiper.Dispatch) } @@ -290,6 +305,73 @@ func checkPeerCertificate(_ *C.struct_msgnetwork_conn_t, connected C.bool, _ uns return connected } +func (nm *Handshake) connectedToPeer(conn *C.struct_peernetwork_conn_t, addr salticidae.NetAddr) { + ip := toIPDesc(addr) + // If we're enforcing staking, 
use a peer's certificate to uniquely identify them + // Otherwise, use a hash of their ip to identify them + cert := ids.ShortID{} + ipCert := toShortID(ip) + if nm.enableStaking { + cert = getPeerCert(conn) + } else { + cert = ipCert + } + + nm.log.Debug("Connected to %s", ip) + + longCert := cert.LongID() + nm.reconnectTimeout.Remove(longCert) + nm.reconnectTimeout.Remove(ipCert.LongID()) + + nm.pending.Add(addr, cert) + + handler := new(func()) + *handler = func() { + if nm.pending.ContainsIP(addr) { + nm.SendGetVersion(addr) + nm.versionTimeout.Put(longCert, *handler) + } + } + (*handler)() +} + +func (nm *Handshake) disconnectedFromPeer(addr salticidae.NetAddr) { + cert := ids.ShortID{} + if pendingCert, exists := nm.pending.GetID(addr); exists { + cert = pendingCert + } else if connectedCert, exists := nm.connections.GetID(addr); exists { + cert = connectedCert + } else { + return + } + + nm.log.Info("Disconnected from %s", toIPDesc(addr)) + + longCert := cert.LongID() + if nm.vdrs.Contains(cert) { + nm.reconnectTimeout.Put(longCert, func() { + nm.net.DelPeer(addr) + }) + } else { + nm.net.DelPeer(addr) + } + nm.versionTimeout.Remove(longCert) + + if !nm.enableStaking { + nm.vdrs.Remove(cert) + } + + nm.pending.RemoveIP(addr) + nm.connections.RemoveIP(addr) + nm.numPeers.Set(float64(nm.connections.Len())) + + nm.awaitingLock.Lock() + defer nm.awaitingLock.Unlock() + for _, awaiting := range HandshakeNet.awaiting { + awaiting.Remove(cert) + } +} + // peerHandler notifies a change to the set of connected peers // connected is true if a new peer is connected // connected is false if a formerly connected peer has disconnected @@ -298,68 +380,30 @@ func peerHandler(_conn *C.struct_peernetwork_conn_t, connected C.bool, _ unsafe. 
pConn := salticidae.PeerNetworkConnFromC(salticidae.CPeerNetworkConn(_conn)) addr := pConn.GetPeerAddr(true) - ip := toIPDesc(addr) - if !connected { - if !HandshakeNet.enableStaking { - cert := toShortID(ip) - HandshakeNet.vdrs.Remove(cert) - } - - cert := ids.ShortID{} - if pendingCert, exists := HandshakeNet.pending.GetID(addr); exists { - cert = pendingCert - } else if connectedCert, exists := HandshakeNet.connections.GetID(addr); exists { - cert = connectedCert - } else { - return - } - - HandshakeNet.pending.RemoveIP(addr) - HandshakeNet.connections.RemoveIP(addr) - - HandshakeNet.numPeers.Set(float64(HandshakeNet.connections.Len())) - - HandshakeNet.log.Warn("Disconnected from %s", ip) - - HandshakeNet.awaitingLock.Lock() - defer HandshakeNet.awaitingLock.Unlock() - - for _, awaiting := range HandshakeNet.awaiting { - awaiting.Remove(cert) - } - - return - } - - HandshakeNet.log.Debug("Connected to %s", ip) - - // If we're enforcing staking, use a peer's certificate to uniquely identify them - // Otherwise, use a hash of their ip to identify them - cert := ids.ShortID{} - if HandshakeNet.enableStaking { - cert = getPeerCert(_conn) + if connected { + HandshakeNet.connectedToPeer(_conn, addr) } else { - cert = toShortID(ip) + HandshakeNet.disconnectedFromPeer(addr) } - HandshakeNet.pending.Add(addr, cert) - - certID := cert.LongID() - handler := new(func()) - *handler = func() { - if HandshakeNet.pending.ContainsIP(addr) { - HandshakeNet.SendGetVersion(addr) - HandshakeNet.versionTimeout.Put(certID, *handler) - } - } - (*handler)() } // unknownPeerHandler notifies of an unknown peer connection attempt //export unknownPeerHandler func unknownPeerHandler(_addr *C.netaddr_t, _cert *C.x509_t, _ unsafe.Pointer) { - addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr)) + addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr)).Copy(true) ip := toIPDesc(addr) HandshakeNet.log.Info("Adding peer %s", ip) + + cert := ids.ShortID{} + if 
HandshakeNet.enableStaking { + cert = getCert(salticidae.X509FromC(salticidae.CX509(_cert))) + } else { + cert = toShortID(ip) + } + + HandshakeNet.reconnectTimeout.Put(cert.LongID(), func() { + HandshakeNet.net.DelPeer(addr) + }) HandshakeNet.net.AddPeer(addr) } @@ -522,16 +566,20 @@ func peerList(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe. cErr := salticidae.NewError() for _, ip := range ips { HandshakeNet.log.Verbo("Trying to adding peer %s", ip) - addr := salticidae.NewNetAddrFromIPPortString(ip.String(), false, &cErr) + addr := salticidae.NewNetAddrFromIPPortString(ip.String(), true, &cErr) if cErr.GetCode() == 0 && !HandshakeNet.myAddr.IsEq(addr) { // Make sure not to connect to myself ip := toIPDesc(addr) + ipCert := toShortID(ip) if !HandshakeNet.pending.ContainsIP(addr) && !HandshakeNet.connections.ContainsIP(addr) { HandshakeNet.log.Debug("Adding peer %s", ip) + + HandshakeNet.reconnectTimeout.Put(ipCert.LongID(), func() { + HandshakeNet.net.DelPeer(addr) + }) HandshakeNet.net.AddPeer(addr) } } - addr.Free() } } diff --git a/networking/voting_handlers.go b/networking/voting_handlers.go index ecc7402..4c7be62 100644 --- a/networking/voting_handlers.go +++ b/networking/voting_handlers.go @@ -366,9 +366,9 @@ func (s *Voting) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID vID := validatorID if addr, exists := s.conns.GetIP(vID); exists { addrs = append(addrs, addr) - s.log.Verbo("Sending a PushQuery to %s", toIPDesc(addr)) + s.log.Verbo("Sending a PullQuery to %s", toIPDesc(addr)) } else { - s.log.Warn("Attempted to send a PushQuery message to a disconnected validator: %s", vID) + s.log.Warn("Attempted to send a PullQuery message to a disconnected validator: %s", vID) s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) }) } } diff --git a/networking/xputtest/issuer.go b/networking/xputtest/issuer.go index 933f026..e76af3d 100644 --- a/networking/xputtest/issuer.go +++ b/networking/xputtest/issuer.go @@ 
-9,6 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/logging" ) type issuableVM interface { @@ -17,17 +18,18 @@ type issuableVM interface { // Issuer manages all the chain transaction flushing. type Issuer struct { - lock sync.Mutex - vms map[[32]byte]issuableVM - locks map[[32]byte]sync.Locker - + lock sync.Mutex + log logging.Logger + vms map[[32]byte]issuableVM + locks map[[32]byte]sync.Locker callbacks chan func() } // Initialize this flusher -func (i *Issuer) Initialize() { +func (i *Issuer) Initialize(log logging.Logger) { i.lock.Lock() defer i.lock.Unlock() + i.log = log i.vms = make(map[[32]byte]issuableVM) i.locks = make(map[[32]byte]sync.Locker) i.callbacks = make(chan func(), 1000) @@ -64,8 +66,12 @@ func (i *Issuer) IssueTx(chainID ids.ID, tx []byte, finalized func(choices.Statu lock.Lock() defer lock.Unlock() if vm, exists := i.vms[key]; exists { - vm.IssueTx(tx, finalized) + if _, err := vm.IssueTx(tx, finalized); err != nil { + i.log.Error("Issuing the tx returned with %s unexpectedly", err) + } } } + } else { + i.log.Warn("Attempted to issue a Tx to an unsupported chain %s", chainID) } } diff --git a/node/config.go b/node/config.go index 76c9e48..b35d997 100644 --- a/node/config.go +++ b/node/config.go @@ -4,9 +4,8 @@ package node import ( - "github.com/ava-labs/go-ethereum/p2p/nat" - "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/nat" "github.com/ava-labs/gecko/snow/consensus/avalanche" "github.com/ava-labs/gecko/snow/networking/router" "github.com/ava-labs/gecko/utils" @@ -16,7 +15,7 @@ import ( // Config contains all of the configurations of an Ava node. 
type Config struct { // protocol to use for opening the network interface - Nat nat.Interface + Nat nat.Router // ID of the network this node should connect to NetworkID uint32 @@ -56,6 +55,9 @@ type Config struct { // Logging configuration LoggingConfig logging.Config + // Plugin directory + PluginDir string + // Consensus configuration ConsensusParams avalanche.Parameters diff --git a/node/node.go b/node/node.go index b61a146..9257ca7 100644 --- a/node/node.go +++ b/node/node.go @@ -14,6 +14,7 @@ import ( "errors" "fmt" "io/ioutil" + "path" "sync" "unsafe" @@ -25,6 +26,7 @@ import ( "github.com/ava-labs/gecko/api/keystore" "github.com/ava-labs/gecko/api/metrics" "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/genesis" @@ -35,10 +37,13 @@ import ( "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms" "github.com/ava-labs/gecko/vms/avm" - "github.com/ava-labs/gecko/vms/evm" + "github.com/ava-labs/gecko/vms/nftfx" "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/propertyfx" + "github.com/ava-labs/gecko/vms/rpcchainvm" "github.com/ava-labs/gecko/vms/secp256k1fx" "github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/vms/spdagvm" @@ -49,6 +54,10 @@ const ( maxMessageSize = 1 << 25 // maximum size of a message sent with salticidae ) +var ( + genesisHashKey = []byte("genesisID") +) + // MainNode is the reference for node callbacks var MainNode = Node{} @@ -68,6 +77,9 @@ type Node struct { // Handles calls to Keystore API keystoreServer keystore.Keystore + // Manages shared memory + sharedMemory atomic.SharedMemory + // Manages creation of blockchains and routing messages to them chainManager chains.Manager @@ -215,7 +227,7 @@ func (n *Node) 
initConsensusNet() { func (n *Node) initClients() { n.Issuer = &xputtest.Issuer{} - n.Issuer.Initialize() + n.Issuer.Initialize(n.Log) n.CClientAPI = &xputtest.CClientHandler n.CClientAPI.Initialize(n.ClientNet, n.Issuer) @@ -285,7 +297,38 @@ func (n *Node) Dispatch() { n.EC.Dispatch() } ****************************************************************************** */ -func (n *Node) initDatabase() { n.DB = n.Config.DB } +func (n *Node) initDatabase() error { + n.DB = n.Config.DB + + expectedGenesis, err := genesis.Genesis(n.Config.NetworkID) + if err != nil { + return err + } + rawExpectedGenesisHash := hashing.ComputeHash256(expectedGenesis) + + rawGenesisHash, err := n.DB.Get(genesisHashKey) + if err == database.ErrNotFound { + rawGenesisHash = rawExpectedGenesisHash + err = n.DB.Put(genesisHashKey, rawGenesisHash) + } + if err != nil { + return err + } + + genesisHash, err := ids.ToID(rawGenesisHash) + if err != nil { + return err + } + expectedGenesisHash, err := ids.ToID(rawExpectedGenesisHash) + if err != nil { + return err + } + + if !genesisHash.Equals(expectedGenesisHash) { + return fmt.Errorf("db contains invalid genesis hash. 
DB Genesis: %s Generated Genesis: %s", genesisHash, expectedGenesisHash) + } + return nil +} // Initialize this node's ID // If staking is disabled, a node's ID is a hash of its IP @@ -317,17 +360,32 @@ func (n *Node) initNodeID() error { } // Create the vmManager and register the following vms: -// AVM, EVM, Simple Payments DAG, Simple Payments Chain +// AVM, Simple Payments DAG, Simple Payments Chain // The Platform VM is registered in initStaking because // its factory needs to reference n.chainManager, which is nil right now -func (n *Node) initVMManager() { +func (n *Node) initVMManager() error { + avaAssetID, err := genesis.AVAAssetID(n.Config.NetworkID) + if err != nil { + return err + } + n.vmManager = vms.NewManager(&n.APIServer, n.HTTPLog) - n.vmManager.RegisterVMFactory(avm.ID, &avm.Factory{}) - n.vmManager.RegisterVMFactory(evm.ID, &evm.Factory{}) - n.vmManager.RegisterVMFactory(spdagvm.ID, &spdagvm.Factory{TxFee: n.Config.AvaTxFee}) - n.vmManager.RegisterVMFactory(spchainvm.ID, &spchainvm.Factory{}) - n.vmManager.RegisterVMFactory(secp256k1fx.ID, &secp256k1fx.Factory{}) - n.vmManager.RegisterVMFactory(timestampvm.ID, ×tampvm.Factory{}) + + errs := wrappers.Errs{} + errs.Add( + n.vmManager.RegisterVMFactory(avm.ID, &avm.Factory{ + AVA: avaAssetID, + Platform: ids.Empty, + }), + n.vmManager.RegisterVMFactory(genesis.EVMID, &rpcchainvm.Factory{Path: path.Join(n.Config.PluginDir, "evm")}), + n.vmManager.RegisterVMFactory(spdagvm.ID, &spdagvm.Factory{TxFee: n.Config.AvaTxFee}), + n.vmManager.RegisterVMFactory(spchainvm.ID, &spchainvm.Factory{}), + n.vmManager.RegisterVMFactory(timestampvm.ID, ×tampvm.Factory{}), + n.vmManager.RegisterVMFactory(secp256k1fx.ID, &secp256k1fx.Factory{}), + n.vmManager.RegisterVMFactory(nftfx.ID, &nftfx.Factory{}), + n.vmManager.RegisterVMFactory(propertyfx.ID, &propertyfx.Factory{}), + ) + return errs.Err } // Create the EventDispatcher used for hooking events @@ -343,38 +401,64 @@ func (n *Node) initEventDispatcher() { // 
Initializes the Platform chain. // Its genesis data specifies the other chains that should // be created. -func (n *Node) initChains() { +func (n *Node) initChains() error { n.Log.Info("initializing chains") vdrs := n.vdrs + + // If staking is disabled, ignore updates to Subnets' validator sets + // Instead of updating node's validator manager, platform chain makes changes + // to its own local validator manager (which isn't used for sampling) if !n.Config.EnableStaking { defaultSubnetValidators := validators.NewSet() + defaultSubnetValidators.Add(validators.NewValidator(n.ID, 1)) vdrs = validators.NewManager() vdrs.PutValidatorSet(platformvm.DefaultSubnetID, defaultSubnetValidators) } - n.vmManager.RegisterVMFactory( + avaAssetID, err := genesis.AVAAssetID(n.Config.NetworkID) + if err != nil { + return err + } + createAVMTx, err := genesis.VMGenesis(n.Config.NetworkID, avm.ID) + if err != nil { + return err + } + + err = n.vmManager.RegisterVMFactory( /*vmID=*/ platformvm.ID, /*vmFactory=*/ &platformvm.Factory{ - ChainManager: n.chainManager, - Validators: vdrs, + ChainManager: n.chainManager, + Validators: vdrs, + StakingEnabled: n.Config.EnableStaking, + AVA: avaAssetID, + AVM: createAVMTx.ID(), }, ) + if err != nil { + return err + } beacons := validators.NewSet() for _, peer := range n.Config.BootstrapPeers { beacons.Add(validators.NewValidator(peer.ID, 1)) } - genesisBytes := genesis.Genesis(n.Config.NetworkID) + genesisBytes, err := genesis.Genesis(n.Config.NetworkID) + if err != nil { + return err + } // Create the Platform Chain n.chainManager.ForceCreateChain(chains.ChainParameters{ ID: ids.Empty, + SubnetID: platformvm.DefaultSubnetID, GenesisData: genesisBytes, // Specifies other chains to create VMAlias: platformvm.ID.String(), CustomBeacons: beacons, }) + + return nil } // initAPIServer initializes the server that handles HTTP calls @@ -400,6 +484,7 @@ func (n *Node) initAPIServer() { // Assumes n.DB, n.vdrs all initialized (non-nil) func (n *Node) 
initChainManager() { n.chainManager = chains.New( + n.Config.EnableStaking, n.Log, n.LogFactory, n.vmManager, @@ -415,12 +500,20 @@ func (n *Node) initChainManager() { n.ValidatorAPI, &n.APIServer, &n.keystoreServer, + &n.sharedMemory, ) n.chainManager.AddRegistrant(&n.APIServer) } -// initWallet initializes the Wallet service +// initSharedMemory initializes the shared memory for cross chain interaction +func (n *Node) initSharedMemory() { + n.Log.Info("initializing SharedMemory") + sharedMemoryDB := prefixdb.New([]byte("shared memory"), n.DB) + n.sharedMemory.Initialize(n.Log, sharedMemoryDB) +} + +// initKeystoreAPI initializes the keystore service // Assumes n.APIServer is already set func (n *Node) initKeystoreAPI() { n.Log.Info("initializing Keystore API") @@ -464,24 +557,35 @@ func (n *Node) initIPCAPI() { } // Give chains and VMs aliases as specified by the genesis information -func (n *Node) initAliases() { +func (n *Node) initAliases() error { n.Log.Info("initializing aliases") - defaultAliases, chainAliases, vmAliases := genesis.Aliases(n.Config.NetworkID) + defaultAliases, chainAliases, vmAliases, err := genesis.Aliases(n.Config.NetworkID) + if err != nil { + return err + } + for chainIDKey, aliases := range chainAliases { chainID := ids.NewID(chainIDKey) for _, alias := range aliases { - n.Log.AssertNoError(n.chainManager.Alias(chainID, alias)) + if err := n.chainManager.Alias(chainID, alias); err != nil { + return err + } } } for vmIDKey, aliases := range vmAliases { vmID := ids.NewID(vmIDKey) for _, alias := range aliases { - n.Log.AssertNoError(n.vmManager.Alias(vmID, alias)) + if err := n.vmManager.Alias(vmID, alias); err != nil { + return err + } } } for url, aliases := range defaultAliases { - n.APIServer.AddAliases(url, aliases...) 
+ if err := n.APIServer.AddAliases(url, aliases...); err != nil { + return err + } } + return nil } // Initialize this node @@ -496,12 +600,17 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg } n.HTTPLog = httpLog - n.initDatabase() // Set up the node's database + if err := n.initDatabase(); err != nil { // Set up the node's database + return fmt.Errorf("problem initializing database: %w", err) + } if err = n.initNodeID(); err != nil { // Derive this node's ID return fmt.Errorf("problem initializing staker ID: %w", err) } + // initialize shared memory + n.initSharedMemory() + // Start HTTP APIs n.initAPIServer() // Start the API Server n.initKeystoreAPI() // Start the Keystore API @@ -511,8 +620,13 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg if err = n.initNetlib(); err != nil { // Set up all networking return fmt.Errorf("problem initializing networking: %w", err) } - n.initValidatorNet() // Set up the validator handshake + authentication - n.initVMManager() // Set up the vm manager + if err := n.initValidatorNet(); err != nil { // Set up the validator handshake + authentication + return fmt.Errorf("problem initializing validator network: %w", err) + } + if err := n.initVMManager(); err != nil { // Set up the vm manager + return fmt.Errorf("problem initializing the VM manager: %w", err) + } + n.initEventDispatcher() // Set up the event dispatcher n.initChainManager() // Set up the chain manager n.initConsensusNet() // Set up the main consensus network @@ -524,10 +638,11 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg n.initAdminAPI() // Start the Admin API n.initIPCAPI() // Start the IPC API - n.initAliases() // Set up aliases - n.initChains() // Start the Platform chain - return nil + if err := n.initAliases(); err != nil { // Set up aliases + return err + } + return n.initChains() // Start the Platform chain } // Shutdown this node diff --git 
a/scripts/ansible/inventory.yml b/scripts/ansible/inventory.yml index 1841bd9..5315082 100755 --- a/scripts/ansible/inventory.yml +++ b/scripts/ansible/inventory.yml @@ -2,8 +2,6 @@ borealis_bootstrap: hosts: bootstrap1: ansible_host: 3.227.207.132 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys1/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys1/staker.crt" http_tls_enabled: true http_tls_key_file: "/home/ubuntu/ssl/privkey.pem" http_tls_cert_file: "/home/ubuntu/ssl/fullchain.pem" @@ -11,7 +9,7 @@ borealis_bootstrap: ansible_connection: ssh ansible_user: ubuntu - network_id: "borealis" + network_id: "cascade" api_admin_enabled: true api_keystore_enabled: true api_metrics_enabled: true @@ -28,6 +26,8 @@ borealis_bootstrap: bootstrap_ids: "" staking_port: 21001 staking_tls_enabled: true + staking_tls_key_file: "/home/ubuntu/keys/staker.key" + staking_tls_cert_file: "/home/ubuntu/keys/staker.crt" log_dir: "/home/ubuntu/.gecko" log_level: debug snow_sample_size: 3 @@ -44,25 +44,17 @@ borealis_node: hosts: node1: ansible_host: 34.207.133.167 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys2/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys2/staker.crt" node2: ansible_host: 107.23.241.199 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys3/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys3/staker.crt" node3: ansible_host: 54.197.215.186 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys4/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys4/staker.crt" node4: ansible_host: 18.234.153.22 - staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys5/staker.key" - staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/keys5/staker.crt" vars: 
ansible_connection: ssh ansible_user: ubuntu - network_id: "borealis" + network_id: "cascade" api_admin_enabled: true api_keystore_enabled: true api_metrics_enabled: true @@ -76,9 +68,11 @@ borealis_node: http_tls_key_file: "" http_tls_cert_file: "" bootstrap_ips: "3.227.207.132:21001" - bootstrap_ids: "7Xhw2mDxuDS44j42TCB6U5579esbSt3Lg" + bootstrap_ids: "NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co" staking_port: 21001 staking_tls_enabled: true + staking_tls_key_file: "/home/ubuntu/keys/staker.key" + staking_tls_cert_file: "/home/ubuntu/keys/staker.crt" log_dir: "/home/ubuntu/.gecko" log_level: debug snow_sample_size: 3 diff --git a/scripts/ansible/restart_playbook.yml b/scripts/ansible/restart_playbook.yml index 97b8533..48d44fb 100755 --- a/scripts/ansible/restart_playbook.yml +++ b/scripts/ansible/restart_playbook.yml @@ -8,6 +8,7 @@ ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava repo_folder: ~/go/src/github.com/ava-labs/gecko repo_name: ava-labs/gecko + repo_branch: cascade tasks: - name: Kill Node command: killall ava @@ -15,6 +16,7 @@ - git: repo: ssh://git@github.com/{{ repo_name }}.git dest: "{{ repo_folder }}" + version: "{{ repo_branch }}" update: yes - name: Build project command: ./scripts/build.sh diff --git a/scripts/ansible/update_playbook.yml b/scripts/ansible/update_playbook.yml index ad9d314..b28def3 100755 --- a/scripts/ansible/update_playbook.yml +++ b/scripts/ansible/update_playbook.yml @@ -8,6 +8,7 @@ ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava repo_folder: ~/go/src/github.com/ava-labs/gecko repo_name: ava-labs/gecko + repo_branch: cascade tasks: - name: Kill Node command: killall ava @@ -15,6 +16,7 @@ - git: repo: ssh://git@github.com/{{ repo_name }}.git dest: "{{ repo_folder }}" + version: "{{ repo_branch }}" update: yes - name: Build project command: ./scripts/build.sh diff --git a/scripts/aws/create.py b/scripts/aws/create.py old mode 100644 new mode 100755 index ab7a6d7..07d75e7 --- a/scripts/aws/create.py +++ 
b/scripts/aws/create.py @@ -1,17 +1,16 @@ -import sys +#!/usr/bin/env python3 +""" +Start a number of AVA nodes on Amazon EC2 +""" + import boto3 -ec2 = boto3.client("ec2") - -# Should be called with python3 aws_create.py $numBootstraps $numNodes -numBootstraps = int(sys.argv[1]) -numNodes = int(sys.argv[2]) bootstapNode = "Borealis-Bootstrap" fullNode = "Borealis-Node" -def runInstances(num: int, name: str): +def runInstances(ec2, num: int, name: str): if num > 0: ec2.run_instances( ImageId="ami-0badd1c10cb7673e9", @@ -28,8 +27,18 @@ def runInstances(num: int, name: str): def main(): - runInstances(numBootstraps, bootstapNode) - runInstances(numNodes, fullNode) + import argparse + + parser = argparse.ArgumentParser( + description=__doc__, + ) + parser.add_argument('numBootstraps', type=int) + parser.add_argument('numNodes', type=int) + args = parser.parse_args() + + ec2 = boto3.client("ec2") + runInstances(ec2, args.numBootstraps, bootstapNode) + runInstances(ec2, args.numNodes, fullNode) if __name__ == "__main__": diff --git a/scripts/build.sh b/scripts/build.sh index 6bb31a3..b2d1984 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -3,18 +3,31 @@ # Ted: contact me when you make any changes PREFIX="${PREFIX:-$(pwd)/build}" +PLUGIN_PREFIX="$PREFIX/plugins" SRC_DIR="$(dirname "${BASH_SOURCE[0]}")" source "$SRC_DIR/env.sh" +CORETH_PKG=github.com/ava-labs/coreth +CORETH_PATH="$GOPATH/src/$CORETH_PKG" +if [[ -d "$CORETH_PATH/.git" ]]; then + cd "$CORETH_PATH" + go get -t -v -d "./..." + cd - +else + go get -t -v -d "$CORETH_PKG/..." +fi + GECKO_PKG=github.com/ava-labs/gecko GECKO_PATH="$GOPATH/src/$GECKO_PKG" if [[ -d "$GECKO_PATH/.git" ]]; then cd "$GECKO_PATH" - go get -t -v "./..." + go get -t -v -d "./..." cd - else - go get -t -v "$GECKO_PKG/..." + go get -t -v -d "$GECKO_PKG/..." 
fi + go build -o "$PREFIX/ava" "$GECKO_PATH/main/"*.go go build -o "$PREFIX/xputtest" "$GECKO_PATH/xputtest/"*.go +go build -o "$PLUGIN_PREFIX/evm" "$CORETH_PATH/plugin/"*.go diff --git a/scripts/build_image.sh b/scripts/build_image.sh index 1b91410..5da7f59 100755 --- a/scripts/build_image.sh +++ b/scripts/build_image.sh @@ -17,8 +17,6 @@ fi if [[ ! -d "$WORKPREFIX" ]]; then mkdir -p "$WORKPREFIX" git config --global credential.helper cache - git clone https://github.com/ava-labs/coreth.git "$WORKPREFIX/coreth" - git clone --depth 1 https://github.com/ava-labs/go-ethereum.git "$WORKPREFIX/go-ethereum" git clone https://github.com/ava-labs/gecko.git "$WORKPREFIX/gecko" fi GECKO_COMMIT="$(git --git-dir="$WORKPREFIX/gecko/.git" rev-parse --short HEAD)" diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go index f803018..3135ce2 100644 --- a/snow/consensus/avalanche/consensus_test.go +++ b/snow/consensus/avalanche/consensus_test.go @@ -16,6 +16,11 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) +func GenerateID() ids.ID { + offset++ + return ids.Empty.Prefix(offset) +} + var ( Genesis = GenerateID() offset = uint64(0) @@ -100,7 +105,7 @@ func AddTest(t *testing.T, factory Factory) { if !avl.Finalized() { t.Fatalf("An empty avalanche instance is not finalized") - } else if !Matches([]ids.ID{vts[0].ID(), vts[1].ID()}, avl.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vts[0].ID(), vts[1].ID()}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } @@ -119,7 +124,7 @@ func AddTest(t *testing.T, factory Factory) { if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") - } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } @@ -138,7 +143,7 @@ func AddTest(t *testing.T, factory Factory) { if avl.Finalized() { 
t.Fatalf("A non-empty avalanche instance is finalized") - } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } @@ -146,7 +151,7 @@ func AddTest(t *testing.T, factory Factory) { if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") - } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } @@ -154,7 +159,7 @@ func AddTest(t *testing.T, factory Factory) { if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") - } else if !Matches([]ids.ID{vtx0.id}, avl.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } } diff --git a/snow/consensus/avalanche/ids_test.go b/snow/consensus/avalanche/ids_test.go deleted file mode 100644 index dc9a807..0000000 --- a/snow/consensus/avalanche/ids_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "github.com/ava-labs/gecko/ids" -) - -func GenerateID() ids.ID { - offset++ - return ids.Empty.Prefix(offset) -} - -func Matches(a, b []ids.ID) bool { - if len(a) != len(b) { - return false - } - set := ids.Set{} - set.Add(a...) - for _, id := range b { - if !set.Contains(id) { - return false - } - } - return true -} -func MatchesShort(a, b []ids.ShortID) bool { - if len(a) != len(b) { - return false - } - set := ids.ShortSet{} - set.Add(a...) 
- for _, id := range b { - if !set.Contains(id) { - return false - } - } - return true -} diff --git a/snow/consensus/avalanche/metrics.go b/snow/consensus/avalanche/metrics.go new file mode 100644 index 0000000..4553361 --- /dev/null +++ b/snow/consensus/avalanche/metrics.go @@ -0,0 +1,87 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avalanche + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" +) + +type metrics struct { + numProcessing prometheus.Gauge + latAccepted, latRejected prometheus.Histogram + + clock timer.Clock + processing map[[32]byte]time.Time +} + +// Initialize registers the vertex metrics with the provided registerer +func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { + m.processing = make(map[[32]byte]time.Time) + + m.numProcessing = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "vtx_processing", + Help: "Number of currently processing vertices", + }) + m.latAccepted = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespace, + Name: "vtx_accepted", + Help: "Latency of accepting from the time the vertex was issued in milliseconds", + Buckets: timer.Buckets, + }) + m.latRejected = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespace, + Name: "vtx_rejected", + Help: "Latency of rejecting from the time the vertex was issued in milliseconds", + Buckets: timer.Buckets, + }) + + if err := registerer.Register(m.numProcessing); err != nil { + return fmt.Errorf("Failed to register vtx_processing statistics due to %w", err) + } + if err := registerer.Register(m.latAccepted); err != nil { + return fmt.Errorf("Failed to register vtx_accepted statistics due to %w", err) + } + if err := registerer.Register(m.latRejected); err != nil { + 
return fmt.Errorf("Failed to register vtx_rejected statistics due to %w", err) + } + return nil +} + +func (m *metrics) Issued(id ids.ID) { + m.processing[id.Key()] = m.clock.Time() + m.numProcessing.Inc() +} + +func (m *metrics) Accepted(id ids.ID) { + key := id.Key() + start := m.processing[key] + end := m.clock.Time() + + delete(m.processing, key) + + m.latAccepted.Observe(float64(end.Sub(start).Milliseconds())) + m.numProcessing.Dec() +} + +func (m *metrics) Rejected(id ids.ID) { + key := id.Key() + start := m.processing[key] + end := m.clock.Time() + + delete(m.processing, key) + + m.latRejected.Observe(float64(end.Sub(start).Milliseconds())) + m.numProcessing.Dec() +} diff --git a/snow/consensus/avalanche/parameters_test.go b/snow/consensus/avalanche/parameters_test.go index aa28f98..8935cf5 100644 --- a/snow/consensus/avalanche/parameters_test.go +++ b/snow/consensus/avalanche/parameters_test.go @@ -12,10 +12,11 @@ import ( func TestParametersValid(t *testing.T) { p := Parameters{ Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -29,10 +30,11 @@ func TestParametersValid(t *testing.T) { func TestParametersInvalidParents(t *testing.T) { p := Parameters{ Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 1, BatchSize: 1, @@ -46,10 +48,11 @@ func TestParametersInvalidParents(t *testing.T) { func TestParametersInvalidBatchSize(t *testing.T) { p := Parameters{ Parameters: snowball.Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 0, diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index dca8a19..8bee5c3 100644 --- 
a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -4,8 +4,6 @@ package avalanche import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/choices" @@ -28,14 +26,13 @@ func (TopologicalFactory) New() Consensus { return &Topological{} } // of the voting results. Assumes that vertices are inserted in topological // order. type Topological struct { + metrics + // Context used for logging ctx *snow.Context // Threshold for confidence increases params Parameters - numProcessing prometheus.Gauge - numAccepted, numRejected prometheus.Counter - // Maps vtxID -> vtx nodes map[[32]byte]Vertex // Tracks the conflict relations @@ -64,33 +61,8 @@ func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier ta.ctx = ctx ta.params = params - ta.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: params.Namespace, - Name: "vtx_processing", - Help: "Number of currently processing vertices", - }) - ta.numAccepted = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "vtx_accepted", - Help: "Number of vertices accepted", - }) - ta.numRejected = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "vtx_rejected", - Help: "Number of vertices rejected", - }) - - if err := ta.params.Metrics.Register(ta.numProcessing); err != nil { - ta.ctx.Log.Error("Failed to register vtx_processing statistics due to %s", err) - } - if err := ta.params.Metrics.Register(ta.numAccepted); err != nil { - ta.ctx.Log.Error("Failed to register vtx_accepted statistics due to %s", err) - } - if err := ta.params.Metrics.Register(ta.numRejected); err != nil { - ta.ctx.Log.Error("Failed to register vtx_rejected statistics due to %s", err) + if err := ta.metrics.Initialize(ctx.Log, params.Namespace, params.Metrics); err != nil { + ta.ctx.Log.Error("%s", err) } 
ta.nodes = make(map[[32]byte]Vertex) @@ -133,7 +105,7 @@ func (ta *Topological) Add(vtx Vertex) { } ta.nodes[key] = vtx // Add this vertex to the set of nodes - ta.numProcessing.Inc() + ta.metrics.Issued(vtxID) ta.update(vtx) // Update the vertex and it's ancestry } @@ -367,9 +339,8 @@ func (ta *Topological) update(vtx Vertex) { for _, dep := range deps { if status := dep.Status(); status == choices.Rejected { vtx.Reject() // My parent is rejected, so I should be rejected - ta.numRejected.Inc() delete(ta.nodes, vtxKey) - ta.numProcessing.Dec() + ta.metrics.Rejected(vtxID) ta.preferenceCache[vtxKey] = false ta.virtuousCache[vtxKey] = false @@ -420,18 +391,14 @@ func (ta *Topological) update(vtx Vertex) { // I'm acceptable, why not accept? ta.ctx.ConsensusDispatcher.Accept(ta.ctx.ChainID, vtxID, vtx.Bytes()) vtx.Accept() - ta.numAccepted.Inc() delete(ta.nodes, vtxKey) - ta.numProcessing.Dec() + ta.metrics.Accepted(vtxID) case rejectable: // I'm rejectable, why not reject? vtx.Reject() - ta.ctx.ConsensusDispatcher.Reject(ta.ctx.ChainID, vtxID, vtx.Bytes()) - - ta.numRejected.Inc() delete(ta.nodes, vtxKey) - ta.numProcessing.Dec() + ta.metrics.Rejected(vtxID) } } diff --git a/snow/consensus/avalanche/topological_test.go b/snow/consensus/avalanche/topological_test.go index f43ee5b..05046bc 100644 --- a/snow/consensus/avalanche/topological_test.go +++ b/snow/consensus/avalanche/topological_test.go @@ -27,11 +27,12 @@ func TestTopologicalTxIssued(t *testing.T) { TxIssuedTest(t, TopologicalFactory{ func TestAvalancheVoting(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -86,7 +87,7 @@ func TestAvalancheVoting(t *testing.T) { if ta.Finalized() { t.Fatalf("An avalanche instance finalized too early") - } else if 
!Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } @@ -94,7 +95,7 @@ func TestAvalancheVoting(t *testing.T) { if !ta.Finalized() { t.Fatalf("An avalanche instance finalized too late") - } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } else if tx0.Status() != choices.Rejected { t.Fatalf("Tx should have been rejected") @@ -106,11 +107,12 @@ func TestAvalancheVoting(t *testing.T) { func TestAvalancheTransitiveVoting(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -174,7 +176,7 @@ func TestAvalancheTransitiveVoting(t *testing.T) { if ta.Finalized() { t.Fatalf("An avalanche instance finalized too early") - } else if !Matches([]ids.ID{vtx2.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } else if tx0.Status() != choices.Accepted { t.Fatalf("Tx should have been accepted") @@ -187,7 +189,7 @@ func TestAvalancheTransitiveVoting(t *testing.T) { if !ta.Finalized() { t.Fatalf("An avalanche instance finalized too late") - } else if !Matches([]ids.ID{vtx2.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } else if tx0.Status() != choices.Accepted { t.Fatalf("Tx should have been accepted") @@ -199,11 +201,12 @@ func TestAvalancheTransitiveVoting(t *testing.T) { func TestAvalancheSplitVoting(t *testing.T) { params := Parameters{ Parameters: 
snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -252,7 +255,7 @@ func TestAvalancheSplitVoting(t *testing.T) { if !ta.Finalized() { t.Fatalf("An avalanche instance finalized too late") - } else if !Matches([]ids.ID{vtx0.id, vtx1.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx0.id, vtx1.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } else if tx0.Status() != choices.Accepted { t.Fatalf("Tx should have been accepted") @@ -262,11 +265,12 @@ func TestAvalancheSplitVoting(t *testing.T) { func TestAvalancheTransitiveRejection(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -336,7 +340,7 @@ func TestAvalancheTransitiveRejection(t *testing.T) { if ta.Finalized() { t.Fatalf("An avalanche instance finalized too early") - } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } @@ -344,7 +348,7 @@ func TestAvalancheTransitiveRejection(t *testing.T) { if ta.Finalized() { t.Fatalf("An avalanche instance finalized too early") - } else if !Matches([]ids.ID{vtx1.id}, ta.Preferences().List()) { + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") } else if tx0.Status() != choices.Rejected { t.Fatalf("Tx should have been rejected") @@ -363,11 +367,12 @@ func TestAvalancheTransitiveRejection(t *testing.T) { func TestAvalancheVirtuous(t *testing.T) { params 
:= Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -484,11 +489,12 @@ func TestAvalancheVirtuous(t *testing.T) { func TestAvalancheIsVirtuous(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -567,11 +573,12 @@ func TestAvalancheIsVirtuous(t *testing.T) { func TestAvalancheQuiesce(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -660,11 +667,12 @@ func TestAvalancheQuiesce(t *testing.T) { func TestAvalancheOrphans(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: math.MaxInt32, - BetaRogue: math.MaxInt32, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: math.MaxInt32, + BetaRogue: math.MaxInt32, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, diff --git a/snow/consensus/snowball/binary_slush.go b/snow/consensus/snowball/binary_slush.go new file mode 100644 index 0000000..84e4cd3 --- /dev/null +++ b/snow/consensus/snowball/binary_slush.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "fmt" +) + +// binarySlush is the implementation of a binary slush instance +type binarySlush struct { + // preference is the choice that last had a successful poll. Unless there + // hasn't been a successful poll, in which case it is the initially provided + // choice. + preference int +} + +// Initialize implements the BinarySlush interface +func (sl *binarySlush) Initialize(choice int) { sl.preference = choice } + +// Preference implements the BinarySlush interface +func (sl *binarySlush) Preference() int { return sl.preference } + +// RecordSuccessfulPoll implements the BinarySlush interface +func (sl *binarySlush) RecordSuccessfulPoll(choice int) { sl.preference = choice } + +func (sl *binarySlush) String() string { return fmt.Sprintf("SL(Preference = %d)", sl.preference) } diff --git a/snow/consensus/snowball/binary_snowball.go b/snow/consensus/snowball/binary_snowball.go index f755a6b..f41046c 100644 --- a/snow/consensus/snowball/binary_snowball.go +++ b/snow/consensus/snowball/binary_snowball.go @@ -9,6 +9,9 @@ import ( // binarySnowball is the implementation of a binary snowball instance type binarySnowball struct { + // wrap the binary snowflake logic + binarySnowflake + // preference is the choice with the largest number of successful polls. 
// Ties are broken by switching choice lazily preference int @@ -16,15 +19,12 @@ type binarySnowball struct { // numSuccessfulPolls tracks the total number of successful network polls of // the 0 and 1 choices numSuccessfulPolls [2]int - - // snowflake wraps the binary snowflake logic - snowflake binarySnowflake } // Initialize implements the BinarySnowball interface func (sb *binarySnowball) Initialize(beta, choice int) { + sb.binarySnowflake.Initialize(beta, choice) sb.preference = choice - sb.snowflake.Initialize(beta, choice) } // Preference implements the BinarySnowball interface @@ -34,7 +34,7 @@ func (sb *binarySnowball) Preference() int { // this case is handled for completion. Therefore, if snowflake is // finalized, then our finalized snowflake choice should be preferred. if sb.Finalized() { - return sb.snowflake.Preference() + return sb.binarySnowflake.Preference() } return sb.preference } @@ -45,20 +45,14 @@ func (sb *binarySnowball) RecordSuccessfulPoll(choice int) { if sb.numSuccessfulPolls[choice] > sb.numSuccessfulPolls[1-choice] { sb.preference = choice } - sb.snowflake.RecordSuccessfulPoll(choice) + sb.binarySnowflake.RecordSuccessfulPoll(choice) } -// RecordUnsuccessfulPoll implements the BinarySnowball interface -func (sb *binarySnowball) RecordUnsuccessfulPoll() { sb.snowflake.RecordUnsuccessfulPoll() } - -// Finalized implements the BinarySnowball interface -func (sb *binarySnowball) Finalized() bool { return sb.snowflake.Finalized() } - func (sb *binarySnowball) String() string { return fmt.Sprintf( - "SB(Preference = %d, NumSuccessfulPolls[0] = %d, NumSuccessfulPolls[1] = %d, SF = %s)", + "SB(Preference = %d, NumSuccessfulPolls[0] = %d, NumSuccessfulPolls[1] = %d, %s)", sb.preference, sb.numSuccessfulPolls[0], sb.numSuccessfulPolls[1], - &sb.snowflake) + &sb.binarySnowflake) } diff --git a/snow/consensus/snowball/binary_snowball_test.go b/snow/consensus/snowball/binary_snowball_test.go index dd37f13..2962164 100644 --- 
a/snow/consensus/snowball/binary_snowball_test.go +++ b/snow/consensus/snowball/binary_snowball_test.go @@ -96,15 +96,15 @@ func TestBinarySnowballRecordUnsuccessfulPoll(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF = SF(Preference = 1, Confidence = 2, Finalized = true))" + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 3, SF(Confidence = 2, Finalized = true, SL(Preference = 1)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) } } func TestBinarySnowballAcceptWeirdColor(t *testing.T) { - Red := 0 - Blue := 1 + Blue := 0 + Red := 1 beta := 2 @@ -151,7 +151,7 @@ func TestBinarySnowballAcceptWeirdColor(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF = SF(Preference = 1, Confidence = 2, Finalized = true))" + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = true, SL(Preference = 0)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) } @@ -190,7 +190,7 @@ func TestBinarySnowballLockColor(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF = SF(Preference = 0, Confidence = 1, Finalized = true))" + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 2, SF(Confidence = 1, Finalized = true, SL(Preference = 0)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) } diff --git a/snow/consensus/snowball/binary_snowflake.go b/snow/consensus/snowball/binary_snowflake.go index 715e3ed..1860012 100644 --- a/snow/consensus/snowball/binary_snowflake.go +++ b/snow/consensus/snowball/binary_snowflake.go @@ -9,10 +9,8 @@ import ( // binarySnowflake is the implementation of a binary snowflake instance type binarySnowflake struct { - // preference is the choice that last had a successful poll. Unless there - // hasn't been a successful poll, in which case it is the initially provided - // choice. - preference int + // wrap the binary slush logic + binarySlush // confidence tracks the number of successful polls in a row that have // returned the preference @@ -29,29 +27,26 @@ type binarySnowflake struct { // Initialize implements the BinarySnowflake interface func (sf *binarySnowflake) Initialize(beta, choice int) { + sf.binarySlush.Initialize(choice) sf.beta = beta - sf.preference = choice } -// Preference implements the BinarySnowflake interface -func (sf *binarySnowflake) Preference() int { return sf.preference } - // RecordSuccessfulPoll implements the BinarySnowflake interface func (sf *binarySnowflake) RecordSuccessfulPoll(choice int) { - if sf.Finalized() { + if sf.finalized { return // This instace is already decided. } - if sf.preference == choice { + if preference := sf.Preference(); preference == choice { sf.confidence++ } else { // confidence is set to 1 because there has already been 1 successful // poll, namely this poll. 
sf.confidence = 1 - sf.preference = choice } sf.finalized = sf.confidence >= sf.beta + sf.binarySlush.RecordSuccessfulPoll(choice) } // RecordUnsuccessfulPoll implements the BinarySnowflake interface @@ -61,8 +56,8 @@ func (sf *binarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } func (sf *binarySnowflake) Finalized() bool { return sf.finalized } func (sf *binarySnowflake) String() string { - return fmt.Sprintf("SF(Preference = %d, Confidence = %d, Finalized = %v)", - sf.Preference(), + return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", sf.confidence, - sf.Finalized()) + sf.finalized, + &sf.binarySlush) } diff --git a/snow/consensus/snowball/binary_snowflake_test.go b/snow/consensus/snowball/binary_snowflake_test.go new file mode 100644 index 0000000..1078687 --- /dev/null +++ b/snow/consensus/snowball/binary_snowflake_test.go @@ -0,0 +1,56 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func TestBinarySnowflake(t *testing.T) { + Blue := 0 + Red := 1 + + beta := 2 + + sf := binarySnowflake{} + sf.Initialize(beta, Red) + + if pref := sf.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); pref != Red { + t.Fatalf("Wrong preference. Expected %d got %d", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); pref != Blue { + t.Fatalf("Wrong preference. 
Expected %d got %d", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); pref != Blue { + t.Fatalf("Wrong preference. Expected %d got %d", Blue, pref) + } else if !sf.Finalized() { + t.Fatalf("Didn't finalize correctly") + } +} diff --git a/snow/consensus/snowball/byzantine.go b/snow/consensus/snowball/byzantine.go index 8995d11..88fda59 100644 --- a/snow/consensus/snowball/byzantine.go +++ b/snow/consensus/snowball/byzantine.go @@ -24,6 +24,7 @@ type Byzantine struct { // Initialize implements the Consensus interface func (b *Byzantine) Initialize(params Parameters, choice ids.ID) { + b.params = params b.preference = choice } diff --git a/snow/consensus/snowball/byzantine_test.go b/snow/consensus/snowball/byzantine_test.go new file mode 100644 index 0000000..cee357b --- /dev/null +++ b/snow/consensus/snowball/byzantine_test.go @@ -0,0 +1,54 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/prometheus/client_golang/prometheus" +) + +func TestByzantine(t *testing.T) { + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + } + + byzFactory := ByzantineFactory{} + byz := byzFactory.New() + byz.Initialize(params, Blue) + + if ret := byz.Parameters(); ret != params { + t.Fatalf("Should have returned the correct params") + } + + byz.Add(Green) + + if pref := byz.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) + } + + oneGreen := ids.Bag{} + oneGreen.Add(Green) + byz.RecordPoll(oneGreen) + + if pref := byz.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) + } + + byz.RecordUnsuccessfulPoll() + + if pref := byz.Preference(); !pref.Equals(Blue) { + t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) + } + + if final := byz.Finalized(); !final { + t.Fatalf("Should be marked as accepted") + } + + if str := byz.String(); str != Blue.String() { + t.Fatalf("Wrong string, expected %s returned %s", Blue, str) + } +} diff --git a/snow/consensus/snowball/consensus.go b/snow/consensus/snowball/consensus.go index edbf14b..06cb3a4 100644 --- a/snow/consensus/snowball/consensus.go +++ b/snow/consensus/snowball/consensus.go @@ -69,6 +69,23 @@ type NnarySnowflake interface { Finalized() bool } +// NnarySlush is a slush instance deciding between an unbounded number of +// values. After performing a network sample of k nodes, if you have alpha +// votes for one of the choices, you should vote for that choice. +type NnarySlush interface { + fmt.Stringer + + // Takes in the initial choice + Initialize(initialPreference ids.ID) + + // Returns the currently preferred choice to be finalized + Preference() ids.ID + + // RecordSuccessfulPoll records a successful poll towards finalizing the + // specified choice. 
Assumes the choice was previously added. + RecordSuccessfulPoll(choice ids.ID) +} + // BinarySnowball augments BinarySnowflake with a counter that tracks the total // number of positive responses from a network sample. type BinarySnowball interface{ BinarySnowflake } @@ -97,6 +114,23 @@ type BinarySnowflake interface { Finalized() bool } +// BinarySlush is a slush instance deciding between two values. After performing +// a network sample of k nodes, if you have alpha votes for one of the choices, +// you should vote for that choice. +type BinarySlush interface { + fmt.Stringer + + // Takes in the initial choice + Initialize(initialPreference int) + + // Returns the currently preferred choice to be finalized + Preference() int + + // RecordSuccessfulPoll records a successful poll towards finalizing the + // specified choice + RecordSuccessfulPoll(choice int) +} + // UnarySnowball is a snowball instance deciding on one value. After performing // a network sample of k nodes, if you have alpha votes for the choice, you // should vote. Otherwise, you should reset. @@ -122,3 +156,29 @@ type UnarySnowball interface { // Returns a new unary snowball instance with the same state Clone() UnarySnowball } + +// UnarySnowflake is a snowflake instance deciding on one value. After +// performing a network sample of k nodes, if you have alpha votes for the +// choice, you should vote. Otherwise, you should reset. +type UnarySnowflake interface { + fmt.Stringer + + // Takes in the beta value + Initialize(beta int) + + // RecordSuccessfulPoll records a successful poll towards finalizing + RecordSuccessfulPoll() + + // RecordUnsuccessfulPoll resets the snowflake counter of this instance + RecordUnsuccessfulPoll() + + // Return whether a choice has been finalized + Finalized() bool + + // Returns a new binary snowball instance with the agreement parameters + // transferred. 
Takes in the new beta value and the original choice + Extend(beta, originalPreference int) BinarySnowflake + + // Returns a new unary snowflake instance with the same state + Clone() UnarySnowflake +} diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go index 67fec3d..922f606 100644 --- a/snow/consensus/snowball/consensus_test.go +++ b/snow/consensus/snowball/consensus_test.go @@ -22,7 +22,7 @@ func ParamsTest(t *testing.T, factory Factory) { params := Parameters{ Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, ConcurrentRepolls: 1, } sb.Initialize(params, Red) @@ -34,5 +34,7 @@ func ParamsTest(t *testing.T, factory Factory) { t.Fatalf("Wrong Beta1 parameter") } else if p.BetaRogue != params.BetaRogue { t.Fatalf("Wrong Beta2 parameter") + } else if p.ConcurrentRepolls != params.ConcurrentRepolls { + t.Fatalf("Wrong Repoll parameter") } } diff --git a/snow/consensus/snowball/flat.go b/snow/consensus/snowball/flat.go index da4eb1b..21663c4 100644 --- a/snow/consensus/snowball/flat.go +++ b/snow/consensus/snowball/flat.go @@ -15,40 +15,27 @@ func (FlatFactory) New() Consensus { return &Flat{} } // Flat is a naive implementation of a multi-choice snowball instance type Flat struct { + // wraps the n-nary snowball logic + nnarySnowball + // params contains all the configurations of a snowball instance params Parameters - - // snowball wraps the n-nary snowball logic - snowball nnarySnowball } // Initialize implements the Consensus interface func (f *Flat) Initialize(params Parameters, choice ids.ID) { + f.nnarySnowball.Initialize(params.BetaVirtuous, params.BetaRogue, choice) f.params = params - f.snowball.Initialize(params.BetaVirtuous, params.BetaRogue, choice) } // Parameters implements the Consensus interface func (f *Flat) Parameters() Parameters { return f.params } -// Add implements the Consensus interface -func (f *Flat) Add(choice 
ids.ID) { f.snowball.Add(choice) } - -// Preference implements the Consensus interface -func (f *Flat) Preference() ids.ID { return f.snowball.Preference() } - // RecordPoll implements the Consensus interface func (f *Flat) RecordPoll(votes ids.Bag) { if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha { - f.snowball.RecordSuccessfulPoll(pollMode) + f.nnarySnowball.RecordSuccessfulPoll(pollMode) } else { f.RecordUnsuccessfulPoll() } } - -// RecordUnsuccessfulPoll implements the Consensus interface -func (f *Flat) RecordUnsuccessfulPoll() { f.snowball.RecordUnsuccessfulPoll() } - -// Finalized implements the Consensus interface -func (f *Flat) Finalized() bool { return f.snowball.Finalized() } -func (f *Flat) String() string { return f.snowball.String() } diff --git a/snow/consensus/snowball/flat_test.go b/snow/consensus/snowball/flat_test.go index 1aaa754..d2b9617 100644 --- a/snow/consensus/snowball/flat_test.go +++ b/snow/consensus/snowball/flat_test.go @@ -65,7 +65,7 @@ func TestFlat(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF = SF(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, Confidence = 2, Finalized = true))" + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" if str := f.String(); str != expected { t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) } diff --git a/snow/consensus/snowball/nnary_slush.go b/snow/consensus/snowball/nnary_slush.go new file mode 100644 index 0000000..70a55c3 --- /dev/null +++ b/snow/consensus/snowball/nnary_slush.go @@ -0,0 +1,30 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// nnarySlush is the implementation of a slush instance with an unbounded number +// of choices +type nnarySlush struct { + // preference is the choice that last had a successful poll. Unless there + // hasn't been a successful poll, in which case it is the initially provided + // choice. + preference ids.ID +} + +// Initialize implements the NnarySlush interface +func (sl *nnarySlush) Initialize(choice ids.ID) { sl.preference = choice } + +// Preference implements the NnarySlush interface +func (sl *nnarySlush) Preference() ids.ID { return sl.preference } + +// RecordSuccessfulPoll implements the NnarySlush interface +func (sl *nnarySlush) RecordSuccessfulPoll(choice ids.ID) { sl.preference = choice } + +func (sl *nnarySlush) String() string { return fmt.Sprintf("SL(Preference = %s)", sl.preference) } diff --git a/snow/consensus/snowball/nnary_snowball.go b/snow/consensus/snowball/nnary_snowball.go index 6821a50..2595622 100644 --- a/snow/consensus/snowball/nnary_snowball.go +++ b/snow/consensus/snowball/nnary_snowball.go @@ -11,6 +11,9 @@ import ( // nnarySnowball is a naive implementation of a multi-color snowball instance type nnarySnowball struct { + // wrap the n-nary snowflake logic + nnarySnowflake + // preference is the choice with the largest number of successful polls. 
// Ties are broken by switching choice lazily preference ids.ID @@ -22,21 +25,15 @@ type nnarySnowball struct { // numSuccessfulPolls tracks the total number of successful network polls of // the choices numSuccessfulPolls map[[32]byte]int - - // snowflake wraps the n-nary snowflake logic - snowflake nnarySnowflake } // Initialize implements the NnarySnowball interface func (sb *nnarySnowball) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { + sb.nnarySnowflake.Initialize(betaVirtuous, betaRogue, choice) sb.preference = choice sb.numSuccessfulPolls = make(map[[32]byte]int) - sb.snowflake.Initialize(betaVirtuous, betaRogue, choice) } -// Add implements the NnarySnowball interface -func (sb *nnarySnowball) Add(choice ids.ID) { sb.snowflake.Add(choice) } - // Preference implements the NnarySnowball interface func (sb *nnarySnowball) Preference() ids.ID { // It is possible, with low probability, that the snowflake preference is @@ -44,17 +41,13 @@ func (sb *nnarySnowball) Preference() ids.ID { // this case is handled for completion. Therefore, if snowflake is // finalized, then our finalized snowflake choice should be preferred. 
if sb.Finalized() { - return sb.snowflake.Preference() + return sb.nnarySnowflake.Preference() } return sb.preference } // RecordSuccessfulPoll implements the NnarySnowball interface func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { - if sb.Finalized() { - return - } - key := choice.Key() numSuccessfulPolls := sb.numSuccessfulPolls[key] + 1 sb.numSuccessfulPolls[key] = numSuccessfulPolls @@ -64,16 +57,10 @@ func (sb *nnarySnowball) RecordSuccessfulPoll(choice ids.ID) { sb.maxSuccessfulPolls = numSuccessfulPolls } - sb.snowflake.RecordSuccessfulPoll(choice) + sb.nnarySnowflake.RecordSuccessfulPoll(choice) } -// RecordUnsuccessfulPoll implements the NnarySnowball interface -func (sb *nnarySnowball) RecordUnsuccessfulPoll() { sb.snowflake.RecordUnsuccessfulPoll() } - -// Finalized implements the NnarySnowball interface -func (sb *nnarySnowball) Finalized() bool { return sb.snowflake.Finalized() } - func (sb *nnarySnowball) String() string { - return fmt.Sprintf("SB(Preference = %s, NumSuccessfulPolls = %d, SF = %s)", - sb.preference, sb.maxSuccessfulPolls, &sb.snowflake) + return fmt.Sprintf("SB(Preference = %s, NumSuccessfulPolls = %d, %s)", + sb.preference, sb.maxSuccessfulPolls, &sb.nnarySnowflake) } diff --git a/snow/consensus/snowball/nnary_snowball_test.go b/snow/consensus/snowball/nnary_snowball_test.go index 655fdc6..50eb667 100644 --- a/snow/consensus/snowball/nnary_snowball_test.go +++ b/snow/consensus/snowball/nnary_snowball_test.go @@ -55,50 +55,24 @@ func TestNnarySnowball(t *testing.T) { } } -func TestNnarySnowflake(t *testing.T) { - betaVirtuous := 2 +func TestVirtuousNnarySnowball(t *testing.T) { + betaVirtuous := 1 betaRogue := 2 - sf := nnarySnowflake{} - sf.Initialize(betaVirtuous, betaRogue, Red) - sf.Add(Blue) - sf.Add(Green) + sb := nnarySnowball{} + sb.Initialize(betaVirtuous, betaRogue, Red) - if pref := sf.Preference(); !Red.Equals(pref) { + if pref := sb.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) - } else if sf.Finalized() { + } else if sb.Finalized() { t.Fatalf("Finalized too early") } - sf.RecordSuccessfulPoll(Blue) + sb.RecordSuccessfulPoll(Red) - if pref := sf.Preference(); !Blue.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } - - sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); !Red.Equals(pref) { + if pref := sb.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if sf.Finalized() { - t.Fatalf("Finalized too early") - } - - sf.RecordSuccessfulPoll(Red) - - if pref := sf.Preference(); !Red.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { - t.Fatalf("Should be finalized") - } - - sf.RecordSuccessfulPoll(Blue) - - if pref := sf.Preference(); !Red.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) - } else if !sf.Finalized() { + } else if !sb.Finalized() { t.Fatalf("Should be finalized") } } @@ -143,7 +117,7 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { t.Fatalf("Finalized too late") } - expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF = SF(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, Confidence = 2, Finalized = true))" + expected := "SB(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES, NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true, SL(Preference = TtF4d2QWbk5vzQGTEPrN48x6vwgAoAmKQ9cbp79inpQmcRKES)))" if str := sb.String(); str != expected { t.Fatalf("Wrong state. 
Expected:\n%s\nGot:\n%s", expected, str) } @@ -159,7 +133,7 @@ func TestNarySnowballRecordUnsuccessfulPoll(t *testing.T) { } } -func TestNarySnowflakeColor(t *testing.T) { +func TestNarySnowballDifferentSnowflakeColor(t *testing.T) { betaVirtuous := 2 betaRogue := 2 @@ -175,7 +149,7 @@ func TestNarySnowflakeColor(t *testing.T) { sb.RecordSuccessfulPoll(Blue) - if pref := sb.snowflake.Preference(); !Blue.Equals(pref) { + if pref := sb.nnarySnowflake.Preference(); !Blue.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) } @@ -183,7 +157,7 @@ func TestNarySnowflakeColor(t *testing.T) { if pref := sb.Preference(); !Blue.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) - } else if pref := sb.snowflake.Preference(); !Red.Equals(pref) { + } else if pref := sb.nnarySnowflake.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) } } diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index f9d1069..8b461f0 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -12,6 +12,9 @@ import ( // nnarySnowflake is the implementation of a snowflake instance with an // unbounded number of choices type nnarySnowflake struct { + // wrap the n-nary slush logic + nnarySlush + // betaVirtuous is the number of consecutive successful queries required for // finalization on a virtuous instance. betaVirtuous int @@ -24,11 +27,6 @@ type nnarySnowflake struct { // returned the preference confidence int - // preference is the choice that last had a successful poll. Unless there - // hasn't been a successful poll, in which case it is the initially provided - // choice. 
- preference ids.ID - // rogue tracks if this instance has multiple choices or only one rogue bool @@ -39,32 +37,31 @@ type nnarySnowflake struct { // Initialize implements the NnarySnowflake interface func (sf *nnarySnowflake) Initialize(betaVirtuous, betaRogue int, choice ids.ID) { + sf.nnarySlush.Initialize(choice) sf.betaVirtuous = betaVirtuous sf.betaRogue = betaRogue - sf.preference = choice } // Add implements the NnarySnowflake interface func (sf *nnarySnowflake) Add(choice ids.ID) { sf.rogue = sf.rogue || !choice.Equals(sf.preference) } -// Preference implements the NnarySnowflake interface -func (sf *nnarySnowflake) Preference() ids.ID { return sf.preference } - // RecordSuccessfulPoll implements the NnarySnowflake interface func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { - if sf.Finalized() { - return + if sf.finalized { + return // This instance is already decided. } - if sf.preference.Equals(choice) { + if preference := sf.nnarySlush.Preference(); preference.Equals(choice) { sf.confidence++ } else { + // confidence is set to 1 because there has already been 1 successful + // poll, namely this poll. 
sf.confidence = 1 - sf.preference = choice } sf.finalized = (!sf.rogue && sf.confidence >= sf.betaVirtuous) || sf.confidence >= sf.betaRogue + sf.nnarySlush.RecordSuccessfulPoll(choice) } // RecordUnsuccessfulPoll implements the NnarySnowflake interface @@ -74,8 +71,8 @@ func (sf *nnarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } func (sf *nnarySnowflake) Finalized() bool { return sf.finalized } func (sf *nnarySnowflake) String() string { - return fmt.Sprintf("SF(Preference = %s, Confidence = %d, Finalized = %v)", - sf.preference, + return fmt.Sprintf("SF(Confidence = %d, Finalized = %v, %s)", sf.confidence, - sf.Finalized()) + sf.finalized, + &sf.nnarySlush) } diff --git a/snow/consensus/snowball/nnary_snowflake_test.go b/snow/consensus/snowball/nnary_snowflake_test.go new file mode 100644 index 0000000..cbf3864 --- /dev/null +++ b/snow/consensus/snowball/nnary_snowflake_test.go @@ -0,0 +1,134 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "testing" +) + +func TestNnarySnowflake(t *testing.T) { + betaVirtuous := 2 + betaRogue := 2 + + sf := nnarySnowflake{} + sf.Initialize(betaVirtuous, betaRogue, Red) + sf.Add(Blue) + sf.Add(Green) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); !Blue.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Blue, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sf.Finalized() { + t.Fatalf("Finalized too early") + } + + sf.RecordSuccessfulPoll(Red) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) + } else if !sf.Finalized() { + t.Fatalf("Should be finalized") + } + + sf.RecordSuccessfulPoll(Blue) + + if pref := sf.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sf.Finalized() { + t.Fatalf("Should be finalized") + } +} + +func TestVirtuousNnarySnowflake(t *testing.T) { + betaVirtuous := 2 + betaRogue := 3 + + sb := nnarySnowflake{} + sb.Initialize(betaVirtuous, betaRogue, Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Should be finalized") + } +} + +func TestRogueNnarySnowflake(t *testing.T) { + betaVirtuous := 1 + betaRogue := 2 + + sb := nnarySnowflake{} + sb.Initialize(betaVirtuous, betaRogue, Red) + if sb.rogue { + t.Fatalf("Shouldn't be rogue") + } + + sb.Add(Red) + if sb.rogue { + t.Fatalf("Shouldn't be rogue") + } + + sb.Add(Blue) + if !sb.rogue { + t.Fatalf("Should be rogue") + } + + sb.Add(Red) + if !sb.rogue { + t.Fatalf("Should be rogue") + } + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", Red, pref) + } else if sb.Finalized() { + t.Fatalf("Finalized too early") + } + + sb.RecordSuccessfulPoll(Red) + + if pref := sb.Preference(); !Red.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) + } else if !sb.Finalized() { + t.Fatalf("Should be finalized") + } +} diff --git a/snow/consensus/snowball/parameters.go b/snow/consensus/snowball/parameters.go index 5e14afa..7d77405 100644 --- a/snow/consensus/snowball/parameters.go +++ b/snow/consensus/snowball/parameters.go @@ -9,11 +9,28 @@ import ( "github.com/prometheus/client_golang/prometheus" ) +const ( + errMsg = "__________ .___\n" + + "\\______ \\____________ __| _/__.__.\n" + + " | | _/\\_ __ \\__ \\ / __ < | |\n" + + " | | \\ | | \\// __ \\_/ /_/ |\\___ |\n" + + " |______ / |__| (____ /\\____ |/ ____|\n" + + " \\/ \\/ \\/\\/\n" + + "\n" + + "🏆 🏆 🏆 🏆 🏆 🏆\n" + + " ________ ________ ________________\n" + + " / _____/ \\_____ \\ / _ \\__ ___/\n" + + "/ \\ ___ / | \\ / /_\\ \\| |\n" + + "\\ \\_\\ \\/ | \\/ | \\ |\n" + + " \\______ /\\_______ /\\____|__ /____|\n" + + " \\/ \\/ \\/\n" +) + // Parameters required for snowball consensus type Parameters struct { - Namespace string - Metrics prometheus.Registerer - K, Alpha, BetaVirtuous, BetaRogue int + Namespace string + Metrics prometheus.Registerer + K, Alpha, BetaVirtuous, BetaRogue, ConcurrentRepolls int } // Valid returns nil if the parameters describe a valid initialization. 
@@ -25,8 +42,14 @@ func (p Parameters) Valid() error { return fmt.Errorf("K = %d, Alpha = %d: Fails the condition that: Alpha <= K", p.K, p.Alpha) case p.BetaVirtuous <= 0: return fmt.Errorf("BetaVirtuous = %d: Fails the condition that: 0 < BetaVirtuous", p.BetaVirtuous) + case p.BetaRogue == 3 && p.BetaVirtuous == 28: + return fmt.Errorf("BetaVirtuous = %d, BetaRogue = %d: Fails the condition that: BetaVirtuous <= BetaRogue\n%s", p.BetaVirtuous, p.BetaRogue, errMsg) case p.BetaRogue < p.BetaVirtuous: return fmt.Errorf("BetaVirtuous = %d, BetaRogue = %d: Fails the condition that: BetaVirtuous <= BetaRogue", p.BetaVirtuous, p.BetaRogue) + case p.ConcurrentRepolls <= 0: + return fmt.Errorf("ConcurrentRepolls = %d: Fails the condition that: 0 < ConcurrentRepolls", p.ConcurrentRepolls) + case p.ConcurrentRepolls > p.BetaRogue: + return fmt.Errorf("ConcurrentRepolls = %d, BetaRogue = %d: Fails the condition that: ConcurrentRepolls <= BetaRogue", p.ConcurrentRepolls, p.BetaRogue) default: return nil } diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index de1b666..7c3668c 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -4,15 +4,46 @@ package snowball import ( + "fmt" + "strings" "testing" ) func TestParametersValid(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + if err := p.Valid(); err != nil { + t.Fatal(err) + } +} + +func TestParametersAnotherValid(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 28, + BetaRogue: 30, + ConcurrentRepolls: 1, + } + + if err := p.Valid(); err != nil { + t.Fatal(err) + } +} + +func TestParametersYetAnotherValid(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 3, + ConcurrentRepolls: 1, } if err := p.Valid(); err != nil { @@ -22,10 +53,11 @@ func 
TestParametersValid(t *testing.T) { func TestParametersInvalidK(t *testing.T) { p := Parameters{ - K: 0, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, + K: 0, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { @@ -35,10 +67,11 @@ func TestParametersInvalidK(t *testing.T) { func TestParametersInvalidAlpha(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 0, - BetaVirtuous: 1, - BetaRogue: 1, + K: 1, + Alpha: 0, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { @@ -48,10 +81,11 @@ func TestParametersInvalidAlpha(t *testing.T) { func TestParametersInvalidBetaVirtuous(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 0, - BetaRogue: 1, + K: 1, + Alpha: 1, + BetaVirtuous: 0, + BetaRogue: 1, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { @@ -61,13 +95,57 @@ func TestParametersInvalidBetaVirtuous(t *testing.T) { func TestParametersInvalidBetaRogue(t *testing.T) { p := Parameters{ - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 0, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 0, + ConcurrentRepolls: 1, } if err := p.Valid(); err == nil { t.Fatalf("Should have failed due to invalid beta rogue") } } + +func TestParametersAnotherInvalidBetaRogue(t *testing.T) { + p := Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 28, + BetaRogue: 3, + ConcurrentRepolls: 1, + } + + if err := p.Valid(); err == nil { + t.Fatalf("Should have failed due to invalid beta rogue") + } else if !strings.Contains(err.Error(), "\n") { + t.Fatalf("Should have described the extensive error") + } +} + +func TestParametersInvalidConcurrentRepolls(t *testing.T) { + tests := []Parameters{ + Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 2, + }, + Parameters{ + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 0, + }, + } + for _, p := range tests { + label := fmt.Sprintf("ConcurrentRepolls=%d", 
p.ConcurrentRepolls) + t.Run(label, func(t *testing.T) { + if err := p.Valid(); err == nil { + t.Error("Should have failed due to invalid concurrent repolls") + } + }) + } +} diff --git a/snow/consensus/snowball/tree.go b/snow/consensus/snowball/tree.go index ad6554b..f28d0a8 100644 --- a/snow/consensus/snowball/tree.go +++ b/snow/consensus/snowball/tree.go @@ -18,6 +18,10 @@ func (TreeFactory) New() Consensus { return &Tree{} } // Tree implements the snowball interface by using a modified patricia tree. type Tree struct { + // node is the root that represents the first snowball instance in the tree, + // and contains references to all the other snowball instances in the tree. + node + // params contains all the configurations of a snowball instance params Parameters @@ -31,10 +35,6 @@ type Tree struct { // that any later traversal into this sub-tree should call // RecordUnsuccessfulPoll before performing any other action. shouldReset bool - - // root is the node that represents the first snowball instance in the tree, - // and contains references to all the other snowball instances in the tree. 
- root node } // Initialize implements the Consensus interface @@ -44,7 +44,7 @@ func (t *Tree) Initialize(params Parameters, choice ids.ID) { snowball := &unarySnowball{} snowball.Initialize(params.BetaVirtuous) - t.root = &unaryNode{ + t.node = &unaryNode{ tree: t, preference: choice, commonPrefix: ids.NumBits, // The initial state has no conflicts @@ -57,20 +57,17 @@ func (t *Tree) Parameters() Parameters { return t.params } // Add implements the Consensus interface func (t *Tree) Add(choice ids.ID) { - prefix := t.root.DecidedPrefix() + prefix := t.node.DecidedPrefix() // Make sure that we haven't already decided against this new id if ids.EqualSubset(0, prefix, t.Preference(), choice) { - t.root = t.root.Add(choice) + t.node = t.node.Add(choice) } } -// Preference implements the Consensus interface -func (t *Tree) Preference() ids.ID { return t.root.Preference() } - // RecordPoll implements the Consensus interface func (t *Tree) RecordPoll(votes ids.Bag) { // Get the assumed decided prefix of the root node. - decidedPrefix := t.root.DecidedPrefix() + decidedPrefix := t.node.DecidedPrefix() // If any of the bits differ from the preference in this prefix, the vote is // for a rejected operation. So, we filter out these invalid votes. @@ -78,7 +75,7 @@ func (t *Tree) RecordPoll(votes ids.Bag) { // Now that the votes have been restricted to valid votes, pass them into // the first snowball instance - t.root = t.root.RecordPoll(filteredVotes, t.shouldReset) + t.node = t.node.RecordPoll(filteredVotes, t.shouldReset) // Because we just passed the reset into the snowball instance, we should no // longer reset. 
@@ -88,14 +85,11 @@ func (t *Tree) RecordPoll(votes ids.Bag) { // RecordUnsuccessfulPoll implements the Consensus interface func (t *Tree) RecordUnsuccessfulPoll() { t.shouldReset = true } -// Finalized implements the Consensus interface -func (t *Tree) Finalized() bool { return t.root.Finalized() } - func (t *Tree) String() string { builder := strings.Builder{} prefixes := []string{""} - nodes := []node{t.root} + nodes := []node{t.node} for len(prefixes) > 0 { newSize := len(prefixes) - 1 @@ -321,14 +315,14 @@ func (u *unaryNode) Add(newChoice ids.ID) node { u.decidedPrefix, u.commonPrefix, u.preference, newChoice); !found { // If the first difference doesn't exist, then this node shouldn't be // split - if u.child != nil && ids.EqualSubset( - u.commonPrefix, u.child.DecidedPrefix(), u.preference, newChoice) { - // If the choice matched my child's prefix, then the add should be - // passed to my child. (Case 1. from above) + if u.child != nil { + // Because this node will finalize before any children could + // finalize, it must be that the newChoice will match my child's + // prefix u.child = u.child.Add(newChoice) } - // If the choice didn't my child's prefix, then the choice was - // previously rejected and the tree should not be modified + // if u.child is nil, then we are attempting to add the same choice into + // the tree, which should be a noop } else { // The difference was found, so this node must be split @@ -409,13 +403,18 @@ func (u *unaryNode) RecordPoll(votes ids.Bag, reset bool) node { u.snowball.RecordSuccessfulPoll() if u.child != nil { - decidedPrefix := u.child.DecidedPrefix() - filteredVotes := votes.Filter(u.commonPrefix, decidedPrefix, u.preference) + // We are guaranteed that u.commonPrefix will equal + // u.child.DecidedPrefix(). Otherwise, there must have been a + // decision under this node, which isn't possible because + // beta1 <= beta2. 
That means that filtering the votes between + // u.commonPrefix and u.child.DecidedPrefix() would always result in + // the same set being returned. + // If I'm now decided, return my child if u.Finalized() { - return u.child.RecordPoll(filteredVotes, u.shouldReset) + return u.child.RecordPoll(votes, u.shouldReset) } - u.child = u.child.RecordPoll(filteredVotes, u.shouldReset) + u.child = u.child.RecordPoll(votes, u.shouldReset) // The child's preference may have changed u.preference = u.child.Preference() } @@ -482,6 +481,10 @@ func (b *binaryNode) Add(id ids.ID) node { ids.EqualSubset(b.bit+1, child.DecidedPrefix(), b.preferences[bit], id) { b.children[bit] = child.Add(id) } + // If child is nil, then the id has already been added to the tree, so + // nothing should be done + // If the decided prefix isn't matched, then a previous decision has made + // the id that is being added to have already been rejected return b } diff --git a/snow/consensus/snowball/tree_test.go b/snow/consensus/snowball/tree_test.go index 56904e1..4fb1159 100644 --- a/snow/consensus/snowball/tree_test.go +++ b/snow/consensus/snowball/tree_test.go @@ -18,7 +18,7 @@ func TestTreeParams(t *testing.T) { ParamsTest(t, TreeFactory{}) } func TestSnowballSingleton(t *testing.T) { params := Parameters{ Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 5, } tree := Tree{} tree.Initialize(params, Red) @@ -35,6 +35,14 @@ func TestSnowballSingleton(t *testing.T) { t.Fatalf("Snowball is finalized too soon") } + + empty := ids.Bag{} + tree.RecordPoll(empty) + + if tree.Finalized() { + t.Fatalf("Snowball is finalized too soon") + } + tree.RecordPoll(oneRed) if tree.Finalized() { @@ -170,10 +178,13 @@ func TestSnowballLastBinary(t *testing.T) { tree.Initialize(params, zero) tree.Add(one) - expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 255)\n" + - " SB(Preference = 0, 
NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 255" + // Should do nothing + tree.Add(one) + + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 255)\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 255" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !zero.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) } else if tree.Finalized() { @@ -199,6 +210,238 @@ func TestSnowballLastBinary(t *testing.T) { } } +func TestSnowballAddPreviouslyRejected(t *testing.T) { + zero := ids.NewID([32]byte{0b00000000}) + one := ids.NewID([32]byte{0b00000001}) + two := ids.NewID([32]byte{0b00000010}) + four := ids.NewID([32]byte{0b00000100}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(one) + tree.Add(four) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + zeroBag := ids.Bag{} + zeroBag.Add(zero) + tree.RecordPoll(zeroBag) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.Add(two) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } +} + +func TestSnowballNewUnary(t *testing.T) { + zero := ids.NewID([32]byte{0b00000000}) + one := ids.NewID([32]byte{0b00000001}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 3, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(one) + + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + oneBag := ids.Bag{} + oneBag.Add(one) + tree.RecordPoll(oneBag) + + { + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !one.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", one, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(oneBag) + + { + expected := "SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 2, SF(Confidence = 2, Finalized = false, SL(Preference = 1))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 2, SF(Confidence = 2, Finalized = true)) Bits = [1, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !one.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", one, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } +} + +func TestSnowballTransitiveReset(t *testing.T) { + zero := ids.NewID([32]byte{0b00000000}) + two := ids.NewID([32]byte{0b00000010}) + eight := ids.NewID([32]byte{0b00001000}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 2, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, zero) + tree.Add(two) + tree.Add(eight) + + { + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + zeroBag := ids.Bag{} + zeroBag.Add(zero) + tree.RecordPoll(zeroBag) + + { + expected := "SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + + emptyBag := ids.Bag{} + tree.RecordPoll(emptyBag) + + { + expected := "SB(NumSuccessfulPolls = 1, SF(Confidence = 0, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. 
Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(zeroBag) + + { + expected := "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [0, 1)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 1\n"+ + " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [2, 3)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 3\n"+ + " SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [4, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } + } + + tree.RecordPoll(zeroBag) + + { + expected := "SB(NumSuccessfulPolls = 3, SF(Confidence = 2, Finalized = true)) Bits = [4, 256)" + if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } else if pref := tree.Preference(); !zero.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", zero, pref) + } else if !tree.Finalized() { + t.Fatalf("Finalized too late") + } + } +} + func TestSnowballTrinary(t *testing.T) { params := Parameters{ Metrics: prometheus.NewRegistry(), @@ -256,7 +499,7 @@ func TestSnowballTrinary(t *testing.T) { tree.RecordPoll(redBag) if pref := tree.Preference(); !Blue.Equals(pref) { - t.Fatalf("Wrong preference. Expected %s got %s", Green, pref) + t.Fatalf("Wrong preference. 
Expected %s got %s", Blue, pref) } else if tree.Finalized() { t.Fatalf("Finalized too early") } @@ -378,9 +621,9 @@ func TestSnowballFineGrained(t *testing.T) { tree := Tree{} tree.Initialize(params, c0000) { - expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 256)" + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -390,11 +633,11 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1100) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -404,13 +647,13 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c1000) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -420,16 +663,16 @@ func TestSnowballFineGrained(t *testing.T) { tree.Add(c0010) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 0\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [1, 2)\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 0, Finalized = false)) Bit = 2\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)" if str := tree.String(); expected != str { - 
t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -441,15 +684,15 @@ func TestSnowballFineGrained(t *testing.T) { c0000Bag.Add(c0000) tree.RecordPoll(c0000Bag) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false)) Bit = 0\n" + - " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false)) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [3, 256)\n" + - " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 1, Confidence = 0, Finalized = false)) Bit = 1\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)\n" + - " SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [2, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n" + + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n" + + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n" + + " SB(Preference = 1, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 1))) Bit = 1\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [2, 256)\n" + + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) 
Bits = [2, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -461,11 +704,11 @@ func TestSnowballFineGrained(t *testing.T) { c0010Bag.Add(c0010) tree.RecordPoll(c0010Bag) { - expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF = SF(Preference = 1, Confidence = 1, Finalized = false)) Bit = 2\n" + - " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)\n" + - " SB(NumSuccessfulPolls = 1, Confidence = 1, Finalized = true) Bits = [3, 256)" + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 1, SF(Confidence = 1, Finalized = false, SL(Preference = 1))) Bit = 2\n" + + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n" + + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0000.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) } else if tree.Finalized() { @@ -475,9 +718,9 @@ func TestSnowballFineGrained(t *testing.T) { tree.RecordPoll(c0010Bag) { - expected := "SB(NumSuccessfulPolls = 2, Confidence = 2, Finalized = true) Bits = [3, 256)" + expected := "SB(NumSuccessfulPolls = 2, SF(Confidence = 2, Finalized = true)) Bits = [3, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !c0010.Equals(pref) { t.Fatalf("Wrong preference. 
Expected %s got %s", c0010, pref) } else if !tree.Finalized() { @@ -496,9 +739,9 @@ func TestSnowballDoubleAdd(t *testing.T) { tree.Add(Red) { - expected := "SB(NumSuccessfulPolls = 0, Confidence = 0, Finalized = false) Bits = [0, 256)" + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" if str := tree.String(); expected != str { - t.Fatalf("Wrong string. Expected %s got %s", expected, str) + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) } else if pref := tree.Preference(); !Red.Equals(pref) { t.Fatalf("Wrong preference. Expected %s got %s", Red, pref) } else if tree.Finalized() { @@ -533,3 +776,108 @@ func TestSnowballConsistent(t *testing.T) { t.Fatalf("Network agreed on inconsistent values") } } + +func TestSnowballFilterBinaryChildren(t *testing.T) { + c0000 := ids.NewID([32]byte{0b00000000}) + c1000 := ids.NewID([32]byte{0b00000001}) + c0100 := ids.NewID([32]byte{0b00000010}) + c0010 := ids.NewID([32]byte{0b00000100}) + + params := Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + tree := Tree{} + tree.Initialize(params, c0000) + { + expected := "SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [0, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } + } + + tree.Add(c1000) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } + } + + tree.Add(c0010) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 2)\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 0, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } + } + + c0000Bag := ids.Bag{} + c0000Bag.Add(c0000) + tree.RecordPoll(c0000Bag) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } + } + + tree.Add(c0100) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 0\n"+ + " SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [1, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. 
Expected:\n%s\ngot:\n%s", expected, str) + } + } + + c0100Bag := ids.Bag{} + c0100Bag.Add(c0100) + tree.RecordPoll(c0100Bag) + { + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 1, NumSuccessfulPolls[1] = 0, SF(Confidence = 0, Finalized = false, SL(Preference = 0))) Bit = 2\n"+ + " SB(NumSuccessfulPolls = 1, SF(Confidence = 1, Finalized = true)) Bits = [3, 256)\n"+ + " SB(NumSuccessfulPolls = 0, SF(Confidence = 0, Finalized = false)) Bits = [3, 256)" + if pref := tree.Preference(); !c0000.Equals(pref) { + t.Fatalf("Wrong preference. Expected %s got %s", c0000, pref) + } else if tree.Finalized() { + t.Fatalf("Finalized too early") + } else if str := tree.String(); expected != str { + t.Fatalf("Wrong string. Expected:\n%s\ngot:\n%s", expected, str) + } + } +} diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 6d0db07..26ea35d 100644 --- a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -9,64 +9,40 @@ import ( // unarySnowball is the implementation of a unary snowball instance type unarySnowball struct { - // beta is the number of consecutive successful queries required for - // finalization. 
- beta int - - // confidence tracks the number of successful polls in a row that have - // returned the preference - confidence int + // wrap the unary snowflake logic + unarySnowflake // numSuccessfulPolls tracks the total number of successful network polls numSuccessfulPolls int - - // finalized prevents the state from changing after the required number of - // consecutive polls has been reached - finalized bool } -// Initialize implements the UnarySnowball interface -func (sb *unarySnowball) Initialize(beta int) { sb.beta = beta } - // RecordSuccessfulPoll implements the UnarySnowball interface func (sb *unarySnowball) RecordSuccessfulPoll() { sb.numSuccessfulPolls++ - sb.confidence++ - sb.finalized = sb.finalized || sb.confidence >= sb.beta + sb.unarySnowflake.RecordSuccessfulPoll() } -// RecordUnsuccessfulPoll implements the UnarySnowball interface -func (sb *unarySnowball) RecordUnsuccessfulPoll() { sb.confidence = 0 } - -// Finalized implements the UnarySnowball interface -func (sb *unarySnowball) Finalized() bool { return sb.finalized } - // Extend implements the UnarySnowball interface func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball { bs := &binarySnowball{ - preference: choice, - snowflake: binarySnowflake{ - beta: beta, - preference: choice, - finalized: sb.Finalized(), + binarySnowflake: binarySnowflake{ + binarySlush: binarySlush{preference: choice}, + beta: beta, + finalized: sb.Finalized(), }, + preference: choice, } return bs } // Clone implements the UnarySnowball interface func (sb *unarySnowball) Clone() UnarySnowball { - return &unarySnowball{ - beta: sb.beta, - numSuccessfulPolls: sb.numSuccessfulPolls, - confidence: sb.confidence, - finalized: sb.Finalized(), - } + newSnowball := *sb + return &newSnowball } func (sb *unarySnowball) String() string { - return fmt.Sprintf("SB(NumSuccessfulPolls = %d, Confidence = %d, Finalized = %v)", + return fmt.Sprintf("SB(NumSuccessfulPolls = %d, %s)", sb.numSuccessfulPolls, - 
sb.confidence, - sb.Finalized()) + &sb.unarySnowflake) } diff --git a/snow/consensus/snowball/unary_snowball_test.go b/snow/consensus/snowball/unary_snowball_test.go index 8bf098a..3f4efe5 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -35,7 +35,7 @@ func TestUnarySnowball(t *testing.T) { sbCloneIntf := sb.Clone() sbClone, ok := sbCloneIntf.(*unarySnowball) if !ok { - t.Fatalf("Unexpectedly clone type") + t.Fatalf("Unexpected clone type") } UnarySnowballStateTest(t, sbClone, 2, 1, false) diff --git a/snow/consensus/snowball/unary_snowflake.go b/snow/consensus/snowball/unary_snowflake.go new file mode 100644 index 0000000..2172331 --- /dev/null +++ b/snow/consensus/snowball/unary_snowflake.go @@ -0,0 +1,60 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package snowball + +import ( + "fmt" +) + +// unarySnowflake is the implementation of a unary snowflake instance +type unarySnowflake struct { + // beta is the number of consecutive successful queries required for + // finalization. 
+ beta int + + // confidence tracks the number of successful polls in a row that have + // returned the preference + confidence int + + // finalized prevents the state from changing after the required number of + // consecutive polls has been reached + finalized bool +} + +// Initialize implements the UnarySnowflake interface +func (sf *unarySnowflake) Initialize(beta int) { sf.beta = beta } + +// RecordSuccessfulPoll implements the UnarySnowflake interface +func (sf *unarySnowflake) RecordSuccessfulPoll() { + sf.confidence++ + sf.finalized = sf.finalized || sf.confidence >= sf.beta +} + +// RecordUnsuccessfulPoll implements the UnarySnowflake interface +func (sf *unarySnowflake) RecordUnsuccessfulPoll() { sf.confidence = 0 } + +// Finalized implements the UnarySnowflake interface +func (sf *unarySnowflake) Finalized() bool { return sf.finalized } + +// Extend implements the UnarySnowflake interface +func (sf *unarySnowflake) Extend(beta int, choice int) BinarySnowflake { + return &binarySnowflake{ + binarySlush: binarySlush{preference: choice}, + confidence: sf.confidence, + beta: beta, + finalized: sf.finalized, + } +} + +// Clone implements the UnarySnowflake interface +func (sf *unarySnowflake) Clone() UnarySnowflake { + newSnowflake := *sf + return &newSnowflake +} + +func (sf *unarySnowflake) String() string { + return fmt.Sprintf("SF(Confidence = %d, Finalized = %v)", + sf.confidence, + sf.finalized) +} diff --git a/snow/consensus/snowball/unary_snowflake_test.go b/snow/consensus/snowball/unary_snowflake_test.go new file mode 100644 index 0000000..55d29e2 --- /dev/null +++ b/snow/consensus/snowball/unary_snowflake_test.go @@ -0,0 +1,67 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowball + +import ( + "testing" +) + +func UnarySnowflakeStateTest(t *testing.T, sf *unarySnowflake, expectedConfidence int, expectedFinalized bool) { + if confidence := sf.confidence; confidence != expectedConfidence { + t.Fatalf("Wrong confidence. Expected %d got %d", expectedConfidence, confidence) + } else if finalized := sf.Finalized(); finalized != expectedFinalized { + t.Fatalf("Wrong finalized status. Expected %v got %v", expectedFinalized, finalized) + } +} + +func TestUnarySnowflake(t *testing.T) { + beta := 2 + + sf := &unarySnowflake{} + sf.Initialize(beta) + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 1, false) + + sf.RecordUnsuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 0, false) + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 1, false) + + sfCloneIntf := sf.Clone() + sfClone, ok := sfCloneIntf.(*unarySnowflake) + if !ok { + t.Fatalf("Unexpected clone type") + } + + UnarySnowflakeStateTest(t, sfClone, 1, false) + + binarySnowflake := sfClone.Extend(beta, 0) + + binarySnowflake.RecordUnsuccessfulPoll() + + binarySnowflake.RecordSuccessfulPoll(1) + + if binarySnowflake.Finalized() { + t.Fatalf("Should not have finalized") + } + + binarySnowflake.RecordSuccessfulPoll(1) + + if binarySnowflake.Preference() != 1 { + t.Fatalf("Wrong preference") + } else if !binarySnowflake.Finalized() { + t.Fatalf("Should have finalized") + } + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 2, true) + + sf.RecordUnsuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 0, true) + + sf.RecordSuccessfulPoll() + UnarySnowflakeStateTest(t, sf, 1, true) +} diff --git a/snow/consensus/snowman/block_test.go b/snow/consensus/snowman/block_test.go index 609530b..8b3b06f 100644 --- a/snow/consensus/snowman/block_test.go +++ b/snow/consensus/snowman/block_test.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/gecko/snow/choices" ) -type Blk struct { +type TestBlock struct { parent Block id ids.ID height int @@ -18,28 +18,28 @@ 
type Blk struct { bytes []byte } -func (b *Blk) Parent() Block { return b.parent } -func (b *Blk) ID() ids.ID { return b.id } -func (b *Blk) Status() choices.Status { return b.status } -func (b *Blk) Accept() { +func (b *TestBlock) Parent() Block { return b.parent } +func (b *TestBlock) ID() ids.ID { return b.id } +func (b *TestBlock) Status() choices.Status { return b.status } +func (b *TestBlock) Accept() { if b.status.Decided() && b.status != choices.Accepted { panic("Dis-agreement") } b.status = choices.Accepted } -func (b *Blk) Reject() { +func (b *TestBlock) Reject() { if b.status.Decided() && b.status != choices.Rejected { panic("Dis-agreement") } b.status = choices.Rejected } -func (b *Blk) Verify() error { return nil } -func (b *Blk) Bytes() []byte { return b.bytes } +func (b *TestBlock) Verify() error { return nil } +func (b *TestBlock) Bytes() []byte { return b.bytes } -type sortBlks []*Blk +type sortBlocks []*TestBlock -func (sb sortBlks) Less(i, j int) bool { return sb[i].height < sb[j].height } -func (sb sortBlks) Len() int { return len(sb) } -func (sb sortBlks) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } +func (sb sortBlocks) Less(i, j int) bool { return sb[i].height < sb[j].height } +func (sb sortBlocks) Len() int { return len(sb) } +func (sb sortBlocks) Swap(i, j int) { sb[j], sb[i] = sb[i], sb[j] } -func SortVts(blks []*Blk) { sort.Sort(sortBlks(blks)) } +func SortVts(blocks []*TestBlock) { sort.Sort(sortBlocks(blocks)) } diff --git a/snow/consensus/snowman/consensus.go b/snow/consensus/snowman/consensus.go index 022e910..fe4db08 100644 --- a/snow/consensus/snowman/consensus.go +++ b/snow/consensus/snowman/consensus.go @@ -12,7 +12,7 @@ import ( // Consensus represents a general snowman instance that can be used directly to // process a series of dependent operations. type Consensus interface { - // Takes in alpha, beta1, beta2, and an assumed accepted decision. + // Takes in the context, snowball parameters, and the last accepted block. 
Initialize(*snow.Context, snowball.Parameters, ids.ID) // Returns the parameters that describe this snowman instance diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 3d2b30b..f64b4d8 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -4,7 +4,6 @@ package snowman import ( - "fmt" "math/rand" "testing" @@ -16,571 +15,789 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowball" ) -func ParamsTest(t *testing.T, factory Factory) { +var ( + GenesisID = ids.Empty.Prefix(0) + Genesis = &TestBlock{ + id: GenesisID, + status: choices.Accepted, + } + + Tests = []func(*testing.T, Factory){ + InitializeTest, + AddToTailTest, + AddToNonTailTest, + AddToUnknownTest, + IssuedPreviouslyAcceptedTest, + IssuedPreviouslyRejectedTest, + IssuedUnissuedTest, + IssuedIssuedTest, + RecordPollAcceptSingleBlockTest, + RecordPollAcceptAndRejectTest, + RecordPollWhenFinalizedTest, + RecordPollRejectTransitivelyTest, + RecordPollTransitivelyResetConfidenceTest, + RecordPollInvalidVoteTest, + RecordPollTransitiveVotingTest, + RecordPollDivergedVotingTest, + MetricsProcessingErrorTest, + MetricsAcceptedErrorTest, + MetricsRejectedErrorTest, + RandomizedConsistencyTest, + } +) + +// Execute all tests against a consensus implementation +func ConsensusTest(t *testing.T, factory Factory) { + for _, test := range Tests { + test(t, factory) + } +} + +// Make sure that initialize sets the state correctly +func InitializeTest(t *testing.T, factory Factory) { sm := factory.New() ctx := snow.DefaultContextTest() params := snowball.Parameters{ - Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID), - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, } - numProcessing := prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: params.Namespace, - Name: 
"processing", - }) - numAccepted := prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "accepted", - }) - numRejected := prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "rejected", - }) + sm.Initialize(ctx, params, GenesisID) - params.Metrics.Register(numProcessing) - params.Metrics.Register(numAccepted) - params.Metrics.Register(numRejected) - - sm.Initialize(ctx, params, Genesis.ID()) - - if p := sm.Parameters(); p.K != params.K { - t.Fatalf("Wrong K parameter") - } else if p.Alpha != params.Alpha { - t.Fatalf("Wrong Alpha parameter") - } else if p.BetaVirtuous != params.BetaVirtuous { - t.Fatalf("Wrong Beta1 parameter") - } else if p.BetaRogue != params.BetaRogue { - t.Fatalf("Wrong Beta2 parameter") + if p := sm.Parameters(); p != params { + t.Fatalf("Wrong returned parameters") + } + if pref := sm.Preference(); !pref.Equals(GenesisID) { + t.Fatalf("Wrong preference returned") + } + if !sm.Finalized() { + t.Fatalf("Wrong should have marked the instance as being finalized") } } -func AddTest(t *testing.T, factory Factory) { +// Make sure that adding a block to the tail updates the preference +func AddToTailTest(t *testing.T, factory Factory) { sm := factory.New() + ctx := snow.DefaultContextTest() params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + sm.Initialize(ctx, params, GenesisID) - if pref := sm.Preference(); !pref.Equals(Genesis.ID()) { - t.Fatalf("Wrong preference. Expected %s, got %s", Genesis.ID(), pref) - } - - dep0 := &Blk{ + block := &TestBlock{ parent: Genesis, id: ids.Empty.Prefix(1), } - sm.Add(dep0) - if pref := sm.Preference(); !pref.Equals(dep0.id) { - t.Fatalf("Wrong preference. 
Expected %s, got %s", dep0.id, pref) - } - dep1 := &Blk{ - parent: Genesis, - id: ids.Empty.Prefix(2), - } - sm.Add(dep1) - if pref := sm.Preference(); !pref.Equals(dep0.id) { - t.Fatalf("Wrong preference. Expected %s, got %s", dep0.id, pref) - } + // Adding to the previous preference will update the preference + sm.Add(block) - dep2 := &Blk{ - parent: dep0, - id: ids.Empty.Prefix(3), - } - sm.Add(dep2) - if pref := sm.Preference(); !pref.Equals(dep2.id) { - t.Fatalf("Wrong preference. Expected %s, got %s", dep2.id, pref) - } - - dep3 := &Blk{ - parent: &Blk{id: ids.Empty.Prefix(4)}, - id: ids.Empty.Prefix(5), - } - sm.Add(dep3) - if pref := sm.Preference(); !pref.Equals(dep2.id) { - t.Fatalf("Wrong preference. Expected %s, got %s", dep2.id, pref) + if pref := sm.Preference(); !pref.Equals(block.id) { + t.Fatalf("Wrong preference. Expected %s, got %s", block.id, pref) } } -func CollectTest(t *testing.T, factory Factory) { +// Make sure that adding a block not to the tail doesn't change the preference +func AddToNonTailTest(t *testing.T, factory Factory) { sm := factory.New() + ctx := snow.DefaultContextTest() params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + sm.Initialize(ctx, params, GenesisID) - dep1 := &Blk{ - parent: Genesis, - id: ids.Empty.Prefix(2), - } - sm.Add(dep1) - - dep0 := &Blk{ + firstBlock := &TestBlock{ parent: Genesis, id: ids.Empty.Prefix(1), } - sm.Add(dep0) - - dep2 := &Blk{ - parent: dep0, - id: ids.Empty.Prefix(3), + secondBlock := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(2), } - sm.Add(dep2) - dep3 := &Blk{ - parent: dep0, + // Adding to the previous preference will update the preference + sm.Add(firstBlock) + + if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + 
t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref) + } + + // Adding to something other than the previous preference won't update the + // preference + sm.Add(secondBlock) + + if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref) + } +} + +// Make sure that adding a block that is detached from the rest of the tree +// rejects the block +func AddToUnknownTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: &TestBlock{id: ids.Empty.Prefix(1)}, + id: ids.Empty.Prefix(2), + } + + // Adding a block with an unknown parent means the parent must have already + // been rejected. Therefore the block should be immediately rejected + sm.Add(block) + + if pref := sm.Preference(); !pref.Equals(GenesisID) { + t.Fatalf("Wrong preference. 
Expected %s, got %s", GenesisID, pref) + } else if status := block.Status(); status != choices.Rejected { + t.Fatalf("Should have rejected the block") + } +} + +func IssuedPreviouslyAcceptedTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + if !sm.Issued(Genesis) { + t.Fatalf("Should have marked an accepted block as having been issued") + } +} + +func IssuedPreviouslyRejectedTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Rejected, + } + + if !sm.Issued(block) { + t.Fatalf("Should have marked a rejected block as having been issued") + } +} + +func IssuedUnissuedTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + if sm.Issued(block) { + t.Fatalf("Shouldn't have marked an unissued block as having been issued") + } +} + +func IssuedIssuedTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 3, + BetaRogue: 5, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, 
+ id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + sm.Add(block) + + if !sm.Issued(block) { + t.Fatalf("Should have marked a pending block as having been issued") + } +} + +func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 2, + BetaRogue: 3, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + sm.Add(block) + + votes := ids.Bag{} + votes.Add(block.id) + + sm.RecordPoll(votes) + + if pref := sm.Preference(); !pref.Equals(block.id) { + t.Fatalf("Preference returned the wrong block") + } else if sm.Finalized() { + t.Fatalf("Snowman instance finalized too soon") + } else if status := block.Status(); status != choices.Processing { + t.Fatalf("Block's status changed unexpectedly") + } + + sm.RecordPoll(votes) + + if pref := sm.Preference(); !pref.Equals(block.id) { + t.Fatalf("Preference returned the wrong block") + } else if !sm.Finalized() { + t.Fatalf("Snowman instance didn't finalize") + } else if status := block.Status(); status != choices.Accepted { + t.Fatalf("Block's status should have been set to accepted") + } +} + +func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + firstBlock := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + secondBlock := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(2), + status: choices.Processing, + } + + sm.Add(firstBlock) + sm.Add(secondBlock) + + votes := ids.Bag{} + votes.Add(firstBlock.id) + + 
sm.RecordPoll(votes) + + if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + t.Fatalf("Preference returned the wrong block") + } else if sm.Finalized() { + t.Fatalf("Snowman instance finalized too soon") + } else if status := firstBlock.Status(); status != choices.Processing { + t.Fatalf("Block's status changed unexpectedly") + } else if status := secondBlock.Status(); status != choices.Processing { + t.Fatalf("Block's status changed unexpectedly") + } + + sm.RecordPoll(votes) + + if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + t.Fatalf("Preference returned the wrong block") + } else if !sm.Finalized() { + t.Fatalf("Snowman instance didn't finalize") + } else if status := firstBlock.Status(); status != choices.Accepted { + t.Fatalf("Block's status should have been set to accepted") + } else if status := secondBlock.Status(); status != choices.Rejected { + t.Fatalf("Block's status should have been set to rejected") + } +} + +func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + votes := ids.Bag{} + votes.Add(GenesisID) + sm.RecordPoll(votes) + + if !sm.Finalized() { + t.Fatalf("Consensus should still be finalized") + } else if pref := sm.Preference(); !GenesisID.Equals(pref) { + t.Fatalf("Wrong preference listed") + } +} + +func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block0 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + block1 := &TestBlock{ + parent: 
Genesis, + id: ids.Empty.Prefix(2), + status: choices.Processing, + } + block2 := &TestBlock{ + parent: block1, + id: ids.Empty.Prefix(3), + status: choices.Processing, + } + + sm.Add(block0) + sm.Add(block1) + sm.Add(block2) + + // Current graph structure: + // G + // / \ + // 0 1 + // | + // 2 + // Tail = 0 + + votes := ids.Bag{} + votes.Add(block0.id) + sm.RecordPoll(votes) + + // Current graph structure: + // 0 + // Tail = 0 + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if pref := sm.Preference(); !block0.id.Equals(pref) { + t.Fatalf("Wrong preference listed") + } + + if status := block0.Status(); status != choices.Accepted { + t.Fatalf("Wrong status returned") + } else if status := block1.Status(); status != choices.Rejected { + t.Fatalf("Wrong status returned") + } else if status := block2.Status(); status != choices.Rejected { + t.Fatalf("Wrong status returned") + } +} + +func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 2, + BetaRogue: 2, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block0 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + block1 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(2), + status: choices.Processing, + } + block2 := &TestBlock{ + parent: block1, + id: ids.Empty.Prefix(3), + status: choices.Processing, + } + block3 := &TestBlock{ + parent: block1, id: ids.Empty.Prefix(4), - } - sm.Add(dep3) - - // Current graph structure: - // G - // / \ - // 0 1 - // / \ - // 2 3 - // Tail = 1 - - dep2_2 := ids.Bag{} - dep2_2.AddCount(dep2.id, 2) - sm.RecordPoll(dep2_2) - - // Current graph structure: - // G - // / \ - // 0 1 - // / \ - // 2 3 - // Tail = 2 - - if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if 
!dep2.id.Equals(sm.Preference()) { - t.Fatalf("Wrong preference listed") - } - - dep3_2 := ids.Bag{} - dep3_2.AddCount(dep3.id, 2) - sm.RecordPoll(dep3_2) - - // Current graph structure: - // 0 - // / \ - // 2 3 - // Tail = 2 - - if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if !dep2.id.Equals(sm.Preference()) { - t.Fatalf("Wrong preference listed") - } - - sm.RecordPoll(dep2_2) - - // Current graph structure: - // 0 - // / \ - // 2 3 - // Tail = 2 - - if sm.Finalized() { - t.Fatalf("Finalized too early") - } else if !dep2.id.Equals(sm.Preference()) { - t.Fatalf("Wrong preference listed") - } - - sm.RecordPoll(dep2_2) - - // Current graph structure: - // 2 - // Tail = 2 - - if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if !dep2.id.Equals(sm.Preference()) { - t.Fatalf("Wrong preference listed") - } - - if dep0.Status() != choices.Accepted { - t.Fatalf("Should have accepted") - } else if dep1.Status() != choices.Rejected { - t.Fatalf("Should have rejected") - } else if dep2.Status() != choices.Accepted { - t.Fatalf("Should have accepted") - } else if dep3.Status() != choices.Rejected { - t.Fatalf("Should have rejected") - } -} - -func CollectNothingTest(t *testing.T, factory Factory) { - sm := factory.New() - - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, - } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) - - // Current graph structure: - // G - // Tail = G - - genesis1 := ids.Bag{} - genesis1.AddCount(Genesis.ID(), 1) - sm.RecordPoll(genesis1) - - // Current graph structure: - // G - // Tail = G - - if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if !Genesis.ID().Equals(sm.Preference()) { - t.Fatalf("Wrong preference listed") - } -} - -func CollectTransRejectTest(t *testing.T, factory Factory) { - sm := factory.New() - - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, 
BetaRogue: 2, - } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) - - dep1 := &Blk{ - parent: Genesis, - id: ids.Empty.Prefix(2), - } - sm.Add(dep1) - - dep0 := &Blk{ - parent: Genesis, - id: ids.Empty.Prefix(1), - } - sm.Add(dep0) - - dep2 := &Blk{ - parent: dep0, - id: ids.Empty.Prefix(3), - } - sm.Add(dep2) - - // Current graph structure: - // G - // / \ - // 0 1 - // / - // 2 - // Tail = 1 - - dep1_1 := ids.Bag{} - dep1_1.AddCount(dep1.id, 1) - sm.RecordPoll(dep1_1) - sm.RecordPoll(dep1_1) - - // Current graph structure: - // 1 - // Tail = 1 - - if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if !dep1.id.Equals(sm.Preference()) { - t.Fatalf("Wrong preference listed") - } - - if dep0.Status() != choices.Rejected { - t.Fatalf("Should have rejected") - } else if dep1.Status() != choices.Accepted { - t.Fatalf("Should have accepted") - } else if dep2.Status() != choices.Rejected { - t.Fatalf("Should have rejected") - } -} - -func CollectTransResetTest(t *testing.T, factory Factory) { - sm := factory.New() - - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, - } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) - - dep1 := &Blk{ - parent: Genesis, - id: ids.Empty.Prefix(2), status: choices.Processing, } - sm.Add(dep1) - dep0 := &Blk{ + sm.Add(block0) + sm.Add(block1) + sm.Add(block2) + sm.Add(block3) + + // Current graph structure: + // G + // / \ + // 0 1 + // / \ + // 2 3 + + votesFor2 := ids.Bag{} + votesFor2.Add(block2.id) + sm.RecordPoll(votesFor2) + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if pref := sm.Preference(); !block2.id.Equals(pref) { + t.Fatalf("Wrong preference listed") + } + + emptyVotes := ids.Bag{} + sm.RecordPoll(emptyVotes) + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if pref := sm.Preference(); !block2.id.Equals(pref) { + t.Fatalf("Wrong preference listed") + } + + 
sm.RecordPoll(votesFor2) + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if pref := sm.Preference(); !block2.id.Equals(pref) { + t.Fatalf("Wrong preference listed") + } + + votesFor3 := ids.Bag{} + votesFor3.Add(block3.id) + sm.RecordPoll(votesFor3) + + if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if pref := sm.Preference(); !block2.id.Equals(pref) { + t.Fatalf("Wrong preference listed") + } + + sm.RecordPoll(votesFor3) + + if !sm.Finalized() { + t.Fatalf("Finalized too late") + } else if pref := sm.Preference(); !block3.id.Equals(pref) { + t.Fatalf("Wrong preference listed") + } + + if status := block0.Status(); status != choices.Rejected { + t.Fatalf("Wrong status returned") + } else if status := block1.Status(); status != choices.Accepted { + t.Fatalf("Wrong status returned") + } else if status := block2.Status(); status != choices.Rejected { + t.Fatalf("Wrong status returned") + } else if status := block3.Status(); status != choices.Accepted { + t.Fatalf("Wrong status returned") + } +} + +func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 2, + BetaRogue: 2, + ConcurrentRepolls: 1, + } + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ parent: Genesis, id: ids.Empty.Prefix(1), status: choices.Processing, } - sm.Add(dep0) + unknownBlockID := ids.Empty.Prefix(2) - dep2 := &Blk{ - parent: dep0, - id: ids.Empty.Prefix(3), - status: choices.Processing, - } - sm.Add(dep2) + sm.Add(block) - // Current graph structure: - // G - // / \ - // 0 1 - // / - // 2 - // Tail = 1 + validVotes := ids.Bag{} + validVotes.Add(block.id) + sm.RecordPoll(validVotes) - dep1_1 := ids.Bag{} - dep1_1.AddCount(dep1.id, 1) - sm.RecordPoll(dep1_1) + invalidVotes := ids.Bag{} + invalidVotes.Add(unknownBlockID) + sm.RecordPoll(invalidVotes) - // Current graph structure: 
- // G - // / \ - // 0 1 - // / - // 2 - // Tail = 1 - - dep2_1 := ids.Bag{} - dep2_1.AddCount(dep2.id, 1) - sm.RecordPoll(dep2_1) + sm.RecordPoll(validVotes) if sm.Finalized() { t.Fatalf("Finalized too early") - } else if status := dep0.Status(); status != choices.Processing { - t.Fatalf("Shouldn't have accepted yet %s", status) - } - - if !dep1.id.Equals(sm.Preference()) { + } else if pref := sm.Preference(); !block.id.Equals(pref) { t.Fatalf("Wrong preference listed") } - - sm.RecordPoll(dep2_1) - sm.RecordPoll(dep2_1) - - if !sm.Finalized() { - t.Fatalf("Finalized too late") - } else if dep0.Status() != choices.Accepted { - t.Fatalf("Should have accepted") - } else if dep1.Status() != choices.Rejected { - t.Fatalf("Should have rejected") - } else if dep2.Status() != choices.Accepted { - t.Fatalf("Should have accepted") - } } -func CollectTransVoteTest(t *testing.T, factory Factory) { +func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { sm := factory.New() + ctx := snow.DefaultContextTest() params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, Alpha: 3, BetaVirtuous: 1, BetaRogue: 1, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 3, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + sm.Initialize(ctx, params, GenesisID) - dep0 := &Blk{ + block0 := &TestBlock{ parent: Genesis, id: ids.Empty.Prefix(1), + status: choices.Processing, } - sm.Add(dep0) - - dep1 := &Blk{ - parent: dep0, + block1 := &TestBlock{ + parent: block0, id: ids.Empty.Prefix(2), + status: choices.Processing, } - sm.Add(dep1) - - dep2 := &Blk{ - parent: dep1, + block2 := &TestBlock{ + parent: block1, id: ids.Empty.Prefix(3), + status: choices.Processing, } - sm.Add(dep2) - - dep3 := &Blk{ - parent: dep0, + block3 := &TestBlock{ + parent: block0, id: ids.Empty.Prefix(4), + status: choices.Processing, } - sm.Add(dep3) - - dep4 := &Blk{ - parent: dep3, + block4 := &TestBlock{ + 
parent: block3, id: ids.Empty.Prefix(5), + status: choices.Processing, } - sm.Add(dep4) + + sm.Add(block0) + sm.Add(block1) + sm.Add(block2) + sm.Add(block3) + sm.Add(block4) // Current graph structure: - // G - // / - // 0 - // / \ - // 1 3 - // / \ - // 2 4 + // G + // | + // 0 + // / \ + // 1 3 + // | | + // 2 4 // Tail = 2 - dep0_2_4_1 := ids.Bag{} - dep0_2_4_1.AddCount(dep0.id, 1) - dep0_2_4_1.AddCount(dep2.id, 1) - dep0_2_4_1.AddCount(dep4.id, 1) - sm.RecordPoll(dep0_2_4_1) + votes0_2_4 := ids.Bag{} + votes0_2_4.Add(block0.id) + votes0_2_4.Add(block2.id) + votes0_2_4.Add(block4.id) + sm.RecordPoll(votes0_2_4) // Current graph structure: - // 0 - // / \ - // 1 3 - // / \ - // 2 4 + // 0 + // / \ + // 1 3 + // | | + // 2 4 // Tail = 2 - if !dep2.id.Equals(sm.Preference()) { + if pref := sm.Preference(); !block2.id.Equals(pref) { t.Fatalf("Wrong preference listed") + } else if sm.Finalized() { + t.Fatalf("Finalized too early") + } else if block0.Status() != choices.Accepted { + t.Fatalf("Should have accepted") + } else if block1.Status() != choices.Processing { + t.Fatalf("Should have accepted") + } else if block2.Status() != choices.Processing { + t.Fatalf("Should have accepted") + } else if block3.Status() != choices.Processing { + t.Fatalf("Should have rejected") + } else if block4.Status() != choices.Processing { + t.Fatalf("Should have rejected") } - dep2_3 := ids.Bag{} - dep2_3.AddCount(dep2.id, 3) - sm.RecordPoll(dep2_3) + dep2_2_2 := ids.Bag{} + dep2_2_2.AddCount(block2.id, 3) + sm.RecordPoll(dep2_2_2) // Current graph structure: // 2 // Tail = 2 - if !dep2.id.Equals(sm.Preference()) { + if pref := sm.Preference(); !block2.id.Equals(pref) { t.Fatalf("Wrong preference listed") - } - - if !sm.Finalized() { + } else if !sm.Finalized() { t.Fatalf("Finalized too late") - } else if dep0.Status() != choices.Accepted { + } else if block0.Status() != choices.Accepted { t.Fatalf("Should have accepted") - } else if dep1.Status() != choices.Accepted { + } else if 
block1.Status() != choices.Accepted { t.Fatalf("Should have accepted") - } else if dep2.Status() != choices.Accepted { + } else if block2.Status() != choices.Accepted { t.Fatalf("Should have accepted") - } else if dep3.Status() != choices.Rejected { + } else if block3.Status() != choices.Rejected { t.Fatalf("Should have rejected") - } else if dep4.Status() != choices.Rejected { + } else if block4.Status() != choices.Rejected { t.Fatalf("Should have rejected") } } -func DivergedVotingTest(t *testing.T, factory Factory) { +func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { sm := factory.New() + ctx := snow.DefaultContextTest() params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) + sm.Initialize(ctx, params, GenesisID) - dep0 := &Blk{ + block0 := &TestBlock{ parent: Genesis, id: ids.NewID([32]byte{0x0f}), // 0b1111 + status: choices.Processing, } - sm.Add(dep0) - - dep1 := &Blk{ + block1 := &TestBlock{ parent: Genesis, id: ids.NewID([32]byte{0x08}), // 0b1000 + status: choices.Processing, } - sm.Add(dep1) - - dep0_1 := ids.Bag{} - dep0_1.AddCount(dep0.id, 1) - sm.RecordPoll(dep0_1) - - dep2 := &Blk{ + block2 := &TestBlock{ parent: Genesis, id: ids.NewID([32]byte{0x01}), // 0b0001 + status: choices.Processing, } - sm.Add(dep2) + block3 := &TestBlock{ + parent: block2, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + sm.Add(block0) + sm.Add(block1) + + votes0 := ids.Bag{} + votes0.Add(block0.id) + sm.RecordPoll(votes0) + + sm.Add(block2) // dep2 is already rejected. 
- dep3 := &Blk{ - parent: dep2, - id: ids.Empty.Prefix(3), - } - sm.Add(dep3) + sm.Add(block3) - if dep0.Status() == choices.Accepted { + if status := block0.Status(); status == choices.Accepted { t.Fatalf("Shouldn't be accepted yet") } // Transitively increases dep2. However, dep2 shares the first bit with // dep0. Because dep2 is already rejected, this will accept dep0. - dep3_1 := ids.Bag{} - dep3_1.AddCount(dep3.id, 1) - sm.RecordPoll(dep3_1) + votes3 := ids.Bag{} + votes3.Add(block3.id) + sm.RecordPoll(votes3) if !sm.Finalized() { t.Fatalf("Finalized too late") - } else if dep0.Status() != choices.Accepted { + } else if status := block0.Status(); status != choices.Accepted { t.Fatalf("Should be accepted") } } -func IssuedTest(t *testing.T, factory Factory) { - sm := factory.New() - - params := snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, - } - - sm.Initialize(snow.DefaultContextTest(), params, Genesis.ID()) - - dep0 := &Blk{ - parent: Genesis, - id: ids.NewID([32]byte{0}), - status: choices.Processing, - } - - if sm.Issued(dep0) { - t.Fatalf("Hasn't been issued yet") - } - - sm.Add(dep0) - - if !sm.Issued(dep0) { - t.Fatalf("Has been issued") - } - - dep1 := &Blk{ - parent: Genesis, - id: ids.NewID([32]byte{0x1}), // 0b0001 - status: choices.Accepted, - } - - if !sm.Issued(dep1) { - t.Fatalf("Has accepted status") - } -} - -func MetricsErrorTest(t *testing.T, factory Factory) { +func MetricsProcessingErrorTest(t *testing.T, factory Factory) { sm := factory.New() ctx := snow.DefaultContextTest() params := snowball.Parameters{ - Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID), - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, } numProcessing := prometheus.NewGauge( @@ -588,31 +805,118 @@ func MetricsErrorTest(t *testing.T, factory Factory) { Namespace: 
params.Namespace, Name: "processing", }) - numAccepted := prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "accepted", - }) - numRejected := prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "rejected", - }) if err := params.Metrics.Register(numProcessing); err != nil { t.Fatal(err) } + + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + sm.Add(block) + + votes := ids.Bag{} + votes.Add(block.id) + + sm.RecordPoll(votes) + + if !sm.Finalized() { + t.Fatalf("Snowman instance didn't finalize") + } +} + +func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + numAccepted := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "accepted", + }) + if err := params.Metrics.Register(numAccepted); err != nil { t.Fatal(err) } + + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + sm.Add(block) + + votes := ids.Bag{} + votes.Add(block.id) + + sm.RecordPoll(votes) + + if !sm.Finalized() { + t.Fatalf("Snowman instance didn't finalize") + } +} + +func MetricsRejectedErrorTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + numRejected := prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "rejected", + }) + if err := params.Metrics.Register(numRejected); err != nil { t.Fatal(err) } - sm.Initialize(ctx, params, Genesis.ID()) + 
sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + + sm.Add(block) + + votes := ids.Bag{} + votes.Add(block.id) + + sm.RecordPoll(votes) + + if !sm.Finalized() { + t.Fatalf("Snowman instance didn't finalize") + } } -func ConsistentTest(t *testing.T, factory Factory) { +func RandomizedConsistencyTest(t *testing.T, factory Factory) { numColors := 50 numNodes := 100 params := snowball.Parameters{ diff --git a/snow/consensus/snowman/ids_test.go b/snow/consensus/snowman/ids_test.go deleted file mode 100644 index 2fe47a1..0000000 --- a/snow/consensus/snowman/ids_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowman - -import ( - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow/choices" -) - -var ( - Genesis = &Blk{ - id: ids.Empty.Prefix(0), - status: choices.Accepted, - } -) - -func Matches(a, b []ids.ID) bool { - if len(a) != len(b) { - return false - } - set := ids.Set{} - set.Add(a...) - for _, id := range b { - if !set.Contains(id) { - return false - } - } - return true -} -func MatchesShort(a, b []ids.ShortID) bool { - if len(a) != len(b) { - return false - } - set := ids.ShortSet{} - set.Add(a...) - for _, id := range b { - if !set.Contains(id) { - return false - } - } - return true -} diff --git a/snow/consensus/snowman/metrics.go b/snow/consensus/snowman/metrics.go new file mode 100644 index 0000000..f415909 --- /dev/null +++ b/snow/consensus/snowman/metrics.go @@ -0,0 +1,87 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowman + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" +) + +type metrics struct { + numProcessing prometheus.Gauge + latAccepted, latRejected prometheus.Histogram + + clock timer.Clock + processing map[[32]byte]time.Time +} + +// Initialize implements the Engine interface +func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { + m.processing = make(map[[32]byte]time.Time) + + m.numProcessing = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "processing", + Help: "Number of currently processing blocks", + }) + m.latAccepted = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespace, + Name: "accepted", + Help: "Latency of accepting from the time the block was issued in milliseconds", + Buckets: timer.Buckets, + }) + m.latRejected = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespace, + Name: "rejected", + Help: "Latency of rejecting from the time the block was issued in milliseconds", + Buckets: timer.Buckets, + }) + + if err := registerer.Register(m.numProcessing); err != nil { + return fmt.Errorf("Failed to register processing statistics due to %w", err) + } + if err := registerer.Register(m.latAccepted); err != nil { + return fmt.Errorf("Failed to register accepted statistics due to %w", err) + } + if err := registerer.Register(m.latRejected); err != nil { + return fmt.Errorf("Failed to register rejected statistics due to %w", err) + } + return nil +} + +func (m *metrics) Issued(id ids.ID) { + m.processing[id.Key()] = m.clock.Time() + m.numProcessing.Inc() +} + +func (m *metrics) Accepted(id ids.ID) { + key := id.Key() + start := m.processing[key] + end := m.clock.Time() + + delete(m.processing, key) + + m.latAccepted.Observe(float64(end.Sub(start).Milliseconds())) + 
m.numProcessing.Dec() +} + +func (m *metrics) Rejected(id ids.ID) { + key := id.Key() + start := m.processing[key] + end := m.clock.Time() + + delete(m.processing, key) + + m.latRejected.Observe(float64(end.Sub(start).Milliseconds())) + m.numProcessing.Dec() +} diff --git a/snow/consensus/snowman/network_test.go b/snow/consensus/snowman/network_test.go index 7b27021..0639448 100644 --- a/snow/consensus/snowman/network_test.go +++ b/snow/consensus/snowman/network_test.go @@ -15,13 +15,13 @@ import ( type Network struct { params snowball.Parameters - colors []*Blk + colors []*TestBlock nodes, running []Consensus } func (n *Network) shuffleColors() { s := random.Uniform{N: len(n.colors)} - colors := []*Blk(nil) + colors := []*TestBlock(nil) for s.CanSample() { colors = append(colors, n.colors[s.Sample()]) } @@ -31,7 +31,7 @@ func (n *Network) shuffleColors() { func (n *Network) Initialize(params snowball.Parameters, numColors int) { n.params = params - n.colors = append(n.colors, &Blk{ + n.colors = append(n.colors, &TestBlock{ parent: Genesis, id: ids.Empty.Prefix(uint64(random.Rand(0, math.MaxInt64))), status: choices.Processing, @@ -39,7 +39,7 @@ func (n *Network) Initialize(params snowball.Parameters, numColors int) { for i := 1; i < numColors; i++ { dependency := n.colors[random.Rand(0, len(n.colors))] - n.colors = append(n.colors, &Blk{ + n.colors = append(n.colors, &TestBlock{ parent: dependency, id: ids.Empty.Prefix(uint64(random.Rand(0, math.MaxInt64))), height: dependency.height + 1, @@ -58,7 +58,7 @@ func (n *Network) AddNode(sm Consensus) { if !found { myDep = blk.parent } - myVtx := &Blk{ + myVtx := &TestBlock{ parent: myDep, id: blk.id, height: blk.height, diff --git a/snow/consensus/snowman/snowman_block.go b/snow/consensus/snowman/snowman_block.go new file mode 100644 index 0000000..b0c1ba4 --- /dev/null +++ b/snow/consensus/snowman/snowman_block.go @@ -0,0 +1,58 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package snowman + +import ( + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowball" +) + +// Tracks the state of a snowman block +type snowmanBlock struct { + // pointer to the snowman instance this node is managed by + sm Consensus + + // block that this node contains. For the genesis, this value will be nil + blk Block + + // shouldFalter is set to true if this node, and all its decendants received + // less than Alpha votes + shouldFalter bool + + // sb is the snowball instance used to decided which child is the canonical + // child of this block. If this node has not had a child issued under it, + // this value will be nil + sb snowball.Consensus + + // children is the set of blocks that have been issued that name this block + // as their parent. If this node has not had a child issued under it, this value + // will be nil + children map[[32]byte]Block +} + +func (n *snowmanBlock) AddChild(child Block) { + childID := child.ID() + childKey := childID.Key() + + // if the snowball instance is nil, this is the first child. So the instance + // should be initialized. 
+ if n.sb == nil { + n.sb = &snowball.Tree{} + n.sb.Initialize(n.sm.Parameters(), childID) + n.children = make(map[[32]byte]Block) + } else { + n.sb.Add(childID) + } + + n.children[childKey] = child +} + +func (n *snowmanBlock) Accepted() bool { + // if the block is nil, then this is the genesis which is defined as + // accepted + if n.blk == nil { + return true + } + return n.blk.Status() == choices.Accepted +} diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 6ad92c8..459673f 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -4,8 +4,6 @@ package snowman import ( - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/consensus/snowball" @@ -19,83 +17,57 @@ func (TopologicalFactory) New() Consensus { return &Topological{} } // Topological implements the Snowman interface by using a tree tracking the // strongly preferred branch. This tree structure amortizes network polls to -// vote on more than just the next position. +// vote on more than just the next block. 
type Topological struct { - ctx *snow.Context + metrics + + // ctx is the context this snowman instance is executing in + ctx *snow.Context + + // params are the parameters that should be used to initialize snowball + // instances params snowball.Parameters - numProcessing prometheus.Gauge - numAccepted, numRejected prometheus.Counter + // head is the last accepted block + head ids.ID - head ids.ID - nodes map[[32]byte]node // ParentID -> Snowball instance - tail ids.ID -} + // blocks stores the last accepted block and all the pending blocks + blocks map[[32]byte]*snowmanBlock // blockID -> snowmanBlock -// Tracks the state of a snowman vertex -type node struct { - ts *Topological - blkID ids.ID - blk Block - - shouldFalter bool - sb snowball.Consensus - children map[[32]byte]Block + // tail is the preferred block with no children + tail ids.ID } // Used to track the kahn topological sort status type kahnNode struct { + // inDegree is the number of children that haven't been processed yet. 
If + // inDegree is 0, then this node is a leaf inDegree int - votes ids.Bag + // votes for all the children of this node, so far + votes ids.Bag } // Used to track which children should receive votes type votes struct { - id ids.ID + // parentID is the parent of all the votes provided in the votes bag + parentID ids.ID + // votes for all the children of the parent votes ids.Bag } // Initialize implements the Snowman interface func (ts *Topological) Initialize(ctx *snow.Context, params snowball.Parameters, rootID ids.ID) { - ctx.Log.AssertDeferredNoError(params.Valid) - ts.ctx = ctx ts.params = params - ts.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: params.Namespace, - Name: "processing", - Help: "Number of currently processing blocks", - }) - ts.numAccepted = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "accepted", - Help: "Number of blocks accepted", - }) - ts.numRejected = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "rejected", - Help: "Number of blocks rejected", - }) - - if err := ts.params.Metrics.Register(ts.numProcessing); err != nil { - ts.ctx.Log.Error("Failed to register processing statistics due to %s", err) - } - if err := ts.params.Metrics.Register(ts.numAccepted); err != nil { - ts.ctx.Log.Error("Failed to register accepted statistics due to %s", err) - } - if err := ts.params.Metrics.Register(ts.numRejected); err != nil { - ts.ctx.Log.Error("Failed to register rejected statistics due to %s", err) + if err := ts.metrics.Initialize(ctx.Log, params.Namespace, params.Metrics); err != nil { + ts.ctx.Log.Error("%s", err) } ts.head = rootID - ts.nodes = map[[32]byte]node{ - rootID.Key(): node{ - ts: ts, - blkID: rootID, + ts.blocks = map[[32]byte]*snowmanBlock{ + rootID.Key(): &snowmanBlock{ + sm: ts, }, } ts.tail = rootID @@ -111,46 +83,48 @@ func (ts *Topological) Add(blk Block) { parentKey := parentID.Key() blkID := blk.ID() + blkBytes := 
blk.Bytes() - bytes := blk.Bytes() - ts.ctx.DecisionDispatcher.Issue(ts.ctx.ChainID, blkID, bytes) - ts.ctx.ConsensusDispatcher.Issue(ts.ctx.ChainID, blkID, bytes) + // Notify anyone listening that this block was issued. + ts.ctx.DecisionDispatcher.Issue(ts.ctx.ChainID, blkID, blkBytes) + ts.ctx.ConsensusDispatcher.Issue(ts.ctx.ChainID, blkID, blkBytes) + ts.metrics.Issued(blkID) - if parent, ok := ts.nodes[parentKey]; ok { - parent.Add(blk) - ts.nodes[parentKey] = parent - - ts.nodes[blkID.Key()] = node{ - ts: ts, - blkID: blkID, - blk: blk, - } - - // If we are extending the tail, this is the new tail - if ts.tail.Equals(parentID) { - ts.tail = blkID - } - - ts.numProcessing.Inc() - } else { + parentNode, ok := ts.blocks[parentKey] + if !ok { // If the ancestor is missing, this means the ancestor must have already - // been pruned. Therefore, the dependent is transitively rejected. + // been pruned. Therefore, the dependent should be transitively + // rejected. blk.Reject() - bytes := blk.Bytes() - ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, blkID, bytes) - ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, blkID, bytes) + // Notify anyone listening that this block was rejected. + ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, blkID, blkBytes) + ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, blkID, blkBytes) + ts.metrics.Rejected(blkID) + return + } - ts.numRejected.Inc() + // add the block as a child of its parent, and add the block to the tree + parentNode.AddChild(blk) + ts.blocks[blkID.Key()] = &snowmanBlock{ + sm: ts, + blk: blk, + } + + // If we are extending the tail, this is the new tail + if ts.tail.Equals(parentID) { + ts.tail = blkID } } // Issued implements the Snowman interface func (ts *Topological) Issued(blk Block) bool { + // If the block is decided, then it must have been previously issued. 
if blk.Status().Decided() { return true } - _, ok := ts.nodes[blk.ID().Key()] + // If the block is in the map of current blocks, then the block was issued. + _, ok := ts.blocks[blk.ID().Key()] return ok } @@ -158,15 +132,28 @@ func (ts *Topological) Issued(blk Block) bool { func (ts *Topological) Preference() ids.ID { return ts.tail } // RecordPoll implements the Snowman interface -// This performs Kahn’s algorithm. -// When a node is removed from the leaf queue, it is checked to see if the -// number of votes is >= alpha. If it is, then it is added to the vote stack. -// Once there are no nodes in the leaf queue. The vote stack is unwound and -// voted on. If a decision is made, then that choice is marked as accepted, and -// all alternative choices are marked as rejected. +// +// The votes bag contains at most K votes for blocks in the tree. If there is a +// vote for a block that isn't in the tree, the vote is dropped. +// +// Votes are propagated transitively towards the genesis. All blocks in the tree +// that result in at least Alpha votes will record the poll on their children. +// Every other block will have an unsuccessful poll registered. +// +// After collecting which blocks should be voted on, the polls are registered +// and blocks are accepted/rejected as needed. The tail is then updated to equal +// the leaf on the preferred branch. +// +// To optimize the theoretical complexity of the vote propagation, a topological +// sort is done over the blocks that are reachable from the provided votes. +// During the sort, votes are pushed towards the genesis. To prevent interating +// over all blocks that had unsuccessful polls, we set a flag on the block to +// know that any future traversal through that block should register an +// unsuccessful poll on that block and every decendant block. 
+// // The complexity of this function is: -// Runtime = 3 * |live set| + |votes| -// Space = |live set| + |votes| +// - Runtime = 3 * |live set| + |votes| +// - Space = 2 * |live set| + |votes| func (ts *Topological) RecordPoll(votes ids.Bag) { // Runtime = |live set| + |votes| ; Space = |live set| + |votes| kahnGraph, leaves := ts.calculateInDegree(votes) @@ -175,254 +162,326 @@ func (ts *Topological) RecordPoll(votes ids.Bag) { voteStack := ts.pushVotes(kahnGraph, leaves) // Runtime = |live set| ; Space = Constant - tail := ts.vote(voteStack) - tn := node{} - for tn = ts.nodes[tail.Key()]; tn.sb != nil; tn = ts.nodes[tail.Key()] { - tail = tn.sb.Preference() - } + preferred := ts.vote(voteStack) - ts.tail = tn.blkID + // Runtime = |live set| ; Space = Constant + ts.tail = ts.getPreferredDecendent(preferred) } // Finalized implements the Snowman interface -func (ts *Topological) Finalized() bool { return len(ts.nodes) == 1 } +func (ts *Topological) Finalized() bool { return len(ts.blocks) == 1 } // takes in a list of votes and sets up the topological ordering. Returns the // reachable section of the graph annotated with the number of inbound edges and -// the non-transitively applied votes. Also returns the list of leaf nodes. +// the non-transitively applied votes. Also returns the list of leaf blocks. func (ts *Topological) calculateInDegree( votes ids.Bag) (map[[32]byte]kahnNode, []ids.ID) { kahns := make(map[[32]byte]kahnNode) leaves := ids.Set{} for _, vote := range votes.List() { - voteNode, validVote := ts.nodes[vote.Key()] - // If it is not found, then the vote is either for something rejected, - // or something we haven't heard of yet. 
- if validVote && voteNode.blk != nil && !voteNode.blk.Status().Decided() { - parentID := voteNode.blk.Parent().ID() - parentKey := parentID.Key() - kahn, previouslySeen := kahns[parentKey] - // Add this new vote to the current bag of votes - kahn.votes.AddCount(vote, votes.Count(vote)) - kahns[parentKey] = kahn + voteKey := vote.Key() + votedBlock, validVote := ts.blocks[voteKey] - if !previouslySeen { - // If I've never seen this node before, it is currently a leaf. - leaves.Add(parentID) + // If the vote is for a block that isn't in the current pending set, + // then the vote is dropped + if !validVote { + continue + } - for n, e := ts.nodes[parentKey]; e; n, e = ts.nodes[parentKey] { - if n.blk == nil || n.blk.Status().Decided() { - break // Ensure that we haven't traversed off the tree - } - parentID := n.blk.Parent().ID() - parentKey = parentID.Key() + // If the vote is for the last accepted block, the vote is dropped + if votedBlock.Accepted() { + continue + } - kahn := kahns[parentKey] - kahn.inDegree++ - kahns[parentKey] = kahn + // The parent contains the snowball instance of its children + parent := votedBlock.blk.Parent() + parentID := parent.ID() + parentIDKey := parentID.Key() - if kahn.inDegree == 1 { - // If I am transitively seeing this node for the first - // time, it is no longer a leaf. - leaves.Remove(parentID) - } else { - // If I have already traversed this branch, stop. - break - } - } + // Add the votes for this block to the parent's set of responces + numVotes := votes.Count(vote) + kahn, previouslySeen := kahns[parentIDKey] + kahn.votes.AddCount(vote, numVotes) + kahns[parentIDKey] = kahn + + // If the parent block already had registered votes, then there is no + // need to iterate into the parents + if previouslySeen { + continue + } + + // If I've never seen this parent block before, it is currently a leaf. 
+ leaves.Add(parentID) + + // iterate through all the block's ancestors and set up the inDegrees of + // the blocks + for n := ts.blocks[parentIDKey]; !n.Accepted(); n = ts.blocks[parentIDKey] { + parent := n.blk.Parent() + parentID := parent.ID() + parentIDKey = parentID.Key() // move the loop variable forward + + // Increase the inDegree by one + kahn := kahns[parentIDKey] + kahn.inDegree++ + kahns[parentIDKey] = kahn + + // If we have already seen this block, then we shouldn't increase + // the inDegree of the ancestors through this block again. + if kahn.inDegree != 1 { + break } + + // If I am transitively seeing this block for the first time, either + // the block was previously unknown or it was previously a leaf. + // Regardless, it shouldn't be tracked as a leaf. + leaves.Remove(parentID) } } return kahns, leaves.List() } -// convert the tree into a branch of snowball instances with an alpha threshold +// convert the tree into a branch of snowball instances with at least alpha +// votes func (ts *Topological) pushVotes( kahnNodes map[[32]byte]kahnNode, leaves []ids.ID) []votes { voteStack := []votes(nil) for len(leaves) > 0 { + // pop a leaf off the stack newLeavesSize := len(leaves) - 1 - leaf := leaves[newLeavesSize] + leafID := leaves[newLeavesSize] leaves = leaves[:newLeavesSize] - leafKey := leaf.Key() - kahn := kahnNodes[leafKey] + // get the block and sort infomation about the block + leafIDKey := leafID.Key() + kahnNode := kahnNodes[leafIDKey] + block := ts.blocks[leafIDKey] - if node, shouldVote := ts.nodes[leafKey]; shouldVote { - if kahn.votes.Len() >= ts.params.Alpha { - voteStack = append(voteStack, votes{ - id: leaf, - votes: kahn.votes, - }) - } + // If there are at least Alpha votes, then this block needs to record + // the poll on the snowball instance + if kahnNode.votes.Len() >= ts.params.Alpha { + voteStack = append(voteStack, votes{ + parentID: leafID, + votes: kahnNode.votes, + }) + } - if node.blk == nil || 
node.blk.Status().Decided() { - continue // Stop traversing once we pass into the decided frontier - } + // If the block is accepted, then we don't need to push votes to the + // parent block + if block.Accepted() { + continue + } - parentID := node.blk.Parent().ID() - parentKey := parentID.Key() - if depNode, notPruned := kahnNodes[parentKey]; notPruned { - // Remove one of the in-bound edges - depNode.inDegree-- - // Push the votes to my parent - depNode.votes.AddCount(leaf, kahn.votes.Len()) - kahnNodes[parentKey] = depNode + parent := block.blk.Parent() + parentID := parent.ID() + parentIDKey := parentID.Key() - if depNode.inDegree == 0 { - // Once I have no in-bound edges, I'm a leaf - leaves = append(leaves, parentID) - } - } + // Remove an inbound edge from the parent kahn node and push the votes. + parentKahnNode := kahnNodes[parentIDKey] + parentKahnNode.inDegree-- + parentKahnNode.votes.AddCount(leafID, kahnNode.votes.Len()) + kahnNodes[parentIDKey] = parentKahnNode + + // If the inDegree is zero, then the parent node is now a leaf + if parentKahnNode.inDegree == 0 { + leaves = append(leaves, parentID) } } return voteStack } +// apply votes to the branch that received an Alpha threshold func (ts *Topological) vote(voteStack []votes) ids.ID { + // If the voteStack is empty, then the full tree should falter. This won't + // change the preferred branch. 
if len(voteStack) == 0 { + ts.ctx.Log.Verbo("No progress was made after a vote with %d pending blocks", len(ts.blocks)-1) + headKey := ts.head.Key() - headNode := ts.nodes[headKey] - headNode.shouldFalter = true - - ts.ctx.Log.Verbo("No progress was made on this vote even though we have %d nodes", len(ts.nodes)) - - ts.nodes[headKey] = headNode + headBlock := ts.blocks[headKey] + headBlock.shouldFalter = true return ts.tail } - onTail := true - tail := ts.head + // keep track of the new preferred block + newPreferred := ts.head + onPreferredBranch := true for len(voteStack) > 0 { + // pop a vote off the stack newStackSize := len(voteStack) - 1 - voteGroup := voteStack[newStackSize] + vote := voteStack[newStackSize] voteStack = voteStack[:newStackSize] - voteParentKey := voteGroup.id.Key() - parentNode, stillExists := ts.nodes[voteParentKey] - if !stillExists { + // get the block that we are going to vote on + voteParentIDKey := vote.parentID.Key() + parentBlock, notRejected := ts.blocks[voteParentIDKey] + + // if the block block we are going to vote on was already rejected, then + // we should stop applying the votes + if !notRejected { break } - shouldTransFalter := parentNode.shouldFalter - if parentNode.shouldFalter { - parentNode.sb.RecordUnsuccessfulPoll() - parentNode.shouldFalter = false - ts.ctx.Log.Verbo("Reset confidence on %s", parentNode.blkID) + // keep track of transitive falters to propagate to this block's + // children + shouldTransitivelyFalter := parentBlock.shouldFalter + + // if the block was previously marked as needing to falter, the block + // should falter before applying the vote + if shouldTransitivelyFalter { + ts.ctx.Log.Verbo("Resetting confidence below %s", vote.parentID) + + parentBlock.sb.RecordUnsuccessfulPoll() + parentBlock.shouldFalter = false } - parentNode.sb.RecordPoll(voteGroup.votes) + + // apply the votes for this snowball instance + parentBlock.sb.RecordPoll(vote.votes) // Only accept when you are finalized and the head. 
- if parentNode.sb.Finalized() && ts.head.Equals(voteGroup.id) { - ts.accept(parentNode) - tail = parentNode.sb.Preference() - delete(ts.nodes, voteParentKey) - ts.numProcessing.Dec() - } else { - ts.nodes[voteParentKey] = parentNode + if parentBlock.sb.Finalized() && ts.head.Equals(vote.parentID) { + ts.accept(parentBlock) + + // by accepting the child of parentBlock, the last accepted block is + // no longer voteParentID, but its child. So, voteParentID can be + // removed from the tree. + delete(ts.blocks, voteParentIDKey) } - // If this is the last id that got votes, default to the empty id. This - // will cause all my children to be reset below. + // If we are on the preferred branch, then the parent's preference is + // the next block on the preferred branch. + parentPreference := parentBlock.sb.Preference() + if onPreferredBranch { + newPreferred = parentPreference + } + + // Get the ID of the child that is having a RecordPoll called. All other + // children will need to have their confidence reset. If there isn't a + // child having RecordPoll called, then the nextID will default to the + // nil ID. nextID := ids.ID{} if len(voteStack) > 0 { - nextID = voteStack[newStackSize-1].id + nextID = voteStack[newStackSize-1].parentID } - onTail = onTail && nextID.Equals(parentNode.sb.Preference()) - if onTail { - tail = nextID - } + // If we are on the preferred branch and the nextID is the preference of + // the snowball instance, then we are following the preferred branch. + onPreferredBranch = onPreferredBranch && nextID.Equals(parentPreference) // If there wasn't an alpha threshold on the branch (either on this vote // or a past transitive vote), I should falter now. - for childIDBytes := range parentNode.children { - if childID := ids.NewID(childIDBytes); shouldTransFalter || !childID.Equals(nextID) { - if childNode, childExists := ts.nodes[childIDBytes]; childExists { - // The existence check is needed in case the current node - // was finalized. 
However, in this case, we still need to - // check for the next id. - ts.ctx.Log.Verbo("Defering confidence reset on %s with %d children. NextID: %s", childID, len(parentNode.children), nextID) - childNode.shouldFalter = true - ts.nodes[childIDBytes] = childNode - } + for childIDKey := range parentBlock.children { + childID := ids.NewID(childIDKey) + // If we don't need to transitively falter and the child is going to + // have RecordPoll called on it, then there is no reason to reset + // the block's confidence + if !shouldTransitivelyFalter && childID.Equals(nextID) { + continue + } + + // If we finalized a child of the current block, then all other + // children will have been rejected and removed from the tree. + // Therefore, we need to make sure the child is still in the tree. + childBlock, notRejected := ts.blocks[childIDKey] + if notRejected { + ts.ctx.Log.Verbo("Defering confidence reset of %s. Voting for %s", childID, nextID) + + // If the child is ever voted for positively, the confidence + // must be reset first. + childBlock.shouldFalter = true } } } - return tail + return newPreferred } -func (ts *Topological) accept(n node) { - // Accept the preference, reject all transitive rejections +// Get the preferred decendent of the provided block ID +func (ts *Topological) getPreferredDecendent(blkID ids.ID) ids.ID { + // Traverse from the provided ID to the preferred child until there are no + // children. + for block := ts.blocks[blkID.Key()]; block.sb != nil; block = ts.blocks[blkID.Key()] { + blkID = block.sb.Preference() + } + return blkID +} + +// accept the preferred child of the provided snowman block. By accepting the +// preferred child, all other children will be rejected. When these children are +// rejected, all their decendants will be rejected. 
+func (ts *Topological) accept(n *snowmanBlock) { + // We are finalizing the block's child, so we need to get the preference pref := n.sb.Preference() - rejects := []ids.ID(nil) - for childIDBytes := range n.children { - if childID := ids.NewID(childIDBytes); !childID.Equals(pref) { - child := n.children[childIDBytes] - child.Reject() + ts.ctx.Log.Verbo("Accepting block with ID %s", pref) - bytes := child.Bytes() - ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, childID, bytes) - ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, childID, bytes) - - ts.numRejected.Inc() - rejects = append(rejects, childID) - } - } - ts.rejectTransitively(rejects...) - - ts.head = pref + // Get the child and accept it child := n.children[pref.Key()] - ts.ctx.Log.Verbo("Accepting block with ID %s", child.ID()) - - bytes := child.Bytes() - ts.ctx.DecisionDispatcher.Accept(ts.ctx.ChainID, child.ID(), bytes) - ts.ctx.ConsensusDispatcher.Accept(ts.ctx.ChainID, child.ID(), bytes) - child.Accept() - ts.numAccepted.Inc() + + // Notify anyone listening that this block was accepted. + bytes := child.Bytes() + ts.ctx.DecisionDispatcher.Accept(ts.ctx.ChainID, pref, bytes) + ts.ctx.ConsensusDispatcher.Accept(ts.ctx.ChainID, pref, bytes) + ts.metrics.Accepted(pref) + + // Because this is the newest accepted block, this is the new head. + ts.head = pref + + // Because ts.blocks contains the last accepted block, we don't delete the + // block from the blocks map here. + + rejects := []ids.ID(nil) + for childIDKey, child := range n.children { + childID := ids.NewID(childIDKey) + if childID.Equals(pref) { + // don't reject the block we just accepted + continue + } + + child.Reject() + + // Notify anyone listening that this block was rejected. 
+ bytes := child.Bytes() + ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + ts.metrics.Rejected(childID) + + // Track which blocks have been directly rejected + rejects = append(rejects, childID) + } + + // reject all the decendants of the blocks we just rejected + ts.rejectTransitively(rejects) } -// Takes in a list of newly rejected ids and rejects everything that depends on -// them -func (ts *Topological) rejectTransitively(rejected ...ids.ID) { +// Takes in a list of rejected ids and rejects all decendants of these IDs +func (ts *Topological) rejectTransitively(rejected []ids.ID) { + // the rejected array is treated as a queue, with the next element at index + // 0 and the last element at the end of the slice. for len(rejected) > 0 { + // pop the rejected ID off the queue newRejectedSize := len(rejected) - 1 - rejectID := rejected[newRejectedSize] + rejectedID := rejected[newRejectedSize] rejected = rejected[:newRejectedSize] - rejectKey := rejectID.Key() - rejectNode := ts.nodes[rejectKey] - delete(ts.nodes, rejectKey) - ts.numProcessing.Dec() + // get the rejected node, and remove it from the tree + rejectedKey := rejectedID.Key() + rejectedNode := ts.blocks[rejectedKey] + delete(ts.blocks, rejectedKey) - for childIDBytes, child := range rejectNode.children { - childID := ids.NewID(childIDBytes) - rejected = append(rejected, childID) + for childIDKey, child := range rejectedNode.children { child.Reject() + // Notify anyone listening that this block was rejected. 
+ childID := ids.NewID(childIDKey) bytes := child.Bytes() ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, childID, bytes) ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, childID, bytes) + ts.metrics.Rejected(childID) - ts.numRejected.Inc() + // add the newly rejected block to the end of the queue + rejected = append(rejected, childID) } } } - -func (n *node) Add(child Block) { - childID := child.ID() - if n.sb == nil { - n.sb = &snowball.Tree{} - n.sb.Initialize(n.ts.params, childID) - } else { - n.sb.Add(childID) - } - if n.children == nil { - n.children = make(map[[32]byte]Block) - } - n.children[childID.Key()] = child -} diff --git a/snow/consensus/snowman/topological_test.go b/snow/consensus/snowman/topological_test.go index a99f3b7..4a7e6c4 100644 --- a/snow/consensus/snowman/topological_test.go +++ b/snow/consensus/snowman/topological_test.go @@ -7,26 +7,4 @@ import ( "testing" ) -func TestTopologicalParams(t *testing.T) { ParamsTest(t, TopologicalFactory{}) } - -func TestTopologicalAdd(t *testing.T) { AddTest(t, TopologicalFactory{}) } - -func TestTopologicalCollect(t *testing.T) { CollectTest(t, TopologicalFactory{}) } - -func TestTopologicalCollectNothing(t *testing.T) { CollectNothingTest(t, TopologicalFactory{}) } - -func TestTopologicalCollectTransReject(t *testing.T) { CollectTransRejectTest(t, TopologicalFactory{}) } - -func TestTopologicalCollectTransResetTest(t *testing.T) { - CollectTransResetTest(t, TopologicalFactory{}) -} - -func TestTopologicalCollectTransVote(t *testing.T) { CollectTransVoteTest(t, TopologicalFactory{}) } - -func TestTopologicalDivergedVoting(t *testing.T) { DivergedVotingTest(t, TopologicalFactory{}) } - -func TestTopologicalIssuedTest(t *testing.T) { IssuedTest(t, TopologicalFactory{}) } - -func TestTopologicalMetricsError(t *testing.T) { MetricsErrorTest(t, TopologicalFactory{}) } - -func TestTopologicalConsistent(t *testing.T) { ConsistentTest(t, TopologicalFactory{}) } +func TestTopological(t *testing.T) { 
ConsensusTest(t, TopologicalFactory{}) } diff --git a/snow/consensus/snowstorm/directed.go b/snow/consensus/snowstorm/directed.go index 375b9b3..570bfce 100644 --- a/snow/consensus/snowstorm/directed.go +++ b/snow/consensus/snowstorm/directed.go @@ -9,8 +9,6 @@ import ( "sort" "strings" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/consensus/snowball" @@ -27,12 +25,11 @@ func (DirectedFactory) New() Consensus { return &Directed{} } // Directed is an implementation of a multi-color, non-transitive, snowball // instance type Directed struct { + metrics + ctx *snow.Context params snowball.Parameters - numProcessingVirtuous, numProcessingRogue prometheus.Gauge - numAccepted, numRejected prometheus.Counter - // Each element of preferences is the ID of a transaction that is preferred. // That is, each transaction has no out edges preferences ids.Set @@ -75,42 +72,8 @@ func (dg *Directed) Initialize(ctx *snow.Context, params snowball.Parameters) { dg.ctx = ctx dg.params = params - dg.numProcessingVirtuous = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: params.Namespace, - Name: "tx_processing_virtuous", - Help: "Number of processing virtuous transactions", - }) - dg.numProcessingRogue = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: params.Namespace, - Name: "tx_processing_rogue", - Help: "Number of processing rogue transactions", - }) - dg.numAccepted = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "tx_accepted", - Help: "Number of transactions accepted", - }) - dg.numRejected = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "tx_rejected", - Help: "Number of transactions rejected", - }) - - if err := dg.params.Metrics.Register(dg.numProcessingVirtuous); err != nil { - dg.ctx.Log.Error("Failed to register tx_processing_virtuous statistics due to %s", err) - } - if 
err := dg.params.Metrics.Register(dg.numProcessingRogue); err != nil { - dg.ctx.Log.Error("Failed to register tx_processing_rogue statistics due to %s", err) - } - if err := dg.params.Metrics.Register(dg.numAccepted); err != nil { - dg.ctx.Log.Error("Failed to register tx_accepted statistics due to %s", err) - } - if err := dg.params.Metrics.Register(dg.numRejected); err != nil { - dg.ctx.Log.Error("Failed to register tx_rejected statistics due to %s", err) + if err := dg.metrics.Initialize(ctx.Log, params.Namespace, params.Metrics); err != nil { + dg.ctx.Log.Error("%s", err) } dg.spends = make(map[[32]byte]ids.Set) @@ -169,11 +132,11 @@ func (dg *Directed) Add(tx Tx) { if inputs.Len() == 0 { tx.Accept() dg.ctx.DecisionDispatcher.Accept(dg.ctx.ChainID, txID, bytes) - dg.numAccepted.Inc() + dg.metrics.Issued(txID) + dg.metrics.Accepted(txID) return } - id := tx.ID() fn := &flatNode{tx: tx} // Note: Below, for readability, we sometimes say "transaction" when we actually mean @@ -194,38 +157,31 @@ func (dg *Directed) Add(tx Tx) { conflictKey := conflictID.Key() conflict := dg.nodes[conflictKey] - if !conflict.rogue { - dg.numProcessingVirtuous.Dec() - dg.numProcessingRogue.Inc() - } - dg.virtuous.Remove(conflictID) dg.virtuousVoting.Remove(conflictID) conflict.rogue = true - conflict.ins.Add(id) + conflict.ins.Add(txID) dg.nodes[conflictKey] = conflict } // Add Tx to list of transactions consuming UTXO whose ID is id - spends.Add(id) + spends.Add(txID) dg.spends[inputKey] = spends } fn.rogue = fn.outs.Len() != 0 // Mark this transaction as rogue if it has conflicts // Add the node representing Tx to the node set - dg.nodes[id.Key()] = fn + dg.nodes[txID.Key()] = fn if !fn.rogue { // I'm not rogue - dg.virtuous.Add(id) - dg.virtuousVoting.Add(id) + dg.virtuous.Add(txID) + dg.virtuousVoting.Add(txID) // If I'm not rogue, I must be preferred - dg.preferences.Add(id) - dg.numProcessingVirtuous.Inc() - } else { - dg.numProcessingRogue.Inc() + dg.preferences.Add(txID) } + 
dg.metrics.Issued(txID) // Tx can be accepted only if the transactions it depends on are also accepted // If any transactions that Tx depends on are rejected, reject Tx @@ -361,12 +317,6 @@ func (dg *Directed) reject(ids ...ids.ID) { conf := dg.nodes[conflictKey] delete(dg.nodes, conflictKey) - if conf.rogue { - dg.numProcessingRogue.Dec() - } else { - dg.numProcessingVirtuous.Dec() - } - dg.preferences.Remove(conflict) // remove the edge between this node and all its neighbors @@ -376,7 +326,8 @@ func (dg *Directed) reject(ids ...ids.ID) { // Mark it as rejected conf.tx.Reject() dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conf.tx.ID(), conf.tx.Bytes()) - dg.numRejected.Inc() + dg.metrics.Rejected(conflict) + dg.pendingAccept.Abandon(conflict) dg.pendingReject.Fulfill(conflict) } @@ -466,13 +417,7 @@ func (a *directedAccepter) Update() { a.fn.accepted = true a.fn.tx.Accept() a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, id, a.fn.tx.Bytes()) - a.dg.numAccepted.Inc() - - if a.fn.rogue { - a.dg.numProcessingRogue.Dec() - } else { - a.dg.numProcessingVirtuous.Dec() - } + a.dg.metrics.Accepted(id) a.dg.pendingAccept.Fulfill(id) a.dg.pendingReject.Abandon(id) diff --git a/snow/consensus/snowstorm/input.go b/snow/consensus/snowstorm/input.go index ec9f767..7646562 100644 --- a/snow/consensus/snowstorm/input.go +++ b/snow/consensus/snowstorm/input.go @@ -9,8 +9,6 @@ import ( "sort" "strings" - "github.com/prometheus/client_golang/prometheus" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/consensus/snowball" @@ -27,12 +25,11 @@ func (InputFactory) New() Consensus { return &Input{} } // Input is an implementation of a multi-color, non-transitive, snowball // instance type Input struct { + metrics + ctx *snow.Context params snowball.Parameters - numProcessing prometheus.Gauge - numAccepted, numRejected prometheus.Counter - // preferences is the set of consumerIDs that have only in edges // virtuous is the set of 
consumerIDs that have no edges preferences, virtuous, virtuousVoting ids.Set @@ -70,35 +67,8 @@ func (ig *Input) Initialize(ctx *snow.Context, params snowball.Parameters) { ig.ctx = ctx ig.params = params - namespace := fmt.Sprintf("gecko_%s", ig.ctx.ChainID) - - ig.numProcessing = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "tx_processing", - Help: "Number of processing transactions", - }) - ig.numAccepted = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_accepted", - Help: "Number of transactions accepted", - }) - ig.numRejected = prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: namespace, - Name: "tx_rejected", - Help: "Number of transactions rejected", - }) - - if err := ig.params.Metrics.Register(ig.numProcessing); err != nil { - ig.ctx.Log.Error("Failed to register tx_processing statistics due to %s", err) - } - if err := ig.params.Metrics.Register(ig.numAccepted); err != nil { - ig.ctx.Log.Error("Failed to register tx_accepted statistics due to %s", err) - } - if err := ig.params.Metrics.Register(ig.numRejected); err != nil { - ig.ctx.Log.Error("Failed to register tx_rejected statistics due to %s", err) + if err := ig.metrics.Initialize(ctx.Log, params.Namespace, params.Metrics); err != nil { + ig.ctx.Log.Error("%s", err) } ig.txs = make(map[[32]byte]txNode) @@ -136,11 +106,11 @@ func (ig *Input) Add(tx Tx) { if inputs.Len() == 0 { tx.Accept() ig.ctx.DecisionDispatcher.Accept(ig.ctx.ChainID, txID, bytes) - ig.numAccepted.Inc() + ig.metrics.Issued(txID) + ig.metrics.Accepted(txID) return } - id := tx.ID() cn := txNode{tx: tx} virtuous := true // If there are inputs, they must be voted on @@ -154,26 +124,25 @@ func (ig *Input) Add(tx Tx) { ig.virtuousVoting.Remove(conflictID) } } else { - input.preference = id // If there isn't a conflict, I'm preferred + input.preference = txID // If there isn't a conflict, I'm preferred } - input.conflicts.Add(id) + input.conflicts.Add(txID) 
ig.inputs[consumptionKey] = input virtuous = virtuous && !exists } // Add the node to the set - ig.txs[id.Key()] = cn + ig.txs[txID.Key()] = cn if virtuous { // If I'm preferred in all my conflict sets, I'm preferred. // Because the preference graph is a DAG, there will always be at least // one preferred consumer, if there is a consumer - ig.preferences.Add(id) - ig.virtuous.Add(id) - ig.virtuousVoting.Add(id) + ig.preferences.Add(txID) + ig.virtuous.Add(txID) + ig.virtuousVoting.Add(txID) } - - ig.numProcessing.Inc() + ig.metrics.Issued(txID) toReject := &inputRejector{ ig: ig, @@ -321,7 +290,6 @@ func (ig *Input) reject(ids ...ids.ID) { conflictKey := conflict.Key() cn := ig.txs[conflictKey] delete(ig.txs, conflictKey) - ig.numProcessing.Dec() ig.preferences.Remove(conflict) // A rejected value isn't preferred // Remove from all conflict sets @@ -330,7 +298,7 @@ func (ig *Input) reject(ids ...ids.ID) { // Mark it as rejected cn.tx.Reject() ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, cn.tx.ID(), cn.tx.Bytes()) - ig.numRejected.Inc() + ig.metrics.Rejected(conflict) ig.pendingAccept.Abandon(conflict) ig.pendingReject.Fulfill(conflict) } @@ -517,8 +485,7 @@ func (a *inputAccepter) Update() { // Mark it as accepted a.tn.tx.Accept() a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, id, a.tn.tx.Bytes()) - a.ig.numAccepted.Inc() - a.ig.numProcessing.Dec() + a.ig.metrics.Accepted(id) a.ig.pendingAccept.Fulfill(id) a.ig.pendingReject.Abandon(id) diff --git a/snow/consensus/snowstorm/metrics.go b/snow/consensus/snowstorm/metrics.go new file mode 100644 index 0000000..7591637 --- /dev/null +++ b/snow/consensus/snowstorm/metrics.go @@ -0,0 +1,87 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package snowstorm + +import ( + "fmt" + "time" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" +) + +type metrics struct { + numProcessing prometheus.Gauge + latAccepted, latRejected prometheus.Histogram + + clock timer.Clock + processing map[[32]byte]time.Time +} + +// Initialize implements the Engine interface +func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) error { + m.processing = make(map[[32]byte]time.Time) + + m.numProcessing = prometheus.NewGauge( + prometheus.GaugeOpts{ + Namespace: namespace, + Name: "tx_processing", + Help: "Number of processing transactions", + }) + m.latAccepted = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespace, + Name: "tx_accepted", + Help: "Latency of accepting from the time the transaction was issued in milliseconds", + Buckets: timer.Buckets, + }) + m.latRejected = prometheus.NewHistogram( + prometheus.HistogramOpts{ + Namespace: namespace, + Name: "tx_rejected", + Help: "Latency of rejecting from the time the transaction was issued in milliseconds", + Buckets: timer.Buckets, + }) + + if err := registerer.Register(m.numProcessing); err != nil { + return fmt.Errorf("Failed to register tx_processing statistics due to %s", err) + } + if err := registerer.Register(m.latAccepted); err != nil { + return fmt.Errorf("Failed to register tx_accepted statistics due to %s", err) + } + if err := registerer.Register(m.latRejected); err != nil { + return fmt.Errorf("Failed to register tx_rejected statistics due to %s", err) + } + return nil +} + +func (m *metrics) Issued(id ids.ID) { + m.processing[id.Key()] = m.clock.Time() + m.numProcessing.Inc() +} + +func (m *metrics) Accepted(id ids.ID) { + key := id.Key() + start := m.processing[key] + end := m.clock.Time() + + delete(m.processing, key) + + 
m.latAccepted.Observe(float64(end.Sub(start).Milliseconds())) + m.numProcessing.Dec() +} + +func (m *metrics) Rejected(id ids.ID) { + key := id.Key() + start := m.processing[key] + end := m.clock.Time() + + delete(m.processing, key) + + m.latRejected.Observe(float64(end.Sub(start).Milliseconds())) + m.numProcessing.Dec() +} diff --git a/snow/consensus/snowstorm/test_tx.go b/snow/consensus/snowstorm/test_tx.go index d5fec1b..12c2f73 100644 --- a/snow/consensus/snowstorm/test_tx.go +++ b/snow/consensus/snowstorm/test_tx.go @@ -14,6 +14,7 @@ type TestTx struct { Deps []Tx Ins ids.Set Stat choices.Status + Validity error Bits []byte } @@ -39,7 +40,7 @@ func (tx *TestTx) Reject() { tx.Stat = choices.Rejected } func (tx *TestTx) Reset() { tx.Stat = choices.Processing } // Verify returns nil -func (tx *TestTx) Verify() error { return nil } +func (tx *TestTx) Verify() error { return tx.Validity } // Bytes returns the bits func (tx *TestTx) Bytes() []byte { return tx.Bits } diff --git a/snow/context.go b/snow/context.go index ce213c1..f359553 100644 --- a/snow/context.go +++ b/snow/context.go @@ -24,6 +24,12 @@ type Keystore interface { GetDatabase(username, password string) (database.Database, error) } +// SharedMemory ... +type SharedMemory interface { + GetDatabase(id ids.ID) database.Database + ReleaseDatabase(id ids.ID) +} + // AliasLookup ... 
type AliasLookup interface { Lookup(alias string) (ids.ID, error) @@ -44,6 +50,7 @@ type Context struct { Lock sync.RWMutex HTTP Callable Keystore Keystore + SharedMemory SharedMemory BCLookup AliasLookup } diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 7d3d7c8..0f58194 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -119,7 +119,7 @@ func (b *bootstrapper) fetch(vtxID ids.ID) { b.sendRequest(vtxID) return } - b.addVertex(vtx) + b.storeVertex(vtx) } func (b *bootstrapper) sendRequest(vtxID ids.ID) { @@ -138,6 +138,14 @@ func (b *bootstrapper) sendRequest(vtxID ids.ID) { } func (b *bootstrapper) addVertex(vtx avalanche.Vertex) { + b.storeVertex(vtx) + + if numPending := b.pending.Len(); numPending == 0 { + b.finish() + } +} + +func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) { vts := []avalanche.Vertex{vtx} for len(vts) > 0 { @@ -181,9 +189,6 @@ func (b *bootstrapper) addVertex(vtx avalanche.Vertex) { numPending := b.pending.Len() b.numPendingRequests.Set(float64(numPending)) - if numPending == 0 { - b.finish() - } } func (b *bootstrapper) finish() { diff --git a/snow/engine/avalanche/bootstrapper_test.go b/snow/engine/avalanche/bootstrapper_test.go index d1be936..cc63b68 100644 --- a/snow/engine/avalanche/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrapper_test.go @@ -69,7 +69,7 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest, Context: ctx, Validators: peers, Beacons: peers, - Alpha: peers.Len()/2 + 1, + Alpha: uint64(peers.Len()/2 + 1), Sender: sender, } return BootstrapConfig{ @@ -957,3 +957,53 @@ func TestBootstrapperFilterAccepted(t *testing.T) { t.Fatalf("Vtx shouldn't be accepted") } } + +func TestBootstrapperPartialFetch(t *testing.T) { + config, _, sender, state, _ := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + + vtxBytes0 := []byte{0} + + vtx0 := &Vtx{ + id: vtxID0, + 
height: 0, + status: choices.Processing, + bytes: vtxBytes0, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID0, + vtxID1, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + sender.CantGet = false + + bs.ForceAccepted(acceptedIDs) + + if bs.finished { + t.Fatalf("should have requested a vertex") + } + + if bs.pending.Len() != 1 { + t.Fatalf("wrong number pending") + } +} diff --git a/snow/engine/avalanche/config_test.go b/snow/engine/avalanche/config_test.go index 4906559..b11d186 100644 --- a/snow/engine/avalanche/config_test.go +++ b/snow/engine/avalanche/config_test.go @@ -26,11 +26,12 @@ func DefaultConfig() Config { }, Params: avalanche.Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, diff --git a/snow/engine/avalanche/issuer.go b/snow/engine/avalanche/issuer.go index befe973..4be29b3 100644 --- a/snow/engine/avalanche/issuer.go +++ b/snow/engine/avalanche/issuer.go @@ -6,6 +6,7 @@ package avalanche import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/consensus/avalanche" + "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) type issuer struct { @@ -44,14 +45,24 @@ func (i *issuer) Update() { vtxID := i.vtx.ID() i.t.pending.Remove(vtxID) - for _, tx := range i.vtx.Txs() { + txs := i.vtx.Txs() + validTxs := []snowstorm.Tx{} + for _, tx := range txs { if err := tx.Verify(); err != nil { - 
i.t.Config.Context.Log.Debug("Transaction failed verification due to %s, dropping vertex", err) - i.t.vtxBlocked.Abandon(vtxID) - return + i.t.Config.Context.Log.Debug("Transaction %s failed verification due to %s", tx.ID(), err) + } else { + validTxs = append(validTxs, tx) } } + if len(validTxs) != len(txs) { + i.t.Config.Context.Log.Debug("Abandoning %s due to failed transaction verification", vtxID) + + i.t.batch(validTxs, false /*=force*/, false /*=empty*/) + i.t.vtxBlocked.Abandon(vtxID) + return + } + i.t.Config.Context.Log.Verbo("Adding vertex to consensus:\n%s", i.vtx) i.t.Consensus.Add(i.vtx) @@ -65,8 +76,10 @@ func (i *issuer) Update() { } i.t.RequestID++ + polled := false if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) { i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes()) + polled = true } else if numVdrs < p.K { i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID) } @@ -75,6 +88,10 @@ func (i *issuer) Update() { for _, tx := range i.vtx.Txs() { i.t.txBlocked.Fulfill(tx.ID()) } + + if polled && len(i.t.polls.m) < i.t.Params.ConcurrentRepolls { + i.t.repoll() + } } type vtxIssuer struct{ i *issuer } diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 4d6617f..4de2aa5 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -309,7 +309,7 @@ func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) { } // Force allows for a conflict to be issued - if txID := tx.ID(); !overlaps && !issuedTxs.Contains(txID) && (force || (t.Consensus.IsVirtuous(tx))) && !tx.Status().Decided() { + if txID := tx.ID(); !overlaps && !issuedTxs.Contains(txID) && (force || t.Consensus.IsVirtuous(tx)) && !tx.Status().Decided() { batch = append(batch, tx) issuedTxs.Add(txID) consumed.Union(inputs) diff --git a/snow/engine/avalanche/transitive_test.go 
b/snow/engine/avalanche/transitive_test.go index 6f5b5ed..defd2df 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -24,6 +24,22 @@ var ( errMissing = errors.New("missing") ) +func TestEngineShutdown(t *testing.T) { + config := DefaultConfig() + vmShutdownCalled := false + vm := &VMTest{} + vm.ShutdownF = func() { vmShutdownCalled = true } + config.VM = vm + + transitive := &Transitive{} + + transitive.Initialize(config) + transitive.finishBootstrapping() + transitive.Shutdown() + if !vmShutdownCalled { + t.Fatal("Shutting down the Transitive did not shutdown the VM") + } +} func TestEngineAdd(t *testing.T) { config := DefaultConfig() @@ -315,6 +331,18 @@ func TestEngineQuery(t *testing.T) { if !bytes.Equal(b, vtx1.Bytes()) { t.Fatalf("Wrong bytes") } + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + if vtxID.Equals(vtx0.ID()) { + return &Vtx{status: choices.Processing}, nil + } + if vtxID.Equals(vtx1.ID()) { + return vtx1, nil + } + t.Fatalf("Wrong vertex requested") + panic("Should have failed") + } + return vtx1, nil } te.Put(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes()) @@ -340,11 +368,12 @@ func TestEngineMultipleQuery(t *testing.T) { config.Params = avalanche.Parameters{ Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Parents: 2, BatchSize: 1, @@ -2363,3 +2392,175 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { sender.PushQueryF = nil st.getVertex = nil } + +func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts 
:= []avalanche.Vertex{gVtx} + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + Validity: errors.New(""), + }, + } + tx1.Ins.Add(utxos[1]) + + vtx0 := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + vtx1 := &Vtx{ + parents: []avalanche.Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 2, + status: choices.Processing, + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + sender := &common.SenderTest{} + sender.T = t + te.Config.Sender = sender + + reqID := new(uint32) + sender.PushQueryF = func(_ ids.ShortSet, requestID uint32, _ ids.ID, _ []byte) { + *reqID = requestID + } + + te.insert(vtx0) + + sender.PushQueryF = func(ids.ShortSet, uint32, ids.ID, []byte) { + t.Fatalf("should have failed verification") + } + + te.insert(vtx1) + + st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtx0.ID()): + return vtx0, nil + case vtxID.Equals(vtx1.ID()): + return vtx1, nil + } + return nil, errors.New("Unknown vtx") + } + + votes := ids.Set{} + votes.Add(vtx1.ID()) + te.Chits(vdr.ID(), *reqID, votes) + + if status := vtx0.Status(); status != choices.Accepted { + t.Fatalf("should have accepted the vertex due to transitive voting") + } +} + +func TestEnginePartiallyValidVertex(t *testing.T) { + config := DefaultConfig() + + vdr := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr) + + st := &stateTest{t: t} + config.State = st + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx} + utxos := []ids.ID{GenerateID(), GenerateID()} + + tx0 := &TestTx{ + TestTx: 
snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + }, + } + tx0.Ins.Add(utxos[0]) + + tx1 := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + Validity: errors.New(""), + }, + } + tx1.Ins.Add(utxos[1]) + + vtx := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0, tx1}, + height: 1, + status: choices.Processing, + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + expectedVtxID := GenerateID() + st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) { + consumers := []snowstorm.Tx{} + for _, tx := range txs { + consumers = append(consumers, tx) + } + return &Vtx{ + parents: vts, + id: expectedVtxID, + txs: consumers, + status: choices.Processing, + bytes: []byte{1}, + }, nil + } + + sender := &common.SenderTest{} + sender.T = t + te.Config.Sender = sender + + sender.PushQueryF = func(_ ids.ShortSet, _ uint32, vtxID ids.ID, _ []byte) { + if !expectedVtxID.Equals(vtxID) { + t.Fatalf("wrong vertex queried") + } + } + + te.insert(vtx) +} diff --git a/snow/engine/avalanche/tx_job.go b/snow/engine/avalanche/tx_job.go index 0462bd3..f0ffe70 100644 --- a/snow/engine/avalanche/tx_job.go +++ b/snow/engine/avalanche/tx_job.go @@ -54,12 +54,9 @@ func (t *txJob) Execute() { case choices.Unknown, choices.Rejected: t.numDropped.Inc() case choices.Processing: - if err := t.tx.Verify(); err == nil { - t.tx.Accept() - t.numAccepted.Inc() - } else { - t.numDropped.Inc() - } + t.tx.Verify() + t.tx.Accept() + t.numAccepted.Inc() } } func (t *txJob) Bytes() []byte { return t.tx.Bytes() } diff --git a/snow/engine/avalanche/voter.go b/snow/engine/avalanche/voter.go index 72a1b53..7430495 100644 --- a/snow/engine/avalanche/voter.go +++ b/snow/engine/avalanche/voter.go @@ -5,6 +5,7 @@ package avalanche import ( "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/consensus/avalanche" "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) 
@@ -34,6 +35,7 @@ func (v *voter) Update() { if !finished { return } + results = v.bubbleVotes(results) v.t.Config.Context.Log.Debug("Finishing poll with:\n%s", &results) v.t.Consensus.RecordPoll(results) @@ -58,7 +60,33 @@ func (v *voter) Update() { v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce") - if len(v.t.polls.m) == 0 { + if len(v.t.polls.m) < v.t.Config.Params.ConcurrentRepolls { v.t.repoll() } } + +func (v *voter) bubbleVotes(votes ids.UniqueBag) ids.UniqueBag { + bubbledVotes := ids.UniqueBag{} + for _, vote := range votes.List() { + set := votes.GetSet(vote) + vtx, err := v.t.Config.State.GetVertex(vote) + if err != nil { + continue + } + + vts := []avalanche.Vertex{vtx} + for len(vts) > 0 { + vtx := vts[0] + vts = vts[1:] + + if status := vtx.Status(); status.Fetched() && !v.t.Consensus.VertexIssued(vtx) { + vts = append(vts, vtx.Parents()...) + } else if !status.Decided() && v.t.Consensus.VertexIssued(vtx) { + bubbledVotes.UnionSet(vtx.ID(), set) + } else { + v.t.Config.Context.Log.Debug("Dropping %d vote(s) for %s because the vertex is invalid", set.Len(), vtx.ID()) + } + } + } + return bubbledVotes +} diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index 9eebe0e..cda4a43 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -4,7 +4,10 @@ package common import ( + stdmath "math" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/math" ) // Bootstrapper implements the Engine interface. @@ -15,7 +18,7 @@ type Bootstrapper struct { acceptedFrontier ids.Set pendingAccepted ids.ShortSet - accepted ids.Bag + acceptedVotes map[[32]byte]uint64 RequestID uint32 } @@ -30,7 +33,7 @@ func (b *Bootstrapper) Initialize(config Config) { b.pendingAccepted.Add(vdrID) } - b.accepted.SetThreshold(config.Alpha) + b.acceptedVotes = make(map[[32]byte]uint64) } // Startup implements the Engine interface. 
@@ -95,10 +98,29 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta } b.pendingAccepted.Remove(validatorID) - b.accepted.Add(containerIDs.List()...) + weight := uint64(0) + if vdr, ok := b.Validators.Get(validatorID); ok { + weight = vdr.Weight() + } + + for _, containerID := range containerIDs.List() { + key := containerID.Key() + previousWeight := b.acceptedVotes[key] + newWeight, err := math.Add64(weight, previousWeight) + if err != nil { + newWeight = stdmath.MaxUint64 + } + b.acceptedVotes[key] = newWeight + } if b.pendingAccepted.Len() == 0 { - accepted := b.accepted.Threshold() + accepted := ids.Set{} + for key, weight := range b.acceptedVotes { + if weight >= b.Config.Alpha { + accepted.Add(ids.NewID(key)) + } + } + if size := accepted.Len(); size == 0 && b.Config.Beacons.Len() > 0 { b.Context.Log.Warn("Bootstrapping finished with no accepted frontier. This is likely a result of failing to be able to connect to the specified bootstraps, or no transactions have been issued on this network yet") } else { diff --git a/snow/engine/common/config.go b/snow/engine/common/config.go index e3e6b10..e75a957 100644 --- a/snow/engine/common/config.go +++ b/snow/engine/common/config.go @@ -15,7 +15,7 @@ type Config struct { Validators validators.Set Beacons validators.Set - Alpha int + Alpha uint64 Sender Sender Bootstrapable Bootstrapable } diff --git a/snow/engine/common/http_handler.go b/snow/engine/common/http_handler.go index c6b898d..ca00136 100644 --- a/snow/engine/common/http_handler.go +++ b/snow/engine/common/http_handler.go @@ -8,7 +8,7 @@ import ( ) // LockOption allows the vm to specify their lock option based on their endpoint -type LockOption int +type LockOption uint32 // List of all allowed options const ( diff --git a/snow/engine/common/message.go b/snow/engine/common/message.go index f987902..19f4205 100644 --- a/snow/engine/common/message.go +++ b/snow/engine/common/message.go @@ -10,7 +10,7 @@ import ( // TODO: 
Consider renaming Message to, say, VMMessage // Message is an enum of the message types that vms can send to consensus -type Message int +type Message uint32 const ( // PendingTxs notifies a consensus engine that diff --git a/snow/engine/snowman/block_job.go b/snow/engine/snowman/block_job.go index aab227f..ec5f4a3 100644 --- a/snow/engine/snowman/block_job.go +++ b/snow/engine/snowman/block_job.go @@ -51,12 +51,9 @@ func (b *blockJob) Execute() { case choices.Unknown, choices.Rejected: b.numDropped.Inc() case choices.Processing: - if err := b.blk.Verify(); err == nil { - b.blk.Accept() - b.numAccepted.Inc() - } else { - b.numDropped.Inc() - } + b.blk.Verify() + b.blk.Accept() + b.numAccepted.Inc() } } func (b *blockJob) Bytes() []byte { return b.blk.Bytes() } diff --git a/snow/engine/snowman/bootstrapper.go b/snow/engine/snowman/bootstrapper.go index 88724ed..46ced68 100644 --- a/snow/engine/snowman/bootstrapper.go +++ b/snow/engine/snowman/bootstrapper.go @@ -113,7 +113,7 @@ func (b *bootstrapper) fetch(blkID ids.ID) { b.sendRequest(blkID) return } - b.addBlock(blk) + b.storeBlock(blk) } func (b *bootstrapper) sendRequest(blkID ids.ID) { @@ -132,6 +132,14 @@ func (b *bootstrapper) sendRequest(blkID ids.ID) { } func (b *bootstrapper) addBlock(blk snowman.Block) { + b.storeBlock(blk) + + if numPending := b.pending.Len(); numPending == 0 { + b.finish() + } +} + +func (b *bootstrapper) storeBlock(blk snowman.Block) { status := blk.Status() blkID := blk.ID() for status == choices.Processing { @@ -161,9 +169,6 @@ func (b *bootstrapper) addBlock(blk snowman.Block) { numPending := b.pending.Len() b.numPendingRequests.Set(float64(numPending)) - if numPending == 0 { - b.finish() - } } func (b *bootstrapper) finish() { diff --git a/snow/engine/snowman/bootstrapper_test.go b/snow/engine/snowman/bootstrapper_test.go index 9cb0968..6168df2 100644 --- a/snow/engine/snowman/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrapper_test.go @@ -62,7 +62,7 @@ func newConfig(t 
*testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest, Context: ctx, Validators: peers, Beacons: peers, - Alpha: peers.Len()/2 + 1, + Alpha: uint64(peers.Len()/2 + 1), Sender: sender, } return BootstrapConfig{ @@ -425,3 +425,54 @@ func TestBootstrapperFilterAccepted(t *testing.T) { t.Fatalf("Blk shouldn't be accepted") } } + +func TestBootstrapperPartialFetch(t *testing.T) { + config, _, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + + blkBytes0 := []byte{0} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + blkID0, + blkID1, + ) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID0): + return blk0, nil + case blkID.Equals(blkID1): + return nil, errUnknownBlock + default: + t.Fatal(errUnknownBlock) + panic(errUnknownBlock) + } + } + + sender.CantGet = false + bs.onFinished = func() {} + + bs.ForceAccepted(acceptedIDs) + + if bs.finished { + t.Fatalf("should have requested a block") + } + + if bs.pending.Len() != 1 { + t.Fatalf("wrong number pending") + } +} diff --git a/snow/engine/snowman/config_test.go b/snow/engine/snowman/config_test.go index 1b590b7..6cf7c51 100644 --- a/snow/engine/snowman/config_test.go +++ b/snow/engine/snowman/config_test.go @@ -23,10 +23,11 @@ func DefaultConfig() Config { }, Params: snowball.Parameters{ Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 2, + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, }, Consensus: &snowman.Topological{}, } diff --git a/snow/engine/snowman/engine_test.go b/snow/engine/snowman/engine_test.go index e149970..bc4ed59 100644 --- a/snow/engine/snowman/engine_test.go +++ 
b/snow/engine/snowman/engine_test.go @@ -25,8 +25,9 @@ type Blk struct { parent snowman.Block id ids.ID - height int - status choices.Status + height int + status choices.Status + validity error bytes []byte } @@ -36,7 +37,7 @@ func (b *Blk) Parent() snowman.Block { return b.parent } func (b *Blk) Accept() { b.status = choices.Accepted } func (b *Blk) Reject() { b.status = choices.Rejected } func (b *Blk) Status() choices.Status { return b.status } -func (b *Blk) Verify() error { return nil } +func (b *Blk) Verify() error { return b.validity } func (b *Blk) Bytes() []byte { return b.bytes } type sortBks []*Blk diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index e023a7d..947967a 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -305,7 +305,7 @@ func (t *Transitive) pullSample(blkID ids.ID) { } } -func (t *Transitive) pushSample(blk snowman.Block) { +func (t *Transitive) pushSample(blk snowman.Block) bool { t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators) p := t.Consensus.Parameters() vdrs := t.Config.Validators.Sample(p.K) @@ -315,11 +315,14 @@ func (t *Transitive) pushSample(blk snowman.Block) { } t.RequestID++ + queryIssued := false if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) { t.Config.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes()) + queryIssued = true } else if numVdrs < p.K { t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blk.ID()) } + return queryIssued } func (t *Transitive) deliver(blk snowman.Block) { @@ -338,9 +341,8 @@ func (t *Transitive) deliver(blk snowman.Block) { } t.Config.Context.Log.Verbo("Adding block to consensus: %s", blkID) - t.Consensus.Add(blk) - t.pushSample(blk) + polled := t.pushSample(blk) added := []snowman.Block{} dropped := []snowman.Block{} @@ -373,6 +375,10 @@ func (t *Transitive) deliver(blk snowman.Block) { 
t.blocked.Abandon(blkID) } + if polled && len(t.polls.m) < t.Params.ConcurrentRepolls { + t.repoll() + } + // Tracks performance statistics t.numBlkRequests.Set(float64(t.blkReqs.Len())) t.numBlockedBlk.Set(float64(t.pending.Len())) diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 1920d8c..6000324 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -64,6 +64,17 @@ func setup(t *testing.T) (validators.Validator, validators.Set, *common.SenderTe return vdr, vals, sender, vm, te, gBlk } +func TestEngineShutdown(t *testing.T) { + _, _, _, vm, transitive, _ := setup(t) + vmShutdownCalled := false + vm.ShutdownF = func() { vmShutdownCalled = true } + vm.CantShutdown = false + transitive.Shutdown() + if !vmShutdownCalled { + t.Fatal("Shutting down the Transitive did not shutdown the VM") + } +} + func TestEngineAdd(t *testing.T) { vdr, _, sender, vm, te, _ := setup(t) @@ -280,6 +291,18 @@ func TestEngineQuery(t *testing.T) { if !bytes.Equal(b, blk1.Bytes()) { t.Fatalf("Wrong bytes") } + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blk.ID()): + return blk, nil + case blkID.Equals(blk1.ID()): + return blk1, nil + } + t.Fatalf("Wrong block requested") + panic("Should have failed") + } + return blk1, nil } te.Put(vdr.ID(), *getRequestID, blk1.ID(), blk1.Bytes()) @@ -304,11 +327,12 @@ func TestEngineMultipleQuery(t *testing.T) { config := DefaultConfig() config.Params = snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } vdr0 := validators.GenerateRandomValidator(1) @@ -418,6 +442,17 @@ func TestEngineMultipleQuery(t *testing.T) { te.Chits(vdr1.ID(), *queryRequestID, blkSet) vm.ParseBlockF = func(b []byte) (snowman.Block, error) { + vm.GetBlockF = func(blkID 
ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blk0.ID()): + return blk0, nil + case blkID.Equals(blk1.ID()): + return blk1, nil + } + t.Fatalf("Wrong block requested") + panic("Should have failed") + } + return blk1, nil } @@ -672,11 +707,12 @@ func TestVoteCanceling(t *testing.T) { config := DefaultConfig() config.Params = snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, } vdr0 := validators.GenerateRandomValidator(1) @@ -1076,3 +1112,60 @@ func TestEngineRetryFetch(t *testing.T) { t.Fatalf("Should have requested the block again") } } + +func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { + vdr, _, sender, vm, te, gBlk := setup(t) + + sender.Default(true) + + validBlk := &Blk{ + parent: gBlk, + id: GenerateID(), + height: 1, + status: choices.Processing, + bytes: []byte{1}, + } + + invalidBlk := &Blk{ + parent: validBlk, + id: GenerateID(), + height: 2, + status: choices.Processing, + validity: errors.New("invalid due to an undeclared dependency"), + bytes: []byte{2}, + } + + validBlkID := validBlk.ID() + invalidBlkID := invalidBlk.ID() + + reqID := new(uint32) + sender.PushQueryF = func(_ ids.ShortSet, requestID uint32, _ ids.ID, _ []byte) { + *reqID = requestID + } + + te.insert(validBlk) + + sender.PushQueryF = nil + + te.insert(invalidBlk) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(validBlkID): + return validBlk, nil + case blkID.Equals(invalidBlkID): + return invalidBlk, nil + } + return nil, errUnknownBlock + } + + votes := ids.Set{} + votes.Add(invalidBlkID) + te.Chits(vdr.ID(), *reqID, votes) + + vm.GetBlockF = nil + + if status := validBlk.Status(); status != choices.Accepted { + t.Fatalf("Should have bubbled invalid votes to the valid parent") + } +} diff --git a/snow/engine/snowman/voter.go 
b/snow/engine/snowman/voter.go index d9c8a7f..0c9779a 100644 --- a/snow/engine/snowman/voter.go +++ b/snow/engine/snowman/voter.go @@ -41,6 +41,10 @@ func (v *voter) Update() { return } + // To prevent any potential deadlocks with un-disclosed dependencies, votes + // must be bubbled to the nearest valid block + results = v.bubbleVotes(results) + v.t.Config.Context.Log.Verbo("Finishing poll [%d] with:\n%s", v.requestID, &results) v.t.Consensus.RecordPoll(results) @@ -53,7 +57,27 @@ func (v *voter) Update() { v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce") - if len(v.t.polls.m) == 0 { + if len(v.t.polls.m) < v.t.Config.Params.ConcurrentRepolls { v.t.repoll() } } + +func (v *voter) bubbleVotes(votes ids.Bag) ids.Bag { + bubbledVotes := ids.Bag{} + for _, vote := range votes.List() { + count := votes.Count(vote) + blk, err := v.t.Config.VM.GetBlock(vote) + if err != nil { + continue + } + + for blk.Status().Fetched() && !v.t.Consensus.Issued(blk) { + blk = blk.Parent() + } + + if !blk.Status().Decided() && v.t.Consensus.Issued(blk) { + bubbledVotes.AddCount(blk.ID(), count) + } + } + return bubbledVotes +} diff --git a/snow/networking/awaiting_connections.go b/snow/networking/awaiting_connections.go index 0b5047d..5887cea 100644 --- a/snow/networking/awaiting_connections.go +++ b/snow/networking/awaiting_connections.go @@ -4,31 +4,43 @@ package networking import ( + stdmath "math" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/math" ) // AwaitingConnections ... type AwaitingConnections struct { - Requested ids.ShortSet - NumRequired int - Finish func() + Requested validators.Set + WeightRequired uint64 + Finish func() - connected ids.ShortSet + weight uint64 } // Add ... 
func (aw *AwaitingConnections) Add(conn ids.ShortID) { - if aw.Requested.Contains(conn) { - aw.connected.Add(conn) + vdr, ok := aw.Requested.Get(conn) + if !ok { + return } + weight, err := math.Add64(vdr.Weight(), aw.weight) + if err != nil { + weight = stdmath.MaxUint64 + } + aw.weight = weight } // Remove ... func (aw *AwaitingConnections) Remove(conn ids.ShortID) { - aw.connected.Remove(conn) + vdr, ok := aw.Requested.Get(conn) + if !ok { + return + } + aw.weight -= vdr.Weight() } // Ready ... -func (aw *AwaitingConnections) Ready() bool { - return aw.connected.Len() >= aw.NumRequired -} +func (aw *AwaitingConnections) Ready() bool { return aw.weight >= aw.WeightRequired } diff --git a/snow/networking/router/subnet_router.go b/snow/networking/router/subnet_router.go index 93da106..ca1f6de 100644 --- a/snow/networking/router/subnet_router.go +++ b/snow/networking/router/subnet_router.go @@ -38,7 +38,9 @@ func (sr *ChainRouter) AddChain(chain *handler.Handler) { sr.lock.Lock() defer sr.lock.Unlock() - sr.chains[chain.Context().ChainID.Key()] = chain + chainID := chain.Context().ChainID + sr.log.Debug("Adding %s to the routing table", chainID) + sr.chains[chainID.Key()] = chain } // RemoveChain removes the specified chain so that incoming diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index f33a68c..3d6e3af 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -4,6 +4,7 @@ package sender import ( + "reflect" "sync" "testing" "time" @@ -17,6 +18,20 @@ import ( "github.com/ava-labs/gecko/utils/logging" ) +func TestSenderContext(t *testing.T) { + context := snow.DefaultContextTest() + sender := Sender{} + sender.Initialize( + context, + &ExternalSenderTest{}, + &router.ChainRouter{}, + &timeout.Manager{}, + ) + if res := sender.Context(); !reflect.DeepEqual(res, context) { + t.Fatalf("Got %#v, expected %#v", res, context) + } +} + func TestTimeout(t *testing.T) { tm := 
timeout.Manager{} tm.Initialize(time.Millisecond) diff --git a/snow/validators/set.go b/snow/validators/set.go index 26dd22f..50210bf 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -24,6 +24,9 @@ type Set interface { // Add the provided validator to the set. Add(Validator) + // Get the validator from the set. + Get(ids.ShortID) (Validator, bool) + // Remove the validator with the specified ID. Remove(ids.ShortID) @@ -102,6 +105,22 @@ func (s *set) add(vdr Validator) { s.sampler.Weights = append(s.sampler.Weights, w) } +// Get implements the Set interface. +func (s *set) Get(vdrID ids.ShortID) (Validator, bool) { + s.lock.Lock() + defer s.lock.Unlock() + + return s.get(vdrID) +} + +func (s *set) get(vdrID ids.ShortID) (Validator, bool) { + index, ok := s.vdrMap[vdrID.Key()] + if !ok { + return nil, false + } + return s.vdrSlice[index], true +} + // Remove implements the Set interface. func (s *set) Remove(vdrID ids.ShortID) { s.lock.Lock() diff --git a/snow/validators/set_test.go b/snow/validators/set_test.go index 5ad381c..0518555 100644 --- a/snow/validators/set_test.go +++ b/snow/validators/set_test.go @@ -10,6 +10,31 @@ import ( "github.com/ava-labs/gecko/ids" ) +func TestSetSet(t *testing.T) { + vdr0 := NewValidator(ids.ShortEmpty, 1) + vdr1_0 := NewValidator(ids.NewShortID([20]byte{0xFF}), 1) + // Should replace vdr1_0, because later additions replace earlier ones + vdr1_1 := NewValidator(ids.NewShortID([20]byte{0xFF}), math.MaxInt64-1) + // Should be discarded, because it has a weight of 0 + vdr2 := NewValidator(ids.NewShortID([20]byte{0xAA}), 0) + + s := NewSet() + s.Set([]Validator{vdr0, vdr1_0, vdr1_1, vdr2}) + + if !s.Contains(vdr0.ID()) { + t.Fatal("Should have contained vdr0", vdr0.ID()) + } + if !s.Contains(vdr1_0.ID()) { + t.Fatal("Should have contained vdr1", vdr1_0.ID()) + } + if sampled := s.Sample(1); !sampled[0].ID().Equals(vdr1_0.ID()) { + t.Fatal("Should have sampled vdr1") + } + if len := s.Len(); len != 2 { + 
t.Fatalf("Got size %d, expected 2", len) + } +} + func TestSamplerSample(t *testing.T) { vdr0 := GenerateRandomValidator(1) vdr1 := GenerateRandomValidator(math.MaxInt64 - 1) diff --git a/utils/crypto/crypto_benchmark_test.go b/utils/crypto/crypto_benchmark_test.go index 2d4d2f9..6262a34 100644 --- a/utils/crypto/crypto_benchmark_test.go +++ b/utils/crypto/crypto_benchmark_test.go @@ -41,7 +41,7 @@ func init() { RSA: &FactoryRSA{}, RSAPSS: &FactoryRSAPSS{}, ED25519: &FactoryED25519{}, - SECP256K1: &FactorySECP256K1{}, + SECP256K1: &FactorySECP256K1R{}, } for _, f := range factories { fKeys := []PublicKey{} diff --git a/utils/crypto/secp256k1.go b/utils/crypto/secp256k1.go deleted file mode 100644 index 72f4451..0000000 --- a/utils/crypto/secp256k1.go +++ /dev/null @@ -1,146 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package crypto - -import ( - "crypto/ecdsa" - "crypto/rand" - "math/big" - - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/crypto/secp256k1" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/hashing" -) - -const ( - // SECP256K1SigLen is the number of bytes in a secp2561k signature - SECP256K1SigLen = 64 - - // SECP256K1SKLen is the number of bytes in a secp2561k private key - SECP256K1SKLen = 32 -) - -// FactorySECP256K1 ... 
-type FactorySECP256K1 struct{} - -// NewPrivateKey implements the Factory interface -func (*FactorySECP256K1) NewPrivateKey() (PrivateKey, error) { - k, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader) - if err != nil { - return nil, err - } - return &PrivateKeySECP256K1{sk: k}, nil -} - -// ToPublicKey implements the Factory interface -func (*FactorySECP256K1) ToPublicKey(b []byte) (PublicKey, error) { - key, err := crypto.DecompressPubkey(b) - return &PublicKeySECP256K1{ - pk: key, - bytes: b, - }, err -} - -// ToPrivateKey implements the Factory interface -func (*FactorySECP256K1) ToPrivateKey(b []byte) (PrivateKey, error) { - key, err := crypto.ToECDSA(b) - return &PrivateKeySECP256K1{ - sk: key, - bytes: b, - }, err -} - -// PublicKeySECP256K1 ... -type PublicKeySECP256K1 struct { - pk *ecdsa.PublicKey - addr ids.ShortID - bytes []byte -} - -// Verify implements the PublicKey interface -func (k *PublicKeySECP256K1) Verify(msg, sig []byte) bool { - return k.VerifyHash(hashing.ComputeHash256(msg), sig) -} - -// VerifyHash implements the PublicKey interface -func (k *PublicKeySECP256K1) VerifyHash(hash, sig []byte) bool { - if verifySECP256K1SignatureFormat(sig) != nil { - return false - } - return crypto.VerifySignature(k.Bytes(), hash, sig) -} - -// Address implements the PublicKey interface -func (k *PublicKeySECP256K1) Address() ids.ShortID { - if k.addr.IsZero() { - addr, err := ids.ToShortID(hashing.PubkeyBytesToAddress(k.Bytes())) - if err != nil { - panic(err) - } - k.addr = addr - } - return k.addr -} - -// Bytes implements the PublicKey interface -func (k *PublicKeySECP256K1) Bytes() []byte { - if k.bytes == nil { - k.bytes = crypto.CompressPubkey(k.pk) - } - return k.bytes -} - -// PrivateKeySECP256K1 ... 
-type PrivateKeySECP256K1 struct { - sk *ecdsa.PrivateKey - pk *PublicKeySECP256K1 - bytes []byte -} - -// PublicKey implements the PrivateKey interface -func (k *PrivateKeySECP256K1) PublicKey() PublicKey { - if k.pk == nil { - k.pk = &PublicKeySECP256K1{pk: (*ecdsa.PublicKey)(&k.sk.PublicKey)} - } - return k.pk -} - -// Sign implements the PrivateKey interface -func (k *PrivateKeySECP256K1) Sign(msg []byte) ([]byte, error) { - return k.SignHash(hashing.ComputeHash256(msg)) -} - -// SignHash implements the PrivateKey interface -func (k *PrivateKeySECP256K1) SignHash(hash []byte) ([]byte, error) { - sig, err := crypto.Sign(hash, k.sk) - if err != nil { - return nil, err - } - return sig[:len(sig)-1], err -} - -// Bytes implements the PrivateKey interface -func (k *PrivateKeySECP256K1) Bytes() []byte { - if k.bytes == nil { - k.bytes = make([]byte, SECP256K1SKLen) - bytes := k.sk.D.Bytes() - copy(k.bytes[SECP256K1SKLen-len(bytes):], bytes) - } - return k.bytes -} - -func verifySECP256K1SignatureFormat(sig []byte) error { - if len(sig) != SECP256K1SigLen { - return errInvalidSigLen - } - var r, s big.Int - r.SetBytes(sig[:32]) - s.SetBytes(sig[32:]) - if !crypto.ValidateSignatureValues(0, &r, &s, true) { - return errMutatedSig - } - return nil -} diff --git a/utils/crypto/secp256k1_recover_benchmark_test.go b/utils/crypto/secp256k1_recover_benchmark_test.go deleted file mode 100644 index d1a3530..0000000 --- a/utils/crypto/secp256k1_recover_benchmark_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package crypto - -import ( - "testing" - - "github.com/ava-labs/gecko/utils/hashing" -) - -// NumRecoveries is the number of recoveries to run per operation -const NumRecoveries = 1 - -var ( - secpSigs [][]byte -) - -func init() { - factory := FactorySECP256K1R{} - - hash := hashing.ComputeHash256(nil) - for i := byte(0); i < NumRecoveries; i++ { - key, err := factory.NewPrivateKey() - if err != nil { - panic(err) - } - sig, err := key.SignHash(hash) - if err != nil { - panic(err) 
- } - secpSigs = append(secpSigs, sig) - } -} - -func recover() { - factory := FactorySECP256K1R{} - hash := hashing.ComputeHash256(nil) - for _, sig := range secpSigs { - if _, err := factory.RecoverHashPublicKey(hash, sig); err != nil { - panic(err) - } - } -} - -// BenchmarkSecp256k1RecoverVerify runs the benchmark with secp sig -func BenchmarkSecp256k1RecoverVerify(b *testing.B) { - for n := 0; n < b.N; n++ { - recover() - } -} diff --git a/utils/crypto/secp256k1r.go b/utils/crypto/secp256k1r.go index 6de3515..1fde767 100644 --- a/utils/crypto/secp256k1r.go +++ b/utils/crypto/secp256k1r.go @@ -5,13 +5,11 @@ package crypto import ( "bytes" - "crypto/ecdsa" - "crypto/rand" - "math/big" + "errors" "sort" - "github.com/ava-labs/go-ethereum/crypto" - "github.com/ava-labs/go-ethereum/crypto/secp256k1" + "github.com/decred/dcrd/dcrec/secp256k1" + "github.com/decred/dcrd/dcrec/secp256k1/ecdsa" "github.com/ava-labs/gecko/cache" "github.com/ava-labs/gecko/ids" @@ -27,6 +25,18 @@ const ( // SECP256K1RSKLen is the number of bytes in a secp2561k recoverable private // key SECP256K1RSKLen = 32 + + // SECP256K1RPKLen is the number of bytes in a secp2561k recoverable public + // key + SECP256K1RPKLen = 33 + + // from the decred library: + // compactSigMagicOffset is a value used when creating the compact signature + // recovery code inherited from Bitcoin and has no meaning, but has been + // retained for compatibility. For historical purposes, it was originally + // picked to avoid a binary representation that would allow compact + // signatures to be mistaken for other components. + compactSigMagicOffset = 27 ) // FactorySECP256K1R ... 
@@ -34,16 +44,13 @@ type FactorySECP256K1R struct{ Cache cache.LRU } // NewPrivateKey implements the Factory interface func (*FactorySECP256K1R) NewPrivateKey() (PrivateKey, error) { - k, err := ecdsa.GenerateKey(secp256k1.S256(), rand.Reader) - if err != nil { - return nil, err - } - return &PrivateKeySECP256K1R{sk: k}, nil + k, err := secp256k1.GeneratePrivateKey() + return &PrivateKeySECP256K1R{sk: k}, err } // ToPublicKey implements the Factory interface func (*FactorySECP256K1R) ToPublicKey(b []byte) (PublicKey, error) { - key, err := crypto.DecompressPubkey(b) + key, err := secp256k1.ParsePubKey(b) return &PublicKeySECP256K1R{ pk: key, bytes: b, @@ -52,11 +59,10 @@ func (*FactorySECP256K1R) ToPublicKey(b []byte) (PublicKey, error) { // ToPrivateKey implements the Factory interface func (*FactorySECP256K1R) ToPrivateKey(b []byte) (PrivateKey, error) { - key, err := crypto.ToECDSA(b) return &PrivateKeySECP256K1R{ - sk: key, + sk: secp256k1.PrivKeyFromBytes(b), bytes: b, - }, err + }, nil } // RecoverPublicKey returns the public key from a 65 byte signature @@ -71,25 +77,35 @@ func (f *FactorySECP256K1R) RecoverHashPublicKey(hash, sig []byte) (PublicKey, e copy(cacheBytes[len(hash):], sig) id := ids.NewID(hashing.ComputeHash256Array(cacheBytes)) if cachedPublicKey, ok := f.Cache.Get(id); ok { - return cachedPublicKey.(*PublicKeySECP256K1), nil + return cachedPublicKey.(*PublicKeySECP256K1R), nil } if err := verifySECP256K1RSignatureFormat(sig); err != nil { return nil, err } - rawPubkey, err := crypto.SigToPub(hash, sig) + sig, err := sigToRawSig(sig) if err != nil { return nil, err } - pubkey := &PublicKeySECP256K1{pk: rawPubkey} + + rawPubkey, compressed, err := ecdsa.RecoverCompact(sig, hash) + if err != nil { + return nil, err + } + + if compressed { + return nil, errors.New("wasn't expecting a compresses key") + } + + pubkey := &PublicKeySECP256K1R{pk: rawPubkey} f.Cache.Put(id, pubkey) return pubkey, nil } // PublicKeySECP256K1R ... 
type PublicKeySECP256K1R struct { - pk *ecdsa.PublicKey + pk *secp256k1.PublicKey addr ids.ShortID bytes []byte } @@ -101,10 +117,12 @@ func (k *PublicKeySECP256K1R) Verify(msg, sig []byte) bool { // VerifyHash implements the PublicKey interface func (k *PublicKeySECP256K1R) VerifyHash(hash, sig []byte) bool { - if verifySECP256K1RSignatureFormat(sig) != nil { + factory := FactorySECP256K1R{} + pk, err := factory.RecoverHashPublicKey(hash, sig) + if err != nil { return false } - return crypto.VerifySignature(k.Bytes(), hash, sig[:SECP256K1RSigLen-1]) + return k.Address().Equals(pk.Address()) } // Address implements the PublicKey interface @@ -122,14 +140,14 @@ func (k *PublicKeySECP256K1R) Address() ids.ShortID { // Bytes implements the PublicKey interface func (k *PublicKeySECP256K1R) Bytes() []byte { if k.bytes == nil { - k.bytes = crypto.CompressPubkey(k.pk) + k.bytes = k.pk.SerializeCompressed() } return k.bytes } // PrivateKeySECP256K1R ... type PrivateKeySECP256K1R struct { - sk *ecdsa.PrivateKey + sk *secp256k1.PrivateKey pk *PublicKeySECP256K1R bytes []byte } @@ -137,7 +155,7 @@ type PrivateKeySECP256K1R struct { // PublicKey implements the PrivateKey interface func (k *PrivateKeySECP256K1R) PublicKey() PublicKey { if k.pk == nil { - k.pk = &PublicKeySECP256K1R{pk: (*ecdsa.PublicKey)(&k.sk.PublicKey)} + k.pk = &PublicKeySECP256K1R{pk: k.sk.PubKey()} } return k.pk } @@ -149,27 +167,49 @@ func (k *PrivateKeySECP256K1R) Sign(msg []byte) ([]byte, error) { // SignHash implements the PrivateKey interface func (k *PrivateKeySECP256K1R) SignHash(hash []byte) ([]byte, error) { - return crypto.Sign(hash, k.sk) + sig := ecdsa.SignCompact(k.sk, hash, false) // returns [v || r || s] + return rawSigToSig(sig) } // Bytes implements the PrivateKey interface func (k *PrivateKeySECP256K1R) Bytes() []byte { if k.bytes == nil { - k.bytes = make([]byte, SECP256K1RSKLen) - bytes := k.sk.D.Bytes() - copy(k.bytes[SECP256K1RSKLen-len(bytes):], bytes) + k.bytes = k.sk.Serialize() } 
return k.bytes } +// raw sig has format [v || r || s] whereas the sig has format [r || s || v] +func rawSigToSig(sig []byte) ([]byte, error) { + if len(sig) != SECP256K1RSigLen { + return nil, errInvalidSigLen + } + recCode := sig[0] + copy(sig, sig[1:]) + sig[SECP256K1RSigLen-1] = recCode - compactSigMagicOffset + return sig, nil +} + +// sig has format [r || s || v] whereas the raw sig has format [v || r || s] +func sigToRawSig(sig []byte) ([]byte, error) { + if len(sig) != SECP256K1RSigLen { + return nil, errInvalidSigLen + } + newSig := make([]byte, SECP256K1RSigLen) + newSig[0] = sig[SECP256K1RSigLen-1] + compactSigMagicOffset + copy(newSig[1:], sig) + return newSig, nil +} + +// verifies the signature format in format [r || s || v] func verifySECP256K1RSignatureFormat(sig []byte) error { if len(sig) != SECP256K1RSigLen { return errInvalidSigLen } - var r, s big.Int - r.SetBytes(sig[:32]) - s.SetBytes(sig[32:64]) - if !crypto.ValidateSignatureValues(sig[64], &r, &s, true) { + + var s secp256k1.ModNScalar + s.SetByteSlice(sig[32:64]) + if s.IsOverHalfOrder() { return errMutatedSig } return nil diff --git a/utils/crypto/secp256k1r_test.go b/utils/crypto/secp256k1r_test.go index 244b30b..4ed7f75 100644 --- a/utils/crypto/secp256k1r_test.go +++ b/utils/crypto/secp256k1r_test.go @@ -10,6 +10,8 @@ import ( "github.com/ava-labs/gecko/cache" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/hashing" + "github.com/decred/dcrd/dcrec/secp256k1" + "github.com/stretchr/testify/assert" ) func TestRecover(t *testing.T) { @@ -73,3 +75,24 @@ func TestGenRecreate(t *testing.T) { } } } + +func TestVerifyMutatedSignature(t *testing.T) { + factory := FactorySECP256K1R{} + + sk, err := factory.NewPrivateKey() + assert.NoError(t, err) + + msg := []byte{'h', 'e', 'l', 'l', 'o'} + + sig, err := sk.Sign(msg) + assert.NoError(t, err) + + var s secp256k1.ModNScalar + s.SetByteSlice(sig[32:64]) + s.Negate() + newSBytes := s.Bytes() + copy(sig[32:], 
newSBytes[:]) + + _, err = factory.RecoverPublicKey(msg, sig) + assert.Error(t, err) +} diff --git a/utils/formatting/cb58_test.go b/utils/formatting/cb58_test.go index 74e7aae..7ecb09a 100644 --- a/utils/formatting/cb58_test.go +++ b/utils/formatting/cb58_test.go @@ -26,6 +26,59 @@ func TestCB58Single(t *testing.T) { } } +func TestCB58UnmarshalJSON(t *testing.T) { + expected := CB58{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255}} + cb58 := CB58{} + err := cb58.UnmarshalJSON([]byte("\"1NVSVezva3bAtJesnUj\"")) + if err != nil { + t.Fatalf("CB58.UnmarshalJSON unexpected error unmarshalling: %s", err) + } else if !bytes.Equal(cb58.Bytes, expected.Bytes) { + t.Fatalf("CB58.UnmarshalJSON got 0x%x, expected 0x%x", cb58, expected) + } +} + +func TestCB58UnmarshalJSONNull(t *testing.T) { + cb58 := CB58{} + err := cb58.UnmarshalJSON([]byte("null")) + if err != nil { + t.Fatalf("CB58.UnmarshalJSON unexpected error unmarshalling null: %s", err) + } +} + +func TestCB58UnmarshalJSONError(t *testing.T) { + tests := []struct { + in string + expected error + }{ + {"", errMissingQuotes}, + {"\"foo", errMissingQuotes}, + {"foo", errMissingQuotes}, + {"foo\"", errMissingQuotes}, + {"\"foo\"", errMissingChecksum}, + {"\"foobar\"", errBadChecksum}, + } + cb58 := CB58{} + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + err := cb58.UnmarshalJSON([]byte(tt.in)) + if err != tt.expected { + t.Errorf("got error %q, expected error %q", err, tt.expected) + } + }) + } +} + +func TestCB58MarshalJSONError(t *testing.T) { + cb58 := CB58{[]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 255}} + expected := []byte("\"1NVSVezva3bAtJesnUj\"") + result, err := cb58.MarshalJSON() + if err != nil { + t.Fatalf("CB58.MarshalJSON unexpected error: %s", err) + } else if !bytes.Equal(result, expected) { + t.Fatalf("CB58.MarshalJSON got %q, expected %q", result, expected) + } +} + func TestCB58ParseBytes(t *testing.T) { ui := "1NVSVezva3bAtJesnUj" cb58 := CB58{} diff --git a/utils/ip.go b/utils/ip.go index 
cca055d..1b35fa7 100644 --- a/utils/ip.go +++ b/utils/ip.go @@ -8,7 +8,6 @@ import ( "fmt" "net" "strconv" - "strings" ) var ( @@ -33,21 +32,21 @@ func (ipDesc IPDesc) PortString() string { } func (ipDesc IPDesc) String() string { - return fmt.Sprintf("%s%s", ipDesc.IP, ipDesc.PortString()) + return net.JoinHostPort(ipDesc.IP.String(), fmt.Sprintf("%d", ipDesc.Port)) } // ToIPDesc ... -// TODO: this was kinda hacked together, it should be verified. func ToIPDesc(str string) (IPDesc, error) { - parts := strings.Split(str, ":") - if len(parts) != 2 { + host, portStr, err := net.SplitHostPort(str) + if err != nil { return IPDesc{}, errBadIP } - port, err := strconv.ParseUint(parts[1], 10 /*=base*/, 16 /*=size*/) + port, err := strconv.ParseUint(portStr, 10 /*=base*/, 16 /*=size*/) if err != nil { + // TODO: Should this return a locally defined error? (e.g. errBadPort) return IPDesc{}, err } - ip := net.ParseIP(parts[0]) + ip := net.ParseIP(host) if ip == nil { return IPDesc{}, errBadIP } @@ -56,9 +55,3 @@ func ToIPDesc(str string) (IPDesc, error) { Port: uint16(port), }, nil } - -// MyIP ... -func MyIP() net.IP { - // TODO: Change this to consult a json-returning external service - return net.ParseIP("127.0.0.1") -} diff --git a/utils/ip_test.go b/utils/ip_test.go new file mode 100644 index 0000000..179014f --- /dev/null +++ b/utils/ip_test.go @@ -0,0 +1,151 @@ +// (c) 2020, Alex Willmer. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package utils + +import ( + "fmt" + "net" + "testing" +) + +func TestIPDescEqual(t *testing.T) { + tests := []struct { + ipDesc1 IPDesc + ipDesc2 IPDesc + result bool + }{ + // Expected equal + { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("127.0.0.1"), 0}, + true, + }, { + IPDesc{net.ParseIP("::1"), 0}, + IPDesc{net.ParseIP("::1"), 0}, + true, + }, { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("::ffff:127.0.0.1"), 0}, + true, + }, + + // Expected unequal + { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("1.2.3.4"), 0}, + false, + }, { + IPDesc{net.ParseIP("::1"), 0}, + IPDesc{net.ParseIP("2001::1"), 0}, + false, + }, { + IPDesc{net.ParseIP("127.0.0.1"), 0}, + IPDesc{net.ParseIP("127.0.0.1"), 1}, + false, + }, + } + for i, tt := range tests { + t.Run(fmt.Sprintf("%d", i), func(t *testing.T) { + if tt.ipDesc1.IP == nil { + t.Error("ipDesc1 nil") + } else if tt.ipDesc2.IP == nil { + t.Error("ipDesc2 nil") + } + result := tt.ipDesc1.Equal(tt.ipDesc2) + if result && result != tt.result { + t.Error("Expected IPDesc to be equal, but they were not") + } + if !result && result != tt.result { + t.Error("Expected IPDesc to be unequal, but they were equal") + } + }) + } +} + +func TestIPDescPortString(t *testing.T) { + tests := []struct { + ipDesc IPDesc + result string + }{ + {IPDesc{net.ParseIP("127.0.0.1"), 0}, ":0"}, + {IPDesc{net.ParseIP("::1"), 42}, ":42"}, + {IPDesc{net.ParseIP("::ffff:127.0.0.1"), 65535}, ":65535"}, + {IPDesc{net.IP{}, 1234}, ":1234"}, + } + for _, tt := range tests { + t.Run(tt.result, func(t *testing.T) { + if result := tt.ipDesc.PortString(); result != tt.result { + t.Errorf("Expected %q, got %q", tt.result, result) + } + }) + } +} + +func TestIPDescString(t *testing.T) { + tests := []struct { + ipDesc IPDesc + result string + }{ + {IPDesc{net.ParseIP("127.0.0.1"), 0}, "127.0.0.1:0"}, + {IPDesc{net.ParseIP("::1"), 42}, "[::1]:42"}, + {IPDesc{net.ParseIP("::ffff:127.0.0.1"), 65535}, 
"127.0.0.1:65535"}, + {IPDesc{net.IP{}, 1234}, ":1234"}, + } + for _, tt := range tests { + t.Run(tt.result, func(t *testing.T) { + if result := tt.ipDesc.String(); result != tt.result { + t.Errorf("Expected %q, got %q", tt.result, result) + } + }) + } +} + +func TestToIPDescError(t *testing.T) { + tests := []struct { + in string + out IPDesc + }{ + {"", IPDesc{}}, + {":", IPDesc{}}, + {"abc:", IPDesc{}}, + {":abc", IPDesc{}}, + {"abc:abc", IPDesc{}}, + {"127.0.0.1:", IPDesc{}}, + {":1", IPDesc{}}, + {"::1", IPDesc{}}, + {"::1:42", IPDesc{}}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + result, err := ToIPDesc(tt.in) + if err == nil { + t.Errorf("Unexpected success") + } + if !tt.out.Equal(result) { + t.Errorf("Expected %v, got %v", tt.out, result) + } + }) + } +} + +func TestToIPDesc(t *testing.T) { + tests := []struct { + in string + out IPDesc + }{ + {"127.0.0.1:42", IPDesc{net.ParseIP("127.0.0.1"), 42}}, + {"[::1]:42", IPDesc{net.ParseIP("::1"), 42}}, + } + for _, tt := range tests { + t.Run(tt.in, func(t *testing.T) { + result, err := ToIPDesc(tt.in) + if err != nil { + t.Errorf("Unexpected error %v", err) + } + if !tt.out.Equal(result) { + t.Errorf("Expected %#v, got %#v", tt.out, result) + } + }) + } +} diff --git a/utils/logging/log.go b/utils/logging/log.go index 04d69fb..a9e6aec 100644 --- a/utils/logging/log.go +++ b/utils/logging/log.go @@ -171,7 +171,7 @@ func (l *Log) format(level Level, format string, args ...interface{}) string { return fmt.Sprintf("%s[%s]%s %s\n", level, - time.Now().Format("01-02|15:04:05.000"), + time.Now().Format("01-02|15:04:05"), prefix, text) } diff --git a/utils/math/safe_math_test.go b/utils/math/safe_math_test.go index c8428b5..47f65f9 100644 --- a/utils/math/safe_math_test.go +++ b/utils/math/safe_math_test.go @@ -10,6 +10,28 @@ import ( const maxUint64 uint64 = math.MaxUint64 +func TestMax64(t *testing.T) { + actual := Max64(0, maxUint64) + if actual != maxUint64 { + t.Fatalf("Expected %d, got 
%d", maxUint64, actual) + } + actual = Max64(maxUint64, 0) + if actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } +} + +func TestMin64(t *testing.T) { + actual := Min64(0, maxUint64) + if actual != 0 { + t.Fatalf("Expected %d, got %d", 0, actual) + } + actual = Min64(maxUint64, 0) + if actual != 0 { + t.Fatalf("Expected %d, got %d", 0, actual) + } +} + func TestAdd64(t *testing.T) { sum, err := Add64(0, maxUint64) if err != nil { @@ -51,6 +73,20 @@ func TestAdd64(t *testing.T) { } } +func TestSub64(t *testing.T) { + actual, err := Sub64(2, 1) + if err != nil { + t.Fatalf("Sub64 failed unexpectedly") + } else if actual != 1 { + t.Fatalf("Expected %d, got %d", 1, actual) + } + + _, err = Sub64(1, 2) + if err == nil { + t.Fatalf("Sub64 did not fail in the manner expected") + } +} + func TestMul64(t *testing.T) { if prod, err := Mul64(maxUint64, 0); err != nil { t.Fatalf("Mul64 failed unexpectedly") @@ -68,3 +104,15 @@ func TestMul64(t *testing.T) { t.Fatalf("Mul64 overflowed") } } + +func TestDiff64(t *testing.T) { + actual := Diff64(0, maxUint64) + if actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } + + actual = Diff64(maxUint64, 0) + if actual != maxUint64 { + t.Fatalf("Expected %d, got %d", maxUint64, actual) + } +} diff --git a/utils/timer/clock_test.go b/utils/timer/clock_test.go new file mode 100644 index 0000000..ef8eb67 --- /dev/null +++ b/utils/timer/clock_test.go @@ -0,0 +1,37 @@ +package timer + +import ( + "testing" + "time" +) + +func TestClockSet(t *testing.T) { + clock := Clock{} + clock.Set(time.Unix(1000000, 0)) + if clock.faked == false { + t.Error("Fake time was set, but .faked flag was not set") + } + if !clock.Time().Equal(time.Unix(1000000, 0)) { + t.Error("Fake time was set, but not returned") + } +} + +func TestClockSync(t *testing.T) { + clock := Clock{true, time.Unix(0, 0)} + clock.Sync() + if clock.faked == true { + t.Error("Clock was synced, but .faked flag was set") + } + if 
clock.Time().Equal(time.Unix(0, 0)) { + t.Error("Clock was synced, but returned a fake time") + } +} + +func TestClockUnix(t *testing.T) { + clock := Clock{true, time.Unix(-14159040, 0)} + actual := clock.Unix() + if actual != 0 { + // We are Unix of 1970s, Moon landings are irrelevant + t.Errorf("Expected time prior to Unix epoch to be clamped to 0, got %d", actual) + } +} diff --git a/utils/timer/latency.go b/utils/timer/latency.go new file mode 100644 index 0000000..2d2371f --- /dev/null +++ b/utils/timer/latency.go @@ -0,0 +1,21 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package timer + +// Useful latency buckets +var ( + Buckets = []float64{ + 10, // 10 ms is ~ instant + 100, // 100 ms + 250, // 250 ms + 500, // 500 ms + 1000, // 1 second + 1500, // 1.5 seconds + 2000, // 2 seconds + 3000, // 3 seconds + 5000, // 5 seconds + 10000, // 10 seconds + // anything larger than 10 seconds will be bucketed together + } +) diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index da9bfc7..cd00f98 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -24,6 +24,8 @@ const ( IntLen = 4 // LongLen is the number of bytes per long LongLen = 8 + // BoolLen is the number of bytes per bool + BoolLen = 1 ) var ( @@ -242,7 +244,9 @@ func (p *Packer) PackFixedByteSlices(byteSlices [][]byte) { } } -// UnpackFixedByteSlices unpack a byte slice slice to the byte array +// UnpackFixedByteSlices returns a byte slice slice from the byte array. +// Each byte slice has the specified size. The number of byte slices is +// read from the byte array. 
func (p *Packer) UnpackFixedByteSlices(size int) [][]byte { sliceSize := p.UnpackInt() bytes := [][]byte(nil) diff --git a/utils/wrappers/packing_test.go b/utils/wrappers/packing_test.go index a97463f..6937d27 100644 --- a/utils/wrappers/packing_test.go +++ b/utils/wrappers/packing_test.go @@ -5,10 +5,61 @@ package wrappers import ( "bytes" + "reflect" "testing" ) -func TestPackerByte(t *testing.T) { +const ( + ByteSentinal = 0 + ShortSentinal = 0 + IntSentinal = 0 + LongSentinal = 0 + BoolSentinal = false +) + +func TestPackerCheckSpace(t *testing.T) { + p := Packer{Offset: -1} + p.CheckSpace(1) + if !p.Errored() { + t.Fatal("Expected errNegativeOffset") + } + + p = Packer{} + p.CheckSpace(-1) + if !p.Errored() { + t.Fatal("Expected errInvalidInput") + } + + p = Packer{Bytes: []byte{0x01}, Offset: 1} + p.CheckSpace(1) + if !p.Errored() { + t.Fatal("Expected errBadLength") + } + + p = Packer{Bytes: []byte{0x01}, Offset: 2} + p.CheckSpace(0) + if !p.Errored() { + t.Fatal("Expected errBadLength, due to out of bounds offset") + } +} + +func TestPackerExpand(t *testing.T) { + p := Packer{Bytes: []byte{0x01}, Offset: 2} + p.Expand(1) + if !p.Errored() { + t.Fatal("packer.Expand didn't notice packer had out of bounds offset") + } + + p = Packer{Bytes: []byte{0x01, 0x02, 0x03}, Offset: 0} + p.Expand(1) + if p.Errored() { + t.Fatalf("packer.Expand unexpectedly had error %s", p.Err) + } else if len(p.Bytes) != 3 { + t.Fatalf("packer.Expand modified byte array, when it didn't need to") + } +} + +func TestPackerPackByte(t *testing.T) { p := Packer{MaxSize: 1} p.PackByte(0x01) @@ -25,9 +76,37 @@ func TestPackerByte(t *testing.T) { if !bytes.Equal(p.Bytes, expected) { t.Fatalf("Packer.PackByte wrote:\n%v\nExpected:\n%v", p.Bytes, expected) } + + p.PackByte(0x02) + if !p.Errored() { + t.Fatal("Packer.PackByte did not fail when attempt was beyond p.MaxSize") + } } -func TestPackerShort(t *testing.T) { +func TestPackerUnpackByte(t *testing.T) { + var ( + p = Packer{Bytes: 
[]byte{0x01}, Offset: 0} + actual = p.UnpackByte() + expected byte = 1 + expectedLen = ByteLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackByte unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackByte returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackByte left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackByte() + if !p.Errored() { + t.Fatalf("Packer.UnpackByte should have set error, due to attempted out of bounds read") + } else if actual != ByteSentinal { + t.Fatalf("Packer.UnpackByte returned %d, expected sentinal value %d", actual, ByteSentinal) + } +} + +func TestPackerPackShort(t *testing.T) { p := Packer{MaxSize: 2} p.PackShort(0x0102) @@ -46,7 +125,30 @@ func TestPackerShort(t *testing.T) { } } -func TestPackerInt(t *testing.T) { +func TestPackerUnpackShort(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01, 0x02}, Offset: 0} + actual = p.UnpackShort() + expected uint16 = 0x0102 + expectedLen = ShortLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackShort unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackShort returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackShort left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackShort() + if !p.Errored() { + t.Fatalf("Packer.UnpackShort should have set error, due to attempted out of bounds read") + } else if actual != ShortSentinal { + t.Fatalf("Packer.UnpackShort returned %d, expected sentinal value %d", actual, ShortSentinal) + } +} + +func TestPackerPackInt(t *testing.T) { p := Packer{MaxSize: 4} p.PackInt(0x01020304) @@ -63,9 +165,37 @@ func TestPackerInt(t *testing.T) { if !bytes.Equal(p.Bytes, expected) { t.Fatalf("Packer.PackInt wrote:\n%v\nExpected:\n%v", p.Bytes, expected) } + + p.PackInt(0x05060708) + if !p.Errored() { + t.Fatal("Packer.PackInt did 
not fail when attempt was beyond p.MaxSize") + } } -func TestPackerLong(t *testing.T) { +func TestPackerUnpackInt(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04}, Offset: 0} + actual = p.UnpackInt() + expected uint32 = 0x01020304 + expectedLen = IntLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackInt unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackInt returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackInt left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackInt() + if !p.Errored() { + t.Fatalf("Packer.UnpackInt should have set error, due to attempted out of bounds read") + } else if actual != IntSentinal { + t.Fatalf("Packer.UnpackInt returned %d, expected sentinal value %d", actual, IntSentinal) + } +} + +func TestPackerPackLong(t *testing.T) { p := Packer{MaxSize: 8} p.PackLong(0x0102030405060708) @@ -82,6 +212,175 @@ func TestPackerLong(t *testing.T) { if !bytes.Equal(p.Bytes, expected) { t.Fatalf("Packer.PackLong wrote:\n%v\nExpected:\n%v", p.Bytes, expected) } + + p.PackLong(0x090a0b0c0d0e0f00) + if !p.Errored() { + t.Fatal("Packer.PackLong did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackLong(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}, Offset: 0} + actual = p.UnpackLong() + expected uint64 = 0x0102030405060708 + expectedLen = LongLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackLong unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackLong returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackLong left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackLong() + if !p.Errored() { + t.Fatalf("Packer.UnpackLong should have set error, due to attempted out of bounds read") + } else if actual != 
LongSentinal { + t.Fatalf("Packer.UnpackLong returned %d, expected sentinal value %d", actual, LongSentinal) + } +} + +func TestPackerPackFixedBytes(t *testing.T) { + p := Packer{MaxSize: 3} + + p.PackFixedBytes([]byte("Ava")) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 3 { + t.Fatalf("Packer.PackFixedBytes wrote %d byte(s) but expected %d byte(s)", size, 3) + } + + expected := []byte("Ava") + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackFixedBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackFixedBytes([]byte("Ava")) + if !p.Errored() { + t.Fatal("Packer.PackFixedBytes did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackFixedBytes(t *testing.T) { + var ( + p = Packer{Bytes: []byte("Ava")} + actual = p.UnpackFixedBytes(3) + expected = []byte("Ava") + expectedLen = 3 + ) + if p.Errored() { + t.Fatalf("Packer.UnpackFixedBytes unexpectedly raised %s", p.Err) + } else if !bytes.Equal(actual, expected) { + t.Fatalf("Packer.UnpackFixedBytes returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackFixedBytes left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackFixedBytes(3) + if !p.Errored() { + t.Fatalf("Packer.UnpackFixedBytes should have set error, due to attempted out of bounds read") + } else if actual != nil { + t.Fatalf("Packer.UnpackFixedBytes returned %v, expected sentinal value %v", actual, nil) + } +} + +func TestPackerPackBytes(t *testing.T) { + p := Packer{MaxSize: 7} + + p.PackBytes([]byte("Ava")) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 7 { + t.Fatalf("Packer.PackBytes wrote %d byte(s) but expected %d byte(s)", size, 7) + } + + expected := []byte("\x00\x00\x00\x03Ava") + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackBytes([]byte("Ava")) + if !p.Errored() { + 
t.Fatal("Packer.PackBytes did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackBytes(t *testing.T) { + var ( + p = Packer{Bytes: []byte("\x00\x00\x00\x03Ava")} + actual = p.UnpackBytes() + expected = []byte("Ava") + expectedLen = 7 + ) + if p.Errored() { + t.Fatalf("Packer.UnpackBytes unexpectedly raised %s", p.Err) + } else if !bytes.Equal(actual, expected) { + t.Fatalf("Packer.UnpackBytes returned %d, but expected %d", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackBytes left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackBytes() + if !p.Errored() { + t.Fatalf("Packer.UnpackBytes should have set error, due to attempted out of bounds read") + } else if actual != nil { + t.Fatalf("Packer.UnpackBytes returned %v, expected sentinal value %v", actual, nil) + } +} + +func TestPackerPackFixedByteSlices(t *testing.T) { + p := Packer{MaxSize: 10} + + p.PackFixedByteSlices([][]byte{[]byte("Ava"), []byte("Eva")}) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 10 { + t.Fatalf("Packer.PackFixedByteSlices wrote %d byte(s) but expected %d byte(s)", size, 13) + } + + expected := []byte("\x00\x00\x00\x02AvaEva") + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackPackFixedByteSlicesBytes wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackFixedByteSlices([][]byte{[]byte("Ava"), []byte("Eva")}) + if !p.Errored() { + t.Fatal("Packer.PackFixedByteSlices did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackFixedByteSlices(t *testing.T) { + var ( + p = Packer{Bytes: []byte("\x00\x00\x00\x02AvaEva")} + actual = p.UnpackFixedByteSlices(3) + expected = [][]byte{[]byte("Ava"), []byte("Eva")} + expectedLen = 10 + ) + if p.Errored() { + t.Fatalf("Packer.UnpackFixedByteSlices unexpectedly raised %s", p.Err) + } else if !reflect.DeepEqual(actual, expected) { + t.Fatalf("Packer.UnpackFixedByteSlices returned %d, but expected %d", 
actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackFixedByteSlices left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackFixedByteSlices(3) + if !p.Errored() { + t.Fatalf("Packer.UnpackFixedByteSlices should have set error, due to attempted out of bounds read") + } else if actual != nil { + t.Fatalf("Packer.UnpackFixedByteSlices returned %v, expected sentinal value %v", actual, nil) + } } func TestPackerString(t *testing.T) { @@ -151,3 +450,59 @@ func TestPackBool(t *testing.T) { t.Fatal("got back wrong values") } } + +func TestPackerPackBool(t *testing.T) { + p := Packer{MaxSize: 1} + + p.PackBool(true) + + if p.Errored() { + t.Fatal(p.Err) + } + + if size := len(p.Bytes); size != 1 { + t.Fatalf("Packer.PackBool wrote %d byte(s) but expected %d byte(s)", size, 1) + } + + expected := []byte{0x01} + if !bytes.Equal(p.Bytes, expected) { + t.Fatalf("Packer.PackBool wrote:\n%v\nExpected:\n%v", p.Bytes, expected) + } + + p.PackBool(false) + if !p.Errored() { + t.Fatal("Packer.PackLong did not fail when attempt was beyond p.MaxSize") + } +} + +func TestPackerUnpackBool(t *testing.T) { + var ( + p = Packer{Bytes: []byte{0x01}, Offset: 0} + actual = p.UnpackBool() + expected bool = true + expectedLen = BoolLen + ) + if p.Errored() { + t.Fatalf("Packer.UnpackBool unexpectedly raised %s", p.Err) + } else if actual != expected { + t.Fatalf("Packer.UnpackBool returned %t, but expected %t", actual, expected) + } else if p.Offset != expectedLen { + t.Fatalf("Packer.UnpackBool left Offset %d, expected %d", p.Offset, expectedLen) + } + + actual = p.UnpackBool() + if !p.Errored() { + t.Fatalf("Packer.UnpackBool should have set error, due to attempted out of bounds read") + } else if actual != BoolSentinal { + t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal) + } + + p = Packer{Bytes: []byte{0x42}, Offset: 0} + expected = false + actual = p.UnpackBool() + if !p.Errored() { + 
t.Fatalf("Packer.UnpackBool id not raise error for invalid boolean value %v", p.Bytes) + } else if actual != expected { + t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal) + } +} diff --git a/vms/avm/base_tx.go b/vms/avm/base_tx.go index 8f90eb7..33cba51 100644 --- a/vms/avm/base_tx.go +++ b/vms/avm/base_tx.go @@ -6,10 +6,12 @@ package avm import ( "errors" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( @@ -27,31 +29,17 @@ var ( // BaseTx is the basis of all transactions. type BaseTx struct { - metadata + ava.Metadata - NetID uint32 `serialize:"true"` // ID of the network this chain lives on - BCID ids.ID `serialize:"true"` // ID of the chain on which this transaction exists (prevents replay attacks) - Outs []*TransferableOutput `serialize:"true"` // The outputs of this transaction - Ins []*TransferableInput `serialize:"true"` // The inputs to this transaction + NetID uint32 `serialize:"true" json:"networkID"` // ID of the network this chain lives on + BCID ids.ID `serialize:"true" json:"blockchainID"` // ID of the chain on which this transaction exists (prevents replay attacks) + Outs []*ava.TransferableOutput `serialize:"true" json:"outputs"` // The outputs of this transaction + Ins []*ava.TransferableInput `serialize:"true" json:"inputs"` // The inputs to this transaction } -// NetworkID is the ID of the network on which this transaction exists -func (t *BaseTx) NetworkID() uint32 { return t.NetID } - -// ChainID is the ID of the chain on which this transaction exists -func (t *BaseTx) ChainID() ids.ID { return t.BCID } - -// Outputs track which outputs this transaction is producing. The returned array -// should not be modified. 
-func (t *BaseTx) Outputs() []*TransferableOutput { return t.Outs } - -// Inputs track which UTXOs this transaction is consuming. The returned array -// should not be modified. -func (t *BaseTx) Inputs() []*TransferableInput { return t.Ins } - // InputUTXOs track which UTXOs this transaction is consuming. -func (t *BaseTx) InputUTXOs() []*UTXOID { - utxos := []*UTXOID(nil) +func (t *BaseTx) InputUTXOs() []*ava.UTXOID { + utxos := []*ava.UTXOID(nil) for _, in := range t.Ins { utxos = append(utxos, &in.UTXOID) } @@ -67,20 +55,21 @@ func (t *BaseTx) AssetIDs() ids.Set { return assets } +// NumCredentials returns the number of expected credentials +func (t *BaseTx) NumCredentials() int { return len(t.Ins) } + // UTXOs returns the UTXOs transaction is producing. -func (t *BaseTx) UTXOs() []*UTXO { +func (t *BaseTx) UTXOs() []*ava.UTXO { txID := t.ID() - utxos := make([]*UTXO, len(t.Outs)) + utxos := make([]*ava.UTXO, len(t.Outs)) for i, out := range t.Outs { - utxos[i] = &UTXO{ - UTXOID: UTXOID{ + utxos[i] = &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: uint32(i), }, - Asset: Asset{ - ID: out.AssetID(), - }, - Out: out.Out, + Asset: ava.Asset{ID: out.AssetID()}, + Out: out.Out, } } return utxos @@ -97,12 +86,14 @@ func (t *BaseTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error return errWrongChainID } + fc := ava.NewFlowChecker() for _, out := range t.Outs { if err := out.Verify(); err != nil { return err } + fc.Produce(out.AssetID(), out.Output().Amount()) } - if !isSortedTransferableOutputs(t.Outs, c) { + if !ava.IsSortedTransferableOutputs(t.Outs, c) { return errOutputsNotSorted } @@ -110,101 +101,37 @@ func (t *BaseTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error if err := in.Verify(); err != nil { return err } + fc.Consume(in.AssetID(), in.Input().Amount()) } - if !isSortedAndUniqueTransferableInputs(t.Ins) { + if !ava.IsSortedAndUniqueTransferableInputs(t.Ins) { return errInputsNotSortedUnique } - consumedFunds := 
map[[32]byte]uint64{} - for _, in := range t.Ins { - assetID := in.AssetID() - amount := in.Input().Amount() + // TODO: Add the Tx fee to the produced side - var err error - assetIDKey := assetID.Key() - consumedFunds[assetIDKey], err = math.Add64(consumedFunds[assetIDKey], amount) - - if err != nil { - return errInputOverflow - } - } - producedFunds := map[[32]byte]uint64{} - for _, out := range t.Outs { - assetID := out.AssetID() - amount := out.Output().Amount() - - var err error - assetIDKey := assetID.Key() - producedFunds[assetIDKey], err = math.Add64(producedFunds[assetIDKey], amount) - - if err != nil { - return errOutputOverflow - } + if err := fc.Verify(); err != nil { + return err } - // TODO: Add the Tx fee to the producedFunds - - for assetID, producedAssetAmount := range producedFunds { - consumedAssetAmount := consumedFunds[assetID] - if producedAssetAmount > consumedAssetAmount { - return errInsufficientFunds - } - } - - return t.metadata.Verify() + return t.Metadata.Verify() } // SemanticVerify that this transaction is valid to be spent. 
-func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { +func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { for i, in := range t.Ins { cred := creds[i] - fxIndex, err := vm.getFx(cred.Cred) + fxIndex, err := vm.getFx(cred) if err != nil { return err } fx := vm.fxs[fxIndex].Fx - utxoID := in.InputID() - utxo, err := vm.state.UTXO(utxoID) - if err == nil { - utxoAssetID := utxo.AssetID() - inAssetID := in.AssetID() - if !utxoAssetID.Equals(inAssetID) { - return errAssetIDMismatch - } - - if !vm.verifyFxUsage(fxIndex, inAssetID) { - return errIncompatibleFx - } - - err = fx.VerifyTransfer(uTx, utxo.Out, in.In, cred.Cred) - if err == nil { - continue - } + utxo, err := vm.getUTXO(&in.UTXOID) + if err != nil { return err } - inputTx, inputIndex := in.InputSource() - parent := UniqueTx{ - vm: vm, - txID: inputTx, - } - - if err := parent.Verify(); err != nil { - return errMissingUTXO - } else if status := parent.Status(); status.Decided() { - return errMissingUTXO - } - - utxos := parent.UTXOs() - - if uint32(len(utxos)) <= inputIndex || int(inputIndex) < 0 { - return errInvalidUTXO - } - - utxo = utxos[int(inputIndex)] - utxoAssetID := utxo.AssetID() inAssetID := in.AssetID() if !utxoAssetID.Equals(inAssetID) { @@ -215,9 +142,12 @@ func (t *BaseTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) erro return errIncompatibleFx } - if err := fx.VerifyTransfer(uTx, utxo.Out, in.In, cred); err != nil { + if err := fx.VerifyTransfer(uTx, in.In, cred, utxo.Out); err != nil { return err } } return nil } + +// ExecuteWithSideEffects writes the batch with any additional side effects +func (t *BaseTx) ExecuteWithSideEffects(_ *VM, batch database.Batch) error { return batch.Write() } diff --git a/vms/avm/base_tx_test.go b/vms/avm/base_tx_test.go index 0c3732d..1d3233f 100644 --- a/vms/avm/base_tx_test.go +++ b/vms/avm/base_tx_test.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/gecko/ids" 
"github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -36,7 +36,7 @@ func TestBaseTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, @@ -65,7 +65,7 @@ func TestBaseTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x06, + 0x00, 0x00, 0x00, 0x05, // amount: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, // number of signatures: @@ -77,49 +77,37 @@ func TestBaseTxSerialization(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: 
[]*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, }} - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() b, err := c.Marshal(&tx.UnsignedTx) if err != nil { @@ -137,56 +125,40 @@ func TestBaseTxGetters(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: 
[]*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) txID := tx.ID() - if netID := tx.NetworkID(); netID != networkID { - t.Fatalf("Wrong network ID returned") - } else if bcID := tx.ChainID(); !bcID.Equals(chainID) { - t.Fatalf("Wrong chain ID returned") - } else if outs := tx.Outputs(); len(outs) != 1 { - t.Fatalf("Outputs returned wrong number of outs") - } else if out := outs[0]; out != tx.Outs[0] { - t.Fatalf("Outputs returned wrong output") - } else if ins := tx.Inputs(); len(ins) != 1 { - t.Fatalf("Inputs returned wrong number of ins") - } else if in := ins[0]; in != tx.Ins[0] { - t.Fatalf("Inputs returned wrong input") - } else if assets := tx.AssetIDs(); assets.Len() != 1 { + if assets := tx.AssetIDs(); assets.Len() != 1 { t.Fatalf("Wrong number of assets returned") } else if !assets.Contains(asset) { t.Fatalf("Wrong asset returned") @@ -198,57 +170,43 @@ func TestBaseTxGetters(t *testing.T) { t.Fatalf("Wrong output index returned") } else if assetID := utxo.AssetID(); !assetID.Equals(asset) { t.Fatalf("Wrong asset ID returned") - } else if utxoOut := utxo.Out; utxoOut != out.Out { - t.Fatalf("Wrong output returned") } } func TestBaseTxSyntacticVerify(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c 
:= setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -258,15 +216,7 @@ func TestBaseTxSyntacticVerify(t *testing.T) { } func TestBaseTxSyntacticVerifyNil(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - 
c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := (*BaseTx)(nil) if err := tx.SyntacticVerify(ctx, c, 0); err == nil { @@ -275,51 +225,39 @@ func TestBaseTxSyntacticVerifyNil(t *testing.T) { } func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: 0, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + 
Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -329,51 +267,39 @@ func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { } func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: ids.Empty, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 
0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -383,42 +309,30 @@ func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { } func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - nil, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 1, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + Outs: []*ava.TransferableOutput{nil}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -428,22 +342,14 @@ func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { } func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { - c := 
codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, + Outs: []*ava.TransferableOutput{ + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 2, OutputOwners: secp256k1fx.OutputOwners{ @@ -452,8 +358,8 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }, }, - &TransferableOutput{ - Asset: Asset{ID: asset}, + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 1, OutputOwners: secp256k1fx.OutputOwners{ @@ -463,9 +369,9 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }, }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, @@ -474,7 +380,7 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }), OutputIndex: 1, }, - Asset: Asset{ID: asset}, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 54321, Input: secp256k1fx.Input{ @@ -492,34 +398,22 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { } func TestBaseTxSyntacticVerifyInvalidInput(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - 
c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - nil, - }, + }}, + Ins: []*ava.TransferableInput{nil}, } tx.Initialize([]byte{}) @@ -529,34 +423,24 @@ func TestBaseTxSyntacticVerifyInvalidInput(t *testing.T) { } func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ + }}, + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 
0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, @@ -565,7 +449,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }), OutputIndex: 0, }, - Asset: Asset{ID: asset}, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: math.MaxUint64, Input: secp256k1fx.Input{ @@ -573,8 +457,8 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }, }, }, - &TransferableInput{ - UTXOID: UTXOID{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, @@ -583,7 +467,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }), OutputIndex: 1, }, - Asset: Asset{ID: asset}, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 1, Input: secp256k1fx.Input{ @@ -601,22 +485,14 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { } func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, + Outs: []*ava.TransferableOutput{ + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 2, OutputOwners: secp256k1fx.OutputOwners{ @@ -625,8 +501,8 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }, }, }, - &TransferableOutput{ - Asset: Asset{ID: asset}, + &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: math.MaxUint64, OutputOwners: secp256k1fx.OutputOwners{ @@ -636,26 +512,24 @@ func 
TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }, }, }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -665,51 +539,39 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { } func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: math.MaxUint64, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 
math.MaxUint64, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 1, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 1, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } tx.Initialize([]byte{}) @@ -719,51 +581,39 @@ func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { } func TestBaseTxSyntacticVerifyUninitialized(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + c := setupCodec() tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ID: asset}, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: 
ava.Asset{ID: asset}, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, - 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, - 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, - 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, - }), - OutputIndex: 0, - }, - Asset: Asset{ID: asset}, - In: &secp256k1fx.TransferInput{ - Amt: 54321, - Input: secp256k1fx.Input{ - SigIndices: []uint32{2}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, + 0xf7, 0xf6, 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, + 0xef, 0xee, 0xed, 0xec, 0xeb, 0xea, 0xe9, 0xe8, + 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2, 0xe1, 0xe0, + }), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 54321, + Input: secp256k1fx.Input{ + SigIndices: []uint32{2}, }, }, - }, + }}, } if err := tx.SyntacticVerify(ctx, c, 0); err == nil { @@ -797,29 +647,25 @@ func TestBaseTxSemanticVerify(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - 
}, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -834,11 +680,9 @@ func TestBaseTxSemanticVerify(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -849,11 +693,11 @@ func TestBaseTxSemanticVerify(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err != nil { @@ -885,37 +729,31 @@ func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { } vm.batchTimeout = 0 - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &testVerifiable{}, - }) + tx.Creds = append(tx.Creds, &ava.TestVerifiable{}) b, err := vm.codec.Marshal(tx) if err != nil { @@ -924,11 +762,11 @@ func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: 
vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -960,33 +798,29 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { } vm.batchTimeout = 0 - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1001,11 +835,9 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1016,11 +848,11 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1043,14 +875,14 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ - ID: ids.NewID([32]byte{1}), - Fx: &testFx{}, - }, &common.Fx{ ID: ids.Empty, 
Fx: &secp256k1fx.Fx{}, }, + &common.Fx{ + ID: ids.NewID([32]byte{1}), + Fx: &testFx{}, + }, }, ) if err != nil { @@ -1064,26 +896,22 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { codec: vm.codec, } - cr.RegisterType(&TestTransferable{}) + cr.RegisterType(&ava.TestTransferable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &TestTransferable{}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, }, - }, - }}} + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &ava.TestTransferable{}, + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1098,11 +926,9 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1113,11 +939,11 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1151,35 +977,29 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, 
- Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - [crypto.SECP256K1RSigLen]byte{}, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, }, }) @@ -1190,11 +1010,11 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1228,29 +1048,25 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1265,11 +1081,9 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { 
fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1280,11 +1094,11 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1318,29 +1132,25 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: math.MaxUint32, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: math.MaxUint32, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1355,11 +1165,9 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1370,11 +1178,11 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { 
tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1407,44 +1215,36 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1459,11 +1259,9 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, 
+ pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1472,7 +1270,7 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { t.Fatal(err) } - txID, err := vm.IssueTx(b) + txID, err := vm.IssueTx(b, nil) if err != nil { t.Fatal(err) } @@ -1486,29 +1284,25 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 2, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 2, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err = vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1522,11 +1316,9 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { fixedSig = [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1537,11 +1329,11 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1574,44 +1366,36 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - 
pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1626,11 +1410,9 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1639,7 +1421,7 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { t.Fatal(err) } - txID, err := vm.IssueTx(b) + txID, err := vm.IssueTx(b, nil) if err != nil { t.Fatal(err) } @@ -1653,29 
+1435,25 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err = vm.codec.Marshal(&tx.UnsignedTx) if err != nil { @@ -1689,11 +1467,9 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { fixedSig = [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1704,11 +1480,11 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1751,48 +1527,40 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { codec: vm.codec, } - cr.RegisterType(&testVerifiable{}) + cr.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - 
Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1807,11 +1575,9 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1820,7 +1586,7 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { t.Fatal(err) } - txID, err := vm.IssueTx(b) + txID, err := vm.IssueTx(b, nil) if err != nil { t.Fatal(err) } @@ -1834,33 +1600,27 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - 
&TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &testVerifiable{}, - }) + tx.Creds = append(tx.Creds, &ava.TestVerifiable{}) b, err = vm.codec.Marshal(tx) if err != nil { @@ -1869,11 +1629,11 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { @@ -1916,48 +1676,40 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { codec: vm.codec, } - cr.RegisterType(&testVerifiable{}) + cr.RegisterType(&ava.TestVerifiable{}) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) - pendingTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: 
genesisTx.ID(), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 50000, - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&pendingTx.UnsignedTx) if err != nil { @@ -1972,11 +1724,9 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - pendingTx.Creds = append(pendingTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + pendingTx.Creds = append(pendingTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -1985,7 +1735,7 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { t.Fatal(err) } - txID, err := vm.IssueTx(b) + txID, err := vm.IssueTx(b, nil) if err != nil { t.Fatal(err) } @@ -1999,35 +1749,29 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { vm.PendingTxs() - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: txID, - OutputIndex: 0, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + 
}}, + }} - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - [crypto.SECP256K1RSigLen]byte{}, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + [crypto.SECP256K1RSigLen]byte{}, }, }) @@ -2038,11 +1782,11 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { tx.Initialize(b) uTx := &UniqueTx{ + TxState: &TxState{ + Tx: tx, + }, vm: vm, txID: tx.ID(), - t: &txState{ - tx: tx, - }, } if err := tx.UnsignedTx.SemanticVerify(vm, uTx, tx.Creds); err == nil { diff --git a/vms/avm/create_asset_tx.go b/vms/avm/create_asset_tx.go index c606b6b..9f95a15 100644 --- a/vms/avm/create_asset_tx.go +++ b/vms/avm/create_asset_tx.go @@ -10,6 +10,7 @@ import ( "unicode" "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" ) @@ -32,10 +33,10 @@ var ( // CreateAssetTx is a transaction that creates a new asset. type CreateAssetTx struct { BaseTx `serialize:"true"` - Name string `serialize:"true"` - Symbol string `serialize:"true"` - Denomination byte `serialize:"true"` - States []*InitialState `serialize:"true"` + Name string `serialize:"true" json:"name"` + Symbol string `serialize:"true" json:"symbol"` + Denomination byte `serialize:"true" json:"denomination"` + States []*InitialState `serialize:"true" json:"initialStates"` } // InitialStates track which virtual machines, and the initial state of these @@ -43,18 +44,18 @@ type CreateAssetTx struct { func (t *CreateAssetTx) InitialStates() []*InitialState { return t.States } // UTXOs returns the UTXOs transaction is producing. 
-func (t *CreateAssetTx) UTXOs() []*UTXO { +func (t *CreateAssetTx) UTXOs() []*ava.UTXO { txID := t.ID() utxos := t.BaseTx.UTXOs() for _, state := range t.States { for _, out := range state.Outs { - utxos = append(utxos, &UTXO{ - UTXOID: UTXOID{ + utxos = append(utxos, &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: uint32(len(utxos)), }, - Asset: Asset{ + Asset: ava.Asset{ ID: txID, }, Out: out, @@ -110,10 +111,5 @@ func (t *CreateAssetTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs return nil } -// SemanticVerify that this transaction is well-formed. -func (t *CreateAssetTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { - return t.BaseTx.SemanticVerify(vm, uTx, creds) -} - // Sort ... func (t *CreateAssetTx) Sort() { sortInitialStates(t.States) } diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go index 2dabd5c..a26a815 100644 --- a/vms/avm/create_asset_tx_test.go +++ b/vms/avm/create_asset_tx_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" @@ -33,7 +34,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // output: - 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, @@ -57,7 +58,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // input: - 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, 0x00, 
0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, // name: @@ -72,7 +73,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x01, // InitialStates[0]: 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, - 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x51, 0x02, 0x5c, 0x61, @@ -92,64 +93,60 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, }), - Outs: []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: ids.NewID([32]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }), - }, - Out: &secp256k1fx.TransferOutput{ - Amt: 12345, - Locktime: 54321, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID([20]byte{ - 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, - 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 0xd2, - 0x6d, 0x55, 0xa9, 0x55, - }), - ids.NewShortID([20]byte{ - 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, - 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, - 0x43, 0xab, 0x08, 0x59, - }), - }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + Out: &secp256k1fx.TransferOutput{ + Amt: 12345, + Locktime: 54321, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID([20]byte{ + 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, + 0xf6, 0x93, 0x34, 0xf8, 0x34, 0xbe, 0x6d, 
0xd2, + 0x6d, 0x55, 0xa9, 0x55, + }), + ids.NewShortID([20]byte{ + 0xc3, 0x34, 0x41, 0x28, 0xe0, 0x60, 0x12, 0x8e, + 0xde, 0x35, 0x23, 0xa2, 0x4a, 0x46, 0x1c, 0x89, + 0x43, 0xab, 0x08, 0x59, + }), }, }, }, - }, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{ - 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, - 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, - 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, - 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, - }), - OutputIndex: 5, - }, - Asset: Asset{ - ID: ids.NewID([32]byte{ - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - }), - }, - In: &secp256k1fx.TransferInput{ - Amt: 123456789, - Input: secp256k1fx.Input{ - SigIndices: []uint32{3, 7}, - }, + }}, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, + 0x71, 0x61, 0x51, 0x41, 0x31, 0x21, 0x11, 0x01, + 0xf0, 0xe0, 0xd0, 0xc0, 0xb0, 0xa0, 0x90, 0x80, + 0x70, 0x60, 0x50, 0x40, 0x30, 0x20, 0x10, 0x00, + }), + OutputIndex: 5, + }, + Asset: ava.Asset{ + ID: ids.NewID([32]byte{ + 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, + 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, + 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, + 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, + }), + }, + In: &secp256k1fx.TransferInput{ + Amt: 123456789, + Input: secp256k1fx.Input{ + SigIndices: []uint32{3, 7}, }, }, - }, + }}, }, Name: "Volatility Index", Symbol: "VIX", @@ -186,10 +183,12 @@ func TestCreateAssetTxSerialization(t *testing.T) { c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) c.RegisterType(&secp256k1fx.MintOutput{}) 
c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) c.RegisterType(&secp256k1fx.Credential{}) b, err := c.Marshal(&tx.UnsignedTx) diff --git a/vms/avm/credential.go b/vms/avm/credential.go deleted file mode 100644 index d5fb8ee..0000000 --- a/vms/avm/credential.go +++ /dev/null @@ -1,36 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "errors" - - "github.com/ava-labs/gecko/vms/components/verify" -) - -var ( - errNilCredential = errors.New("nil credential is not valid") - errNilFxCredential = errors.New("nil feature extension credential is not valid") -) - -// Credential ... -type Credential struct { - Cred verify.Verifiable `serialize:"true"` -} - -// Credential returns the feature extension credential that this Credential is -// using. -func (cred *Credential) Credential() verify.Verifiable { return cred.Cred } - -// Verify implements the verify.Verifiable interface -func (cred *Credential) Verify() error { - switch { - case cred == nil: - return errNilCredential - case cred.Cred == nil: - return errNilFxCredential - default: - return cred.Cred.Verify() - } -} diff --git a/vms/avm/credential_test.go b/vms/avm/credential_test.go deleted file mode 100644 index 867a89f..0000000 --- a/vms/avm/credential_test.go +++ /dev/null @@ -1,36 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "testing" -) - -func TestCredentialVerifyNil(t *testing.T) { - cred := (*Credential)(nil) - if err := cred.Verify(); err == nil { - t.Fatalf("Should have errored due to nil credential") - } -} - -func TestCredentialVerifyNilFx(t *testing.T) { - cred := &Credential{} - if err := cred.Verify(); err == nil { - t.Fatalf("Should have errored due to nil fx credential") - } -} - -func TestCredential(t *testing.T) { - cred := &Credential{ - Cred: &testVerifiable{}, - } - - if err := cred.Verify(); err != nil { - t.Fatal(err) - } - - if cred.Credential() != cred.Cred { - t.Fatalf("Should have returned the fx credential") - } -} diff --git a/vms/avm/export_tx.go b/vms/avm/export_tx.go new file mode 100644 index 0000000..d5222f4 --- /dev/null +++ b/vms/avm/export_tx.go @@ -0,0 +1,144 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" +) + +// ExportTx is the basis of all transactions. +type ExportTx struct { + BaseTx `serialize:"true"` + + Outs []*ava.TransferableOutput `serialize:"true" json:"exportedOutputs"` // The outputs this transaction is sending to the other chain +} + +// SyntacticVerify that this transaction is well-formed. 
+func (t *ExportTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, _ int) error { + switch { + case t == nil: + return errNilTx + case t.NetID != ctx.NetworkID: + return errWrongNetworkID + case !t.BCID.Equals(ctx.ChainID): + return errWrongChainID + } + + fc := ava.NewFlowChecker() + for _, out := range t.BaseTx.Outs { + if err := out.Verify(); err != nil { + return err + } + fc.Produce(out.AssetID(), out.Output().Amount()) + } + if !ava.IsSortedTransferableOutputs(t.BaseTx.Outs, c) { + return errOutputsNotSorted + } + + for _, out := range t.Outs { + if err := out.Verify(); err != nil { + return err + } + fc.Produce(out.AssetID(), out.Output().Amount()) + } + if !ava.IsSortedTransferableOutputs(t.Outs, c) { + return errOutputsNotSorted + } + + for _, in := range t.Ins { + if err := in.Verify(); err != nil { + return err + } + fc.Consume(in.AssetID(), in.Input().Amount()) + } + if !ava.IsSortedAndUniqueTransferableInputs(t.Ins) { + return errInputsNotSortedUnique + } + + // TODO: Add the Tx fee to the produced side + + if err := fc.Verify(); err != nil { + return err + } + + return t.Metadata.Verify() +} + +// SemanticVerify that this transaction is valid to be spent. 
+func (t *ExportTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { + for i, in := range t.Ins { + cred := creds[i] + + fxIndex, err := vm.getFx(cred) + if err != nil { + return err + } + fx := vm.fxs[fxIndex].Fx + + utxo, err := vm.getUTXO(&in.UTXOID) + if err != nil { + return err + } + + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + + if !vm.verifyFxUsage(fxIndex, inAssetID) { + return errIncompatibleFx + } + + if err := fx.VerifyTransfer(uTx, in.In, cred, utxo.Out); err != nil { + return err + } + } + + for _, out := range t.Outs { + if !out.AssetID().Equals(vm.ava) { + return errWrongAssetID + } + } + + return nil +} + +// ExecuteWithSideEffects writes the batch with any additional side effects +func (t *ExportTx) ExecuteWithSideEffects(vm *VM, batch database.Batch) error { + txID := t.ID() + + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, vm.codec) + for i, out := range t.Outs { + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: uint32(len(t.BaseTx.Outs) + i), + }, + Asset: ava.Asset{ID: out.AssetID()}, + Out: out.Out, + } + if err := state.FundAVMUTXO(utxo); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} diff --git a/vms/avm/export_tx_test.go b/vms/avm/export_tx_test.go new file mode 100644 index 0000000..98df9b0 --- /dev/null +++ b/vms/avm/export_tx_test.go @@ -0,0 +1,389 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestExportTxSerialization(t *testing.T) { + expected := []byte{ + // txID: + 0x00, 0x00, 0x00, 0x04, + // networkID: + 0x00, 0x00, 0x00, 0x02, + // blockchainID: + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + // number of outs: + 0x00, 0x00, 0x00, 0x00, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // utxoID: + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + // output index + 0x00, 0x00, 0x00, 0x00, + // assetID: + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + // input: + // input ID: + 0x00, 0x00, 0x00, 0x05, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, + // num sig indices: + 0x00, 0x00, 0x00, 0x01, + // sig index[0]: + 0x00, 0x00, 0x00, 0x00, + // number of exported outs: + 0x00, 0x00, 0x00, 0x00, + } + + tx := &Tx{UnsignedTx: &ExportTx{BaseTx: BaseTx{ + NetID: 2, + BCID: ids.NewID([32]byte{ + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + }), + 
Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + })}, + Asset: ava.Asset{ID: ids.NewID([32]byte{ + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + })}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }}} + + c := codec.NewDefault() + c.RegisterType(&BaseTx{}) + c.RegisterType(&CreateAssetTx{}) + c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) + c.RegisterType(&secp256k1fx.Credential{}) + + b, err := c.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +// Test issuing an import transaction. 
+func TestIssueExportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaID := genesisTx.ID() + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + vm := &VM{ + ava: avaID, + platform: platformID, + } + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + tx := &Tx{UnsignedTx: &ExportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: avaID, + OutputIndex: 1, + }, + Asset: ava.Asset{ID: avaID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: avaID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != 
common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + + parsedTx := txs[0] + if err := parsedTx.Verify(); err != nil { + t.Fatal(err) + } + parsedTx.Accept() + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxo := ava.UTXOID{ + TxID: tx.ID(), + OutputIndex: 0, + } + utxoID := utxo.InputID() + if _, err := state.AVMUTXO(utxoID); err != nil { + t.Fatal(err) + } +} + +// Test force accepting an import transaction. +func TestClearForceAcceptedExportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaID := genesisTx.ID() + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + vm := &VM{ + ava: avaID, + platform: platformID, + } + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + tx := &Tx{UnsignedTx: &ExportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: avaID, + OutputIndex: 1, + }, + Asset: ava.Asset{ID: avaID}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: avaID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + + parsedTx := txs[0] + if err := parsedTx.Verify(); err != nil { + t.Fatal(err) + } + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxo := ava.UTXOID{ + TxID: tx.ID(), + OutputIndex: 0, + } + utxoID := utxo.InputID() + if err := state.SpendAVMUTXO(utxoID); err != nil { + t.Fatal(err) + } + + vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + parsedTx.Accept() + + smDB = vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state = ava.NewPrefixedState(smDB, vm.codec) + + if _, err := state.AVMUTXO(utxoID); err == nil { + t.Fatalf("should have failed to read the utxo") + } +} diff --git a/vms/avm/factory.go b/vms/avm/factory.go index b76606d..96865c6 100644 --- a/vms/avm/factory.go +++ b/vms/avm/factory.go @@ -13,7 +13,15 @@ var ( ) // Factory ... -type Factory struct{} +type Factory struct { + AVA ids.ID + Platform ids.ID +} // New ... 
-func (f *Factory) New() interface{} { return &VM{} } +func (f *Factory) New() (interface{}, error) { + return &VM{ + ava: f.AVA, + platform: f.Platform, + }, nil +} diff --git a/vms/avm/fx.go b/vms/avm/fx.go index cc4d8e4..432b177 100644 --- a/vms/avm/fx.go +++ b/vms/avm/fx.go @@ -23,27 +23,18 @@ type Fx interface { // provided utxo with no restrictions on the destination. If the transaction // can't spend the output based on the input and credential, a non-nil error // should be returned. - VerifyTransfer(tx, utxo, in, cred interface{}) error + VerifyTransfer(tx, in, cred, utxo interface{}) error // VerifyOperation verifies that the specified transaction can spend the // provided utxos conditioned on the result being restricted to the provided // outputs. If the transaction can't spend the output based on the input and // credential, a non-nil error should be returned. - VerifyOperation(tx interface{}, utxos, ins, creds, outs []interface{}) error + VerifyOperation(tx, op, cred interface{}, utxos []interface{}) error } -// FxTransferable is the interface a feature extension must provide to transfer -// value between features extensions. -type FxTransferable interface { +// FxOperation ... +type FxOperation interface { verify.Verifiable - // Amount returns how much value this output consumes of the asset in its - // transaction. 
- Amount() uint64 -} - -// FxAddressable is the interface a feature extension must provide to be able to -// be tracked as a part of the utxo set for a set of addresses -type FxAddressable interface { - Addresses() [][]byte + Outs() []verify.Verifiable } diff --git a/vms/avm/fx_test.go b/vms/avm/fx_test.go index a0863b2..59639e9 100644 --- a/vms/avm/fx_test.go +++ b/vms/avm/fx_test.go @@ -9,6 +9,6 @@ type testFx struct { func (fx *testFx) Initialize(_ interface{}) error { return fx.initialize } func (fx *testFx) VerifyTransfer(_, _, _, _ interface{}) error { return fx.verifyTransfer } -func (fx *testFx) VerifyOperation(_ interface{}, _, _, _, _ []interface{}) error { +func (fx *testFx) VerifyOperation(_, _, _ interface{}, _ []interface{}) error { return fx.verifyOperation } diff --git a/vms/avm/import_tx.go b/vms/avm/import_tx.go new file mode 100644 index 0000000..09dec6e --- /dev/null +++ b/vms/avm/import_tx.go @@ -0,0 +1,168 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avm + +import ( + "errors" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" +) + +// ImportTx is a transaction that imports an asset from another blockchain. +type ImportTx struct { + BaseTx `serialize:"true"` + + Ins []*ava.TransferableInput `serialize:"true" json:"importedInputs"` // The inputs to this transaction +} + +// InputUTXOs track which UTXOs this transaction is consuming. 
+func (t *ImportTx) InputUTXOs() []*ava.UTXOID { + utxos := t.BaseTx.InputUTXOs() + for _, in := range t.Ins { + in.Symbol = true + utxos = append(utxos, &in.UTXOID) + } + return utxos +} + +// AssetIDs returns the IDs of the assets this transaction depends on +func (t *ImportTx) AssetIDs() ids.Set { + assets := t.BaseTx.AssetIDs() + for _, in := range t.Ins { + assets.Add(in.AssetID()) + } + return assets +} + +// NumCredentials returns the number of expected credentials +func (t *ImportTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.Ins) } + +var ( + errNoImportInputs = errors.New("no import inputs") +) + +// SyntacticVerify that this transaction is well-formed. +func (t *ImportTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { + switch { + case t == nil: + return errNilTx + case t.NetID != ctx.NetworkID: + return errWrongNetworkID + case !t.BCID.Equals(ctx.ChainID): + return errWrongChainID + case len(t.Ins) == 0: + return errNoImportInputs + } + + fc := ava.NewFlowChecker() + for _, out := range t.Outs { + if err := out.Verify(); err != nil { + return err + } + fc.Produce(out.AssetID(), out.Output().Amount()) + } + if !ava.IsSortedTransferableOutputs(t.Outs, c) { + return errOutputsNotSorted + } + + for _, in := range t.BaseTx.Ins { + if err := in.Verify(); err != nil { + return err + } + fc.Consume(in.AssetID(), in.Input().Amount()) + } + if !ava.IsSortedAndUniqueTransferableInputs(t.BaseTx.Ins) { + return errInputsNotSortedUnique + } + + for _, in := range t.Ins { + if err := in.Verify(); err != nil { + return err + } + fc.Consume(in.AssetID(), in.Input().Amount()) + } + if !ava.IsSortedAndUniqueTransferableInputs(t.Ins) { + return errInputsNotSortedUnique + } + + // TODO: Add the Tx fee to the produced side + + return fc.Verify() +} + +// SemanticVerify that this transaction is well-formed. 
+func (t *ImportTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { + if err := t.BaseTx.SemanticVerify(vm, uTx, creds); err != nil { + return err + } + + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + state := ava.NewPrefixedState(smDB, vm.codec) + + offset := t.BaseTx.NumCredentials() + for i, in := range t.Ins { + cred := creds[i+offset] + + fxIndex, err := vm.getFx(cred) + if err != nil { + return err + } + fx := vm.fxs[fxIndex].Fx + + utxoID := in.UTXOID.InputID() + utxo, err := state.PlatformUTXO(utxoID) + if err != nil { + return err + } + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + if !utxoAssetID.Equals(vm.ava) { + return errWrongAssetID + } + + if !vm.verifyFxUsage(fxIndex, inAssetID) { + return errIncompatibleFx + } + + if err := fx.VerifyTransfer(uTx, in.In, cred, utxo.Out); err != nil { + return err + } + } + return nil +} + +// ExecuteWithSideEffects writes the batch with any additional side effects +func (t *ImportTx) ExecuteWithSideEffects(vm *VM, batch database.Batch) error { + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, vm.codec) + for _, in := range t.Ins { + utxoID := in.UTXOID.InputID() + if err := state.SpendPlatformUTXO(utxoID); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} diff --git a/vms/avm/import_tx_test.go b/vms/avm/import_tx_test.go new file mode 100644 index 0000000..e0f5605 --- /dev/null +++ b/vms/avm/import_tx_test.go @@ -0,0 +1,354 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avm + +import ( + "bytes" + "testing" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestImportTxSerialization(t *testing.T) { + expected := []byte{ + // txID: + 0x00, 0x00, 0x00, 0x03, + // networkID: + 0x00, 0x00, 0x00, 0x02, + // blockchainID: + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + // number of base outs: + 0x00, 0x00, 0x00, 0x00, + // number of base inputs: + 0x00, 0x00, 0x00, 0x00, + // number of inputs: + 0x00, 0x00, 0x00, 0x01, + // utxoID: + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + // output index + 0x00, 0x00, 0x00, 0x00, + // assetID: + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + // input: + // input ID: + 0x00, 0x00, 0x00, 0x05, + // amount: + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0xe8, + // num sig indices: + 0x00, 0x00, 0x00, 0x01, + // sig index[0]: + 0x00, 0x00, 0x00, 0x00, + } + + tx := &Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: 2, + BCID: ids.NewID([32]byte{ + 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, + 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, + 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, + 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, + }), + }, + Ins: 
[]*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + })}, + Asset: ava.Asset{ID: ids.NewID([32]byte{ + 0x1f, 0x3f, 0x5f, 0x7f, 0x9e, 0xbe, 0xde, 0xfe, + 0x1d, 0x3d, 0x5d, 0x7d, 0x9c, 0xbc, 0xdc, 0xfc, + 0x1b, 0x3b, 0x5b, 0x7b, 0x9a, 0xba, 0xda, 0xfa, + 0x19, 0x39, 0x59, 0x79, 0x98, 0xb8, 0xd8, 0xf8, + })}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }} + + c := setupCodec() + b, err := c.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + result := tx.Bytes() + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } +} + +// Test issuing an import transaction. +func TestIssueImportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + avaID := genesisTx.ID() + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + vm := &VM{ + ava: avaID, + platform: platformID, + } + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + utxoID := ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + }), + } 
+ + tx := &Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: utxoID, + Asset: ava.Asset{ID: avaID}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err == nil { + t.Fatal(err) + } + + // Provide the platform UTXO: + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + + utxo := &ava.UTXO{ + UTXOID: utxoID, + Asset: ava.Asset{ID: avaID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + } + + state := ava.NewPrefixedState(smDB, vm.codec) + if err := state.FundPlatformUTXO(utxo); err != nil { + t.Fatal(err) + } + + vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil { + t.Fatalf("should have issued the transaction correctly but errored: %s", err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + txs := vm.PendingTxs() + if len(txs) != 1 { + t.Fatalf("Should have returned %d tx(s)", 1) + } + + parsedTx := txs[0] + parsedTx.Accept() + + smDB = vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state = ava.NewPrefixedState(smDB, vm.codec) + if _, err := state.PlatformUTXO(utxoID.InputID()); err == nil { + t.Fatalf("shouldn't 
have been able to read the utxo") + } +} + +// Test force accepting an import transaction. +func TestForceAcceptImportTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = networkID + ctx.ChainID = chainID + ctx.SharedMemory = sm.NewBlockchainSharedMemory(chainID) + + platformID := ids.Empty.Prefix(0) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{platform: platformID} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + key := keys[0] + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + utxoID := ava.UTXOID{ + TxID: ids.NewID([32]byte{ + 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, + 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, + 0x0b, 0x2b, 0x4b, 0x6b, 0x8a, 0xaa, 0xca, 0xea, + 0x09, 0x29, 0x49, 0x69, 0x88, 0xa8, 0xc8, 0xe8, + }), + } + + tx := &Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: utxoID, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 1000, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(tx) + if err != nil { + t.Fatal(err) + } + tx.Initialize(b) + + parsedTx, err := vm.ParseTx(tx.Bytes()) + if err != nil { + t.Fatal(err) + } + + if err := 
parsedTx.Verify(); err == nil { + t.Fatalf("Should have failed verification") + } + + parsedTx.Accept() + + smDB := vm.ctx.SharedMemory.GetDatabase(platformID) + defer vm.ctx.SharedMemory.ReleaseDatabase(platformID) + + state := ava.NewPrefixedState(smDB, vm.codec) + utxoSource := utxoID.InputID() + if _, err := state.PlatformUTXO(utxoSource); err == nil { + t.Fatalf("shouldn't have been able to read the utxo") + } +} diff --git a/vms/avm/initial_state.go b/vms/avm/initial_state.go index 58dae84..c3d4b16 100644 --- a/vms/avm/initial_state.go +++ b/vms/avm/initial_state.go @@ -19,8 +19,8 @@ var ( // InitialState ... type InitialState struct { - FxID uint32 `serialize:"true"` - Outs []verify.Verifiable `serialize:"true"` + FxID uint32 `serialize:"true" json:"fxID"` + Outs []verify.Verifiable `serialize:"true" json:"outputs"` } // Verify implements the verify.Verifiable interface diff --git a/vms/avm/initial_state_test.go b/vms/avm/initial_state_test.go index 267947e..67c4b15 100644 --- a/vms/avm/initial_state_test.go +++ b/vms/avm/initial_state_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" @@ -52,14 +53,12 @@ func TestInitialStateVerifyNilOutput(t *testing.T) { func TestInitialStateVerifyInvalidOutput(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&testVerifiable{}) + c.RegisterType(&ava.TestVerifiable{}) numFxs := 1 is := InitialState{ FxID: 0, - Outs: []verify.Verifiable{ - &testVerifiable{err: errors.New("")}, - }, + Outs: []verify.Verifiable{&ava.TestVerifiable{Err: errors.New("")}}, } if err := is.Verify(c, numFxs); err == nil { t.Fatalf("Should have errored due to an invalid output") @@ -68,14 +67,14 @@ func TestInitialStateVerifyInvalidOutput(t *testing.T) { func 
TestInitialStateVerifyUnsortedOutputs(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&TestTransferable{}) + c.RegisterType(&ava.TestTransferable{}) numFxs := 1 is := InitialState{ FxID: 0, Outs: []verify.Verifiable{ - &TestTransferable{Val: 1}, - &TestTransferable{Val: 0}, + &ava.TestTransferable{Val: 1}, + &ava.TestTransferable{Val: 0}, }, } if err := is.Verify(c, numFxs); err == nil { diff --git a/vms/avm/operables.go b/vms/avm/operables.go deleted file mode 100644 index 7aac3a0..0000000 --- a/vms/avm/operables.go +++ /dev/null @@ -1,116 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "bytes" - "errors" - "sort" - - "github.com/ava-labs/gecko/utils" - "github.com/ava-labs/gecko/vms/components/codec" - "github.com/ava-labs/gecko/vms/components/verify" -) - -var ( - errNilOperableOutput = errors.New("nil operable output is not valid") - errNilOperableFxOutput = errors.New("nil operable feature extension output is not valid") - - errNilOperableInput = errors.New("nil operable input is not valid") - errNilOperableFxInput = errors.New("nil operable feature extension input is not valid") -) - -// OperableOutput ... -type OperableOutput struct { - Out verify.Verifiable `serialize:"true"` -} - -// Output returns the feature extension output that this Output is using. 
-func (out *OperableOutput) Output() verify.Verifiable { return out.Out } - -// Verify implements the verify.Verifiable interface -func (out *OperableOutput) Verify() error { - switch { - case out == nil: - return errNilOperableOutput - case out.Out == nil: - return errNilOperableFxOutput - default: - return out.Out.Verify() - } -} - -type innerSortOperableOutputs struct { - outs []*OperableOutput - codec codec.Codec -} - -func (outs *innerSortOperableOutputs) Less(i, j int) bool { - iOut := outs.outs[i] - jOut := outs.outs[j] - - iBytes, err := outs.codec.Marshal(&iOut.Out) - if err != nil { - return false - } - jBytes, err := outs.codec.Marshal(&jOut.Out) - if err != nil { - return false - } - return bytes.Compare(iBytes, jBytes) == -1 -} -func (outs *innerSortOperableOutputs) Len() int { return len(outs.outs) } -func (outs *innerSortOperableOutputs) Swap(i, j int) { o := outs.outs; o[j], o[i] = o[i], o[j] } - -func sortOperableOutputs(outs []*OperableOutput, c codec.Codec) { - sort.Sort(&innerSortOperableOutputs{outs: outs, codec: c}) -} -func isSortedOperableOutputs(outs []*OperableOutput, c codec.Codec) bool { - return sort.IsSorted(&innerSortOperableOutputs{outs: outs, codec: c}) -} - -// OperableInput ... -type OperableInput struct { - UTXOID `serialize:"true"` - - In verify.Verifiable `serialize:"true"` -} - -// Input returns the feature extension input that this Input is using. 
-func (in *OperableInput) Input() verify.Verifiable { return in.In } - -// Verify implements the verify.Verifiable interface -func (in *OperableInput) Verify() error { - switch { - case in == nil: - return errNilOperableInput - case in.In == nil: - return errNilOperableFxInput - default: - return verify.All(&in.UTXOID, in.In) - } -} - -type innerSortOperableInputs []*OperableInput - -func (ins innerSortOperableInputs) Less(i, j int) bool { - iID, iIndex := ins[i].InputSource() - jID, jIndex := ins[j].InputSource() - - switch bytes.Compare(iID.Bytes(), jID.Bytes()) { - case -1: - return true - case 0: - return iIndex < jIndex - default: - return false - } -} -func (ins innerSortOperableInputs) Len() int { return len(ins) } -func (ins innerSortOperableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] } - -func sortOperableInputs(ins []*OperableInput) { sort.Sort(innerSortOperableInputs(ins)) } -func isSortedAndUniqueOperableInputs(ins []*OperableInput) bool { - return utils.IsSortedAndUnique(innerSortOperableInputs(ins)) -} diff --git a/vms/avm/operables_test.go b/vms/avm/operables_test.go deleted file mode 100644 index 98e0996..0000000 --- a/vms/avm/operables_test.go +++ /dev/null @@ -1,175 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package avm - -import ( - "testing" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/codec" -) - -func TestOperableOutputVerifyNil(t *testing.T) { - oo := (*OperableOutput)(nil) - if err := oo.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable output") - } -} - -func TestOperableOutputVerifyNilFx(t *testing.T) { - oo := &OperableOutput{} - if err := oo.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable fx output") - } -} - -func TestOperableOutputVerify(t *testing.T) { - oo := &OperableOutput{ - Out: &testVerifiable{}, - } - if err := oo.Verify(); err != nil { - t.Fatal(err) - } - if oo.Output() != oo.Out { - t.Fatalf("Should have returned the fx output") - } -} - -func TestOperableOutputSorting(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&TestTransferable{}) - c.RegisterType(&testVerifiable{}) - - outs := []*OperableOutput{ - &OperableOutput{ - Out: &TestTransferable{Val: 1}, - }, - &OperableOutput{ - Out: &TestTransferable{Val: 0}, - }, - &OperableOutput{ - Out: &TestTransferable{Val: 0}, - }, - &OperableOutput{ - Out: &testVerifiable{}, - }, - } - - if isSortedOperableOutputs(outs, c) { - t.Fatalf("Shouldn't be sorted") - } - sortOperableOutputs(outs, c) - if !isSortedOperableOutputs(outs, c) { - t.Fatalf("Should be sorted") - } - if result := outs[0].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[1].Out.(*TestTransferable).Val; result != 0 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if result := outs[2].Out.(*TestTransferable).Val; result != 1 { - t.Fatalf("Val expected: %d ; result: %d", 0, result) - } - if _, ok := outs[3].Out.(*testVerifiable); !ok { - t.Fatalf("testVerifiable expected") - } -} - -func TestOperableInputVerifyNil(t *testing.T) { - oi := (*OperableInput)(nil) - if err := oi.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable input") 
- } -} - -func TestOperableInputVerifyNilFx(t *testing.T) { - oi := &OperableInput{} - if err := oi.Verify(); err == nil { - t.Fatalf("Should have errored due to nil operable fx input") - } -} - -func TestOperableInputVerify(t *testing.T) { - oi := &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - }, - In: &testVerifiable{}, - } - if err := oi.Verify(); err != nil { - t.Fatal(err) - } - if oi.Input() != oi.In { - t.Fatalf("Should have returned the fx input") - } -} - -func TestOperableInputSorting(t *testing.T) { - ins := []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, - }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{1}), - OutputIndex: 1, - }, - In: &testVerifiable{}, - }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - In: &testVerifiable{}, - }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.NewID([32]byte{1}), - OutputIndex: 0, - }, - In: &testVerifiable{}, - }, - } - if isSortedAndUniqueOperableInputs(ins) { - t.Fatalf("Shouldn't be sorted") - } - sortOperableInputs(ins) - if !isSortedAndUniqueOperableInputs(ins) { - t.Fatalf("Should be sorted") - } - if result := ins[0].OutputIndex; result != 0 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 0, result) - } - if result := ins[1].OutputIndex; result != 1 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 1, result) - } - if result := ins[2].OutputIndex; result != 0 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 0, result) - } - if result := ins[3].OutputIndex; result != 1 { - t.Fatalf("OutputIndex expected: %d ; result: %d", 1, result) - } - if result := ins[0].TxID; !result.Equals(ids.Empty) { - t.Fatalf("OutputIndex expected: %s ; result: %s", ids.Empty, result) - } - if result := ins[0].TxID; !result.Equals(ids.Empty) { - t.Fatalf("OutputIndex expected: %s ; result: %s", ids.Empty, result) - } - ins = append(ins, &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, 
- OutputIndex: 1, - }, - In: &testVerifiable{}, - }) - if isSortedAndUniqueOperableInputs(ins) { - t.Fatalf("Shouldn't be unique") - } -} diff --git a/vms/avm/operation.go b/vms/avm/operation.go index 516e8fa..3b5fc9a 100644 --- a/vms/avm/operation.go +++ b/vms/avm/operation.go @@ -9,20 +9,23 @@ import ( "sort" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( - errNilOperation = errors.New("nil operation is not valid") - errEmptyOperation = errors.New("empty operation is not valid") + errNilOperation = errors.New("nil operation is not valid") + errNilFxOperation = errors.New("nil fx operation is not valid") + errNotSortedAndUniqueUTXOIDs = errors.New("utxo IDs not sorted and unique") ) // Operation ... type Operation struct { - Asset `serialize:"true"` + ava.Asset `serialize:"true"` - Ins []*OperableInput `serialize:"true"` - Outs []*OperableOutput `serialize:"true"` + UTXOIDs []*ava.UTXOID `serialize:"true" json:"inputIDs"` + Op FxOperation `serialize:"true" json:"operation"` } // Verify implements the verify.Verifiable interface @@ -30,29 +33,13 @@ func (op *Operation) Verify(c codec.Codec) error { switch { case op == nil: return errNilOperation - case len(op.Ins) == 0 && len(op.Outs) == 0: - return errEmptyOperation + case op.Op == nil: + return errNilFxOperation + case !ava.IsSortedAndUniqueUTXOIDs(op.UTXOIDs): + return errNotSortedAndUniqueUTXOIDs + default: + return verify.All(&op.Asset, op.Op) } - - for _, in := range op.Ins { - if err := in.Verify(); err != nil { - return err - } - } - if !isSortedAndUniqueOperableInputs(op.Ins) { - return errInputsNotSortedUnique - } - - for _, out := range op.Outs { - if err := out.Verify(); err != nil { - return err - } - } - if !isSortedOperableOutputs(op.Outs, c) { - return errOutputsNotSorted - } - - return op.Asset.Verify() } type innerSortOperation struct { diff --git 
a/vms/avm/operation_test.go b/vms/avm/operation_test.go index 9215448..8948388 100644 --- a/vms/avm/operation_test.go +++ b/vms/avm/operation_test.go @@ -7,9 +7,19 @@ import ( "testing" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) +type testOperable struct { + ava.TestTransferable `serialize:"true"` + + Outputs []verify.Verifiable `serialize:"true"` +} + +func (o *testOperable) Outs() []verify.Verifiable { return o.Outputs } + func TestOperationVerifyNil(t *testing.T) { c := codec.NewDefault() op := (*Operation)(nil) @@ -21,106 +31,45 @@ func TestOperationVerifyNil(t *testing.T) { func TestOperationVerifyEmpty(t *testing.T) { c := codec.NewDefault() op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, + Asset: ava.Asset{ID: ids.Empty}, } if err := op.Verify(c); err == nil { t.Fatalf("Should have errored due to empty operation") } } -func TestOperationVerifyInvalidInput(t *testing.T) { +func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { c := codec.NewDefault() op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{}, - }, - } - if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to an invalid input") - } -} - -func TestOperationVerifyInvalidOutput(t *testing.T) { - c := codec.NewDefault() - op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Outs: []*OperableOutput{ - &OperableOutput{}, - }, - } - if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to an invalid output") - } -} - -func TestOperationVerifyInputsNotSorted(t *testing.T) { - c := codec.NewDefault() - op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: 
ids.Empty, + OutputIndex: 1, }, - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - In: &testVerifiable{}, + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, }, }, + Op: &testOperable{}, } if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to unsorted inputs") - } -} - -func TestOperationVerifyOutputsNotSorted(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&TestTransferable{}) - - op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Outs: []*OperableOutput{ - &OperableOutput{ - Out: &TestTransferable{Val: 1}, - }, - &OperableOutput{ - Out: &TestTransferable{Val: 0}, - }, - }, - } - if err := op.Verify(c); err == nil { - t.Fatalf("Should have errored due to unsorted outputs") + t.Fatalf("Should have errored due to unsorted utxoIDs") } } func TestOperationVerify(t *testing.T) { c := codec.NewDefault() op := &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Outs: []*OperableOutput{ - &OperableOutput{ - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, }, }, + Op: &testOperable{}, } if err := op.Verify(c); err != nil { t.Fatal(err) @@ -129,36 +78,28 @@ func TestOperationVerify(t *testing.T) { func TestOperationSorting(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&testVerifiable{}) + c.RegisterType(&testOperable{}) ops := []*Operation{ &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, }, }, + Op: &testOperable{}, }, &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + 
TxID: ids.Empty, + OutputIndex: 0, }, }, + Op: &testOperable{}, }, } if isSortedAndUniqueOperations(ops, c) { @@ -169,18 +110,14 @@ func TestOperationSorting(t *testing.T) { t.Fatalf("Should be sorted") } ops = append(ops, &Operation{ - Asset: Asset{ - ID: ids.Empty, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + UTXOIDs: []*ava.UTXOID{ + &ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 1, }, }, + Op: &testOperable{}, }) if isSortedAndUniqueOperations(ops, c) { t.Fatalf("Shouldn't be unique") diff --git a/vms/avm/operation_tx.go b/vms/avm/operation_tx.go index 07d8947..9384f8d 100644 --- a/vms/avm/operation_tx.go +++ b/vms/avm/operation_tx.go @@ -8,11 +8,14 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( errOperationsNotSortedUnique = errors.New("operations not sorted and unique") + errNoOperations = errors.New("an operationTx must have at least one operation") errDoubleSpend = errors.New("inputs attempt to double spend an input") ) @@ -20,7 +23,7 @@ var ( // OperationTx is a transaction with no credentials. type OperationTx struct { BaseTx `serialize:"true"` - Ops []*Operation `serialize:"true"` + Ops []*Operation `serialize:"true" json:"operations"` } // Operations track which ops this transaction is performing. The returned array @@ -28,12 +31,10 @@ type OperationTx struct { func (t *OperationTx) Operations() []*Operation { return t.Ops } // InputUTXOs track which UTXOs this transaction is consuming. 
-func (t *OperationTx) InputUTXOs() []*UTXOID { +func (t *OperationTx) InputUTXOs() []*ava.UTXOID { utxos := t.BaseTx.InputUTXOs() for _, op := range t.Ops { - for _, in := range op.Ins { - utxos = append(utxos, &in.UTXOID) - } + utxos = append(utxos, op.UTXOIDs...) } return utxos } @@ -47,23 +48,24 @@ func (t *OperationTx) AssetIDs() ids.Set { return assets } +// NumCredentials returns the number of expected credentials +func (t *OperationTx) NumCredentials() int { return t.BaseTx.NumCredentials() + len(t.Ops) } + // UTXOs returns the UTXOs transaction is producing. -func (t *OperationTx) UTXOs() []*UTXO { +func (t *OperationTx) UTXOs() []*ava.UTXO { txID := t.ID() utxos := t.BaseTx.UTXOs() for _, op := range t.Ops { asset := op.AssetID() - for _, out := range op.Outs { - utxos = append(utxos, &UTXO{ - UTXOID: UTXOID{ + for _, out := range op.Op.Outs() { + utxos = append(utxos, &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: uint32(len(utxos)), }, - Asset: Asset{ - ID: asset, - }, - Out: out.Out, + Asset: ava.Asset{ID: asset}, + Out: out, }) } } @@ -76,6 +78,8 @@ func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs i switch { case t == nil: return errNilTx + case len(t.Ops) == 0: + return errNoOperations } if err := t.BaseTx.SyntacticVerify(ctx, c, numFxs); err != nil { @@ -91,8 +95,8 @@ func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs i if err := op.Verify(c); err != nil { return err } - for _, in := range op.Ins { - inputID := in.InputID() + for _, utxoID := range op.UTXOIDs { + inputID := utxoID.InputID() if inputs.Contains(inputID) { return errDoubleSpend } @@ -106,77 +110,30 @@ func (t *OperationTx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs i } // SemanticVerify that this transaction is well-formed. 
-func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error { +func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error { if err := t.BaseTx.SemanticVerify(vm, uTx, creds); err != nil { return err } - offset := len(t.BaseTx.Ins) - for _, op := range t.Ops { + + offset := t.BaseTx.NumCredentials() + for i, op := range t.Ops { opAssetID := op.AssetID() utxos := []interface{}{} - ins := []interface{}{} - credIntfs := []interface{}{} - outs := []interface{}{} - - for i, in := range op.Ins { - ins = append(ins, in.In) - - cred := creds[i+offset] - credIntfs = append(credIntfs, cred.Cred) - - utxoID := in.InputID() - utxo, err := vm.state.UTXO(utxoID) - if err == nil { - utxoAssetID := utxo.AssetID() - if !utxoAssetID.Equals(opAssetID) { - return errAssetIDMismatch - } - - utxos = append(utxos, utxo.Out) - continue + for _, utxoID := range op.UTXOIDs { + utxo, err := vm.getUTXO(utxoID) + if err != nil { + return err } - inputTx, inputIndex := in.InputSource() - parent := UniqueTx{ - vm: vm, - txID: inputTx, - } - - if err := parent.Verify(); err != nil { - return errMissingUTXO - } else if status := parent.Status(); status.Decided() { - return errMissingUTXO - } - - parentUTXOs := parent.UTXOs() - - if uint32(len(parentUTXOs)) <= inputIndex || int(inputIndex) < 0 { - return errInvalidUTXO - } - - utxo = parentUTXOs[int(inputIndex)] - utxoAssetID := utxo.AssetID() if !utxoAssetID.Equals(opAssetID) { return errAssetIDMismatch } utxos = append(utxos, utxo.Out) } - offset += len(op.Ins) - for _, out := range op.Outs { - outs = append(outs, out.Out) - } - var fxObj interface{} - switch { - case len(ins) > 0: - fxObj = ins[0] - case len(outs) > 0: - fxObj = outs[0] - } - - fxIndex, err := vm.getFx(fxObj) + fxIndex, err := vm.getFx(op.Op) if err != nil { return err } @@ -186,8 +143,7 @@ func (t *OperationTx) SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) return errIncompatibleFx } - err = 
fx.VerifyOperation(uTx, utxos, ins, credIntfs, outs) - if err != nil { + if err := fx.VerifyOperation(uTx, op.Op, creds[offset+i], utxos); err != nil { return err } } diff --git a/vms/avm/prefixed_state.go b/vms/avm/prefixed_state.go index 1314857..8a1898d 100644 --- a/vms/avm/prefixed_state.go +++ b/vms/avm/prefixed_state.go @@ -8,6 +8,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/vms/components/ava" ) const ( @@ -37,31 +38,31 @@ func (s *prefixedState) UniqueTx(tx *UniqueTx) *UniqueTx { } // Tx attempts to load a transaction from storage. -func (s *prefixedState) Tx(id ids.ID) (*Tx, error) { return s.state.Tx(s.uniqueID(id, txID, s.tx)) } +func (s *prefixedState) Tx(id ids.ID) (*Tx, error) { return s.state.Tx(uniqueID(id, txID, s.tx)) } // SetTx saves the provided transaction to storage. func (s *prefixedState) SetTx(id ids.ID, tx *Tx) error { - return s.state.SetTx(s.uniqueID(id, txID, s.tx), tx) + return s.state.SetTx(uniqueID(id, txID, s.tx), tx) } // UTXO attempts to load a utxo from storage. -func (s *prefixedState) UTXO(id ids.ID) (*UTXO, error) { - return s.state.UTXO(s.uniqueID(id, utxoID, s.utxo)) +func (s *prefixedState) UTXO(id ids.ID) (*ava.UTXO, error) { + return s.state.UTXO(uniqueID(id, utxoID, s.utxo)) } // SetUTXO saves the provided utxo to storage. -func (s *prefixedState) SetUTXO(id ids.ID, utxo *UTXO) error { - return s.state.SetUTXO(s.uniqueID(id, utxoID, s.utxo), utxo) +func (s *prefixedState) SetUTXO(id ids.ID, utxo *ava.UTXO) error { + return s.state.SetUTXO(uniqueID(id, utxoID, s.utxo), utxo) } // Status returns the status of the provided transaction id from storage. func (s *prefixedState) Status(id ids.ID) (choices.Status, error) { - return s.state.Status(s.uniqueID(id, txStatusID, s.txStatus)) + return s.state.Status(uniqueID(id, txStatusID, s.txStatus)) } // SetStatus saves the provided status to storage. 
func (s *prefixedState) SetStatus(id ids.ID, status choices.Status) error { - return s.state.SetStatus(s.uniqueID(id, txStatusID, s.txStatus), status) + return s.state.SetStatus(uniqueID(id, txStatusID, s.txStatus), status) } // DBInitialized returns the status of this database. If the database is @@ -76,21 +77,12 @@ func (s *prefixedState) SetDBInitialized(status choices.Status) error { // Funds returns the mapping from the 32 byte representation of an address to a // list of utxo IDs that reference the address. func (s *prefixedState) Funds(id ids.ID) ([]ids.ID, error) { - return s.state.IDs(s.uniqueID(id, fundsID, s.funds)) + return s.state.IDs(uniqueID(id, fundsID, s.funds)) } // SetFunds saves the mapping from address to utxo IDs to storage. func (s *prefixedState) SetFunds(id ids.ID, idSlice []ids.ID) error { - return s.state.SetIDs(s.uniqueID(id, fundsID, s.funds), idSlice) -} - -func (s *prefixedState) uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { - if cachedIDIntf, found := cacher.Get(id); found { - return cachedIDIntf.(ids.ID) - } - uID := id.Prefix(prefix) - cacher.Put(id, uID) - return uID + return s.state.SetIDs(uniqueID(id, fundsID, s.funds), idSlice) } // SpendUTXO consumes the provided utxo. 
@@ -103,7 +95,7 @@ func (s *prefixedState) SpendUTXO(utxoID ids.ID) error { return err } - addressable, ok := utxo.Out.(FxAddressable) + addressable, ok := utxo.Out.(ava.Addressable) if !ok { return nil } @@ -126,13 +118,13 @@ func (s *prefixedState) removeUTXO(addrs [][]byte, utxoID ids.ID) error { } // FundUTXO adds the provided utxo to the database -func (s *prefixedState) FundUTXO(utxo *UTXO) error { +func (s *prefixedState) FundUTXO(utxo *ava.UTXO) error { utxoID := utxo.InputID() if err := s.SetUTXO(utxoID, utxo); err != nil { return err } - addressable, ok := utxo.Out.(FxAddressable) + addressable, ok := utxo.Out.(ava.Addressable) if !ok { return nil } diff --git a/vms/avm/prefixed_state_test.go b/vms/avm/prefixed_state_test.go index 2b5d739..8a69f91 100644 --- a/vms/avm/prefixed_state_test.go +++ b/vms/avm/prefixed_state_test.go @@ -11,6 +11,7 @@ import ( "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -18,40 +19,36 @@ func TestPrefixedSetsAndGets(t *testing.T) { vm := GenesisVM(t) state := vm.state - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + Out: &ava.TestVerifiable{}, } - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: 
ids.Empty, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(tx.UnsignedTx) if err != nil { @@ -66,11 +63,9 @@ func TestPrefixedSetsAndGets(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -118,15 +113,15 @@ func TestPrefixedFundingNoAddresses(t *testing.T) { vm := GenesisVM(t) state := vm.state - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + Out: &ava.TestVerifiable{}, } if err := state.FundUTXO(utxo); err != nil { @@ -143,12 +138,12 @@ func TestPrefixedFundingAddresses(t *testing.T) { vm.codec.RegisterType(&testAddressable{}) - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, + Asset: ava.Asset{ID: ids.Empty}, Out: &testAddressable{ Addrs: [][]byte{ []byte{0}, diff --git a/vms/avm/service.go b/vms/avm/service.go index 36408be..a96200d 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -8,16 +8,15 @@ import ( "errors" "fmt" "net/http" - "sort" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/utils" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/json" 
"github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -56,7 +55,7 @@ type IssueTxReply struct { func (service *Service) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { service.vm.ctx.Log.Verbo("IssueTx called with %s", args.Tx) - txID, err := service.vm.IssueTx(args.Tx.Bytes) + txID, err := service.vm.IssueTx(args.Tx.Bytes, nil) if err != nil { return err } @@ -75,6 +74,10 @@ type GetTxStatusReply struct { Status choices.Status `json:"status"` } +var ( + errNilTxID = errors.New("nil transaction ID") +) + // GetTxStatus returns the status of the specified transaction func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { service.vm.ctx.Log.Verbo("GetTxStatus called with %s", args.TxID) @@ -163,7 +166,7 @@ func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescr if status := tx.Status(); !status.Fetched() { return errUnknownAssetID } - createAssetTx, ok := tx.t.tx.UnsignedTx.(*CreateAssetTx) + createAssetTx, ok := tx.UnsignedTx.(*CreateAssetTx) if !ok { return errTxNotCreateAsset } @@ -214,7 +217,7 @@ func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply for _, utxo := range utxos { if utxo.AssetID().Equals(assetID) { - transferable, ok := utxo.Out.(FxTransferable) + transferable, ok := utxo.Out.(ava.Transferable) if !ok { continue } @@ -303,7 +306,7 @@ func (service *Service) CreateFixedCapAsset(r *http.Request, args *CreateFixedCa return fmt.Errorf("problem creating transaction: %w", err) } - assetID, err := service.vm.IssueTx(b) + assetID, err := service.vm.IssueTx(b, nil) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -391,7 +394,7 @@ func (service *Service) CreateVariableCapAsset(r *http.Request, args *CreateVari return fmt.Errorf("problem creating transaction: %w", err) } 
- assetID, err := service.vm.IssueTx(b) + assetID, err := service.vm.IssueTx(b, nil) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -596,7 +599,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) amountSpent := uint64(0) time := service.vm.clock.Unix() - ins := []*TransferableInput{} + ins := []*ava.TransferableInput{} keys := [][]*crypto.PrivateKeySECP256K1R{} for _, utxo := range utxos { if !utxo.AssetID().Equals(assetID) { @@ -606,7 +609,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) if err != nil { continue } - input, ok := inputIntf.(FxTransferable) + input, ok := inputIntf.(ava.Transferable) if !ok { continue } @@ -616,9 +619,9 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) } amountSpent = spent - in := &TransferableInput{ + in := &ava.TransferableInput{ UTXOID: utxo.UTXOID, - Asset: Asset{ID: assetID}, + Asset: ava.Asset{ID: assetID}, In: input, } @@ -634,44 +637,36 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) return errInsufficientFunds } - sortTransferableInputsWithSigners(ins, keys) + ava.SortTransferableInputsWithSigners(ins, keys) - outs := []*TransferableOutput{ - &TransferableOutput{ - Asset: Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: uint64(args.Amount), - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, + outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, }, }, - } + }} if amountSpent > uint64(args.Amount) { changeAddr := kc.Keys[0].PublicKey().Address() - outs = append(outs, - &TransferableOutput{ - Asset: Asset{ - ID: assetID, - }, - Out: &secp256k1fx.TransferOutput{ - Amt: amountSpent - 
uint64(args.Amount), - Locktime: 0, - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{changeAddr}, - }, + outs = append(outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, }, }, - ) + }) } - sortTransferableOutputs(outs, service.vm.codec) + ava.SortTransferableOutputs(outs, service.vm.codec) tx := Tx{ UnsignedTx: &BaseTx{ @@ -700,7 +695,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) cred.Sigs = append(cred.Sigs, fixedSig) } - tx.Creds = append(tx.Creds, &Credential{Cred: cred}) + tx.Creds = append(tx.Creds, cred) } b, err := service.vm.codec.Marshal(tx) @@ -708,7 +703,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) return fmt.Errorf("problem creating transaction: %w", err) } - txID, err := service.vm.IssueTx(b) + txID, err := service.vm.IssueTx(b, nil) if err != nil { return fmt.Errorf("problem issuing transaction: %w", err) } @@ -717,37 +712,6 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) return nil } -type innerSortTransferableInputsWithSigners struct { - ins []*TransferableInput - signers [][]*crypto.PrivateKeySECP256K1R -} - -func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { - iID, iIndex := ins.ins[i].InputSource() - jID, jIndex := ins.ins[j].InputSource() - - switch bytes.Compare(iID.Bytes(), jID.Bytes()) { - case -1: - return true - case 0: - return iIndex < jIndex - default: - return false - } -} -func (ins *innerSortTransferableInputsWithSigners) Len() int { return len(ins.ins) } -func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { - ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] - ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] -} - -func 
sortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) { - sort.Sort(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) -} -func isSortedAndUniqueTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) bool { - return utils.IsSortedAndUnique(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) -} - // CreateMintTxArgs are arguments for passing into CreateMintTx requests type CreateMintTxArgs struct { Amount json.Uint64 `json:"amount"` @@ -823,47 +787,35 @@ func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, re continue } - tx := Tx{ - UnsignedTx: &OperationTx{ - BaseTx: BaseTx{ - NetID: service.vm.ctx.NetworkID, - BCID: service.vm.ctx.ChainID, - }, - Ops: []*Operation{ - &Operation{ - Asset: Asset{ - ID: assetID, + tx := Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + }, + Ops: []*Operation{ + &Operation{ + Asset: ava.Asset{ID: assetID}, + UTXOIDs: []*ava.UTXOID{ + &utxo.UTXOID, + }, + Op: &secp256k1fx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: sigs, }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: utxo.UTXOID, - In: &secp256k1fx.MintInput{ - Input: secp256k1fx.Input{ - SigIndices: sigs, - }, - }, - }, + MintOutput: secp256k1fx.MintOutput{ + OutputOwners: out.OutputOwners, }, - Outs: []*OperableOutput{ - &OperableOutput{ - &secp256k1fx.MintOutput{ - OutputOwners: out.OutputOwners, - }, - }, - &OperableOutput{ - &secp256k1fx.TransferOutput{ - Amt: uint64(args.Amount), - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{to}, - }, - }, + TransferOutput: secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, }, }, }, }, }, - } + }} txBytes, err := service.vm.codec.Marshal(&tx) if err != nil { @@ -917,71 +869,77 @@ 
func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply return fmt.Errorf("problem creating transaction: %w", err) } - inputUTXOs := tx.InputUTXOs() - if len(inputUTXOs) != 1 { + opTx, ok := tx.UnsignedTx.(*OperationTx) + if !ok { + return errors.New("transaction must be a mint transaction") + } + if len(opTx.Ins) != 0 { return errCanOnlySignSingleInputTxs } - inputUTXO := inputUTXOs[0] + if len(opTx.Ops) != 1 { + return errCanOnlySignSingleInputTxs + } + op := opTx.Ops[0] - inputTxID, utxoIndex := inputUTXO.InputSource() - utx := UniqueTx{ - vm: service.vm, - txID: inputTxID, + if len(op.UTXOIDs) != 1 { + return errCanOnlySignSingleInputTxs } - if !utx.Status().Fetched() { - return errUnknownUTXO - } - utxos := utx.UTXOs() - if uint32(len(utxos)) <= utxoIndex { - return errInvalidUTXO + inputUTXO := op.UTXOIDs[0] + + utxo, err := service.vm.getUTXO(inputUTXO) + if err != nil { + return err } - utxo := utxos[int(utxoIndex)] - - i := -1 - size := 0 - switch out := utxo.Out.(type) { - case *secp256k1fx.MintOutput: - size = int(out.Threshold) - for j, addr := range out.Addrs { - if bytes.Equal(addr.Bytes(), minter) { - i = j - break - } - } - default: + out, ok := utxo.Out.(*secp256k1fx.MintOutput) + if !ok { return errUnknownOutputType } - if i == -1 { - return errUnneededAddress + secpOp, ok := op.Op.(*secp256k1fx.MintOperation) + if !ok { + return errors.New("unknown mint operation") + } + + sigIndex := -1 + size := int(out.Threshold) + for i, addrIndex := range secpOp.MintInput.SigIndices { + if addrIndex >= uint32(len(out.Addrs)) { + return errors.New("input output mismatch") + } + if bytes.Equal(out.Addrs[int(addrIndex)].Bytes(), minter) { + sigIndex = i + break + } + } + if sigIndex == -1 { + return errUnneededAddress } if len(tx.Creds) == 0 { - tx.Creds = append(tx.Creds, &Credential{Cred: &secp256k1fx.Credential{}}) + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{}) } - cred := tx.Creds[0] - switch cred := cred.Cred.(type) { - 
case *secp256k1fx.Credential: - if len(cred.Sigs) != size { - cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, size) - } - - unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) - if err != nil { - return fmt.Errorf("problem creating transaction: %w", err) - } - - sig, err := sk.Sign(unsignedBytes) - if err != nil { - return fmt.Errorf("problem signing transaction: %w", err) - } - copy(cred.Sigs[i][:], sig) - default: + cred, ok := tx.Creds[0].(*secp256k1fx.Credential) + if !ok { return errUnknownCredentialType } + if len(cred.Sigs) != size { + cred.Sigs = make([][crypto.SECP256K1RSigLen]byte, size) + } + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + sig, err := sk.Sign(unsignedBytes) + if err != nil { + return fmt.Errorf("problem signing transaction: %w", err) + } + copy(cred.Sigs[sigIndex][:], sig) + txBytes, err := service.vm.codec.Marshal(&tx) if err != nil { return fmt.Errorf("problem creating transaction: %w", err) @@ -989,3 +947,320 @@ func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply reply.Tx.Bytes = txBytes return nil } + +// ImportAVAArgs are arguments for passing into ImportAVA requests +type ImportAVAArgs struct { + // User that controls To + Username string `json:"username"` + Password string `json:"password"` + + // Address receiving the imported AVA + To string `json:"to"` +} + +// ImportAVAReply defines the ImportAVA replies returned from the API +type ImportAVAReply struct { + TxID ids.ID `json:"txID"` +} + +// ImportAVA imports AVA to this chain from the P-Chain. +// The AVA must have already been exported from the P-Chain. 
+// Returns the ID of the newly created atomic transaction +func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, reply *ImportAVAReply) error { + service.vm.ctx.Log.Verbo("ImportAVA called with username: %s", args.Username) + + toBytes, err := service.vm.Parse(args.To) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + to, err := ids.ToShortID(toBytes) + if err != nil { + return fmt.Errorf("problem parsing to address: %w", err) + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + addresses, _ := user.Addresses(db) + + addrs := ids.Set{} + addrs.Add(addresses...) + utxos, err := service.vm.GetAtomicUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) + } + + kc := secp256k1fx.NewKeychain() + for _, addr := range addresses { + sk, err := user.Key(db, addr) + if err != nil { + return fmt.Errorf("problem retrieving private key: %w", err) + } + kc.Add(sk) + } + + amount := uint64(0) + time := service.vm.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(service.vm.ava) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amount, input.Amount()) + if err != nil { + return errSpendOverflow + } + amount = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: service.vm.ava}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: 
amount, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{to}, + }, + }, + }} + + tx := Tx{UnsignedTx: &ImportTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + Outs: outs, + }, + Ins: ins, + }} + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + txID, err := service.vm.IssueTx(b, nil) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} + +// ExportAVAArgs are arguments for passing into ExportAVA requests +type ExportAVAArgs struct { + // User providing exported AVA + Username string `json:"username"` + Password string `json:"password"` + + // Amount of nAVA to send + Amount json.Uint64 `json:"amount"` + + // ID of P-Chain account that will receive the AVA + To ids.ShortID `json:"to"` +} + +// ExportAVAReply defines the Send replies returned from the API +type ExportAVAReply struct { + TxID ids.ID `json:"txID"` +} + +// ExportAVA sends AVA from this chain to the P-Chain. +// After this tx is accepted, the AVA must be imported to the P-chain with an importTx. 
+// Returns the ID of the newly created atomic transaction +func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, reply *ExportAVAReply) error { + service.vm.ctx.Log.Verbo("ExportAVA called with username: %s", args.Username) + + if args.Amount == 0 { + return errInvalidAmount + } + + db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("problem retrieving user: %w", err) + } + + user := userState{vm: service.vm} + + addresses, _ := user.Addresses(db) + + addrs := ids.Set{} + addrs.Add(addresses...) + utxos, err := service.vm.GetUTXOs(addrs) + if err != nil { + return fmt.Errorf("problem retrieving user's UTXOs: %w", err) + } + + kc := secp256k1fx.NewKeychain() + for _, addr := range addresses { + sk, err := user.Key(db, addr) + if err != nil { + return fmt.Errorf("problem retrieving private key: %w", err) + } + kc.Add(sk) + } + + amountSpent := uint64(0) + time := service.vm.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(service.vm.ava) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + return errSpendOverflow + } + amountSpent = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: service.vm.ava}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + + if amountSpent >= uint64(args.Amount) { + break + } + } + + if amountSpent < uint64(args.Amount) { + return errInsufficientFunds + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + exportOuts := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + Locktime: 0, + 
OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{args.To}, + }, + }, + }} + + outs := []*ava.TransferableOutput{} + if amountSpent > uint64(args.Amount) { + changeAddr := kc.Keys[0].PublicKey().Address() + outs = append(outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - uint64(args.Amount), + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + }, + }) + } + + ava.SortTransferableOutputs(outs, service.vm.codec) + + tx := Tx{UnsignedTx: &ExportTx{ + BaseTx: BaseTx{ + NetID: service.vm.ctx.NetworkID, + BCID: service.vm.ctx.ChainID, + Outs: outs, + Ins: ins, + }, + Outs: exportOuts, + }} + + unsignedBytes, err := service.vm.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + b, err := service.vm.codec.Marshal(tx) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + + txID, err := service.vm.IssueTx(b, nil) + if err != nil { + return fmt.Errorf("problem issuing transaction: %w", err) + } + + reply.TxID = txID + return nil +} diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 16be290..9221634 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -136,7 +136,7 @@ func TestCreateFixedCapAsset(t *testing.T) { t.Fatal(err) } - if reply.AssetID.String() != "27ySRc5CE4obYwkS6kyvj5S8eGxGkr994157Hdo82mKVHTWpUT" { + if reply.AssetID.String() != 
"wWBk78PGAU4VkXhESr3jiYyMCEzzPPcnVYeEnNr9g4JuvYs2x" { t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) } } @@ -182,7 +182,7 @@ func TestCreateVariableCapAsset(t *testing.T) { t.Fatal(err) } - if reply.AssetID.String() != "2vnRkWvRN3G9JJ7pixBmNdq4pfwRFkpew4kccf27WokYLH9VYY" { + if reply.AssetID.String() != "SscTvpQFCZPNiRXyueDc7LdHT9EstHiva3AK6kuTgHTMd7DsU" { t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) } } diff --git a/vms/avm/state.go b/vms/avm/state.go index b9d045f..6033b8b 100644 --- a/vms/avm/state.go +++ b/vms/avm/state.go @@ -8,169 +8,58 @@ import ( "github.com/ava-labs/gecko/cache" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/vms/components/ava" ) var ( errCacheTypeMismatch = errors.New("type returned from cache doesn't match the expected type") ) +func uniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} + // state is a thin wrapper around a database to provide, caching, serialization, // and de-serialization. -type state struct { - c cache.Cacher - vm *VM -} +type state struct{ ava.State } // Tx attempts to load a transaction from storage. func (s *state) Tx(id ids.ID) (*Tx, error) { - if txIntf, found := s.c.Get(id); found { + if txIntf, found := s.Cache.Get(id); found { if tx, ok := txIntf.(*Tx); ok { return tx, nil } return nil, errCacheTypeMismatch } - bytes, err := s.vm.db.Get(id.Bytes()) + bytes, err := s.DB.Get(id.Bytes()) if err != nil { return nil, err } // The key was in the database tx := &Tx{} - if err := s.vm.codec.Unmarshal(bytes, tx); err != nil { + if err := s.Codec.Unmarshal(bytes, tx); err != nil { return nil, err } tx.Initialize(bytes) - s.c.Put(id, tx) + s.Cache.Put(id, tx) return tx, nil } // SetTx saves the provided transaction to storage. 
func (s *state) SetTx(id ids.ID, tx *Tx) error { if tx == nil { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) } - s.c.Put(id, tx) - return s.vm.db.Put(id.Bytes(), tx.Bytes()) -} - -// UTXO attempts to load a utxo from storage. -func (s *state) UTXO(id ids.ID) (*UTXO, error) { - if utxoIntf, found := s.c.Get(id); found { - if utxo, ok := utxoIntf.(*UTXO); ok { - return utxo, nil - } - return nil, errCacheTypeMismatch - } - - bytes, err := s.vm.db.Get(id.Bytes()) - if err != nil { - return nil, err - } - - // The key was in the database - utxo := &UTXO{} - if err := s.vm.codec.Unmarshal(bytes, utxo); err != nil { - return nil, err - } - - s.c.Put(id, utxo) - return utxo, nil -} - -// SetUTXO saves the provided utxo to storage. -func (s *state) SetUTXO(id ids.ID, utxo *UTXO) error { - if utxo == nil { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) - } - - bytes, err := s.vm.codec.Marshal(utxo) - if err != nil { - return err - } - - s.c.Put(id, utxo) - return s.vm.db.Put(id.Bytes(), bytes) -} - -// Status returns a status from storage. -func (s *state) Status(id ids.ID) (choices.Status, error) { - if statusIntf, found := s.c.Get(id); found { - if status, ok := statusIntf.(choices.Status); ok { - return status, nil - } - return choices.Unknown, errCacheTypeMismatch - } - - bytes, err := s.vm.db.Get(id.Bytes()) - if err != nil { - return choices.Unknown, err - } - - var status choices.Status - s.vm.codec.Unmarshal(bytes, &status) - - s.c.Put(id, status) - return status, nil -} - -// SetStatus saves a status in storage. 
-func (s *state) SetStatus(id ids.ID, status choices.Status) error { - if status == choices.Unknown { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) - } - - s.c.Put(id, status) - - bytes, err := s.vm.codec.Marshal(status) - if err != nil { - return err - } - return s.vm.db.Put(id.Bytes(), bytes) -} - -// IDs returns a slice of IDs from storage -func (s *state) IDs(id ids.ID) ([]ids.ID, error) { - if idsIntf, found := s.c.Get(id); found { - if idSlice, ok := idsIntf.([]ids.ID); ok { - return idSlice, nil - } - return nil, errCacheTypeMismatch - } - - bytes, err := s.vm.db.Get(id.Bytes()) - if err != nil { - return nil, err - } - - idSlice := []ids.ID(nil) - if err := s.vm.codec.Unmarshal(bytes, &idSlice); err != nil { - return nil, err - } - - s.c.Put(id, idSlice) - return idSlice, nil -} - -// SetIDs saves a slice of IDs to the database. -func (s *state) SetIDs(id ids.ID, idSlice []ids.ID) error { - if len(idSlice) == 0 { - s.c.Evict(id) - return s.vm.db.Delete(id.Bytes()) - } - - s.c.Put(id, idSlice) - - bytes, err := s.vm.codec.Marshal(idSlice) - if err != nil { - return err - } - - return s.vm.db.Put(id.Bytes(), bytes) + s.Cache.Put(id, tx) + return s.DB.Put(id.Bytes(), tx.Bytes()) } diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index 0485a1e..212fc18 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -67,7 +68,7 @@ func TestStateIDs(t *testing.T) { } } - state.c.Flush() + state.Cache.Flush() result, err = state.IDs(ids.Empty) if err != nil { @@ -94,7 +95,7 @@ func TestStateIDs(t *testing.T) { t.Fatalf("Should have errored during cache lookup") } - state.c.Flush() + state.Cache.Flush() result, err = state.IDs(ids.Empty) if err == nil { @@ -174,19 +175,19 @@ func TestStateUTXOs(t *testing.T) 
{ vm := GenesisVM(t) state := vm.state.state - vm.codec.RegisterType(&testVerifiable{}) + vm.codec.RegisterType(&ava.TestVerifiable{}) if _, err := state.UTXO(ids.Empty); err == nil { t.Fatalf("Should have errored when reading utxo") } - utxo := &UTXO{ - UTXOID: UTXOID{ + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, }, - Asset: Asset{ID: ids.Empty}, - Out: &testVerifiable{}, + Asset: ava.Asset{ID: ids.Empty}, + Out: &ava.TestVerifiable{}, } if err := state.SetUTXO(ids.Empty, utxo); err != nil { @@ -202,7 +203,7 @@ func TestStateUTXOs(t *testing.T) { t.Fatalf("Wrong UTXO returned") } - state.c.Flush() + state.Cache.Flush() result, err = state.UTXO(ids.Empty) if err != nil { @@ -221,7 +222,7 @@ func TestStateUTXOs(t *testing.T) { t.Fatalf("Should have errored when reading utxo") } - if err := state.SetUTXO(ids.Empty, &UTXO{}); err == nil { + if err := state.SetUTXO(ids.Empty, &ava.UTXO{}); err == nil { t.Fatalf("Should have errored packing the utxo") } @@ -233,7 +234,7 @@ func TestStateUTXOs(t *testing.T) { t.Fatalf("Should have errored when reading utxo") } - state.c.Flush() + state.Cache.Flush() if _, err := state.UTXO(ids.Empty); err == nil { t.Fatalf("Should have errored when reading utxo") @@ -244,35 +245,31 @@ func TestStateTXs(t *testing.T) { vm := GenesisVM(t) state := vm.state.state - vm.codec.RegisterType(&TestTransferable{}) + vm.codec.RegisterType(&ava.TestTransferable{}) if _, err := state.Tx(ids.Empty); err == nil { t.Fatalf("Should have errored when reading tx") } - tx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + 
TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(tx.UnsignedTx) if err != nil { @@ -287,11 +284,9 @@ func TestStateTXs(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - tx.Creds = append(tx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -314,7 +309,7 @@ func TestStateTXs(t *testing.T) { t.Fatalf("Wrong Tx returned") } - state.c.Flush() + state.Cache.Flush() result, err = state.Tx(ids.Empty) if err != nil { @@ -341,7 +336,7 @@ func TestStateTXs(t *testing.T) { t.Fatalf("Should have errored when reading tx") } - state.c.Flush() + state.Cache.Flush() if _, err := state.Tx(ids.Empty); err == nil { t.Fatalf("Should have errored when reading tx") diff --git a/vms/avm/static_service.go b/vms/avm/static_service.go index 47ef942..3fd58f3 100644 --- a/vms/avm/static_service.go +++ b/vms/avm/static_service.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" @@ -44,15 +45,24 @@ type BuildGenesisReply struct { // BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is // referenced in the UTXO. 
func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, reply *BuildGenesisReply) error { + errs := wrappers.Errs{} + c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) + errs.Add( + c.RegisterType(&BaseTx{}), + c.RegisterType(&CreateAssetTx{}), + c.RegisterType(&OperationTx{}), + c.RegisterType(&ImportTx{}), + c.RegisterType(&ExportTx{}), + c.RegisterType(&secp256k1fx.TransferInput{}), + c.RegisterType(&secp256k1fx.MintOutput{}), + c.RegisterType(&secp256k1fx.TransferOutput{}), + c.RegisterType(&secp256k1fx.MintOperation{}), + c.RegisterType(&secp256k1fx.Credential{}), + ) + if errs.Errored() { + return errs.Err + } g := Genesis{} for assetAlias, assetDefinition := range args.GenesisData { @@ -67,78 +77,75 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl Denomination: byte(assetDefinition.Denomination), }, } - for assetType, initialStates := range assetDefinition.InitialState { - switch assetType { - case "fixedCap": - initialState := &InitialState{ - FxID: 0, // TODO: Should lookup secp256k1fx FxID - } - for _, state := range initialStates { - b, err := json.Marshal(state) - if err != nil { - return err - } - holder := Holder{} - if err := json.Unmarshal(b, &holder); err != nil { - return err - } - cb58 := formatting.CB58{} - if err := cb58.FromString(holder.Address); err != nil { - return err - } - addr, err := ids.ToShortID(cb58.Bytes) - if err != nil { - return err - } - initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ - Amt: uint64(holder.Amount), - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{addr}, - }, - }) - } - initialState.Sort(c) - asset.States = 
append(asset.States, initialState) - case "variableCap": - initialState := &InitialState{ - FxID: 0, // TODO: Should lookup secp256k1fx FxID - } - for _, state := range initialStates { - b, err := json.Marshal(state) - if err != nil { - return err - } - owners := Owners{} - if err := json.Unmarshal(b, &owners); err != nil { - return err - } - - out := &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - }, - } - for _, address := range owners.Minters { + if len(assetDefinition.InitialState) > 0 { + initialState := &InitialState{ + FxID: 0, // TODO: Should lookup secp256k1fx FxID + } + for assetType, initialStates := range assetDefinition.InitialState { + switch assetType { + case "fixedCap": + for _, state := range initialStates { + b, err := json.Marshal(state) + if err != nil { + return err + } + holder := Holder{} + if err := json.Unmarshal(b, &holder); err != nil { + return err + } cb58 := formatting.CB58{} - if err := cb58.FromString(address); err != nil { + if err := cb58.FromString(holder.Address); err != nil { return err } addr, err := ids.ToShortID(cb58.Bytes) if err != nil { return err } - out.Addrs = append(out.Addrs, addr) + initialState.Outs = append(initialState.Outs, &secp256k1fx.TransferOutput{ + Amt: uint64(holder.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }) } - out.Sort() + case "variableCap": + for _, state := range initialStates { + b, err := json.Marshal(state) + if err != nil { + return err + } + owners := Owners{} + if err := json.Unmarshal(b, &owners); err != nil { + return err + } - initialState.Outs = append(initialState.Outs, out) + out := &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + } + for _, address := range owners.Minters { + cb58 := formatting.CB58{} + if err := cb58.FromString(address); err != nil { + return err + } + addr, err := ids.ToShortID(cb58.Bytes) + if err != nil { + return err + } + 
out.Addrs = append(out.Addrs, addr) + } + out.Sort() + + initialState.Outs = append(initialState.Outs, out) + } + default: + return errUnknownAssetType } - initialState.Sort(c) - asset.States = append(asset.States, initialState) - default: - return errUnknownAssetType } + initialState.Sort(c) + asset.States = append(asset.States, initialState) } asset.Sort() g.Txs = append(g.Txs, &asset) diff --git a/vms/avm/static_service_test.go b/vms/avm/static_service_test.go index 612132e..fd9acc0 100644 --- a/vms/avm/static_service_test.go +++ b/vms/avm/static_service_test.go @@ -5,8 +5,6 @@ package avm import ( "testing" - - "github.com/ava-labs/gecko/utils/formatting" ) func TestBuildGenesis(t *testing.T) { @@ -79,20 +77,4 @@ func TestBuildGenesis(t *testing.T) { if err != nil { t.Fatal(err) } - - expected := "1112YAVd1YsJ7JBDMQssciuuu9ySgebznWfmfT8JSw5vUKERtP4WGyitE7z38J8tExNmvK2kuwHsUP3erfcncXBWmJkdnd9nDJoj9tCiQHJmW1pstNQn3zXHdTnw6KJcG8Ro36ahknQkuy9ZSXgnZtpFhqUuwSd7mPj8vzZcqJMXLXorCBfvhwypTbZKogM9tUshyUfngfkg256ZsoU2ufMjhTG14PBBrgJkXD2F38uVSXWvYbubMVWDZbDnUzbyD3Azrs2Hydf8Paio6aNjwfwc1py61oXS5ehC55wiYbKpfzwE4px3bfYBu9yV6rvhivksB56vop9LEo8Pdo71tFAMkhR5toZmYcqRKyLXAnYqonUgmPsyxNwU22as8oscT5dj3Qxy1jsg6bEp6GwQepNqsWufGYx6Hiby2r5hyRZeYdk6xsXMPGBSBWUXhKX3ReTxBnjcrVE2Zc3G9eMvRho1tKzt7ppkutpcQemdDy2dxGryMqaFmPJaTaqcH2vB197KgVFbPgmHZY3ufUdfpVzzHax365pwCmzQD2PQh8hCqEP7rfV5e8uXKQiSynngoNDM4ak145zTpcUaX8htMGinfs45aKQvo5WHcD6ccRnHzc7dyXN8xJRnMznsuRN7D6k66DdbfDYhc2NbVUgXRAF4wSNTtsuZGxCGTEjQyYaoUoJowGXvnxmXAWHvLyMJswNizBeYgw1agRg5qB4AEKX96BFXhJq3MbsBRiypLR6nSuZgPFhCrLdBtstxEC2SPQNuUVWW9Qy68dDWQ3Fxx95n1pnjVru9wDJFoemg2imXRR" - - cb58 := formatting.CB58{} - if err := cb58.FromString(expected); err != nil { - t.Fatal(err) - } - expectedBytes := cb58.Bytes - - if result := reply.Bytes.String(); result != expected { - t.Fatalf("Create genesis returned unexpected bytes:\n\n%s\n\n%s\n\n%s", - reply.Bytes, - formatting.DumpBytes{Bytes: reply.Bytes.Bytes}, - formatting.DumpBytes{Bytes: 
expectedBytes}, - ) - } } diff --git a/vms/avm/tx.go b/vms/avm/tx.go index 4fced32..c35fd80 100644 --- a/vms/avm/tx.go +++ b/vms/avm/tx.go @@ -6,10 +6,12 @@ package avm import ( "errors" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" ) var ( @@ -22,16 +24,14 @@ type UnsignedTx interface { ID() ids.ID Bytes() []byte - NetworkID() uint32 - ChainID() ids.ID - Outputs() []*TransferableOutput - Inputs() []*TransferableInput - AssetIDs() ids.Set - InputUTXOs() []*UTXOID - UTXOs() []*UTXO + NumCredentials() int + InputUTXOs() []*ava.UTXOID + UTXOs() []*ava.UTXO + SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error - SemanticVerify(vm *VM, uTx *UniqueTx, creds []*Credential) error + SemanticVerify(vm *VM, uTx *UniqueTx, creds []verify.Verifiable) error + ExecuteWithSideEffects(vm *VM, batch database.Batch) error } // Tx is the core operation that can be performed. The tx uses the UTXO model. @@ -40,14 +40,14 @@ type UnsignedTx interface { // attempting to consume and the inputs consume sufficient state to produce the // outputs. type Tx struct { - UnsignedTx `serialize:"true"` + UnsignedTx `serialize:"true" json:"unsignedTx"` - Creds []*Credential `serialize:"true"` // The credentials of this transaction + Creds []verify.Verifiable `serialize:"true" json:"credentials"` // The credentials of this transaction } // Credentials describes the authorization that allows the Inputs to consume the // specified UTXOs. The returned array should not be modified. -func (t *Tx) Credentials() []*Credential { return t.Creds } +func (t *Tx) Credentials() []verify.Verifiable { return t.Creds } // SyntacticVerify verifies that this transaction is well-formed. 
func (t *Tx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error { @@ -66,8 +66,7 @@ func (t *Tx) SyntacticVerify(ctx *snow.Context, c codec.Codec, numFxs int) error } } - numInputs := len(t.InputUTXOs()) - if numInputs != len(t.Creds) { + if numCreds := t.UnsignedTx.NumCredentials(); numCreds != len(t.Creds) { return errWrongNumberOfCredentials } return nil diff --git a/vms/avm/tx_test.go b/vms/avm/tx_test.go index bc01f36..2f269e9 100644 --- a/vms/avm/tx_test.go +++ b/vms/avm/tx_test.go @@ -8,7 +8,9 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/units" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -18,14 +20,28 @@ func TestTxNil(t *testing.T) { if err := tx.SyntacticVerify(ctx, c, 1); err == nil { t.Fatalf("Should have errored due to nil tx") } + if err := tx.SemanticVerify(nil, nil); err == nil { + t.Fatalf("Should have errored due to nil tx") + } } -func TestTxEmpty(t *testing.T) { +func setupCodec() codec.Codec { c := codec.NewDefault() c.RegisterType(&BaseTx{}) c.RegisterType(&CreateAssetTx{}) c.RegisterType(&OperationTx{}) + c.RegisterType(&ImportTx{}) + c.RegisterType(&ExportTx{}) + c.RegisterType(&secp256k1fx.TransferInput{}) + c.RegisterType(&secp256k1fx.MintOutput{}) + c.RegisterType(&secp256k1fx.TransferOutput{}) + c.RegisterType(&secp256k1fx.MintOperation{}) + c.RegisterType(&secp256k1fx.Credential{}) + return c +} +func TestTxEmpty(t *testing.T) { + c := setupCodec() tx := &Tx{} if err := tx.SyntacticVerify(ctx, c, 1); err == nil { t.Fatalf("Should have errored due to nil tx") @@ -33,46 +49,30 @@ func TestTxEmpty(t *testing.T) { } func TestTxInvalidCredential(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - 
c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - c.RegisterType(&testVerifiable{}) + c := setupCodec() + c.RegisterType(&ava.TestVerifiable{}) tx := &Tx{ - UnsignedTx: &OperationTx{BaseTx: BaseTx{ + UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, - }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: ids.Empty, + OutputIndex: 0, + }, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}, - Creds: []*Credential{ - &Credential{ - Cred: &testVerifiable{err: errUnneededAddress}, - }, + }}, }, + Creds: []verify.Verifiable{&ava.TestVerifiable{Err: errUnneededAddress}}, } b, err := c.Marshal(tx) @@ -87,30 +87,20 @@ func TestTxInvalidCredential(t *testing.T) { } func TestTxInvalidUnsignedTx(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - c.RegisterType(&testVerifiable{}) + c := setupCodec() + c.RegisterType(&ava.TestVerifiable{}) tx := &Tx{ - UnsignedTx: &OperationTx{BaseTx: BaseTx{ + UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, }, 
- Asset: Asset{ - ID: asset, - }, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 20 * units.KiloAva, Input: secp256k1fx.Input{ @@ -120,14 +110,12 @@ func TestTxInvalidUnsignedTx(t *testing.T) { }, }, }, - &TransferableInput{ - UTXOID: UTXOID{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, }, - Asset: Asset{ - ID: asset, - }, + Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ Amt: 20 * units.KiloAva, Input: secp256k1fx.Input{ @@ -138,14 +126,10 @@ func TestTxInvalidUnsignedTx(t *testing.T) { }, }, }, - }}, - Creds: []*Credential{ - &Credential{ - Cred: &testVerifiable{}, - }, - &Credential{ - Cred: &testVerifiable{}, - }, + }, + Creds: []verify.Verifiable{ + &ava.TestVerifiable{}, + &ava.TestVerifiable{}, }, } @@ -161,64 +145,41 @@ func TestTxInvalidUnsignedTx(t *testing.T) { } func TestTxInvalidNumberOfCredentials(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - c.RegisterType(&testVerifiable{}) + c := setupCodec() + c.RegisterType(&ava.TestVerifiable{}) tx := &Tx{ - UnsignedTx: &OperationTx{ - BaseTx: BaseTx{ - NetID: networkID, - BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 0, + UnsignedTx: &BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{ + &ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.Empty, OutputIndex: 0}, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, }, - Asset: Asset{ - ID: asset, - }, - In: &secp256k1fx.TransferInput{ - Amt: 20 * units.KiloAva, - Input: 
secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + }, + }, + &ava.TransferableInput{ + UTXOID: ava.UTXOID{TxID: ids.Empty, OutputIndex: 1}, + Asset: ava.Asset{ID: asset}, + In: &secp256k1fx.TransferInput{ + Amt: 20 * units.KiloAva, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, }, }, - Ops: []*Operation{ - &Operation{ - Asset: Asset{ - ID: asset, - }, - Ins: []*OperableInput{ - &OperableInput{ - UTXOID: UTXOID{ - TxID: ids.Empty, - OutputIndex: 1, - }, - In: &testVerifiable{}, - }, - }, - }, - }, - }, - Creds: []*Credential{ - &Credential{ - Cred: &testVerifiable{}, - }, }, + Creds: []verify.Verifiable{&ava.TestVerifiable{}}, } b, err := c.Marshal(tx) @@ -231,76 +192,3 @@ func TestTxInvalidNumberOfCredentials(t *testing.T) { t.Fatalf("Tx should have failed due to an invalid unsigned tx") } } - -func TestTxDocumentation(t *testing.T) { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - - txBytes := []byte{ - // unsigned transaction: - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, - 0xff, 0xff, 0xff, 0xff, 0xee, 0xee, 0xee, 0xee, - 0xdd, 0xdd, 0xdd, 0xdd, 0xcc, 0xcc, 0xcc, 0xcc, - 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, - 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, - 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, - 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, - 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, - 0x1c, 0x1d, 0x1e, 0x1f, 0x00, 0x00, 0x00, 0x04, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, - 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, - 0x51, 0x02, 0x5c, 0x61, 0xfb, 0xcf, 0xc0, 0x78, - 0xf6, 0x93, 0x34, 0xf8, 0x34, 
0xbe, 0x6d, 0xd2, - 0x6d, 0x55, 0xa9, 0x55, 0xc3, 0x34, 0x41, 0x28, - 0xe0, 0x60, 0x12, 0x8e, 0xde, 0x35, 0x23, 0xa2, - 0x4a, 0x46, 0x1c, 0x89, 0x43, 0xab, 0x08, 0x59, - 0x00, 0x00, 0x00, 0x01, 0xf1, 0xe1, 0xd1, 0xc1, - 0xb1, 0xa1, 0x91, 0x81, 0x71, 0x61, 0x51, 0x41, - 0x31, 0x21, 0x11, 0x01, 0xf0, 0xe0, 0xd0, 0xc0, - 0xb0, 0xa0, 0x90, 0x80, 0x70, 0x60, 0x50, 0x40, - 0x30, 0x20, 0x10, 0x00, 0x00, 0x00, 0x00, 0x05, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, - 0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x00, 0x00, - 0x07, 0x5b, 0xcd, 0x15, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x00, 0x00, 0x03, 0x00, 0x00, 0x00, 0x07, - // number of credentials: - 0x00, 0x00, 0x00, 0x01, - // credential[0]: - 0x00, 0x00, 0x00, 0x07, 0x00, 0x00, 0x00, 0x02, - 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, - 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, - 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, - 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1e, 0x1d, 0x1f, - 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, - 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2e, 0x2d, 0x2f, - 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, - 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, - 0x00, 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, - 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, - 0x4f, 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, - 0x57, 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5e, 0x5d, - 0x5f, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, - 0x67, 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6e, 0x6d, - 0x6f, 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, - 0x77, 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, - 0x7f, 0x00, - } - - tx := Tx{} - err := c.Unmarshal(txBytes, &tx) - if err != nil { - t.Fatal(err) - } -} diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go index 6d219f3..788ef52 100644 --- a/vms/avm/unique_tx.go +++ b/vms/avm/unique_tx.go @@ -9,10 +9,12 @@ import ( "github.com/ava-labs/gecko/ids" 
"github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/vms/components/ava" ) var ( errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errWrongAssetID = errors.New("asset ID must be AVA in the atomic tx") errMissingUTXO = errors.New("missing utxo") errUnknownTx = errors.New("transaction is unknown") errRejectedTx = errors.New("transaction is rejected") @@ -21,70 +23,75 @@ var ( // UniqueTx provides a de-duplication service for txs. This only provides a // performance boost type UniqueTx struct { + *TxState + vm *VM txID ids.ID - t *txState } -type txState struct { +// TxState ... +type TxState struct { + *Tx + unique, verifiedTx, verifiedState bool validity error - tx *Tx inputs ids.Set - inputUTXOs []*UTXOID - utxos []*UTXO + inputUTXOs []*ava.UTXOID + utxos []*ava.UTXO deps []snowstorm.Tx status choices.Status + + onDecide func(choices.Status) } func (tx *UniqueTx) refresh() { - if tx.t == nil { - tx.t = &txState{} + if tx.TxState == nil { + tx.TxState = &TxState{} } - if tx.t.unique { + if tx.unique { return } unique := tx.vm.state.UniqueTx(tx) - prevTx := tx.t.tx + prevTx := tx.Tx if unique == tx { // If no one was in the cache, make sure that there wasn't an // intermediate object whose state I must reflect if status, err := tx.vm.state.Status(tx.ID()); err == nil { - tx.t.status = status - tx.t.unique = true + tx.status = status + tx.unique = true } } else { // If someone is in the cache, they must be up to date // This ensures that every unique tx object points to the same tx state - tx.t = unique.t + tx.TxState = unique.TxState } - if tx.t.tx != nil { + if tx.Tx != nil { return } if prevTx == nil { if innerTx, err := tx.vm.state.Tx(tx.ID()); err == nil { - tx.t.tx = innerTx + tx.Tx = innerTx } } else { - tx.t.tx = prevTx + tx.Tx = prevTx } } // Evict is called when this UniqueTx will no longer be returned from a cache // lookup -func (tx *UniqueTx) Evict() { 
tx.t.unique = false } // Lock is already held here +func (tx *UniqueTx) Evict() { tx.unique = false } // Lock is already held here func (tx *UniqueTx) setStatus(status choices.Status) error { tx.refresh() - if tx.t.status == status { + if tx.status == status { return nil } - tx.t.status = status + tx.status = status return tx.vm.state.SetStatus(tx.ID(), status) } @@ -93,13 +100,20 @@ func (tx *UniqueTx) ID() ids.ID { return tx.txID } // Accept is called when the transaction was finalized as accepted by consensus func (tx *UniqueTx) Accept() { + defer tx.vm.db.Abort() + if err := tx.setStatus(choices.Accepted); err != nil { tx.vm.ctx.Log.Error("Failed to accept tx %s due to %s", tx.txID, err) return } // Remove spent utxos - for _, utxoID := range tx.InputIDs().List() { + for _, utxo := range tx.InputUTXOs() { + if utxo.Symbolic() { + // If the UTXO is symbolic, it can't be spent + continue + } + utxoID := utxo.InputID() if err := tx.vm.state.SpendUTXO(utxoID); err != nil { tx.vm.ctx.Log.Error("Failed to spend utxo %s due to %s", utxoID, err) return @@ -115,19 +129,32 @@ func (tx *UniqueTx) Accept() { } txID := tx.ID() - tx.vm.ctx.Log.Verbo("Accepting Tx: %s", txID) - - if err := tx.vm.db.Commit(); err != nil { - tx.vm.ctx.Log.Error("Failed to commit accept %s due to %s", tx.txID, err) + commitBatch, err := tx.vm.db.CommitBatch() + if err != nil { + tx.vm.ctx.Log.Error("Failed to calculate CommitBatch for %s due to %s", txID, err) + return } + if err := tx.ExecuteWithSideEffects(tx.vm, commitBatch); err != nil { + tx.vm.ctx.Log.Error("Failed to commit accept %s due to %s", txID, err) + return + } + + tx.vm.ctx.Log.Verbo("Accepted Tx: %s", txID) + tx.vm.pubsub.Publish("accepted", txID) - tx.t.deps = nil // Needed to prevent a memory leak + tx.deps = nil // Needed to prevent a memory leak + + if tx.onDecide != nil { + tx.onDecide(choices.Accepted) + } } // Reject is called when the transaction was finalized as rejected by consensus func (tx *UniqueTx) Reject() { + 
defer tx.vm.db.Abort() + if err := tx.setStatus(choices.Rejected); err != nil { tx.vm.ctx.Log.Error("Failed to reject tx %s due to %s", tx.txID, err) return @@ -142,82 +169,89 @@ func (tx *UniqueTx) Reject() { tx.vm.pubsub.Publish("rejected", txID) - tx.t.deps = nil // Needed to prevent a memory leak + tx.deps = nil // Needed to prevent a memory leak + + if tx.onDecide != nil { + tx.onDecide(choices.Rejected) + } } // Status returns the current status of this transaction func (tx *UniqueTx) Status() choices.Status { tx.refresh() - return tx.t.status + return tx.status } // Dependencies returns the set of transactions this transaction builds on func (tx *UniqueTx) Dependencies() []snowstorm.Tx { tx.refresh() - if tx.t.tx == nil || len(tx.t.deps) != 0 { - return tx.t.deps + if tx.Tx == nil || len(tx.deps) != 0 { + return tx.deps } txIDs := ids.Set{} for _, in := range tx.InputUTXOs() { + if in.Symbolic() { + continue + } txID, _ := in.InputSource() if !txIDs.Contains(txID) { txIDs.Add(txID) - tx.t.deps = append(tx.t.deps, &UniqueTx{ + tx.deps = append(tx.deps, &UniqueTx{ vm: tx.vm, txID: txID, }) } } - for _, assetID := range tx.t.tx.AssetIDs().List() { + for _, assetID := range tx.Tx.AssetIDs().List() { if !txIDs.Contains(assetID) { txIDs.Add(assetID) - tx.t.deps = append(tx.t.deps, &UniqueTx{ + tx.deps = append(tx.deps, &UniqueTx{ vm: tx.vm, txID: assetID, }) } } - return tx.t.deps + return tx.deps } // InputIDs returns the set of utxoIDs this transaction consumes func (tx *UniqueTx) InputIDs() ids.Set { tx.refresh() - if tx.t.tx == nil || tx.t.inputs.Len() != 0 { - return tx.t.inputs + if tx.Tx == nil || tx.inputs.Len() != 0 { + return tx.inputs } for _, utxo := range tx.InputUTXOs() { - tx.t.inputs.Add(utxo.InputID()) + tx.inputs.Add(utxo.InputID()) } - return tx.t.inputs + return tx.inputs } // InputUTXOs returns the utxos that will be consumed on tx acceptance -func (tx *UniqueTx) InputUTXOs() []*UTXOID { +func (tx *UniqueTx) InputUTXOs() []*ava.UTXOID { 
tx.refresh() - if tx.t.tx == nil || len(tx.t.inputUTXOs) != 0 { - return tx.t.inputUTXOs + if tx.Tx == nil || len(tx.inputUTXOs) != 0 { + return tx.inputUTXOs } - tx.t.inputUTXOs = tx.t.tx.InputUTXOs() - return tx.t.inputUTXOs + tx.inputUTXOs = tx.Tx.InputUTXOs() + return tx.inputUTXOs } // UTXOs returns the utxos that will be added to the UTXO set on tx acceptance -func (tx *UniqueTx) UTXOs() []*UTXO { +func (tx *UniqueTx) UTXOs() []*ava.UTXO { tx.refresh() - if tx.t.tx == nil || len(tx.t.utxos) != 0 { - return tx.t.utxos + if tx.Tx == nil || len(tx.utxos) != 0 { + return tx.utxos } - tx.t.utxos = tx.t.tx.UTXOs() - return tx.t.utxos + tx.utxos = tx.Tx.UTXOs() + return tx.utxos } // Bytes returns the binary representation of this transaction func (tx *UniqueTx) Bytes() []byte { tx.refresh() - return tx.t.tx.Bytes() + return tx.Tx.Bytes() } // Verify the validity of this transaction @@ -238,39 +272,39 @@ func (tx *UniqueTx) Verify() error { func (tx *UniqueTx) SyntacticVerify() error { tx.refresh() - if tx.t.tx == nil { + if tx.Tx == nil { return errUnknownTx } - if tx.t.verifiedTx { - return tx.t.validity + if tx.verifiedTx { + return tx.validity } - tx.t.verifiedTx = true - tx.t.validity = tx.t.tx.SyntacticVerify(tx.vm.ctx, tx.vm.codec, len(tx.vm.fxs)) - return tx.t.validity + tx.verifiedTx = true + tx.validity = tx.Tx.SyntacticVerify(tx.vm.ctx, tx.vm.codec, len(tx.vm.fxs)) + return tx.validity } // SemanticVerify the validity of this transaction func (tx *UniqueTx) SemanticVerify() error { tx.SyntacticVerify() - if tx.t.validity != nil || tx.t.verifiedState { - return tx.t.validity + if tx.validity != nil || tx.verifiedState { + return tx.validity } - tx.t.verifiedState = true - tx.t.validity = tx.t.tx.SemanticVerify(tx.vm, tx) - - if tx.t.validity == nil { - tx.vm.pubsub.Publish("verified", tx.ID()) + if err := tx.Tx.SemanticVerify(tx.vm, tx); err != nil { + return err } - return tx.t.validity + + tx.verifiedState = true + tx.vm.pubsub.Publish("verified", 
tx.ID()) + return nil } // UnsignedBytes returns the unsigned bytes of the transaction func (tx *UniqueTx) UnsignedBytes() []byte { - b, err := tx.vm.codec.Marshal(&tx.t.tx.UnsignedTx) + b, err := tx.vm.codec.Marshal(&tx.UnsignedTx) tx.vm.ctx.Log.AssertNoError(err) return b } diff --git a/vms/avm/utxo_id.go b/vms/avm/utxo_id.go deleted file mode 100644 index 9852c5d..0000000 --- a/vms/avm/utxo_id.go +++ /dev/null @@ -1,48 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avm - -import ( - "errors" - - "github.com/ava-labs/gecko/ids" -) - -var ( - errNilUTXOID = errors.New("nil utxo ID is not valid") - errNilTxID = errors.New("nil tx ID is not valid") -) - -// UTXOID ... -type UTXOID struct { - // Serialized: - TxID ids.ID `serialize:"true"` - OutputIndex uint32 `serialize:"true"` - - // Cached: - id ids.ID -} - -// InputSource returns the source of the UTXO that this input is spending -func (utxo *UTXOID) InputSource() (ids.ID, uint32) { return utxo.TxID, utxo.OutputIndex } - -// InputID returns a unique ID of the UTXO that this input is spending -func (utxo *UTXOID) InputID() ids.ID { - if utxo.id.IsZero() { - utxo.id = utxo.TxID.Prefix(uint64(utxo.OutputIndex)) - } - return utxo.id -} - -// Verify implements the verify.Verifiable interface -func (utxo *UTXOID) Verify() error { - switch { - case utxo == nil: - return errNilUTXOID - case utxo.TxID.IsZero(): - return errNilTxID - default: - return nil - } -} diff --git a/vms/avm/verifiable_test.go b/vms/avm/verifiable_test.go index 65630d2..6e6337a 100644 --- a/vms/avm/verifiable_test.go +++ b/vms/avm/verifiable_test.go @@ -3,20 +3,10 @@ package avm -type testVerifiable struct{ err error } - -func (v *testVerifiable) Verify() error { return v.err } - -type TestTransferable struct { - testVerifiable - - Val uint64 `serialize:"true"` -} - -func (t *TestTransferable) Amount() uint64 { return t.Val } +import 
"github.com/ava-labs/gecko/vms/components/ava" type testAddressable struct { - TestTransferable `serialize:"true"` + ava.TestTransferable `serialize:"true"` Addrs [][]byte `serialize:"true"` } diff --git a/vms/avm/vm.go b/vms/avm/vm.go index d1097aa..c8b33f1 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -21,8 +21,10 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowstorm" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" cjson "github.com/ava-labs/gecko/utils/json" @@ -49,6 +51,9 @@ var ( type VM struct { ids.Aliaser + ava ids.ID + platform ids.ID + // Contains information of where this VM is executing ctx *snow.Context @@ -111,36 +116,24 @@ func (vm *VM) Initialize( vm.Aliaser.Initialize() vm.pubsub = cjson.NewPubSubServer(ctx) + c := codec.NewDefault() errs := wrappers.Errs{} errs.Add( vm.pubsub.Register("accepted"), vm.pubsub.Register("rejected"), vm.pubsub.Register("verified"), + + c.RegisterType(&BaseTx{}), + c.RegisterType(&CreateAssetTx{}), + c.RegisterType(&OperationTx{}), + c.RegisterType(&ImportTx{}), + c.RegisterType(&ExportTx{}), ) if errs.Errored() { return errs.Err } - vm.state = &prefixedState{ - state: &state{ - c: &cache.LRU{Size: stateCacheSize}, - vm: vm, - }, - - tx: &cache.LRU{Size: idCacheSize}, - utxo: &cache.LRU{Size: idCacheSize}, - txStatus: &cache.LRU{Size: idCacheSize}, - funds: &cache.LRU{Size: idCacheSize}, - - uniqueTx: &cache.EvictableLRU{Size: txCacheSize}, - } - - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - vm.fxs = make([]*parsedFx, len(fxs)) for i, fxContainer := range fxs { if fxContainer == nil { @@ -166,6 +159,21 @@ func (vm *VM) Initialize( vm.codec = c + vm.state = 
&prefixedState{ + state: &state{State: ava.State{ + Cache: &cache.LRU{Size: stateCacheSize}, + DB: vm.db, + Codec: vm.codec, + }}, + + tx: &cache.LRU{Size: idCacheSize}, + utxo: &cache.LRU{Size: idCacheSize}, + txStatus: &cache.LRU{Size: idCacheSize}, + funds: &cache.LRU{Size: idCacheSize}, + + uniqueTx: &cache.EvictableLRU{Size: txCacheSize}, + } + if err := vm.initAliases(genesisBytes); err != nil { return err } @@ -190,6 +198,10 @@ func (vm *VM) Initialize( // Shutdown implements the avalanche.DAGVM interface func (vm *VM) Shutdown() { + if vm.timer == nil { + return + } + vm.timer.Stop() if err := vm.baseDB.Close(); err != nil { vm.ctx.Log.Error("Closing the database failed with %s", err) @@ -251,8 +263,11 @@ func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) { ****************************************************************************** */ -// IssueTx attempts to send a transaction to consensus -func (vm *VM) IssueTx(b []byte) (ids.ID, error) { +// IssueTx attempts to send a transaction to consensus. +// If onDecide is specified, the function will be called when the transaction is +// either accepted or rejected with the appropriate status. This function will +// go out of scope when the transaction is removed from memory. +func (vm *VM) IssueTx(b []byte, onDecide func(choices.Status)) (ids.ID, error) { tx, err := vm.parseTx(b) if err != nil { return ids.ID{}, err @@ -261,19 +276,45 @@ func (vm *VM) IssueTx(b []byte) (ids.ID, error) { return ids.ID{}, err } vm.issueTx(tx) + tx.onDecide = onDecide return tx.ID(), nil } +// GetAtomicUTXOs returns the utxos that at least one of the provided addresses is +// referenced in. 
+func (vm *VM) GetAtomicUTXOs(addrs ids.Set) ([]*ava.UTXO, error) { + smDB := vm.ctx.SharedMemory.GetDatabase(vm.platform) + defer vm.ctx.SharedMemory.ReleaseDatabase(vm.platform) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxoIDs := ids.Set{} + for _, addr := range addrs.List() { + utxos, _ := state.PlatformFunds(addr) + utxoIDs.Add(utxos...) + } + + utxos := []*ava.UTXO{} + for _, utxoID := range utxoIDs.List() { + utxo, err := state.PlatformUTXO(utxoID) + if err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + return utxos, nil +} + // GetUTXOs returns the utxos that at least one of the provided addresses is // referenced in. -func (vm *VM) GetUTXOs(addrs ids.Set) ([]*UTXO, error) { +func (vm *VM) GetUTXOs(addrs ids.Set) ([]*ava.UTXO, error) { utxoIDs := ids.Set{} for _, addr := range addrs.List() { utxos, _ := vm.state.Funds(addr) utxoIDs.Add(utxos...) } - utxos := []*UTXO{} + utxos := []*ava.UTXO{} for _, utxoID := range utxoIDs.List() { utxo, err := vm.state.UTXO(utxoID) if err != nil { @@ -296,6 +337,9 @@ func (vm *VM) Clock() *timer.Clock { return &vm.clock } // Codec returns a reference to the internal codec of this VM func (vm *VM) Codec() codec.Codec { return vm.codec } +// Logger returns a reference to the internal logger of this VM +func (vm *VM) Logger() logging.Logger { return vm.ctx.Log } + /* ****************************************************************************** ********************************** Timer API ********************************* @@ -398,18 +442,18 @@ func (vm *VM) parseTx(b []byte) (*UniqueTx, error) { rawTx.Initialize(b) tx := &UniqueTx{ + TxState: &TxState{ + Tx: rawTx, + }, vm: vm, txID: rawTx.ID(), - t: &txState{ - tx: rawTx, - }, } if err := tx.SyntacticVerify(); err != nil { return nil, err } if tx.Status() == choices.Unknown { - if err := vm.state.SetTx(tx.ID(), tx.t.tx); err != nil { + if err := vm.state.SetTx(tx.ID(), tx.Tx); err != nil { return nil, err } tx.setStatus(choices.Processing) @@ 
-428,6 +472,32 @@ func (vm *VM) issueTx(tx snowstorm.Tx) { } } +func (vm *VM) getUTXO(utxoID *ava.UTXOID) (*ava.UTXO, error) { + inputID := utxoID.InputID() + utxo, err := vm.state.UTXO(inputID) + if err == nil { + return utxo, nil + } + + inputTx, inputIndex := utxoID.InputSource() + parent := UniqueTx{ + vm: vm, + txID: inputTx, + } + + if err := parent.Verify(); err != nil { + return nil, errMissingUTXO + } else if status := parent.Status(); status.Decided() { + return nil, errMissingUTXO + } + + parentUTXOs := parent.UTXOs() + if uint32(len(parentUTXOs)) <= inputIndex || int(inputIndex) < 0 { + return nil, errInvalidUTXO + } + return parentUTXOs[int(inputIndex)], nil +} + func (vm *VM) getFx(val interface{}) (int, error) { valType := reflect.TypeOf(val) fx, exists := vm.typeToFxIndex[valType] @@ -445,7 +515,7 @@ func (vm *VM) verifyFxUsage(fxID int, assetID ids.ID) bool { if status := tx.Status(); !status.Fetched() { return false } - createAssetTx, ok := tx.t.tx.UnsignedTx.(*CreateAssetTx) + createAssetTx, ok := tx.UnsignedTx.(*CreateAssetTx) if !ok { return false } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index 65c82a5..ef54a18 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -15,7 +15,10 @@ import ( "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/units" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/nftfx" + "github.com/ava-labs/gecko/vms/propertyfx" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -46,16 +49,7 @@ func init() { } func GetFirstTxFromGenesisTest(genesisBytes []byte, t *testing.T) *Tx { - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - 
c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - + c := setupCodec() genesis := Genesis{} if err := c.Unmarshal(genesisBytes, &genesis); err != nil { t.Fatal(err) @@ -188,7 +182,7 @@ func GenesisVM(t *testing.T) *VM { func TestTxSerialization(t *testing.T) { expected := []byte{ // txID: - 0x00, 0x00, 0x00, 0x02, + 0x00, 0x00, 0x00, 0x01, // networkID: 0x00, 0x00, 0xa8, 0x66, // chainID: @@ -205,7 +199,7 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, @@ -226,7 +220,7 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, @@ -247,7 +241,7 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, // fxID: - 0x00, 0x00, 0x00, 0x04, + 0x00, 0x00, 0x00, 0x07, // secp256k1 Transferable Output: // amount: 0x00, 0x00, 0x12, 0x30, 0x9c, 0xe5, 0x40, 0x00, @@ -263,20 +257,24 @@ func TestTxSerialization(t *testing.T) { 0x92, 0xf0, 0xee, 0x31, // number of inputs: 0x00, 0x00, 0x00, 0x00, - // number of operations: + // name length: + 0x00, 0x04, + // name: + 'n', 'a', 'm', 'e', + // symbol length: + 0x00, 0x04, + // symbol: + 's', 'y', 'm', 'b', + // denomination + 0x00, + // number of initial states: 0x00, 0x00, 0x00, 0x01, - // operation[0]: - // assetID: - 0x01, 0x02, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, - // number of inputs: + // fx index: 0x00, 0x00, 0x00, 0x00, // number of outputs: 0x00, 0x00, 0x00, 0x01, // fxID: - 0x00, 0x00, 0x00, 0x03, + 0x00, 0x00, 0x00, 0x06, // secp256k1 Mint Output: // threshold: 0x00, 0x00, 0x00, 0x01, @@ -290,23 +288,22 @@ func TestTxSerialization(t *testing.T) { 0x00, 0x00, 0x00, 0x00, } - unsignedTx := &OperationTx{ + unsignedTx := &CreateAssetTx{ BaseTx: BaseTx{ NetID: networkID, BCID: chainID, }, - Ops: []*Operation{ - &Operation{ - Asset: Asset{ - ID: asset, - }, - Outs: []*OperableOutput{ - &OperableOutput{ - Out: &secp256k1fx.MintOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, - }, + Name: "name", + Symbol: "symb", + Denomination: 0, + States: []*InitialState{ + &InitialState{ + FxID: 0, + Outs: []verify.Verifiable{ + &secp256k1fx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, }, }, }, @@ -317,10 +314,8 @@ func TestTxSerialization(t *testing.T) { for _, key := range keys { addr := key.PublicKey().Address() - unsignedTx.Outs = append(unsignedTx.Outs, &TransferableOutput{ - Asset: Asset{ - ID: asset, - }, + unsignedTx.Outs = append(unsignedTx.Outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 20 * units.KiloAva, OutputOwners: secp256k1fx.OutputOwners{ @@ -331,16 +326,7 @@ func TestTxSerialization(t *testing.T) { }) } - c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) - c.RegisterType(&secp256k1fx.MintOutput{}) - c.RegisterType(&secp256k1fx.TransferOutput{}) - c.RegisterType(&secp256k1fx.MintInput{}) - c.RegisterType(&secp256k1fx.TransferInput{}) - c.RegisterType(&secp256k1fx.Credential{}) - + c := setupCodec() b, err := c.Marshal(tx) if err != nil { t.Fatal(err) @@ -441,29 +427,25 @@ func TestIssueTx(t *testing.T) { genesisTx := 
GetFirstTxFromGenesisTest(genesisBytes, t) - newTx := &Tx{UnsignedTx: &OperationTx{BaseTx: BaseTx{ + newTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*TransferableInput{ - &TransferableInput{ - UTXOID: UTXOID{ - TxID: genesisTx.ID(), - OutputIndex: 1, - }, - Asset: Asset{ - ID: genesisTx.ID(), - }, - In: &secp256k1fx.TransferInput{ - Amt: 50000, - Input: secp256k1fx.Input{ - SigIndices: []uint32{ - 0, - }, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, }, }, }, - }, - }}} + }}, + }} unsignedBytes, err := vm.codec.Marshal(&newTx.UnsignedTx) if err != nil { @@ -478,11 +460,9 @@ func TestIssueTx(t *testing.T) { fixedSig := [crypto.SECP256K1RSigLen]byte{} copy(fixedSig[:], sig) - newTx.Creds = append(newTx.Creds, &Credential{ - Cred: &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - fixedSig, - }, + newTx.Creds = append(newTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, }, }) @@ -492,7 +472,7 @@ func TestIssueTx(t *testing.T) { } newTx.Initialize(b) - txID, err := vm.IssueTx(newTx.Bytes()) + txID, err := vm.IssueTx(newTx.Bytes(), nil) if err != nil { t.Fatal(err) } @@ -544,6 +524,461 @@ func TestGenesisGetUTXOs(t *testing.T) { ctx.Lock.Unlock() if len(utxos) != 7 { - t.Fatalf("Wrong number of utxos (%d) returned", len(utxos)) + t.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", 7, len(utxos)) + } +} + +// Test issuing a transaction that consumes a currently pending UTXO. The +// transaction should be issued successfully. 
+func TestIssueDependentTx(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{&common.Fx{ + ID: ids.Empty, + Fx: &secp256k1fx.Fx{}, + }}, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) + + key := keys[0] + + firstTx := &Tx{UnsignedTx: &BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: genesisTx.ID(), + OutputIndex: 1, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 0, + }, + }, + }, + }}, + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: genesisTx.ID()}, + Out: &secp256k1fx.TransferOutput{ + Amt: 50000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&firstTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + firstTx.Creds = append(firstTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err := vm.codec.Marshal(firstTx) + if err != nil { + t.Fatal(err) + } + firstTx.Initialize(b) + + _, err = vm.IssueTx(firstTx.Bytes(), nil) + if err != nil { + t.Fatal(err) + } + + secondTx := &Tx{UnsignedTx: &BaseTx{ + NetID: networkID, + BCID: chainID, + Ins: []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: ava.UTXOID{ + TxID: firstTx.ID(), + OutputIndex: 0, + }, + Asset: ava.Asset{ID: genesisTx.ID()}, + In: &secp256k1fx.TransferInput{ + Amt: 50000, + Input: secp256k1fx.Input{ + SigIndices: []uint32{ + 
0, + }, + }, + }, + }}, + }} + + unsignedBytes, err = vm.codec.Marshal(&secondTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + sig, err = key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig = [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + secondTx.Creds = append(secondTx.Creds, &secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }, + }) + + b, err = vm.codec.Marshal(secondTx) + if err != nil { + t.Fatal(err) + } + secondTx.Initialize(b) + + _, err = vm.IssueTx(secondTx.Bytes(), nil) + if err != nil { + t.Fatal(err) + } + + ctx.Lock.Unlock() + + msg := <-issuer + if msg != common.PendingTxs { + t.Fatalf("Wrong message") + } + + if txs := vm.PendingTxs(); len(txs) != 2 { + t.Fatalf("Should have returned %d tx(s)", 2) + } +} + +// Test issuing a transaction that creates an NFT family +func TestIssueNFT(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.Empty.Prefix(0), + Fx: &secp256k1fx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty.Prefix(1), + Fx: &nftfx.Fx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Name: "Team Rocket", + Symbol: "TR", + Denomination: 0, + States: []*InitialState{&InitialState{ + FxID: 1, + Outs: []verify.Verifiable{ + &nftfx.MintOutput{ + GroupID: 1, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + &nftfx.MintOutput{ + GroupID: 2, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }}, + }} + + b, err := vm.codec.Marshal(createAssetTx) + if err != nil { + t.Fatal(err) + } + 
createAssetTx.Initialize(b) + + if _, err = vm.IssueTx(createAssetTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + mintNFTTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: createAssetTx.ID(), + OutputIndex: 0, + }}, + Op: &nftfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + GroupID: 1, + Payload: []byte{'h', 'e', 'l', 'l', 'o'}, + Outputs: []*secp256k1fx.OutputOwners{ + &secp256k1fx.OutputOwners{}, + }, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&mintNFTTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + mintNFTTx.Creds = append(mintNFTTx.Creds, &nftfx.Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }}, + }) + + b, err = vm.codec.Marshal(mintNFTTx) + if err != nil { + t.Fatal(err) + } + mintNFTTx.Initialize(b) + + if _, err = vm.IssueTx(mintNFTTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + transferNFTTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: mintNFTTx.ID(), + OutputIndex: 0, + }}, + Op: &nftfx.TransferOperation{ + Input: secp256k1fx.Input{}, + Output: nftfx.TransferOutput{ + GroupID: 1, + Payload: []byte{'h', 'e', 'l', 'l', 'o'}, + OutputOwners: secp256k1fx.OutputOwners{}, + }, + }, + }}, + }} + + transferNFTTx.Creds = append(transferNFTTx.Creds, &nftfx.Credential{}) + + b, err = vm.codec.Marshal(transferNFTTx) + if err != nil { + t.Fatal(err) + } + transferNFTTx.Initialize(b) + + if _, err = vm.IssueTx(transferNFTTx.Bytes(), nil); err != nil { + 
t.Fatal(err) + } +} + +// Test issuing a transaction that creates an Property family +func TestIssueProperty(t *testing.T) { + genesisBytes := BuildGenesisTest(t) + + issuer := make(chan common.Message, 1) + + ctx.Lock.Lock() + defer ctx.Lock.Unlock() + + vm := &VM{} + err := vm.Initialize( + ctx, + memdb.New(), + genesisBytes, + issuer, + []*common.Fx{ + &common.Fx{ + ID: ids.Empty.Prefix(0), + Fx: &secp256k1fx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty.Prefix(1), + Fx: &nftfx.Fx{}, + }, + &common.Fx{ + ID: ids.Empty.Prefix(2), + Fx: &propertyfx.Fx{}, + }, + }, + ) + if err != nil { + t.Fatal(err) + } + vm.batchTimeout = 0 + + createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Name: "Team Rocket", + Symbol: "TR", + Denomination: 0, + States: []*InitialState{&InitialState{ + FxID: 2, + Outs: []verify.Verifiable{ + &propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + }, + }}, + }} + + b, err := vm.codec.Marshal(createAssetTx) + if err != nil { + t.Fatal(err) + } + createAssetTx.Initialize(b) + + if _, err = vm.IssueTx(createAssetTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + mintPropertyTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: createAssetTx.ID(), + OutputIndex: 0, + }}, + Op: &propertyfx.MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: propertyfx.MintOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{keys[0].PublicKey().Address()}, + }, + }, + OwnedOutput: propertyfx.OwnedOutput{}, + }, + }}, + }} + + unsignedBytes, err := vm.codec.Marshal(&mintPropertyTx.UnsignedTx) + if err != nil { + t.Fatal(err) + } + + key := keys[0] + sig, err := 
key.Sign(unsignedBytes) + if err != nil { + t.Fatal(err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + mintPropertyTx.Creds = append(mintPropertyTx.Creds, &propertyfx.Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + fixedSig, + }}, + }) + + b, err = vm.codec.Marshal(mintPropertyTx) + if err != nil { + t.Fatal(err) + } + mintPropertyTx.Initialize(b) + + if _, err = vm.IssueTx(mintPropertyTx.Bytes(), nil); err != nil { + t.Fatal(err) + } + + burnPropertyTx := &Tx{UnsignedTx: &OperationTx{ + BaseTx: BaseTx{ + NetID: networkID, + BCID: chainID, + }, + Ops: []*Operation{&Operation{ + Asset: ava.Asset{ID: createAssetTx.ID()}, + UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + TxID: mintPropertyTx.ID(), + OutputIndex: 1, + }}, + Op: &propertyfx.BurnOperation{Input: secp256k1fx.Input{}}, + }}, + }} + + burnPropertyTx.Creds = append(burnPropertyTx.Creds, &propertyfx.Credential{}) + + b, err = vm.codec.Marshal(burnPropertyTx) + if err != nil { + t.Fatal(err) + } + burnPropertyTx.Initialize(b) + + if _, err = vm.IssueTx(burnPropertyTx.Bytes(), nil); err != nil { + t.Fatal(err) } } diff --git a/vms/avm/asset.go b/vms/components/ava/asset.go similarity index 91% rename from vms/avm/asset.go rename to vms/components/ava/asset.go index fc9ef07..710e6e4 100644 --- a/vms/avm/asset.go +++ b/vms/components/ava/asset.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "errors" @@ -16,7 +16,7 @@ var ( // Asset ... 
type Asset struct { - ID ids.ID `serialize:"true"` + ID ids.ID `serialize:"true" json:"assetID"` } // AssetID returns the ID of the contained asset diff --git a/vms/avm/asset_test.go b/vms/components/ava/asset_test.go similarity index 99% rename from vms/avm/asset_test.go rename to vms/components/ava/asset_test.go index 209cc81..40d6ea8 100644 --- a/vms/avm/asset_test.go +++ b/vms/components/ava/asset_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "testing" diff --git a/vms/components/ava/flow_checker.go b/vms/components/ava/flow_checker.go new file mode 100644 index 0000000..321f8dd --- /dev/null +++ b/vms/components/ava/flow_checker.go @@ -0,0 +1,57 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ava + +import ( + "errors" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/utils/wrappers" +) + +var ( + errInsufficientFunds = errors.New("insufficient funds") +) + +// FlowChecker ... +type FlowChecker struct { + consumed, produced map[[32]byte]uint64 + errs wrappers.Errs +} + +// NewFlowChecker ... +func NewFlowChecker() *FlowChecker { + return &FlowChecker{ + consumed: make(map[[32]byte]uint64), + produced: make(map[[32]byte]uint64), + } +} + +// Consume ... +func (fc *FlowChecker) Consume(assetID ids.ID, amount uint64) { fc.add(fc.consumed, assetID, amount) } + +// Produce ... +func (fc *FlowChecker) Produce(assetID ids.ID, amount uint64) { fc.add(fc.produced, assetID, amount) } + +func (fc *FlowChecker) add(value map[[32]byte]uint64, assetID ids.ID, amount uint64) { + var err error + assetIDKey := assetID.Key() + value[assetIDKey], err = math.Add64(value[assetIDKey], amount) + fc.errs.Add(err) +} + +// Verify ... 
+func (fc *FlowChecker) Verify() error { + if !fc.errs.Errored() { + for assetID, producedAssetAmount := range fc.produced { + consumedAssetAmount := fc.consumed[assetID] + if producedAssetAmount > consumedAssetAmount { + fc.errs.Add(errInsufficientFunds) + break + } + } + } + return fc.errs.Err +} diff --git a/vms/avm/metadata.go b/vms/components/ava/metadata.go similarity index 74% rename from vms/avm/metadata.go rename to vms/components/ava/metadata.go index fb29b44..3ae9228 100644 --- a/vms/avm/metadata.go +++ b/vms/components/ava/metadata.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "errors" @@ -15,25 +15,26 @@ var ( errMetadataNotInitialize = errors.New("metadata was never initialized and is not valid") ) -type metadata struct { +// Metadata ... +type Metadata struct { id ids.ID // The ID of this data bytes []byte // Byte representation of this data } -// Bytes returns the binary representation of this data -func (md *metadata) Initialize(bytes []byte) { +// Initialize set the bytes and ID +func (md *Metadata) Initialize(bytes []byte) { md.id = ids.NewID(hashing.ComputeHash256Array(bytes)) md.bytes = bytes } // ID returns the unique ID of this data -func (md *metadata) ID() ids.ID { return md.id } +func (md *Metadata) ID() ids.ID { return md.id } // Bytes returns the binary representation of this data -func (md *metadata) Bytes() []byte { return md.bytes } +func (md *Metadata) Bytes() []byte { return md.bytes } // Verify implements the verify.Verifiable interface -func (md *metadata) Verify() error { +func (md *Metadata) Verify() error { switch { case md == nil: return errNilMetadata diff --git a/vms/avm/metadata_test.go b/vms/components/ava/metadata_test.go similarity index 88% rename from vms/avm/metadata_test.go rename to vms/components/ava/metadata_test.go index 09c559b..bd6563b 100644 --- a/vms/avm/metadata_test.go +++ 
b/vms/components/ava/metadata_test.go @@ -1,21 +1,21 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "testing" ) func TestMetaDataVerifyNil(t *testing.T) { - md := (*metadata)(nil) + md := (*Metadata)(nil) if err := md.Verify(); err == nil { t.Fatalf("Should have errored due to nil metadata") } } func TestMetaDataVerifyUninitialized(t *testing.T) { - md := &metadata{} + md := &Metadata{} if err := md.Verify(); err == nil { t.Fatalf("Should have errored due to uninitialized metadata") } diff --git a/vms/components/ava/prefixed_state.go b/vms/components/ava/prefixed_state.go new file mode 100644 index 0000000..dd7f3e8 --- /dev/null +++ b/vms/components/ava/prefixed_state.go @@ -0,0 +1,208 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ava + +import ( + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/vms/components/codec" +) + +// Addressable is the interface a feature extension must provide to be able to +// be tracked as a part of the utxo set for a set of addresses +type Addressable interface { + Addresses() [][]byte +} + +const ( + platformUTXOID uint64 = iota + platformStatusID + platformFundsID + avmUTXOID + avmStatusID + avmFundsID +) + +const ( + stateCacheSize = 10000 + idCacheSize = 10000 +) + +type chainState struct { + *State + + utxoIDPrefix, statusIDPrefix, fundsIDPrefix uint64 + utxoID, statusID, fundsID cache.Cacher +} + +// UTXO attempts to load a utxo from platform's storage. +func (s *chainState) UTXO(id ids.ID) (*UTXO, error) { + return s.State.UTXO(UniqueID(id, s.utxoIDPrefix, s.utxoID)) +} + +// Funds returns the mapping from the 32 byte representation of an +// address to a list of utxo IDs that reference the address. 
+func (s *chainState) Funds(id ids.ID) ([]ids.ID, error) { + return s.IDs(UniqueID(id, s.fundsIDPrefix, s.fundsID)) +} + +// SpendUTXO consumes the provided platform utxo. +func (s *chainState) SpendUTXO(utxoID ids.ID) error { + utxo, err := s.UTXO(utxoID) + if err != nil { + return s.setStatus(utxoID, choices.Accepted) + } else if err := s.setUTXO(utxoID, nil); err != nil { + return err + } + + if addressable, ok := utxo.Out.(Addressable); ok { + return s.removeUTXO(addressable.Addresses(), utxoID) + } + return nil +} + +// FundUTXO adds the provided utxo to the database +func (s *chainState) FundUTXO(utxo *UTXO) error { + utxoID := utxo.InputID() + if _, err := s.status(utxoID); err == nil { + return s.setStatus(utxoID, choices.Unknown) + } else if err := s.setUTXO(utxoID, utxo); err != nil { + return err + } + + if addressable, ok := utxo.Out.(Addressable); ok { + return s.addUTXO(addressable.Addresses(), utxoID) + } + return nil +} + +// setUTXO saves the provided utxo to platform's storage. +func (s *chainState) setUTXO(id ids.ID, utxo *UTXO) error { + return s.SetUTXO(UniqueID(id, s.utxoIDPrefix, s.utxoID), utxo) +} + +func (s *chainState) status(id ids.ID) (choices.Status, error) { + return s.Status(UniqueID(id, s.statusIDPrefix, s.statusID)) +} + +// setStatus saves the provided platform status to storage. +func (s *chainState) setStatus(id ids.ID, status choices.Status) error { + return s.State.SetStatus(UniqueID(id, s.statusIDPrefix, s.statusID), status) +} + +func (s *chainState) removeUTXO(addrs [][]byte, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := ids.NewID(hashing.ComputeHash256Array(addr)) + utxos := ids.Set{} + funds, _ := s.Funds(addrID) + utxos.Add(funds...) 
+ utxos.Remove(utxoID) + if err := s.setFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} + +func (s *chainState) addUTXO(addrs [][]byte, utxoID ids.ID) error { + for _, addr := range addrs { + addrID := ids.NewID(hashing.ComputeHash256Array(addr)) + utxos := ids.Set{} + funds, _ := s.Funds(addrID) + utxos.Add(funds...) + utxos.Add(utxoID) + if err := s.setFunds(addrID, utxos.List()); err != nil { + return err + } + } + return nil +} + +func (s *chainState) setFunds(id ids.ID, idSlice []ids.ID) error { + return s.SetIDs(UniqueID(id, s.fundsIDPrefix, s.fundsID), idSlice) +} + +// PrefixedState wraps a state object. By prefixing the state, there will +// be no collisions between different types of objects that have the same hash. +type PrefixedState struct { + platform, avm chainState +} + +// NewPrefixedState ... +func NewPrefixedState(db database.Database, codec codec.Codec) *PrefixedState { + state := &State{ + Cache: &cache.LRU{Size: stateCacheSize}, + DB: db, + Codec: codec, + } + return &PrefixedState{ + platform: chainState{ + State: state, + + utxoIDPrefix: platformUTXOID, + statusIDPrefix: platformStatusID, + fundsIDPrefix: platformFundsID, + + utxoID: &cache.LRU{Size: idCacheSize}, + statusID: &cache.LRU{Size: idCacheSize}, + fundsID: &cache.LRU{Size: idCacheSize}, + }, + avm: chainState{ + State: state, + + utxoIDPrefix: avmUTXOID, + statusIDPrefix: avmStatusID, + fundsIDPrefix: avmFundsID, + + utxoID: &cache.LRU{Size: idCacheSize}, + statusID: &cache.LRU{Size: idCacheSize}, + fundsID: &cache.LRU{Size: idCacheSize}, + }, + } +} + +// PlatformUTXO attempts to load a utxo from platform's storage. +func (s *PrefixedState) PlatformUTXO(id ids.ID) (*UTXO, error) { + return s.platform.UTXO(id) +} + +// PlatformFunds returns the mapping from the 32 byte representation of an +// address to a list of utxo IDs that reference the address. 
+func (s *PrefixedState) PlatformFunds(id ids.ID) ([]ids.ID, error) { + return s.platform.Funds(id) +} + +// SpendPlatformUTXO consumes the provided platform utxo. +func (s *PrefixedState) SpendPlatformUTXO(utxoID ids.ID) error { + return s.platform.SpendUTXO(utxoID) +} + +// FundPlatformUTXO adds the provided utxo to the database +func (s *PrefixedState) FundPlatformUTXO(utxo *UTXO) error { + return s.platform.FundUTXO(utxo) +} + +// AVMUTXO attempts to load a utxo from avm's storage. +func (s *PrefixedState) AVMUTXO(id ids.ID) (*UTXO, error) { + return s.avm.UTXO(id) +} + +// AVMFunds returns the mapping from the 32 byte representation of an +// address to a list of utxo IDs that reference the address. +func (s *PrefixedState) AVMFunds(id ids.ID) ([]ids.ID, error) { + return s.avm.Funds(id) +} + +// SpendAVMUTXO consumes the provided platform utxo. +func (s *PrefixedState) SpendAVMUTXO(utxoID ids.ID) error { + return s.avm.SpendUTXO(utxoID) +} + +// FundAVMUTXO adds the provided utxo to the database +func (s *PrefixedState) FundAVMUTXO(utxo *UTXO) error { + return s.avm.FundUTXO(utxo) +} diff --git a/vms/components/ava/state.go b/vms/components/ava/state.go new file mode 100644 index 0000000..a9c5424 --- /dev/null +++ b/vms/components/ava/state.go @@ -0,0 +1,153 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package ava + +import ( + "errors" + + "github.com/ava-labs/gecko/cache" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/vms/components/codec" +) + +var ( + errCacheTypeMismatch = errors.New("type returned from cache doesn't match the expected type") +) + +// UniqueID returns a unique identifier +func UniqueID(id ids.ID, prefix uint64, cacher cache.Cacher) ids.ID { + if cachedIDIntf, found := cacher.Get(id); found { + return cachedIDIntf.(ids.ID) + } + uID := id.Prefix(prefix) + cacher.Put(id, uID) + return uID +} + +// State is a thin wrapper around a database to provide, caching, serialization, +// and de-serialization. +type State struct { + Cache cache.Cacher + DB database.Database + Codec codec.Codec +} + +// UTXO attempts to load a utxo from storage. +func (s *State) UTXO(id ids.ID) (*UTXO, error) { + if utxoIntf, found := s.Cache.Get(id); found { + if utxo, ok := utxoIntf.(*UTXO); ok { + return utxo, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.DB.Get(id.Bytes()) + if err != nil { + return nil, err + } + + // The key was in the database + utxo := &UTXO{} + if err := s.Codec.Unmarshal(bytes, utxo); err != nil { + return nil, err + } + + s.Cache.Put(id, utxo) + return utxo, nil +} + +// SetUTXO saves the provided utxo to storage. +func (s *State) SetUTXO(id ids.ID, utxo *UTXO) error { + if utxo == nil { + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) + } + + bytes, err := s.Codec.Marshal(utxo) + if err != nil { + return err + } + + s.Cache.Put(id, utxo) + return s.DB.Put(id.Bytes(), bytes) +} + +// Status returns a status from storage. 
+func (s *State) Status(id ids.ID) (choices.Status, error) { + if statusIntf, found := s.Cache.Get(id); found { + if status, ok := statusIntf.(choices.Status); ok { + return status, nil + } + return choices.Unknown, errCacheTypeMismatch + } + + bytes, err := s.DB.Get(id.Bytes()) + if err != nil { + return choices.Unknown, err + } + + var status choices.Status + s.Codec.Unmarshal(bytes, &status) + + s.Cache.Put(id, status) + return status, nil +} + +// SetStatus saves a status in storage. +func (s *State) SetStatus(id ids.ID, status choices.Status) error { + if status == choices.Unknown { + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) + } + + s.Cache.Put(id, status) + + bytes, err := s.Codec.Marshal(status) + if err != nil { + return err + } + return s.DB.Put(id.Bytes(), bytes) +} + +// IDs returns a slice of IDs from storage +func (s *State) IDs(id ids.ID) ([]ids.ID, error) { + if idsIntf, found := s.Cache.Get(id); found { + if idSlice, ok := idsIntf.([]ids.ID); ok { + return idSlice, nil + } + return nil, errCacheTypeMismatch + } + + bytes, err := s.DB.Get(id.Bytes()) + if err != nil { + return nil, err + } + + idSlice := []ids.ID(nil) + if err := s.Codec.Unmarshal(bytes, &idSlice); err != nil { + return nil, err + } + + s.Cache.Put(id, idSlice) + return idSlice, nil +} + +// SetIDs saves a slice of IDs to the database. +func (s *State) SetIDs(id ids.ID, idSlice []ids.ID) error { + if len(idSlice) == 0 { + s.Cache.Evict(id) + return s.DB.Delete(id.Bytes()) + } + + s.Cache.Put(id, idSlice) + + bytes, err := s.Codec.Marshal(idSlice) + if err != nil { + return err + } + + return s.DB.Put(id.Bytes(), bytes) +} diff --git a/vms/components/ava/test_verifiable.go b/vms/components/ava/test_verifiable.go new file mode 100644 index 0000000..34dce1d --- /dev/null +++ b/vms/components/ava/test_verifiable.go @@ -0,0 +1,20 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ava + +// TestVerifiable ... 
+type TestVerifiable struct{ Err error } + +// Verify ... +func (v *TestVerifiable) Verify() error { return v.Err } + +// TestTransferable ... +type TestTransferable struct { + TestVerifiable + + Val uint64 `serialize:"true"` +} + +// Amount ... +func (t *TestTransferable) Amount() uint64 { return t.Val } diff --git a/vms/avm/transferables.go b/vms/components/ava/transferables.go similarity index 57% rename from vms/avm/transferables.go rename to vms/components/ava/transferables.go index fc5536b..4aa906d 100644 --- a/vms/avm/transferables.go +++ b/vms/components/ava/transferables.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "bytes" @@ -9,6 +9,7 @@ import ( "sort" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/verify" ) @@ -21,15 +22,25 @@ var ( errNilTransferableFxInput = errors.New("nil transferable feature extension input is not valid") ) +// Transferable is the interface a feature extension must provide to transfer +// value between features extensions. +type Transferable interface { + verify.Verifiable + + // Amount returns how much value this output consumes of the asset in its + // transaction. + Amount() uint64 +} + // TransferableOutput ... type TransferableOutput struct { Asset `serialize:"true"` - Out FxTransferable `serialize:"true"` + Out Transferable `serialize:"true" json:"output"` } // Output returns the feature extension output that this Output is using. 
-func (out *TransferableOutput) Output() FxTransferable { return out.Out } +func (out *TransferableOutput) Output() Transferable { return out.Out } // Verify implements the verify.Verifiable interface func (out *TransferableOutput) Verify() error { @@ -75,10 +86,13 @@ func (outs *innerSortTransferableOutputs) Less(i, j int) bool { func (outs *innerSortTransferableOutputs) Len() int { return len(outs.outs) } func (outs *innerSortTransferableOutputs) Swap(i, j int) { o := outs.outs; o[j], o[i] = o[i], o[j] } -func sortTransferableOutputs(outs []*TransferableOutput, c codec.Codec) { +// SortTransferableOutputs sorts output objects +func SortTransferableOutputs(outs []*TransferableOutput, c codec.Codec) { sort.Sort(&innerSortTransferableOutputs{outs: outs, codec: c}) } -func isSortedTransferableOutputs(outs []*TransferableOutput, c codec.Codec) bool { + +// IsSortedTransferableOutputs returns true if output objects are sorted +func IsSortedTransferableOutputs(outs []*TransferableOutput, c codec.Codec) bool { return sort.IsSorted(&innerSortTransferableOutputs{outs: outs, codec: c}) } @@ -87,11 +101,11 @@ type TransferableInput struct { UTXOID `serialize:"true"` Asset `serialize:"true"` - In FxTransferable `serialize:"true"` + In Transferable `serialize:"true" json:"input"` } // Input returns the feature extension input that this Input is using. 
-func (in *TransferableInput) Input() FxTransferable { return in.In } +func (in *TransferableInput) Input() Transferable { return in.In } // Verify implements the verify.Verifiable interface func (in *TransferableInput) Verify() error { @@ -123,7 +137,46 @@ func (ins innerSortTransferableInputs) Less(i, j int) bool { func (ins innerSortTransferableInputs) Len() int { return len(ins) } func (ins innerSortTransferableInputs) Swap(i, j int) { ins[j], ins[i] = ins[i], ins[j] } -func sortTransferableInputs(ins []*TransferableInput) { sort.Sort(innerSortTransferableInputs(ins)) } -func isSortedAndUniqueTransferableInputs(ins []*TransferableInput) bool { +// SortTransferableInputs ... +func SortTransferableInputs(ins []*TransferableInput) { sort.Sort(innerSortTransferableInputs(ins)) } + +// IsSortedAndUniqueTransferableInputs ... +func IsSortedAndUniqueTransferableInputs(ins []*TransferableInput) bool { return utils.IsSortedAndUnique(innerSortTransferableInputs(ins)) } + +type innerSortTransferableInputsWithSigners struct { + ins []*TransferableInput + signers [][]*crypto.PrivateKeySECP256K1R +} + +func (ins *innerSortTransferableInputsWithSigners) Less(i, j int) bool { + iID, iIndex := ins.ins[i].InputSource() + jID, jIndex := ins.ins[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true + case 0: + return iIndex < jIndex + default: + return false + } +} +func (ins *innerSortTransferableInputsWithSigners) Len() int { return len(ins.ins) } +func (ins *innerSortTransferableInputsWithSigners) Swap(i, j int) { + ins.ins[j], ins.ins[i] = ins.ins[i], ins.ins[j] + ins.signers[j], ins.signers[i] = ins.signers[i], ins.signers[j] +} + +// SortTransferableInputsWithSigners sorts the inputs and signers based on the +// input's utxo ID +func SortTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) { + sort.Sort(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) +} + +// 
IsSortedAndUniqueTransferableInputsWithSigners returns true if the inputs are +// sorted and unique +func IsSortedAndUniqueTransferableInputsWithSigners(ins []*TransferableInput, signers [][]*crypto.PrivateKeySECP256K1R) bool { + return utils.IsSortedAndUnique(&innerSortTransferableInputsWithSigners{ins: ins, signers: signers}) +} diff --git a/vms/avm/transferables_test.go b/vms/components/ava/transferables_test.go similarity index 89% rename from vms/avm/transferables_test.go rename to vms/components/ava/transferables_test.go index 015ac5d..80205a6 100644 --- a/vms/avm/transferables_test.go +++ b/vms/components/ava/transferables_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "bytes" @@ -21,11 +21,7 @@ func TestTransferableOutputVerifyNil(t *testing.T) { } func TestTransferableOutputVerifyNilFx(t *testing.T) { - to := &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - } + to := &TransferableOutput{Asset: Asset{ID: ids.Empty}} if err := to.Verify(); err == nil { t.Fatalf("Should have errored due to nil transferable fx output") } @@ -33,12 +29,8 @@ func TestTransferableOutputVerifyNilFx(t *testing.T) { func TestTransferableOutputVerify(t *testing.T) { to := &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{ - Val: 1, - }, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 1}, } if err := to.Verify(); err != nil { t.Fatal(err) @@ -54,42 +46,32 @@ func TestTransferableOutputSorting(t *testing.T) { outs := []*TransferableOutput{ &TransferableOutput{ - Asset: Asset{ - ID: ids.NewID([32]byte{1}), - }, - Out: &TestTransferable{Val: 1}, + Asset: Asset{ID: ids.NewID([32]byte{1})}, + Out: &TestTransferable{Val: 1}, }, &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{Val: 1}, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 1}, }, &TransferableOutput{ - Asset: Asset{ - 
ID: ids.NewID([32]byte{1}), - }, - Out: &TestTransferable{Val: 0}, + Asset: Asset{ID: ids.NewID([32]byte{1})}, + Out: &TestTransferable{Val: 0}, }, &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{Val: 0}, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 0}, }, &TransferableOutput{ - Asset: Asset{ - ID: ids.Empty, - }, - Out: &TestTransferable{Val: 0}, + Asset: Asset{ID: ids.Empty}, + Out: &TestTransferable{Val: 0}, }, } - if isSortedTransferableOutputs(outs, c) { + if IsSortedTransferableOutputs(outs, c) { t.Fatalf("Shouldn't be sorted") } - sortTransferableOutputs(outs, c) - if !isSortedTransferableOutputs(outs, c) { + SortTransferableOutputs(outs, c) + if !IsSortedTransferableOutputs(outs, c) { t.Fatalf("Should be sorted") } if result := outs[0].Out.(*TestTransferable).Val; result != 0 { @@ -243,11 +225,11 @@ func TestTransferableInputSorting(t *testing.T) { }, } - if isSortedAndUniqueTransferableInputs(ins) { + if IsSortedAndUniqueTransferableInputs(ins) { t.Fatalf("Shouldn't be sorted") } - sortTransferableInputs(ins) - if !isSortedAndUniqueTransferableInputs(ins) { + SortTransferableInputs(ins) + if !IsSortedAndUniqueTransferableInputs(ins) { t.Fatalf("Should be sorted") } @@ -260,7 +242,7 @@ func TestTransferableInputSorting(t *testing.T) { In: &TestTransferable{}, }) - if isSortedAndUniqueTransferableInputs(ins) { + if IsSortedAndUniqueTransferableInputs(ins) { t.Fatalf("Shouldn't be unique") } } diff --git a/vms/avm/utxo.go b/vms/components/ava/utxo.go similarity index 90% rename from vms/avm/utxo.go rename to vms/components/ava/utxo.go index e431e55..dee62ec 100644 --- a/vms/avm/utxo.go +++ b/vms/components/ava/utxo.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package avm +package ava import ( "errors" @@ -19,7 +19,7 @@ type UTXO struct { UTXOID `serialize:"true"` Asset `serialize:"true"` - Out verify.Verifiable `serialize:"true"` + Out verify.Verifiable `serialize:"true" json:"output"` } // Verify implements the verify.Verifiable interface diff --git a/vms/components/ava/utxo_id.go b/vms/components/ava/utxo_id.go new file mode 100644 index 0000000..b21000a --- /dev/null +++ b/vms/components/ava/utxo_id.go @@ -0,0 +1,83 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package ava + +import ( + "bytes" + "errors" + "sort" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils" +) + +var ( + errNilUTXOID = errors.New("nil utxo ID is not valid") + errNilTxID = errors.New("nil tx ID is not valid") +) + +// UTXOID ... +type UTXOID struct { + // Serialized: + TxID ids.ID `serialize:"true" json:"txID"` + OutputIndex uint32 `serialize:"true" json:"outputIndex"` + + // Symbol is false if the UTXO should be part of the DB + Symbol bool + // id is the unique ID of a UTXO, it is calculated from TxID and OutputIndex + id ids.ID +} + +// InputSource returns the source of the UTXO that this input is spending +func (utxo *UTXOID) InputSource() (ids.ID, uint32) { return utxo.TxID, utxo.OutputIndex } + +// InputID returns a unique ID of the UTXO that this input is spending +func (utxo *UTXOID) InputID() ids.ID { + if utxo.id.IsZero() { + utxo.id = utxo.TxID.Prefix(uint64(utxo.OutputIndex)) + } + return utxo.id +} + +// Symbolic returns if this is the ID of a UTXO in the DB, or if it is a +// symbolic input +func (utxo *UTXOID) Symbolic() bool { return utxo.Symbol } + +// Verify implements the verify.Verifiable interface +func (utxo *UTXOID) Verify() error { + switch { + case utxo == nil: + return errNilUTXOID + case utxo.TxID.IsZero(): + return errNilTxID + default: + return nil + } +} + +type innerSortUTXOIDs []*UTXOID + +func (utxos innerSortUTXOIDs) Less(i, j 
int) bool { + iID, iIndex := utxos[i].InputSource() + jID, jIndex := utxos[j].InputSource() + + switch bytes.Compare(iID.Bytes(), jID.Bytes()) { + case -1: + return true + case 0: + return iIndex < jIndex + default: + return false + } +} +func (utxos innerSortUTXOIDs) Len() int { return len(utxos) } +func (utxos innerSortUTXOIDs) Swap(i, j int) { utxos[j], utxos[i] = utxos[i], utxos[j] } + +// SortUTXOIDs ... +func SortUTXOIDs(utxos []*UTXOID) { sort.Sort(innerSortUTXOIDs(utxos)) } + +// IsSortedAndUniqueUTXOIDs ... +func IsSortedAndUniqueUTXOIDs(utxos []*UTXOID) bool { + return utils.IsSortedAndUnique(innerSortUTXOIDs(utxos)) +} diff --git a/vms/avm/utxo_id_test.go b/vms/components/ava/utxo_id_test.go similarity index 99% rename from vms/avm/utxo_id_test.go rename to vms/components/ava/utxo_id_test.go index fed513f..7944961 100644 --- a/vms/avm/utxo_id_test.go +++ b/vms/components/ava/utxo_id_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. -package avm +package ava import ( "testing" diff --git a/vms/avm/utxo_test.go b/vms/components/ava/utxo_test.go similarity index 95% rename from vms/avm/utxo_test.go rename to vms/components/ava/utxo_test.go index 6f043db..07b067a 100644 --- a/vms/avm/utxo_test.go +++ b/vms/components/ava/utxo_test.go @@ -1,7 +1,7 @@ // (c) 2019-2020, Ava Labs, Inc. All rights reserved. // See the file LICENSE for licensing terms. 
-package avm +package ava import ( "bytes" @@ -34,9 +34,6 @@ func TestUTXOVerifyEmpty(t *testing.T) { func TestUTXOSerialize(t *testing.T) { c := codec.NewDefault() - c.RegisterType(&BaseTx{}) - c.RegisterType(&CreateAssetTx{}) - c.RegisterType(&OperationTx{}) c.RegisterType(&secp256k1fx.MintOutput{}) c.RegisterType(&secp256k1fx.TransferOutput{}) c.RegisterType(&secp256k1fx.MintInput{}) @@ -57,7 +54,7 @@ func TestUTXOSerialize(t *testing.T) { 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, // output: - 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x30, 0x39, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd4, 0x31, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x02, 0x00, 0x01, 0x02, 0x03, diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index a80d9bc..8005f34 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -12,18 +12,6 @@ import ( "github.com/ava-labs/gecko/utils/wrappers" ) -// Type is an identifier for a codec -type Type uint32 - -// Codec types -const ( - NoType Type = iota - GenericType - CustomType - // TODO: Utilize a standard serialization library. Must have a canonical - // serialization format. -) - const ( defaultMaxSize = 1 << 18 // default max size, in bytes, of something being marshalled by Marshal() defaultMaxSliceLength = 1 << 18 // default max length of a slice being marshalled by Marshal() @@ -45,30 +33,6 @@ var ( errSliceTooLarge = errors.New("slice too large") ) -// Verify that the codec is a known codec value. Returns nil if the codec is -// valid. 
-func (c Type) Verify() error { - switch c { - case NoType, GenericType, CustomType: - return nil - default: - return errBadCodec - } -} - -func (c Type) String() string { - switch c { - case NoType: - return "No Codec" - case GenericType: - return "Generic Codec" - case CustomType: - return "Custom Codec" - default: - return "Unknown Codec" - } -} - // Codec handles marshaling and unmarshaling of structs type codec struct { maxSize int @@ -324,6 +288,11 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { if !ok { return errUnmarshalUnregisteredType } + // Ensure struct actually does implement the interface + fieldType := field.Type() + if !typ.Implements(fieldType) { + return fmt.Errorf("%s does not implement interface %s", typ, fieldType) + } concreteInstancePtr := reflect.New(typ) // instance of the proper type // Unmarshal into the struct if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index 6fc4f25..6fdfeba 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -46,6 +46,7 @@ type myStruct struct { InnerStruct MyInnerStruct `serialize:"true"` InnerStruct2 *MyInnerStruct `serialize:"true"` Member1 int64 `serialize:"true"` + Member2 uint16 `serialize:"true"` MyArray2 [5]string `serialize:"true"` MyArray3 [3]MyInnerStruct `serialize:"true"` MyArray4 [2]*MyInnerStruct2 `serialize:"true"` @@ -67,6 +68,7 @@ func TestStruct(t *testing.T) { InnerStruct: MyInnerStruct{"hello"}, InnerStruct2: &MyInnerStruct{"yello"}, Member1: 1, + Member2: 2, MySlice: []byte{1, 2, 3, 4}, MySlice2: []string{"one", "two", "three"}, MySlice3: []MyInnerStruct{MyInnerStruct{"a"}, MyInnerStruct{"b"}, MyInnerStruct{"c"}}, @@ -410,6 +412,33 @@ func TestSerializeUnexportedField(t *testing.T) { } } +func TestSerializeOfNoSerializeField(t *testing.T) { + type s struct { + SerializedField string `serialize:"true"` + UnserializedField 
string `serialize:"false"` + UnmarkedField string + } + myS := s{ + SerializedField: "Serialize me", + UnserializedField: "Do not serialize me", + UnmarkedField: "No declared serialize", + } + codec := NewDefault() + marshalled, err := codec.Marshal(myS) + if err != nil { + t.Fatalf("Unexpected error %q", err) + } + unmarshalled := s{} + err = codec.Unmarshal(marshalled, &unmarshalled) + if err != nil { + t.Fatalf("Unexpected error %q", err) + } + expectedUnmarshalled := s{SerializedField: "Serialize me"} + if !reflect.DeepEqual(unmarshalled, expectedUnmarshalled) { + t.Fatalf("Got %#v, expected %#v", unmarshalled, expectedUnmarshalled) + } +} + type simpleSliceStruct struct { Arr []uint32 `serialize:"true"` } @@ -538,3 +567,42 @@ func TestTooLargeUnmarshal(t *testing.T) { t.Fatalf("Should have errored due to too many bytes provided") } } + +type outerInterface interface { + ToInt() int +} + +type outer struct { + Interface outerInterface `serialize:"true"` +} + +type innerInterface struct{} + +func (it *innerInterface) ToInt() int { + return 0 +} + +type innerNoInterface struct{} + +// Ensure deserializing structs into the wrong interface errors gracefully +func TestUnmarshalInvalidInterface(t *testing.T) { + codec := NewDefault() + + codec.RegisterType(&innerInterface{}) + codec.RegisterType(&innerNoInterface{}) + + { + bytes := []byte{0, 0, 0, 0} + s := outer{} + if err := codec.Unmarshal(bytes, &s); err != nil { + t.Fatal(err) + } + } + { + bytes := []byte{0, 0, 0, 1} + s := outer{} + if err := codec.Unmarshal(bytes, &s); err == nil { + t.Fatalf("should have errored") + } + } +} diff --git a/vms/components/core/snowman_vm.go b/vms/components/core/snowman_vm.go index a659b1b..f710edd 100644 --- a/vms/components/core/snowman_vm.go +++ b/vms/components/core/snowman_vm.go @@ -83,6 +83,10 @@ func (svm *SnowmanVM) GetBlock(ID ids.ID) (snowman.Block, error) { // Shutdown this vm func (svm *SnowmanVM) Shutdown() { + if svm.DB == nil { + return + } + svm.DB.Commit() // 
Flush DB svm.DB.GetDatabase().Close() // close underlying database svm.DB.Close() // close versionDB diff --git a/vms/evm/block.go b/vms/evm/block.go deleted file mode 100644 index ec47490..0000000 --- a/vms/evm/block.go +++ /dev/null @@ -1,75 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "fmt" - - "github.com/ava-labs/go-ethereum/core/types" - "github.com/ava-labs/go-ethereum/rlp" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowman" -) - -// Block implements the snowman.Block interface -type Block struct { - id ids.ID - ethBlock *types.Block - vm *VM -} - -// ID implements the snowman.Block interface -func (b *Block) ID() ids.ID { return b.id } - -// Accept implements the snowman.Block interface -func (b *Block) Accept() { - b.vm.ctx.Log.Verbo("Block %s is accepted", b.ID()) - b.vm.updateStatus(b.ID(), choices.Accepted) -} - -// Reject implements the snowman.Block interface -func (b *Block) Reject() { - b.vm.ctx.Log.Verbo("Block %s is rejected", b.ID()) - b.vm.updateStatus(b.ID(), choices.Rejected) -} - -// Status implements the snowman.Block interface -func (b *Block) Status() choices.Status { - status := b.vm.getCachedStatus(b.ID()) - if status == choices.Unknown && b.ethBlock != nil { - return choices.Processing - } - return status -} - -// Parent implements the snowman.Block interface -func (b *Block) Parent() snowman.Block { - parentID := ids.NewID(b.ethBlock.ParentHash()) - block := &Block{ - id: parentID, - ethBlock: b.vm.getCachedBlock(parentID), - vm: b.vm, - } - b.vm.ctx.Log.Verbo("Parent(%s) has status: %s", block.ID(), block.Status()) - return block -} - -// Verify implements the snowman.Block interface -func (b *Block) Verify() error { - _, err := b.vm.chain.InsertChain([]*types.Block{b.ethBlock}) - return err -} - -// Bytes implements the snowman.Block interface -func (b *Block) 
Bytes() []byte { - res, err := rlp.EncodeToBytes(b.ethBlock) - if err != nil { - panic(err) - } - return res -} - -func (b *Block) String() string { return fmt.Sprintf("EVM block, ID = %s", b.ID()) } diff --git a/vms/evm/database.go b/vms/evm/database.go deleted file mode 100644 index de592e1..0000000 --- a/vms/evm/database.go +++ /dev/null @@ -1,66 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "errors" - - "github.com/ava-labs/go-ethereum/ethdb" - - "github.com/ava-labs/gecko/database" -) - -var ( - errOpNotSupported = errors.New("this operation is not supported") -) - -// Database implements ethdb.Database -type Database struct{ database.Database } - -// HasAncient returns an error as we don't have a backing chain freezer. -func (db Database) HasAncient(kind string, number uint64) (bool, error) { - return false, errOpNotSupported -} - -// Ancient returns an error as we don't have a backing chain freezer. -func (db Database) Ancient(kind string, number uint64) ([]byte, error) { return nil, errOpNotSupported } - -// Ancients returns an error as we don't have a backing chain freezer. -func (db Database) Ancients() (uint64, error) { return 0, errOpNotSupported } - -// AncientSize returns an error as we don't have a backing chain freezer. -func (db Database) AncientSize(kind string) (uint64, error) { return 0, errOpNotSupported } - -// AppendAncient returns an error as we don't have a backing chain freezer. -func (db Database) AppendAncient(number uint64, hash, header, body, receipts, td []byte) error { - return errOpNotSupported -} - -// TruncateAncients returns an error as we don't have a backing chain freezer. -func (db Database) TruncateAncients(items uint64) error { return errOpNotSupported } - -// Sync returns an error as we don't have a backing chain freezer. 
-func (db Database) Sync() error { return errOpNotSupported } - -// NewBatch implements ethdb.Database -func (db Database) NewBatch() ethdb.Batch { return Batch{db.Database.NewBatch()} } - -// NewIterator implements ethdb.Database -func (db Database) NewIterator() ethdb.Iterator { return db.Database.NewIterator() } - -// NewIteratorWithPrefix implements ethdb.Database -func (db Database) NewIteratorWithPrefix(prefix []byte) ethdb.Iterator { - return db.NewIteratorWithPrefix(prefix) -} - -// NewIteratorWithStart implements ethdb.Database -func (db Database) NewIteratorWithStart(start []byte) ethdb.Iterator { - return db.NewIteratorWithStart(start) -} - -// Batch implements ethdb.Batch -type Batch struct{ database.Batch } - -// Replay implements ethdb.Batch -func (batch Batch) Replay(w ethdb.KeyValueWriter) error { return batch.Batch.Replay(w) } diff --git a/vms/evm/factory.go b/vms/evm/factory.go deleted file mode 100644 index a4c0eca..0000000 --- a/vms/evm/factory.go +++ /dev/null @@ -1,19 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "github.com/ava-labs/gecko/ids" -) - -// ID this VM should be referenced by -var ( - ID = ids.NewID([32]byte{'e', 'v', 'm'}) -) - -// Factory ... -type Factory struct{} - -// New ... -func (f *Factory) New() interface{} { return &VM{} } diff --git a/vms/evm/service.go b/vms/evm/service.go deleted file mode 100644 index 70135cc..0000000 --- a/vms/evm/service.go +++ /dev/null @@ -1,122 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "context" - "crypto/rand" - "fmt" - "math/big" - - "github.com/ava-labs/coreth" - - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/common/hexutil" - "github.com/ava-labs/go-ethereum/core/types" - "github.com/ava-labs/go-ethereum/crypto" -) - -const ( - version = "Athereum 1.0" -) - -// test constants -const ( - GenesisTestAddr = "0x751a0b96e1042bee789452ecb20253fba40dbe85" - GenesisTestKey = "0xabd71b35d559563fea757f0f5edbde286fb8c043105b15abb7cd57189306d7d1" -) - -// DebugAPI introduces helper functions for debuging -type DebugAPI struct{ vm *VM } - -// SnowmanAPI introduces snowman specific functionality to the evm -type SnowmanAPI struct{ vm *VM } - -// NetAPI offers network related API methods -type NetAPI struct{ vm *VM } - -// NewNetAPI creates a new net API instance. -func NewNetAPI(vm *VM) *NetAPI { return &NetAPI{vm} } - -// Listening returns an indication if the node is listening for network connections. -func (s *NetAPI) Listening() bool { return true } // always listening - -// PeerCount returns the number of connected peers -func (s *NetAPI) PeerCount() hexutil.Uint { return hexutil.Uint(0) } // TODO: report number of connected peers - -// Version returns the current ethereum protocol version. 
-func (s *NetAPI) Version() string { return fmt.Sprintf("%d", s.vm.networkID) } - -// Web3API offers helper API methods -type Web3API struct{} - -// ClientVersion returns the version of the vm running -func (s *Web3API) ClientVersion() string { return version } - -// Sha3 returns the bytes returned by hashing [input] with Keccak256 -func (s *Web3API) Sha3(input hexutil.Bytes) hexutil.Bytes { return crypto.Keccak256(input) } - -// GetAcceptedFrontReply defines the reply that will be sent from the -// GetAcceptedFront API call -type GetAcceptedFrontReply struct { - Hash common.Hash `json:"hash"` - Number *big.Int `json:"number"` -} - -// GetAcceptedFront returns the last accepted block's hash and height -func (api *SnowmanAPI) GetAcceptedFront(ctx context.Context) (*GetAcceptedFrontReply, error) { - blk := api.vm.getLastAccepted().ethBlock - return &GetAcceptedFrontReply{ - Hash: blk.Hash(), - Number: blk.Number(), - }, nil -} - -// GetGenesisBalance returns the current funds in the genesis -func (api *DebugAPI) GetGenesisBalance(ctx context.Context) (*hexutil.Big, error) { - lastAccepted := api.vm.getLastAccepted() - api.vm.ctx.Log.Verbo("Currently accepted block front: %s", lastAccepted.ethBlock.Hash().Hex()) - state, err := api.vm.chain.BlockState(lastAccepted.ethBlock) - if err != nil { - return nil, err - } - return (*hexutil.Big)(state.GetBalance(common.HexToAddress(GenesisTestAddr))), nil -} - -// SpendGenesis funds -func (api *DebugAPI) SpendGenesis(ctx context.Context, nonce uint64) error { - api.vm.ctx.Log.Info("Spending the genesis") - - value := big.NewInt(1000000000000) - gasLimit := 21000 - gasPrice := big.NewInt(1000000000) - - genPrivateKey, err := crypto.HexToECDSA(GenesisTestKey[2:]) - if err != nil { - return err - } - bob, err := coreth.NewKey(rand.Reader) - if err != nil { - return err - } - - tx := types.NewTransaction(nonce, bob.Address, value, uint64(gasLimit), gasPrice, nil) - signedTx, err := types.SignTx(tx, 
types.NewEIP155Signer(api.vm.chainID), genPrivateKey) - if err != nil { - return err - } - - if err := api.vm.issueRemoteTxs([]*types.Transaction{signedTx}); err != nil { - return err - } - - return nil -} - -// IssueBlock to the chain -func (api *DebugAPI) IssueBlock(ctx context.Context) error { - api.vm.ctx.Log.Info("Issuing a new block") - - return api.vm.tryBlockGen() -} diff --git a/vms/evm/static_service.go b/vms/evm/static_service.go deleted file mode 100644 index d3870ca..0000000 --- a/vms/evm/static_service.go +++ /dev/null @@ -1,22 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "context" - "encoding/json" - - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/gecko/utils/formatting" -) - -// StaticService defines the static API services exposed by the evm -type StaticService struct{} - -// BuildGenesis returns the UTXOs such that at least one address in [args.Addresses] is -// referenced in the UTXO. -func (*StaticService) BuildGenesis(_ context.Context, args *core.Genesis) (formatting.CB58, error) { - bytes, err := json.Marshal(args) - return formatting.CB58{Bytes: bytes}, err -} diff --git a/vms/evm/static_service_test.go b/vms/evm/static_service_test.go deleted file mode 100644 index c492798..0000000 --- a/vms/evm/static_service_test.go +++ /dev/null @@ -1,64 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package evm - -import ( - "math/big" - "testing" - - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/params" - - "github.com/ava-labs/coreth/core" -) - -func TestBuildGenesis(t *testing.T) { - expected := "3wP629bGfSGj9trh1UNBp5qGRGCcma5d8ezLeSmd9hnUJjSMUJesHHoxbZNcVUC9CjH7PEGNA96htNTd1saZCMt1Mf1dZFG7JDhcYNok6RS4TZufejXdxbVVgquohSa7nCCcrXpiVeiRFwzLJAxyQbXzYRhaCRtcDDfCcqfaVdtkFsPbNeQ49pDTbEC5hVkmfopeQ2Zz8tAG5QXKBdbYBCukR3xNHJ4xDxeixmEwPr1odb42yQRYrL7xREKNn2LFoFwAWUjBTsCkf5GPNgY2GvvN9o8wFWXTroW5fp754DhpdxHYxkMTfuE9DGyNWHTyrEbrUHutUdsfitcSHVj5ctFtkN2wGCs3cyv1eRRNvFFMggWTbarjne6AYaeCrJ631qAu3CbrUtrTH5N2E6G2yQKX4sT4Sk3qWPJdsGXuT95iKKcgNn1u5QRHHw9DXXuGPpJjkcKQRGUCuqpXy61iF5RNPEwAwKDa8f2Y25WMmNgWynUuLj8iSAyePj7USPWk54QFUr86ApVzqAdzzdD1qSVScpmudGnGbz9UNXdzHqSot6XLrNTYsgkabiu6TGntFm7qywbCRmtNdBuT9aznGQdUVimjt5QzUz68HXhUxBzTkrz7yXfVGV5JcWxVHQXYS4oc41U5yu83mH3A7WBrZLVq6UyNrvQVbim5nDxeKKbALPxwzVwywjgY5cp39AvzGnY8CX2AtuBNnKmZaAvG8JWAkx3yxjnJrwWhLgpDQYcCvRp2jg1EPBqN8FKJxSPE6eedjDHDJfB57mNzyEtmg22BPnem3eLdiovX8awkhBUHdE7uPrapNSVprnS85u1saW2Kwza3FsS2jAM3LckGW8KdtfPTpHBTRKAUo49zZLuPsyGL5WduedGyAdaM3a2KPoyXuz4UbexTVUWFNypFvvgyoDS8FMxDCNoMMaD7y4yVnoDpSpVFEVZD6EuSGHe9U8Ew57xLPbjhepDx6" - - balance, success := new(big.Int).SetString("33b2e3c9fd0804000000000", 16) - if !success { - t.Fatal("Failed to initialize balance") - } - - args := core.Genesis{ - Config: ¶ms.ChainConfig{ - ChainID: big.NewInt(43110), - HomesteadBlock: big.NewInt(0), - DAOForkBlock: big.NewInt(0), - DAOForkSupport: true, - EIP150Block: big.NewInt(0), - EIP150Hash: common.HexToHash("0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0"), - EIP155Block: big.NewInt(0), - EIP158Block: big.NewInt(0), - ByzantiumBlock: big.NewInt(0), - ConstantinopleBlock: big.NewInt(0), - PetersburgBlock: big.NewInt(0), - }, - Nonce: 0, - Timestamp: 0, - ExtraData: []byte{}, - GasLimit: 100000000, - Difficulty: big.NewInt(0), - Mixhash: 
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - Coinbase: common.HexToAddress("0x0000000000000000000000000000000000000000"), - Alloc: core.GenesisAlloc{ - common.HexToAddress("751a0b96e1042bee789452ecb20253fba40dbe85"): core.GenesisAccount{ - Balance: balance, - }, - }, - Number: 0, - GasUsed: 0, - ParentHash: common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"), - } - - ss := StaticService{} - result, err := ss.BuildGenesis(nil, &args) - if err != nil { - t.Fatal(err) - } - - if result.String() != expected { - t.Fatalf("StaticService.BuildGenesis:\nReturned: %s\nExpected: %s", result, expected) - } -} diff --git a/vms/evm/vm.go b/vms/evm/vm.go deleted file mode 100644 index e1e9846..0000000 --- a/vms/evm/vm.go +++ /dev/null @@ -1,498 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "crypto/rand" - "encoding/json" - "errors" - "fmt" - "math/big" - "sync" - "sync/atomic" - "time" - - "github.com/ava-labs/coreth" - "github.com/ava-labs/coreth/core" - "github.com/ava-labs/coreth/eth" - "github.com/ava-labs/coreth/node" - - "github.com/ava-labs/go-ethereum/common" - "github.com/ava-labs/go-ethereum/core/types" - "github.com/ava-labs/go-ethereum/rlp" - "github.com/ava-labs/go-ethereum/rpc" - - "github.com/ava-labs/gecko/cache" - "github.com/ava-labs/gecko/database" - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowman" - "github.com/ava-labs/gecko/utils/timer" - - commonEng "github.com/ava-labs/gecko/snow/engine/common" -) - -const ( - lastAcceptedKey = "snowman_lastAccepted" -) - -const ( - minBlockTime = 250 * time.Millisecond - maxBlockTime = 1000 * time.Millisecond - batchSize = 250 -) - -const ( - bdTimerStateMin = iota - bdTimerStateMax - bdTimerStateLong -) - -var ( - errEmptyBlock = 
errors.New("empty block") - errCreateBlock = errors.New("couldn't create block") - errUnknownBlock = errors.New("unknown block") - errBlockFrequency = errors.New("too frequent block issuance") - errUnsupportedFXs = errors.New("unsupported feature extensions") -) - -func maxDuration(x, y time.Duration) time.Duration { - if x > y { - return x - } - return y -} - -// VM implements the snowman.ChainVM interface -type VM struct { - ctx *snow.Context - - chainID *big.Int - networkID uint64 - chain *coreth.ETHChain - chaindb Database - newBlockChan chan *Block - networkChan chan<- commonEng.Message - newTxPoolHeadChan chan core.NewTxPoolHeadEvent - - txPoolStabilizedHead common.Hash - txPoolStabilizedOk chan struct{} - txPoolStabilizedLock sync.Mutex - - metalock sync.Mutex - blockCache, blockStatusCache cache.LRU - lastAccepted *Block - writingMetadata uint32 - - bdlock sync.Mutex - blockDelayTimer *timer.Timer - bdTimerState int8 - bdGenWaitFlag bool - bdGenFlag bool - - genlock sync.Mutex - txSubmitChan <-chan struct{} -} - -/* - ****************************************************************************** - ********************************* Snowman API ******************************** - ****************************************************************************** - */ - -// Initialize implements the snowman.ChainVM interface -func (vm *VM) Initialize( - ctx *snow.Context, - db database.Database, - b []byte, - toEngine chan<- commonEng.Message, - fxs []*commonEng.Fx, -) error { - if len(fxs) > 0 { - return errUnsupportedFXs - } - - vm.ctx = ctx - vm.chaindb = Database{db} - g := new(core.Genesis) - err := json.Unmarshal(b, g) - if err != nil { - return err - } - - vm.chainID = g.Config.ChainID - - config := eth.DefaultConfig - config.ManualCanonical = true - config.Genesis = g - config.Miner.ManualMining = true - config.Miner.DisableUncle = true - if err := config.SetGCMode("archive"); err != nil { - panic(err) - } - nodecfg := node.Config{NoUSB: true} - chain := 
coreth.NewETHChain(&config, &nodecfg, nil, vm.chaindb) - vm.chain = chain - vm.networkID = config.NetworkId - chain.SetOnHeaderNew(func(header *types.Header) { - hid := make([]byte, 32) - _, err := rand.Read(hid) - if err != nil { - panic("cannot generate hid") - } - header.Extra = append(header.Extra, hid...) - }) - chain.SetOnSeal(func(block *types.Block) error { - if len(block.Transactions()) == 0 { - // this could happen due to the async logic of geth tx pool - vm.newBlockChan <- nil - return errEmptyBlock - } - return nil - }) - chain.SetOnSealFinish(func(block *types.Block) error { - vm.ctx.Log.Verbo("EVM sealed a block") - - blk := &Block{ - id: ids.NewID(block.Hash()), - ethBlock: block, - vm: vm, - } - vm.newBlockChan <- blk - vm.updateStatus(ids.NewID(block.Hash()), choices.Processing) - vm.txPoolStabilizedLock.Lock() - vm.txPoolStabilizedHead = block.Hash() - vm.txPoolStabilizedLock.Unlock() - return nil - }) - chain.SetOnQueryAcceptedBlock(func() *types.Block { - return vm.getLastAccepted().ethBlock - }) - vm.blockCache = cache.LRU{Size: 2048} - vm.blockStatusCache = cache.LRU{Size: 1024} - vm.newBlockChan = make(chan *Block) - vm.networkChan = toEngine - vm.blockDelayTimer = timer.NewTimer(func() { - vm.bdlock.Lock() - switch vm.bdTimerState { - case bdTimerStateMin: - vm.bdTimerState = bdTimerStateMax - vm.blockDelayTimer.SetTimeoutIn(maxDuration(maxBlockTime-minBlockTime, 0)) - case bdTimerStateMax: - vm.bdTimerState = bdTimerStateLong - } - tryAgain := vm.bdGenWaitFlag - vm.bdlock.Unlock() - if tryAgain { - vm.tryBlockGen() - } - }) - go ctx.Log.RecoverAndPanic(vm.blockDelayTimer.Dispatch) - - vm.bdTimerState = bdTimerStateLong - vm.bdGenWaitFlag = true - vm.newTxPoolHeadChan = make(chan core.NewTxPoolHeadEvent, 1) - vm.txPoolStabilizedOk = make(chan struct{}, 1) - chain.GetTxPool().SubscribeNewHeadEvent(vm.newTxPoolHeadChan) - // TODO: shutdown this go routine - go ctx.Log.RecoverAndPanic(func() { - for { - select { - case h := 
<-vm.newTxPoolHeadChan: - vm.txPoolStabilizedLock.Lock() - if vm.txPoolStabilizedHead == h.Block.Hash() { - vm.txPoolStabilizedOk <- struct{}{} - vm.txPoolStabilizedHead = common.Hash{} - } - vm.txPoolStabilizedLock.Unlock() - } - } - }) - chain.Start() - - var lastAccepted *types.Block - if b, err := vm.chaindb.Get([]byte(lastAcceptedKey)); err == nil { - var hash common.Hash - if err = rlp.DecodeBytes(b, &hash); err == nil { - if block := chain.GetBlockByHash(hash); block == nil { - vm.ctx.Log.Debug("lastAccepted block not found in chaindb") - } else { - lastAccepted = block - } - } - } - if lastAccepted == nil { - vm.ctx.Log.Debug("lastAccepted is unavailable, setting to the genesis block") - lastAccepted = chain.GetGenesisBlock() - } - vm.lastAccepted = &Block{ - id: ids.NewID(lastAccepted.Hash()), - ethBlock: lastAccepted, - vm: vm, - } - vm.ctx.Log.Info(fmt.Sprintf("lastAccepted = %s", vm.lastAccepted.ethBlock.Hash().Hex())) - - // TODO: shutdown this go routine - go vm.ctx.Log.RecoverAndPanic(func() { - vm.txSubmitChan = vm.chain.GetTxSubmitCh() - for { - select { - case <-vm.txSubmitChan: - vm.ctx.Log.Verbo("New tx detected, trying to generate a block") - vm.tryBlockGen() - case <-time.After(5 * time.Second): - vm.tryBlockGen() - } - } - }) - - return nil -} - -// Shutdown implements the snowman.ChainVM interface -func (vm *VM) Shutdown() { - vm.writeBackMetadata() - vm.chain.Stop() -} - -// BuildBlock implements the snowman.ChainVM interface -func (vm *VM) BuildBlock() (snowman.Block, error) { - vm.chain.GenBlock() - block := <-vm.newBlockChan - if block == nil { - return nil, errCreateBlock - } - // reset the min block time timer - vm.bdlock.Lock() - vm.bdTimerState = bdTimerStateMin - vm.bdGenWaitFlag = false - vm.bdGenFlag = false - vm.blockDelayTimer.SetTimeoutIn(minBlockTime) - vm.bdlock.Unlock() - - vm.ctx.Log.Debug("built block 0x%x", block.ID().Bytes()) - // make sure Tx Pool is updated - <-vm.txPoolStabilizedOk - return block, nil -} - -// 
ParseBlock implements the snowman.ChainVM interface -func (vm *VM) ParseBlock(b []byte) (snowman.Block, error) { - vm.metalock.Lock() - defer vm.metalock.Unlock() - - ethBlock := new(types.Block) - if err := rlp.DecodeBytes(b, ethBlock); err != nil { - return nil, err - } - block := &Block{ - id: ids.NewID(ethBlock.Hash()), - ethBlock: ethBlock, - vm: vm, - } - vm.blockCache.Put(block.ID(), block) - return block, nil -} - -// GetBlock implements the snowman.ChainVM interface -func (vm *VM) GetBlock(id ids.ID) (snowman.Block, error) { - vm.metalock.Lock() - defer vm.metalock.Unlock() - - block := vm.getBlock(id) - if block == nil { - return nil, errUnknownBlock - } - return block, nil -} - -// SetPreference sets what the current tail of the chain is -func (vm *VM) SetPreference(blkID ids.ID) { - err := vm.chain.SetTail(blkID.Key()) - vm.ctx.Log.AssertNoError(err) -} - -// LastAccepted returns the ID of the block that was last accepted -func (vm *VM) LastAccepted() ids.ID { - vm.metalock.Lock() - defer vm.metalock.Unlock() - - return vm.lastAccepted.ID() -} - -// CreateHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateHandlers() map[string]*commonEng.HTTPHandler { - handler := vm.chain.NewRPCHandler() - vm.chain.AttachEthService(handler, []string{"eth", "personal", "txpool"}) - handler.RegisterName("net", &NetAPI{vm}) - handler.RegisterName("snowman", &SnowmanAPI{vm}) - handler.RegisterName("web3", &Web3API{}) - handler.RegisterName("debug", &DebugAPI{vm}) - - return map[string]*commonEng.HTTPHandler{ - "/rpc": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler}, - "/ws": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler.WebsocketHandler([]string{"*"})}, - } -} - -// CreateStaticHandlers makes new http handlers that can handle API calls -func (vm *VM) CreateStaticHandlers() map[string]*commonEng.HTTPHandler { - handler := rpc.NewServer() - handler.RegisterName("static", &StaticService{}) - return 
map[string]*commonEng.HTTPHandler{ - "/rpc": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler}, - "/ws": &commonEng.HTTPHandler{LockOptions: commonEng.NoLock, Handler: handler.WebsocketHandler([]string{"*"})}, - } -} - -/* - ****************************************************************************** - *********************************** Helpers ********************************** - ****************************************************************************** - */ - -func (vm *VM) updateStatus(blockID ids.ID, status choices.Status) { - vm.metalock.Lock() - defer vm.metalock.Unlock() - - if status == choices.Accepted { - vm.lastAccepted = vm.getBlock(blockID) - // TODO: improve this naive implementation - if atomic.SwapUint32(&vm.writingMetadata, 1) == 0 { - go vm.ctx.Log.RecoverAndPanic(vm.writeBackMetadata) - } - } - vm.blockStatusCache.Put(blockID, status) -} - -func (vm *VM) getCachedBlock(blockID ids.ID) *types.Block { - return vm.chain.GetBlockByHash(blockID.Key()) -} - -func (vm *VM) tryBlockGen() error { - vm.bdlock.Lock() - defer vm.bdlock.Unlock() - if vm.bdGenFlag { - // skip if one call already generates a block in this round - return nil - } - vm.bdGenWaitFlag = true - - vm.genlock.Lock() - defer vm.genlock.Unlock() - // get pending size - size, err := vm.chain.PendingSize() - if err != nil { - return err - } - if size == 0 { - return nil - } - - switch vm.bdTimerState { - case bdTimerStateMin: - return nil - case bdTimerStateMax: - if size < batchSize { - return nil - } - case bdTimerStateLong: - // timeout; go ahead and generate a new block anyway - } - select { - case vm.networkChan <- commonEng.PendingTxs: - // successfully push out the notification; this round ends - vm.bdGenFlag = true - default: - return errBlockFrequency - } - return nil -} - -func (vm *VM) getCachedStatus(blockID ids.ID) choices.Status { - vm.metalock.Lock() - defer vm.metalock.Unlock() - status := choices.Processing - - if statusIntf, ok := 
vm.blockStatusCache.Get(blockID); ok { - status = statusIntf.(choices.Status) - } else { - blk := vm.chain.GetBlockByHash(blockID.Key()) - if blk == nil { - return choices.Unknown - } - acceptedBlk := vm.lastAccepted.ethBlock - - // TODO: There must be a better way of doing this. - // Traverse up the chain from the lower block until the indices match - highBlock := blk - lowBlock := acceptedBlk - if highBlock.Number().Cmp(lowBlock.Number()) < 0 { - highBlock, lowBlock = lowBlock, highBlock - } - for highBlock.Number().Cmp(lowBlock.Number()) > 0 { - highBlock = vm.chain.GetBlockByHash(highBlock.ParentHash()) - } - - if highBlock.Hash() == lowBlock.Hash() { // on the same branch - if blk.Number().Cmp(acceptedBlk.Number()) <= 0 { - status = choices.Accepted - } - } else { // on different branches - status = choices.Rejected - } - } - - vm.blockStatusCache.Put(blockID, status) - return status -} - -func (vm *VM) getBlock(id ids.ID) *Block { - if blockIntf, ok := vm.blockCache.Get(id); ok { - return blockIntf.(*Block) - } - ethBlock := vm.getCachedBlock(id) - if ethBlock == nil { - return nil - } - block := &Block{ - id: ids.NewID(ethBlock.Hash()), - ethBlock: ethBlock, - vm: vm, - } - vm.blockCache.Put(id, block) - return block -} - -func (vm *VM) issueRemoteTxs(txs []*types.Transaction) error { - errs := vm.chain.AddRemoteTxs(txs) - for _, err := range errs { - if err != nil { - return err - } - } - return vm.tryBlockGen() -} - -func (vm *VM) writeBackMetadata() { - vm.metalock.Lock() - defer vm.metalock.Unlock() - - b, err := rlp.EncodeToBytes(vm.lastAccepted.ethBlock.Hash()) - if err != nil { - vm.ctx.Log.Error("snowman-eth: error while writing back metadata") - return - } - vm.ctx.Log.Debug("writing back metadata") - vm.chaindb.Put([]byte(lastAcceptedKey), b) - atomic.StoreUint32(&vm.writingMetadata, 0) -} - -func (vm *VM) getLastAccepted() *Block { - vm.metalock.Lock() - defer vm.metalock.Unlock() - - return vm.lastAccepted -} diff --git 
a/vms/evm/vm_genesis_parse_test.go b/vms/evm/vm_genesis_parse_test.go deleted file mode 100644 index 9a113fb..0000000 --- a/vms/evm/vm_genesis_parse_test.go +++ /dev/null @@ -1,32 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package evm - -import ( - "encoding/json" - "testing" - - "github.com/ava-labs/coreth/core" -) - -func TestParseGenesis(t *testing.T) { - genesis := []byte(`{"config":{"chainId":43110,"homesteadBlock":0,"daoForkBlock":0,"daoForkSupport":true,"eip150Block":0,"eip150Hash":"0x2086799aeebeae135c246c65021c82b4e15a2c451340993aacfd2751886514f0","eip155Block":0,"eip158Block":0,"byzantiumBlock":0,"constantinopleBlock":0,"petersburgBlock":0},"nonce":"0x0","timestamp":"0x0","extraData":"0x00","gasLimit":"0x5f5e100","difficulty":"0x0","mixHash":"0x0000000000000000000000000000000000000000000000000000000000000000","coinbase":"0x0000000000000000000000000000000000000000","alloc":{"751a0b96e1042bee789452ecb20253fba40dbe85":{"balance":"0x33b2e3c9fd0804000000000"}},"number":"0x0","gasUsed":"0x0","parentHash":"0x0000000000000000000000000000000000000000000000000000000000000000"}`) - - genesisBlock := new(core.Genesis) - err := json.Unmarshal(genesis, genesisBlock) - if err != nil { - t.Fatal(err) - } - - marshalledBytes, err := json.Marshal(genesisBlock) - if err != nil { - t.Fatal(err) - } - - secondGenesisBlock := new(core.Genesis) - err = json.Unmarshal(marshalledBytes, secondGenesisBlock) - if err != nil { - t.Fatal(err) - } -} diff --git a/vms/manager.go b/vms/manager.go index 1509d49..9f8cf3b 100644 --- a/vms/manager.go +++ b/vms/manager.go @@ -16,7 +16,7 @@ import ( // A VMFactory creates new instances of a VM type VMFactory interface { - New() interface{} + New() (interface{}, error) } // Manager is a VM manager. 
@@ -110,10 +110,17 @@ func (m *manager) addStaticAPIEndpoints(vmID ids.ID) { vmFactory, err := m.GetVMFactory(vmID) m.log.AssertNoError(err) m.log.Debug("adding static API for VM with ID %s", vmID) - vm := vmFactory.New() + vm, err := vmFactory.New() + if err != nil { + return + } staticVM, ok := vm.(common.StaticVM) if !ok { + staticVM, ok := vm.(common.VM) + if ok { + staticVM.Shutdown() + } return } diff --git a/vms/nftfx/credential.go b/vms/nftfx/credential.go new file mode 100644 index 0000000..bb7cca0 --- /dev/null +++ b/vms/nftfx/credential.go @@ -0,0 +1,10 @@ +package nftfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// Credential ... +type Credential struct { + secp256k1fx.Credential `serialize:"true"` +} diff --git a/vms/nftfx/factory.go b/vms/nftfx/factory.go new file mode 100644 index 0000000..fc28262 --- /dev/null +++ b/vms/nftfx/factory.go @@ -0,0 +1,16 @@ +package nftfx + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ID that this Fx uses when labeled +var ( + ID = ids.NewID([32]byte{'n', 'f', 't', 'f', 'x'}) +) + +// Factory ... +type Factory struct{} + +// New ... 
+func (f *Factory) New() (interface{}, error) { return &Fx{}, nil } diff --git a/vms/nftfx/factory_test.go b/vms/nftfx/factory_test.go new file mode 100644 index 0000000..7144129 --- /dev/null +++ b/vms/nftfx/factory_test.go @@ -0,0 +1,14 @@ +package nftfx + +import ( + "testing" +) + +func TestFactory(t *testing.T) { + factory := Factory{} + if fx, err := factory.New(); err != nil { + t.Fatal(err) + } else if fx == nil { + t.Fatalf("Factory.New returned nil") + } +} diff --git a/vms/nftfx/fx.go b/vms/nftfx/fx.go new file mode 100644 index 0000000..1440ff0 --- /dev/null +++ b/vms/nftfx/fx.go @@ -0,0 +1,118 @@ +package nftfx + +import ( + "bytes" + "errors" + + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errWrongTxType = errors.New("wrong tx type") + errWrongUTXOType = errors.New("wrong utxo type") + errWrongOperationType = errors.New("wrong operation type") + errWrongCredentialType = errors.New("wrong credential type") + + errNoUTXOs = errors.New("an operation must consume at least one UTXO") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") + + errWrongUniqueID = errors.New("wrong unique ID provided") + errWrongBytes = errors.New("wrong bytes provided") + + errCantTransfer = errors.New("cant transfer with this fx") +) + +// Fx ... +type Fx struct{ secp256k1fx.Fx } + +// Initialize ... 
+func (fx *Fx) Initialize(vmIntf interface{}) error { + if err := fx.InitializeVM(vmIntf); err != nil { + return err + } + + log := fx.VM.Logger() + log.Debug("Initializing nft fx") + + c := fx.VM.Codec() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&MintOutput{}), + c.RegisterType(&TransferOutput{}), + c.RegisterType(&MintOperation{}), + c.RegisterType(&TransferOperation{}), + c.RegisterType(&Credential{}), + ) + return errs.Err +} + +// VerifyOperation ... +func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { + tx, ok := txIntf.(secp256k1fx.Tx) + switch { + case !ok: + return errWrongTxType + case len(utxosIntf) != 1: + return errWrongNumberOfUTXOs + } + + cred, ok := credIntf.(*Credential) + if !ok { + return errWrongCredentialType + } + + switch op := opIntf.(type) { + case *MintOperation: + return fx.VerifyMintOperation(tx, op, cred, utxosIntf[0]) + case *TransferOperation: + return fx.VerifyTransferOperation(tx, op, cred, utxosIntf[0]) + default: + return errWrongOperationType + } +} + +// VerifyMintOperation ... +func (fx *Fx) VerifyMintOperation(tx secp256k1fx.Tx, op *MintOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*MintOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + switch { + case out.GroupID != op.GroupID: + return errWrongUniqueID + default: + return fx.Fx.VerifyCredentials(tx, &op.MintInput, &cred.Credential, &out.OutputOwners) + } +} + +// VerifyTransferOperation ... 
+func (fx *Fx) VerifyTransferOperation(tx secp256k1fx.Tx, op *TransferOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*TransferOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + switch { + case out.GroupID != op.Output.GroupID: + return errWrongUniqueID + case !bytes.Equal(out.Payload, op.Output.Payload): + return errWrongBytes + default: + return fx.VerifyCredentials(tx, &op.Input, &cred.Credential, &out.OutputOwners) + } +} + +// VerifyTransfer ... +func (fx *Fx) VerifyTransfer(_, _, _, _ interface{}) error { return errCantTransfer } diff --git a/vms/nftfx/fx_test.go b/vms/nftfx/fx_test.go new file mode 100644 index 0000000..d965902 --- /dev/null +++ b/vms/nftfx/fx_test.go @@ -0,0 +1,618 @@ +package nftfx + +import ( + "testing" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + txBytes = []byte{0, 1, 2, 3, 4, 5} + sigBytes = [crypto.SECP256K1RSigLen]byte{ + 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, + 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, + 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, + 0xbc, 0xa3, 0xb2, 0xd2, 0x5d, 0x51, 0xd6, 0x9b, + 0x0f, 0x28, 0x5d, 0xcd, 0x3f, 0x71, 0x17, 0x0a, + 0xf9, 0xbf, 0x2d, 0xb1, 0x10, 0x26, 0x5c, 0xe9, + 0xdc, 0xc3, 0x9d, 0x7a, 0x01, 0x50, 0x9d, 0xe8, + 0x35, 0xbd, 0xcb, 0x29, 0x3a, 0xd1, 0x49, 0x32, + 0x00, + } + addrBytes = [hashing.AddrLen]byte{ + 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, 0x09, + 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, 0x8d, + 0x39, 0x1a, 0xe7, 0xf0, + } +) + +func TestFxInitialize(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + fx := 
Fx{} + err := fx.Initialize(&vm) + if err != nil { + t.Fatal(err) + } +} + +func TestFxInitializeInvalid(t *testing.T) { + fx := Fx{} + err := fx.Initialize(nil) + if err == nil { + t.Fatalf("Should have returned an error") + } +} + +func TestFxVerifyMintOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyMintOperationWrongTx(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(nil, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid tx") + } +} + +func TestFxVerifyMintOperationWrongNumberUTXOs(t 
*testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to not enough utxos") + } +} + +func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to a bad credential") + } +} + +func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: 
[][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + ids.ShortEmpty, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") + } +} + +func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + 
SigIndices: []uint32{0}, + }, + GroupID: 1, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid Group ID") + } +} + +func TestFxVerifyTransferOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: 
secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") + } +} + +func TestFxVerifyTransferOperationWrongGroupID(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ 
+ Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 2, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to a wrong unique id") + } +} + +func TestFxVerifyTransferOperationWrongBytes(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + op := &TransferOperation{ + Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + Output: TransferOutput{ + GroupID: 1, + Payload: []byte{3}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.ShortEmpty, + }, + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to the wrong hash being produced") + } +} + +func TestFxVerifyOperationUnknownOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := 
fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &TransferOutput{ + GroupID: 1, + Payload: []byte{2}, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, nil, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an unknown operation") + } +} + +func TestFxVerifyTransfer(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + if err := fx.VerifyTransfer(nil, nil, nil, nil); err == nil { + t.Fatalf("this Fx doesn't support transfers") + } +} diff --git a/vms/nftfx/mint_operation.go b/vms/nftfx/mint_operation.go new file mode 100644 index 0000000..ea6bdbc --- /dev/null +++ b/vms/nftfx/mint_operation.go @@ -0,0 +1,50 @@ +package nftfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errNilMintOperation = errors.New("nil mint operation") +) + +// MintOperation ... +type MintOperation struct { + MintInput secp256k1fx.Input `serialize:"true" json:"mintInput"` + GroupID uint32 `serialize:"true" json:"groupID"` + Payload []byte `serialize:"true" json:"payload"` + Outputs []*secp256k1fx.OutputOwners `serialize:"true" json:"outputs"` +} + +// Outs ... 
+func (op *MintOperation) Outs() []verify.Verifiable { + outs := []verify.Verifiable{} + for _, out := range op.Outputs { + outs = append(outs, &TransferOutput{ + GroupID: op.GroupID, + Payload: op.Payload, + OutputOwners: *out, + }) + } + return outs +} + +// Verify ... +func (op *MintOperation) Verify() error { + switch { + case op == nil: + return errNilMintOperation + case len(op.Payload) > MaxPayloadSize: + return errPayloadTooLarge + } + + for _, out := range op.Outputs { + if err := out.Verify(); err != nil { + return err + } + } + return op.MintInput.Verify() +} diff --git a/vms/nftfx/mint_operation_test.go b/vms/nftfx/mint_operation_test.go new file mode 100644 index 0000000..18513dc --- /dev/null +++ b/vms/nftfx/mint_operation_test.go @@ -0,0 +1,43 @@ +package nftfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestMintOperationVerifyNil(t *testing.T) { + op := (*MintOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("nil operation should have failed verification") + } +} + +func TestMintOperationVerifyTooLargePayload(t *testing.T) { + op := MintOperation{ + Payload: make([]byte, MaxPayloadSize+1), + } + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestMintOperationVerifyInvalidOutput(t *testing.T) { + op := MintOperation{ + Outputs: []*secp256k1fx.OutputOwners{&secp256k1fx.OutputOwners{ + Threshold: 1, + }}, + } + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestMintOperationOuts(t *testing.T) { + op := MintOperation{ + Outputs: []*secp256k1fx.OutputOwners{&secp256k1fx.OutputOwners{}}, + } + if outs := op.Outs(); len(outs) != 1 { + t.Fatalf("Wrong number of outputs returned") + } +} diff --git a/vms/nftfx/mint_output.go b/vms/nftfx/mint_output.go new file mode 100644 index 0000000..6a40c08 --- /dev/null +++ b/vms/nftfx/mint_output.go @@ -0,0 +1,11 @@ +package nftfx + +import ( + 
"github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// MintOutput ... +type MintOutput struct { + GroupID uint32 `serialize:"true" json:"groupID"` + secp256k1fx.OutputOwners `serialize:"true"` +} diff --git a/vms/nftfx/transfer_operation.go b/vms/nftfx/transfer_operation.go new file mode 100644 index 0000000..5e1482b --- /dev/null +++ b/vms/nftfx/transfer_operation.go @@ -0,0 +1,33 @@ +package nftfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errNilTransferOperation = errors.New("nil transfer operation") +) + +// TransferOperation ... +type TransferOperation struct { + Input secp256k1fx.Input `serialize:"true" json:"input"` + Output TransferOutput `serialize:"true" json:"output"` +} + +// Outs ... +func (op *TransferOperation) Outs() []verify.Verifiable { + return []verify.Verifiable{&op.Output} +} + +// Verify ... +func (op *TransferOperation) Verify() error { + switch { + case op == nil: + return errNilTransferOperation + default: + return verify.All(&op.Input, &op.Output) + } +} diff --git a/vms/nftfx/transfer_operation_test.go b/vms/nftfx/transfer_operation_test.go new file mode 100644 index 0000000..80357bb --- /dev/null +++ b/vms/nftfx/transfer_operation_test.go @@ -0,0 +1,32 @@ +package nftfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestTransferOperationVerifyNil(t *testing.T) { + op := (*TransferOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("nil operation should have failed verification") + } +} + +func TestTransferOperationInvalid(t *testing.T) { + op := TransferOperation{Input: secp256k1fx.Input{ + SigIndices: []uint32{1, 0}, + }} + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestTransferOperationOuts(t *testing.T) { + op := TransferOperation{ + Output: TransferOutput{}, + } + if outs := op.Outs(); len(outs) != 1 { + t.Fatalf("Wrong number of 
outputs returned") + } +} diff --git a/vms/nftfx/transfer_output.go b/vms/nftfx/transfer_output.go new file mode 100644 index 0000000..d46cd46 --- /dev/null +++ b/vms/nftfx/transfer_output.go @@ -0,0 +1,36 @@ +package nftfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +const ( + // MaxPayloadSize is the maximum size that can be placed into a payload + MaxPayloadSize = 1 << 10 +) + +var ( + errNilTransferOutput = errors.New("nil transfer output") + errPayloadTooLarge = errors.New("payload too large") +) + +// TransferOutput ... +type TransferOutput struct { + GroupID uint32 `serialize:"true" json:"groupID"` + Payload []byte `serialize:"true" json:"payload"` + secp256k1fx.OutputOwners `serialize:"true"` +} + +// Verify ... +func (out *TransferOutput) Verify() error { + switch { + case out == nil: + return errNilTransferOutput + case len(out.Payload) > MaxPayloadSize: + return errPayloadTooLarge + default: + return out.OutputOwners.Verify() + } +} diff --git a/vms/nftfx/transfer_output_test.go b/vms/nftfx/transfer_output_test.go new file mode 100644 index 0000000..d3d8ca9 --- /dev/null +++ b/vms/nftfx/transfer_output_test.go @@ -0,0 +1,38 @@ +package nftfx + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestTransferOutputVerifyNil(t *testing.T) { + to := (*TransferOutput)(nil) + if err := to.Verify(); err == nil { + t.Fatalf("TransferOutput.Verify should have errored on nil") + } +} + +func TestTransferOutputLargePayload(t *testing.T) { + to := TransferOutput{ + Payload: make([]byte, MaxPayloadSize+1), + } + if err := to.Verify(); err == nil { + t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") + } +} + +func TestTransferOutputInvalidSecp256k1Output(t *testing.T) { + to := TransferOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Addrs: []ids.ShortID{ + ids.ShortEmpty, + ids.ShortEmpty, + }, + }, + } + if err := to.Verify(); err == nil { 
+ t.Fatalf("TransferOutput.Verify should have errored on too large of a payload") + } +} diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 46ba21e..9d6d5cf 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -325,9 +325,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, // nonce - defaultStakeAmount, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultStakeAmount, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID defaultKey.PublicKey().Address(), // destination diff --git a/vms/platformvm/add_default_subnet_validator_tx.go b/vms/platformvm/add_default_subnet_validator_tx.go index bf398c7..10d41fe 100644 --- a/vms/platformvm/add_default_subnet_validator_tx.go +++ b/vms/platformvm/add_default_subnet_validator_tx.go @@ -184,10 +184,7 @@ func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*ve // If this proposal is aborted, chain state doesn't change onAbortDB := versiondb.New(db) - onAccept := func() { - tx.vm.resetTimer() - } - return onCommitDB, onAbortDB, onAccept, nil, nil + return onCommitDB, onAbortDB, tx.vm.resetTimer, nil, nil } // InitiallyPrefersCommit returns true if the proposed validators start time is diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx.go b/vms/platformvm/add_nondefault_subnet_validator_tx.go index 6173950..531570a 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx.go @@ -162,7 +162,7 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) ( } var subnet *CreateSubnetTx for _, sn := range subnets { - if 
sn.ID.Equals(tx.SubnetID()) { + if sn.id.Equals(tx.SubnetID()) { subnet = sn break } diff --git a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go index 2d63f06..c29faf1 100644 --- a/vms/platformvm/add_nondefault_subnet_validator_tx_test.go +++ b/vms/platformvm/add_nondefault_subnet_validator_tx_test.go @@ -28,7 +28,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -48,7 +48,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID+1, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -67,7 +67,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -87,7 +87,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -107,7 +107,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - 
testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -126,7 +126,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())-1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -147,7 +147,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MinimumStakingDuration).Unix())-1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -167,7 +167,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateStartTime.Add(MaximumStakingDuration).Unix())+1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -187,7 +187,7 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -212,7 +212,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix())+1, defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -235,7 
+235,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -245,7 +245,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } _, _, _, _, err = tx.SemanticVerify(vm.DB) if err != nil { - t.Log(testSubnet1.ID) + t.Log(testSubnet1.id) subnets, err := vm.getSubnets(vm.DB) if err != nil { t.Fatal(err) @@ -253,7 +253,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { if len(subnets) == 0 { t.Fatal("no subnets found") } - t.Logf("subnets[0].ID: %v", subnets[0].ID) + t.Logf("subnets[0].ID: %v", subnets[0].id) t.Fatal(err) } @@ -290,7 +290,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix()), // start validating non-default subnet before default subnet uint64(DSEndTime.Unix()), pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -324,7 +324,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix())-1, // start validating non-default subnet before default subnet uint64(DSEndTime.Unix()), pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -346,7 +346,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix()), uint64(DSEndTime.Unix())+1, // stop validating non-default subnet after stopping validating default subnet pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, 
defaultKey, @@ -368,7 +368,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(DSStartTime.Unix()), // same start time as for default subnet uint64(DSEndTime.Unix()), // same end time as for default subnet pendingDSValidatorID, - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, @@ -389,12 +389,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -429,7 +429,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer @@ -451,7 +451,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -465,7 
+465,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { SortByStartTime: false, Txs: []TimedTx{tx}, }, - testSubnet1.ID, + testSubnet1.id, ) // Node with ID nodeIDKey.PublicKey().Address() now validating subnet with ID testSubnet1.ID @@ -475,7 +475,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { uint64(defaultValidateStartTime.Unix()), // start time uint64(defaultValidateEndTime.Unix()), // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -494,17 +494,17 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { &EventHeap{ SortByStartTime: false, }, - testSubnet1.ID, + testSubnet1.id, ) // Case 9: Too many signatures tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time keys[0].PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, defaultKey, // tx fee payer @@ -520,12 +520,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case 10: Too few signatures tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time keys[0].PublicKey().Address(), 
// node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[2]}, defaultKey, // tx fee payer @@ -541,12 +541,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case 10: Control Signature from invalid key tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix()), // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix()), // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix()), // end time keys[0].PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], keys[3]}, defaultKey, // tx fee payer @@ -563,12 +563,12 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { // Case 11: Proposed validator in pending validator set for subnet // First, add validator to pending validator set of subnet tx, err = vm.newAddNonDefaultSubnetValidatorTx( - defaultNonce+1, // nonce - defaultWeight, // weight - uint64(defaultGenesisTime.Unix())+1, // start time + defaultNonce+1, // nonce + defaultWeight, // weight + uint64(defaultGenesisTime.Unix())+1, // start time uint64(defaultGenesisTime.Add(MinimumStakingDuration).Unix())+1, // end time defaultKey.PublicKey().Address(), // node ID - testSubnet1.ID, // subnet ID + testSubnet1.id, // subnet ID testNetworkID, // network ID []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, // tx fee payer @@ -582,7 +582,7 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { SortByStartTime: true, Txs: []TimedTx{tx}, }, - testSubnet1.ID, + testSubnet1.id, ) // Node with ID nodeIDKey.PublicKey().Address() now pending validator for subnet with ID testSubnet1.ID @@ -604,7 +604,7 @@ func 
TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { uint64(defaultValidateStartTime.Unix()), uint64(defaultValidateEndTime.Unix()), defaultKey.PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, diff --git a/vms/platformvm/advance_time_tx.go b/vms/platformvm/advance_time_tx.go index b126ec8..2b48707 100644 --- a/vms/platformvm/advance_time_tx.go +++ b/vms/platformvm/advance_time_tx.go @@ -7,6 +7,8 @@ import ( "fmt" "time" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/versiondb" ) @@ -86,7 +88,7 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa return nil, nil, nil, nil, err } - current, pending, err := tx.vm.calculateValidators(db, tx.Timestamp(), DefaultSubnetID) + current, pending, _, _, err := tx.vm.calculateValidators(db, tx.Timestamp(), DefaultSubnetID) if err != nil { return nil, nil, nil, nil, err } @@ -98,48 +100,71 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa return nil, nil, nil, nil, err } - // For each subnet, calculate what current and pending validator sets should be + // For each Subnet, calculate what current and pending validator sets should be // given new timestamp + + // Key: Subnet ID + // Value: IDs of validators that will have started validating this Subnet when + // timestamp is advanced to tx.Timestamp() + startedValidating := make(map[ids.ID]ids.ShortSet, 0) subnets, err := tx.vm.getSubnets(db) if err != nil { return nil, nil, nil, nil, err } for _, subnet := range subnets { - current, pending, err := tx.vm.calculateValidators(db, tx.Timestamp(), subnet.ID) + current, pending, started, _, err := tx.vm.calculateValidators(db, tx.Timestamp(), subnet.id) if err != nil { return nil, nil, nil, nil, err } - - if err := tx.vm.putCurrentValidators(onCommitDB, current, subnet.ID); err != nil { 
+ if err := tx.vm.putCurrentValidators(onCommitDB, current, subnet.id); err != nil { return nil, nil, nil, nil, err } - if err := tx.vm.putPendingValidators(onCommitDB, pending, subnet.ID); err != nil { + if err := tx.vm.putPendingValidators(onCommitDB, pending, subnet.id); err != nil { return nil, nil, nil, nil, err } + startedValidating[subnet.ID()] = started } // If this block is committed, update the validator sets // onAbortDB or onCommitDB should commit (flush to vm.DB) before this is called - updateValidators := func() { + onCommitFunc := func() { + // For each Subnet, update the node's validator manager to reflect current Subnet membership subnets, err := tx.vm.getSubnets(tx.vm.DB) if err != nil { tx.vm.Ctx.Log.Error("failed to get subnets: %s", err) return } for _, subnet := range subnets { - if err := tx.vm.updateValidators(subnet.ID); err != nil { - tx.vm.Ctx.Log.Debug("failed to update validators on the default subnet: %s", err) + if err := tx.vm.updateValidators(subnet.id); err != nil { + tx.vm.Ctx.Log.Debug("failed to update Subnet %s: %s", subnet.id, err) } } if err := tx.vm.updateValidators(DefaultSubnetID); err != nil { - tx.vm.Ctx.Log.Fatal("failed to update validators on the default subnet: %s", err) + tx.vm.Ctx.Log.Fatal("failed to update Default Subnet: %s", err) + } + + // If this node started validating a Subnet, create the blockchains that the Subnet validates + chains, err := tx.vm.getChains(tx.vm.DB) // all blockchains + if err != nil { + tx.vm.Ctx.Log.Error("couldn't get blockchains: %s", err) + return + } + for subnetID, validatorIDs := range startedValidating { + if !validatorIDs.Contains(tx.vm.Ctx.NodeID) { + continue + } + for _, chain := range chains { + if chain.SubnetID.Equals(subnetID) { + tx.vm.createChain(chain) + } + } } } // Specify what the state of the chain will be if this proposal is aborted onAbortDB := versiondb.New(db) // state doesn't change - return onCommitDB, onAbortDB, updateValidators, nil, nil + return 
onCommitDB, onAbortDB, onCommitFunc, nil, nil } // InitiallyPrefersCommit returns true if the proposed time isn't after the diff --git a/vms/platformvm/atomic_block.go b/vms/platformvm/atomic_block.go new file mode 100644 index 0000000..8b973ad --- /dev/null +++ b/vms/platformvm/atomic_block.go @@ -0,0 +1,154 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/vms/components/core" +) + +var ( + errConflictingParentTxs = errors.New("block contains a transaction that conflicts with a transaction in a parent block") +) + +// AtomicTx is an operation that can be decided without being proposed, but must have special control over database commitment +type AtomicTx interface { + initialize(vm *VM) error + + ID() ids.ID + + // UTXOs this tx consumes + InputUTXOs() ids.Set + + // Attempt to verify this transaction with the provided state. The provided + // database can be modified arbitrarily. + SemanticVerify(database.Database) error + + Accept(database.Batch) error +} + +// AtomicBlock being accepted results in the transaction contained in the +// block to be accepted and committed to the chain. 
+type AtomicBlock struct { + CommonDecisionBlock `serialize:"true"` + + Tx AtomicTx `serialize:"true"` + + inputs ids.Set +} + +// initialize this block +func (ab *AtomicBlock) initialize(vm *VM, bytes []byte) error { + if err := ab.CommonDecisionBlock.initialize(vm, bytes); err != nil { + return err + } + return ab.Tx.initialize(vm) +} + +// Reject implements the snowman.Block interface +func (ab *AtomicBlock) conflicts(s ids.Set) bool { + if ab.Status() == choices.Accepted { + return false + } + if ab.inputs.Overlaps(s) { + return true + } + return ab.parentBlock().conflicts(s) +} + +// Verify this block performs a valid state transition. +// +// The parent block must be a proposal +// +// This function also sets onAcceptDB database if the verification passes. +func (ab *AtomicBlock) Verify() error { + parentBlock := ab.parentBlock() + + ab.inputs = ab.Tx.InputUTXOs() + + if parentBlock.conflicts(ab.inputs) { + return errConflictingParentTxs + } + + // AtomicBlock is not a modifier on a proposal block, so its parent must be + // a decision. 
+ parent, ok := parentBlock.(decision) + if !ok { + return errInvalidBlockType + } + + pdb := parent.onAccept() + + ab.onAcceptDB = versiondb.New(pdb) + if err := ab.Tx.SemanticVerify(ab.onAcceptDB); err != nil { + return err + } + + ab.vm.currentBlocks[ab.ID().Key()] = ab + ab.parentBlock().addChild(ab) + return nil +} + +// Accept implements the snowman.Block interface +func (ab *AtomicBlock) Accept() { + ab.vm.Ctx.Log.Verbo("Accepting block with ID %s", ab.ID()) + + ab.CommonBlock.Accept() + + // Update the state of the chain in the database + if err := ab.onAcceptDB.Commit(); err != nil { + ab.vm.Ctx.Log.Error("unable to commit onAcceptDB") + } + + batch, err := ab.vm.DB.CommitBatch() + if err != nil { + ab.vm.Ctx.Log.Fatal("unable to commit vm's DB") + } + defer ab.vm.DB.Abort() + + if err := ab.Tx.Accept(batch); err != nil { + ab.vm.Ctx.Log.Error("unable to atomically commit block") + } + + for _, child := range ab.children { + child.setBaseDatabase(ab.vm.DB) + } + if ab.onAcceptFunc != nil { + ab.onAcceptFunc() + } + + parent := ab.parentBlock() + // remove this block and its parent from memory + parent.free() + ab.free() +} + +// newAtomicBlock returns a new *AtomicBlock where the block's parent, a +// decision block, has ID [parentID]. 
+func (vm *VM) newAtomicBlock(parentID ids.ID, tx AtomicTx) (*AtomicBlock, error) { + ab := &AtomicBlock{ + CommonDecisionBlock: CommonDecisionBlock{ + CommonBlock: CommonBlock{ + Block: core.NewBlock(parentID), + vm: vm, + }, + }, + Tx: tx, + } + + // We serialize this block as a Block so that it can be deserialized into a + // Block + blk := Block(ab) + bytes, err := Codec.Marshal(&blk) + if err != nil { + return nil, err + } + ab.Block.Initialize(bytes, vm.SnowmanVM) + return ab, nil +} diff --git a/vms/platformvm/common_blocks.go b/vms/platformvm/common_blocks.go index 5023a44..9d143f8 100644 --- a/vms/platformvm/common_blocks.go +++ b/vms/platformvm/common_blocks.go @@ -6,10 +6,12 @@ package platformvm import ( "errors" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/vms/components/missing" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/snowman" "github.com/ava-labs/gecko/vms/components/core" ) @@ -87,6 +89,8 @@ type Block interface { // [bytes] is the byte representation of this block initialize(vm *VM, bytes []byte) error + conflicts(ids.Set) bool + // parentBlock returns the parent block, similarly to Parent. However, it // provides the more specific staking.Block interface. 
parentBlock() Block @@ -142,6 +146,14 @@ func (cb *CommonBlock) free() { cb.children = nil } +// Reject implements the snowman.Block interface +func (cb *CommonBlock) conflicts(s ids.Set) bool { + if cb.Status() == choices.Accepted { + return false + } + return cb.parentBlock().conflicts(s) +} + // Parent returns this block's parent func (cb *CommonBlock) Parent() snowman.Block { parent := cb.parentBlock() diff --git a/vms/platformvm/create_chain_tx.go b/vms/platformvm/create_chain_tx.go index 74bd3f0..645a6da 100644 --- a/vms/platformvm/create_chain_tx.go +++ b/vms/platformvm/create_chain_tx.go @@ -7,7 +7,6 @@ import ( "errors" "fmt" - "github.com/ava-labs/gecko/chains" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" @@ -15,8 +14,9 @@ import ( ) var ( - errInvalidVMID = errors.New("invalid VM ID") - errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") + errInvalidVMID = errors.New("invalid VM ID") + errFxIDsNotSortedAndUnique = errors.New("feature extensions IDs must be sorted and unique") + errControlSigsNotSortedAndUnique = errors.New("control signatures must be sorted and unique") ) // UnsignedCreateChainTx is an unsigned CreateChainTx @@ -24,6 +24,9 @@ type UnsignedCreateChainTx struct { // ID of the network this blockchain exists on NetworkID uint32 `serialize:"true"` + // ID of the Subnet that validates this blockchain + SubnetID ids.ID `serialize:"true"` + // Next unused nonce of account paying the transaction fee for this transaction. // Currently unused, as there are no tx fees. 
Nonce uint64 `serialize:"true"` @@ -37,7 +40,7 @@ type UnsignedCreateChainTx struct { // IDs of the feature extensions running on the new chain FxIDs []ids.ID `serialize:"true"` - // Byte representation of state of the new chain + // Byte representation of genesis state of the new chain GenesisData []byte `serialize:"true"` } @@ -45,11 +48,19 @@ type UnsignedCreateChainTx struct { type CreateChainTx struct { UnsignedCreateChainTx `serialize:"true"` - Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + // Address of the account that provides the transaction fee + // Set in SemanticVerify + PayerAddress ids.ShortID + + // Signatures from Subnet's control keys + // Should not empty slice, not nil, if there are no control sigs + ControlSigs [][crypto.SECP256K1RSigLen]byte `serialize:"true"` + + // Signature of key whose account provides the transaction fee + PayerSig [crypto.SECP256K1RSigLen]byte `serialize:"true"` vm *VM id ids.ID - key crypto.PublicKey // public key of transaction signer bytes []byte } @@ -64,10 +75,6 @@ func (tx *CreateChainTx) initialize(vm *VM) error { // ID of this transaction func (tx *CreateChainTx) ID() ids.ID { return tx.id } -// Key returns the public key of the signer of this transaction -// Precondition: tx.Verify() has been called and returned nil -func (tx *CreateChainTx) Key() crypto.PublicKey { return tx.key } - // Bytes returns the byte representation of a CreateChainTx func (tx *CreateChainTx) Bytes() []byte { return tx.bytes } @@ -77,16 +84,20 @@ func (tx *CreateChainTx) SyntacticVerify() error { switch { case tx == nil: return errNilTx - case tx.key != nil: - return nil // Only verify the transaction once + case !tx.PayerAddress.IsZero(): // Only verify the transaction once + return nil case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network return errWrongNetworkID case tx.id.IsZero(): return errInvalidID case tx.VMID.IsZero(): return errInvalidVMID + case tx.SubnetID.Equals(DefaultSubnetID): + 
return errDSCantValidate case !ids.IsSortedAndUniqueIDs(tx.FxIDs): return errFxIDsNotSortedAndUnique + case !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs): + return errControlSigsNotSortedAndUnique } unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) @@ -95,11 +106,11 @@ func (tx *CreateChainTx) SyntacticVerify() error { return err } - key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) + payerKey, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.PayerSig[:]) if err != nil { return err } - tx.key = key + tx.PayerAddress = payerKey.Address() return nil } @@ -125,10 +136,12 @@ func (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) { } // Deduct tx fee from payer's account - account, err := tx.vm.getAccount(db, tx.Key().Address()) + account, err := tx.vm.getAccount(db, tx.PayerAddress) if err != nil { return nil, err } + // txFee is removed in account.Remove + // TODO: Consider changing Remove to be parameterized on total amount (inc. 
tx fee) to remove account, err = account.Remove(0, tx.Nonce) if err != nil { return nil, err @@ -137,20 +150,55 @@ func (tx *CreateChainTx) SemanticVerify(db database.Database) (func(), error) { return nil, err } - // If this proposal is committed, create the new blockchain using the chain manager + // Verify that this transaction has sufficient control signatures + subnets, err := tx.vm.getSubnets(db) // all subnets that exist + if err != nil { + return nil, err + } + var subnet *CreateSubnetTx // the subnet that will validate the new chain + for _, sn := range subnets { + if sn.id.Equals(tx.SubnetID) { + subnet = sn + break + } + } + if subnet == nil { + return nil, fmt.Errorf("there is no subnet with ID %s", tx.SubnetID) + } + if len(tx.ControlSigs) != int(subnet.Threshold) { + return nil, fmt.Errorf("expected tx to have %d control sigs but has %d", subnet.Threshold, len(tx.ControlSigs)) + } + + unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte representation of the unsigned transaction + if err != nil { + return nil, err + } + unsignedBytesHash := hashing.ComputeHash256(unsignedBytes) + + // Each element is ID of key that signed this tx + controlIDs := make([]ids.ShortID, len(tx.ControlSigs)) + for i, sig := range tx.ControlSigs { + key, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, sig[:]) + if err != nil { + return nil, err + } + controlIDs[i] = key.Address() + } + + // Verify each control signature on this tx is from a control key + controlKeys := ids.ShortSet{} + controlKeys.Add(subnet.ControlKeys...) 
+ for _, controlID := range controlIDs { + if !controlKeys.Contains(controlID) { + return nil, errors.New("tx has control signature from key not in subnet's ControlKeys") + } + } + + // If this proposal is committed and this node is a member of the + // subnet that validates the blockchain, create the blockchain onAccept := func() { - chainParams := chains.ChainParameters{ - ID: tx.ID(), - GenesisData: tx.GenesisData, - VMAlias: tx.VMID.String(), - } - for _, fxID := range tx.FxIDs { - chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) - } - // TODO: Not sure how else to make this not nil pointer error during tests - if tx.vm.ChainManager != nil { - tx.vm.ChainManager.CreateChain(chainParams) - } + tx.vm.createChain(tx) } return onAccept, nil @@ -166,10 +214,14 @@ func (chains createChainList) Bytes() []byte { return bytes } -func (vm *VM) newCreateChainTx(nonce uint64, genesisData []byte, vmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { +func (vm *VM) newCreateChainTx(nonce uint64, subnetID ids.ID, genesisData []byte, + vmID ids.ID, fxIDs []ids.ID, chainName string, networkID uint32, + controlKeys []*crypto.PrivateKeySECP256K1R, + payerKey *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { tx := &CreateChainTx{ UnsignedCreateChainTx: UnsignedCreateChainTx{ NetworkID: networkID, + SubnetID: subnetID, Nonce: nonce, GenesisData: genesisData, VMID: vmID, @@ -178,17 +230,33 @@ func (vm *VM) newCreateChainTx(nonce uint64, genesisData []byte, vmID ids.ID, fx }, } + // Generate byte repr. of unsigned transaction unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) - unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. 
of unsigned transaction + unsignedBytes, err := Codec.Marshal(&unsignedIntf) if err != nil { return nil, err } + unsignedBytesHash := hashing.ComputeHash256(unsignedBytes) + + // Sign the tx with control keys + tx.ControlSigs = make([][crypto.SECP256K1RSigLen]byte, len(controlKeys)) + for i, key := range controlKeys { + sig, err := key.SignHash(unsignedBytesHash) + if err != nil { + return nil, err + } + copy(tx.ControlSigs[i][:], sig) + } - sig, err := key.Sign(unsignedBytes) + // Sort the control signatures + crypto.SortSECP2561RSigs(tx.ControlSigs) + + // Sign with the payer key + payerSig, err := payerKey.Sign(unsignedBytes) if err != nil { return nil, err } - copy(tx.Sig[:], sig) + copy(tx.PayerSig[:], payerSig) return tx, tx.initialize(vm) } diff --git a/vms/platformvm/create_chain_tx_test.go b/vms/platformvm/create_chain_tx_test.go index 8c555c6..f9b8476 100644 --- a/vms/platformvm/create_chain_tx_test.go +++ b/vms/platformvm/create_chain_tx_test.go @@ -6,8 +6,8 @@ package platformvm import ( "testing" - "github.com/ava-labs/gecko/database/versiondb" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/vms/avm" ) @@ -24,18 +24,19 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { // Case 2: network ID is wrong tx, err := vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID+1, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, ) if err != nil { t.Fatal(err) } err = tx.SyntacticVerify() - t.Log(err) if err == nil { t.Fatal("should've errored because network ID is wrong") } @@ -43,11 +44,13 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { // case 3: tx ID is empty tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, ) if err != nil { @@ -61,11 
+64,13 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { // Case 4: vm ID is empty tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, defaultKey, ) if err != nil { @@ -75,62 +80,189 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) { if err := tx.SyntacticVerify(); err == nil { t.Fatal("should've errored because tx ID is empty") } -} -func TestSemanticVerify(t *testing.T) { - vm := defaultVM() - - // create a tx - tx, err := vm.newCreateChainTx( + // Case 5: Control sigs not sorted + tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + // Reverse signature order + tx.ControlSigs[0], tx.ControlSigs[1] = tx.ControlSigs[1], tx.ControlSigs[0] + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should've errored because control sigs not sorted") + } + + // Case 6: Control sigs not unique + tx, err = vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + tx.ControlSigs[0] = tx.ControlSigs[1] + if err := tx.SyntacticVerify(); err == nil { + t.Fatal("should've errored because control sigs not unique") + } + + // Case 7: Valid tx passes syntactic verification + tx, err = vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatalf("should have passed verification but got %v", err) + } +} + +// Ensure SemanticVerify fails when 
there are not enough control sigs +func TestCreateChainTxInsufficientControlSigs(t *testing.T) { + vm := defaultVM() + + // Case 1: No control sigs (2 are needed) + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + nil, defaultKey, ) if err != nil { t.Fatal(err) } - newDB := versiondb.New(vm.DB) - - _, err = tx.SemanticVerify(newDB) - if err != nil { - t.Fatal(err) + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have errored because there are no control sigs") } - chains, err := vm.getChains(newDB) - if err != nil { - t.Fatal(err) - } - for _, c := range chains { - if c.ID().Equals(tx.ID()) { - return - } - } - t.Fatalf("Should have added the chain to the set of chains") -} - -func TestSemanticVerifyAlreadyExisting(t *testing.T) { - vm := defaultVM() - - // create a tx - tx, err := vm.newCreateChainTx( + // Case 2: 1 control sig (2 are needed) + tx, err = vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, avm.ID, nil, "chain name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0]}, defaultKey, ) if err != nil { t.Fatal(err) } - // put the chain in existing chain + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have errored because there are no control sigs") + } +} + +// Ensure SemanticVerify fails when an incorrect control signature is given +func TestCreateChainTxWrongControlSig(t *testing.T) { + vm := defaultVM() + + // Generate new, random key to sign tx with + factory := crypto.FactorySECP256K1R{} + key, err := factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], key.(*crypto.PrivateKeySECP256K1R)}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + 
t.Fatal("should have errored because incorrect control sig given") + } +} + +// Ensure SemanticVerify fails when the Subnet the blockchain specifies as +// its validator set doesn't exist +func TestCreateChainTxNoSuchSubnet(t *testing.T) { + vm := defaultVM() + + tx, err := vm.newCreateChainTx( + defaultNonce+1, + ids.NewID([32]byte{1, 9, 124, 11, 20}), // pick some random ID for subnet + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have errored because Subnet doesn't exist") + } +} + +func TestCreateChainTxAlreadyExists(t *testing.T) { + vm := defaultVM() + + // create a tx + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + // put the chain in existing chain list if err := vm.putChains(vm.DB, []*CreateChainTx{tx}); err != nil { t.Fatal(err) } @@ -140,3 +272,29 @@ func TestSemanticVerifyAlreadyExisting(t *testing.T) { t.Fatalf("should have failed because there is already a chain with ID %s", tx.id) } } + +// Ensure valid tx passes semanticVerify +func TestCreateChainTxValid(t *testing.T) { + vm := defaultVM() + + // create a valid tx + tx, err := vm.newCreateChainTx( + defaultNonce+1, + testSubnet1.id, + nil, + avm.ID, + nil, + "chain name", + testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + _, err = tx.SemanticVerify(vm.DB) + if err != nil { + t.Fatalf("expected tx to pass verification but got error: %v", err) + } +} diff --git a/vms/platformvm/create_subnet_tx.go b/vms/platformvm/create_subnet_tx.go index 
0d33ca7..16be8be 100644 --- a/vms/platformvm/create_subnet_tx.go +++ b/vms/platformvm/create_subnet_tx.go @@ -8,8 +8,8 @@ import ( "fmt" "github.com/ava-labs/gecko/database" - "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" ) @@ -17,18 +17,14 @@ import ( const maxThreshold = 25 var ( - errThresholdExceedsKeysLen = errors.New("threshold must be no more than number of control keys") - errThresholdTooHigh = fmt.Errorf("threshold can't be greater than %d", maxThreshold) + errThresholdExceedsKeysLen = errors.New("threshold must be no more than number of control keys") + errThresholdTooHigh = fmt.Errorf("threshold can't be greater than %d", maxThreshold) + errControlKeysNotSortedAndUnique = errors.New("control keys must be sorted and unique") + errUnneededKeys = errors.New("subnets shouldn't have keys if the threshold is 0") ) // UnsignedCreateSubnetTx is an unsigned proposal to create a new subnet type UnsignedCreateSubnetTx struct { - // The VM this tx exists within - vm *VM - - // ID is this transaction's ID - ID ids.ID - // NetworkID is the ID of the network this tx was issued on NetworkID uint32 `serialize:"true"` @@ -47,19 +43,28 @@ type UnsignedCreateSubnetTx struct { type CreateSubnetTx struct { UnsignedCreateSubnetTx `serialize:"true"` + // Signature on the UnsignedCreateSubnetTx's byte repr + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + // The public key that signed this transaction // The transaction fee will be paid from the corresponding account // (ie the account whose ID is [key].Address()) // [key] is non-nil iff this tx is valid key crypto.PublicKey - // Signature on the UnsignedCreateSubnetTx's byte repr - Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + // The VM this tx exists within + vm *VM + + // ID is this transaction's ID + id ids.ID // Byte representation of this transaction (including signature) bytes []byte } +// ID 
returns the ID of this transaction +func (tx *CreateSubnetTx) ID() ids.ID { return tx.id } + // SyntacticVerify nil iff [tx] is syntactically valid. // If [tx] is valid, this method sets [tx.key] func (tx *CreateSubnetTx) SyntacticVerify() error { @@ -68,12 +73,18 @@ func (tx *CreateSubnetTx) SyntacticVerify() error { return errNilTx case tx.key != nil: return nil // Only verify the transaction once - case tx.ID.IsZero(): + case tx.id.IsZero(): return errInvalidID case tx.NetworkID != tx.vm.Ctx.NetworkID: return errWrongNetworkID case tx.Threshold > uint16(len(tx.ControlKeys)): return errThresholdExceedsKeysLen + case tx.Threshold > maxThreshold: + return errThresholdTooHigh + case tx.Threshold == 0 && len(tx.ControlKeys) > 0: + return errUnneededKeys + case !ids.IsSortedAndUniqueShortIDs(tx.ControlKeys): + return errControlKeysNotSortedAndUnique } // Byte representation of the unsigned transaction @@ -104,12 +115,6 @@ func (tx *CreateSubnetTx) SemanticVerify(db database.Database) (func(), error) { if err != nil { return nil, err } - - for _, subnet := range subnets { - if subnet.ID.Equals(tx.ID) { - return nil, fmt.Errorf("there is already a subnet with ID %s", tx.ID) - } - } subnets = append(subnets, tx) // add new subnet if err := tx.vm.putSubnets(db, subnets); err != nil { return nil, err @@ -128,7 +133,12 @@ func (tx *CreateSubnetTx) SemanticVerify(db database.Database) (func(), error) { return nil, err } - return nil, nil + // Register new subnet in validator manager + onAccept := func() { + tx.vm.validators.PutValidatorSet(tx.id, validators.NewSet()) + } + + return onAccept, nil } // Bytes returns the byte representation of [tx] @@ -152,22 +162,31 @@ func (tx *CreateSubnetTx) initialize(vm *VM) error { return err } tx.bytes = txBytes - tx.ID = ids.NewID(hashing.ComputeHash256Array(txBytes)) + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) return nil } +// [controlKeys] must be unique. They will be sorted by this method. 
+// If [controlKeys] is nil, [tx.Controlkeys] will be an empty list. func (vm *VM) newCreateSubnetTx(networkID uint32, nonce uint64, controlKeys []ids.ShortID, threshold uint16, payerKey *crypto.PrivateKeySECP256K1R, ) (*CreateSubnetTx, error) { + tx := &CreateSubnetTx{UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ + NetworkID: networkID, + Nonce: nonce, + ControlKeys: controlKeys, + Threshold: threshold, + }} - tx := &CreateSubnetTx{ - UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ - vm: vm, - NetworkID: networkID, - Nonce: nonce, - ControlKeys: controlKeys, - Threshold: threshold, - }, + if threshold == 0 && len(tx.ControlKeys) > 0 { + return nil, errUnneededKeys + } + + // Sort control keys + ids.SortShortIDs(tx.ControlKeys) + // Ensure control keys are unique + if !ids.IsSortedAndUniqueShortIDs(tx.ControlKeys) { + return nil, errControlKeysNotSortedAndUnique } unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) diff --git a/vms/platformvm/event_heap_test.go b/vms/platformvm/event_heap_test.go index 1a045ad..01dbc44 100644 --- a/vms/platformvm/event_heap_test.go +++ b/vms/platformvm/event_heap_test.go @@ -18,7 +18,7 @@ func TestTxHeapStart(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{1}), // node ID + ids.NewShortID([20]byte{}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID @@ -33,7 +33,7 @@ func TestTxHeapStart(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID @@ -85,7 +85,7 @@ func TestTxHeapStop(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{1}), // node ID + ids.NewShortID([20]byte{}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID @@ -100,7 +100,7 @@ func 
TestTxHeapStop(t *testing.T) { 123, // stake amount 1, // startTime 3, // endTime - ids.NewShortID([20]byte{}), // node ID + ids.NewShortID([20]byte{1}), // node ID ids.NewShortID([20]byte{1, 2, 3, 4, 5, 6, 7}), // destination 0, // shares 0, // network ID diff --git a/vms/platformvm/export_tx.go b/vms/platformvm/export_tx.go new file mode 100644 index 0000000..6cdc021 --- /dev/null +++ b/vms/platformvm/export_tx.go @@ -0,0 +1,193 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package platformvm + +import ( + "errors" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" +) + +var ( + errNoExportOutputs = errors.New("no export outputs") + errOutputsNotSorted = errors.New("outputs not sorted") +) + +// UnsignedExportTx is an unsigned ExportTx +type UnsignedExportTx struct { + // ID of the network this blockchain exists on + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of account paying for this transaction. + Nonce uint64 `serialize:"true"` + + Outs []*ava.TransferableOutput `serialize:"true"` // The outputs of this transaction +} + +// ExportTx exports funds to the AVM +type ExportTx struct { + UnsignedExportTx `serialize:"true"` + + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + + vm *VM + id ids.ID + key crypto.PublicKey // public key of transaction signer + bytes []byte +} + +func (tx *ExportTx) initialize(vm *VM) error { + tx.vm = vm + txBytes, err := Codec.Marshal(tx) // byte repr. 
of the signed tx + tx.bytes = txBytes + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) + return err +} + +// ID of this transaction +func (tx *ExportTx) ID() ids.ID { return tx.id } + +// Key returns the public key of the signer of this transaction +// Precondition: tx.Verify() has been called and returned nil +func (tx *ExportTx) Key() crypto.PublicKey { return tx.key } + +// Bytes returns the byte representation of an ExportTx +func (tx *ExportTx) Bytes() []byte { return tx.bytes } + +// InputUTXOs returns an empty set +func (tx *ExportTx) InputUTXOs() ids.Set { return ids.Set{} } + +// SyntacticVerify this transaction is well-formed +// Also populates [tx.Key] with the public key that signed this transaction +func (tx *ExportTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.key != nil: + return nil // Only verify the transaction once + case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network + return errWrongNetworkID + case tx.id.IsZero(): + return errInvalidID + case len(tx.Outs) == 0: + return errNoExportOutputs + } + + for _, out := range tx.Outs { + if err := out.Verify(); err != nil { + return err + } + if !out.AssetID().Equals(tx.vm.ava) { + return errUnknownAsset + } + } + if !ava.IsSortedTransferableOutputs(tx.Outs, Codec) { + return errOutputsNotSorted + } + + unsignedIntf := interface{}(&tx.UnsignedExportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx + if err != nil { + return err + } + + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) + if err != nil { + return err + } + + tx.key = key + return nil +} + +// SemanticVerify this transaction is valid. 
+func (tx *ExportTx) SemanticVerify(db database.Database) error { + if err := tx.SyntacticVerify(); err != nil { + return err + } + + amount := uint64(0) + for _, out := range tx.Outs { + newAmount, err := math.Add64(out.Out.Amount(), amount) + if err != nil { + return err + } + amount = newAmount + } + + accountID := tx.key.Address() + account, err := tx.vm.getAccount(db, accountID) + if err != nil { + return errDBAccount + } + + account, err = account.Remove(amount, tx.Nonce) + if err != nil { + return err + } + return tx.vm.putAccount(db, account) +} + +// Accept this transaction. +func (tx *ExportTx) Accept(batch database.Batch) error { + txID := tx.ID() + + smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm) + defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, Codec) + for i, out := range tx.Outs { + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{ + TxID: txID, + OutputIndex: uint32(i), + }, + Asset: ava.Asset{ID: out.AssetID()}, + Out: out.Out, + } + if err := state.FundPlatformUTXO(utxo); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} + +func (vm *VM) newExportTx(nonce uint64, networkID uint32, outs []*ava.TransferableOutput, from *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { + ava.SortTransferableOutputs(outs, Codec) + + tx := &ExportTx{UnsignedExportTx: UnsignedExportTx{ + NetworkID: networkID, + Nonce: nonce, + Outs: outs, + }} + + unsignedIntf := interface{}(&tx.UnsignedExportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. 
of unsigned transaction + if err != nil { + return nil, err + } + + sig, err := from.Sign(unsignedBytes) + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/factory.go b/vms/platformvm/factory.go index 25f9786..c55c070 100644 --- a/vms/platformvm/factory.go +++ b/vms/platformvm/factory.go @@ -16,14 +16,20 @@ var ( // Factory can create new instances of the Platform Chain type Factory struct { - ChainManager chains.Manager - Validators validators.Manager + ChainManager chains.Manager + Validators validators.Manager + StakingEnabled bool + AVA ids.ID + AVM ids.ID } // New returns a new instance of the Platform Chain -func (f *Factory) New() interface{} { +func (f *Factory) New() (interface{}, error) { return &VM{ - ChainManager: f.ChainManager, - Validators: f.Validators, - } + chainManager: f.ChainManager, + validators: f.Validators, + stakingEnabled: f.StakingEnabled, + ava: f.AVA, + avm: f.AVM, + }, nil } diff --git a/vms/platformvm/import_tx.go b/vms/platformvm/import_tx.go new file mode 100644 index 0000000..728a3f7 --- /dev/null +++ b/vms/platformvm/import_tx.go @@ -0,0 +1,268 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package platformvm + +import ( + "errors" + "fmt" + + "github.com/ava-labs/gecko/chains/atomic" + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/versiondb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errAssetIDMismatch = errors.New("asset IDs in the input don't match the utxo") + errWrongNumberOfCredentials = errors.New("should have the same number of credentials as inputs") + errNoImportInputs = errors.New("no import inputs") + errInputsNotSortedUnique = errors.New("inputs not sorted and unique") + errPublicKeySignatureMismatch = errors.New("signature doesn't match public key") + errUnknownAsset = errors.New("unknown asset ID") +) + +// UnsignedImportTx is an unsigned ImportTx +type UnsignedImportTx struct { + // ID of the network this blockchain exists on + NetworkID uint32 `serialize:"true"` + + // Next unused nonce of account paying the transaction fee and receiving the + // inputs of this transaction. + Nonce uint64 `serialize:"true"` + + // Account that this transaction is being sent by. This is needed to ensure the Credentials are replay safe. + Account ids.ShortID `serialize:"true"` + + Ins []*ava.TransferableInput `serialize:"true"` // The inputs to this transaction +} + +// ImportTx imports funds from the AVM +type ImportTx struct { + UnsignedImportTx `serialize:"true"` + + Sig [crypto.SECP256K1RSigLen]byte `serialize:"true"` + Creds []verify.Verifiable `serialize:"true"` // The credentials of this transaction + + vm *VM + id ids.ID + key crypto.PublicKey // public key of transaction signer + unsignedBytes []byte + bytes []byte +} + +func (tx *ImportTx) initialize(vm *VM) error { + tx.vm = vm + txBytes, err := Codec.Marshal(tx) // byte repr. 
of the signed tx + tx.bytes = txBytes + tx.id = ids.NewID(hashing.ComputeHash256Array(txBytes)) + return err +} + +// ID of this transaction +func (tx *ImportTx) ID() ids.ID { return tx.id } + +// Key returns the public key of the signer of this transaction +// Precondition: tx.Verify() has been called and returned nil +func (tx *ImportTx) Key() crypto.PublicKey { return tx.key } + +// UnsignedBytes returns the unsigned byte representation of an ImportTx +func (tx *ImportTx) UnsignedBytes() []byte { return tx.unsignedBytes } + +// Bytes returns the byte representation of an ImportTx +func (tx *ImportTx) Bytes() []byte { return tx.bytes } + +// InputUTXOs returns an empty set +func (tx *ImportTx) InputUTXOs() ids.Set { + set := ids.Set{} + for _, in := range tx.Ins { + set.Add(in.InputID()) + } + return set +} + +// SyntacticVerify this transaction is well-formed +// Also populates [tx.Key] with the public key that signed this transaction +func (tx *ImportTx) SyntacticVerify() error { + switch { + case tx == nil: + return errNilTx + case tx.key != nil: + return nil // Only verify the transaction once + case tx.NetworkID != tx.vm.Ctx.NetworkID: // verify the transaction is on this network + return errWrongNetworkID + case tx.id.IsZero(): + return errInvalidID + case len(tx.Ins) == 0: + return errNoImportInputs + case len(tx.Ins) != len(tx.Creds): + return errWrongNumberOfCredentials + } + + for _, in := range tx.Ins { + if err := in.Verify(); err != nil { + return err + } + if !in.AssetID().Equals(tx.vm.ava) { + return errUnknownAsset + } + } + if !ava.IsSortedAndUniqueTransferableInputs(tx.Ins) { + return errInputsNotSortedUnique + } + + for _, cred := range tx.Creds { + if err := cred.Verify(); err != nil { + return err + } + } + + unsignedIntf := interface{}(&tx.UnsignedImportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // byte repr of unsigned tx + if err != nil { + return err + } + + key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, 
tx.Sig[:]) + if err != nil { + return err + } + + if !tx.Account.Equals(key.Address()) { + return errPublicKeySignatureMismatch + } + + tx.key = key + tx.unsignedBytes = unsignedBytes + return nil +} + +// SemanticVerify this transaction is valid. +func (tx *ImportTx) SemanticVerify(db database.Database) error { + if err := tx.SyntacticVerify(); err != nil { + return err + } + + amount := uint64(0) + for _, in := range tx.Ins { + newAmount, err := math.Add64(in.In.Amount(), amount) + if err != nil { + return err + } + amount = newAmount + } + + // Deduct tx fee from payer's account + account, err := tx.vm.getAccount(db, tx.Key().Address()) + if err != nil { + return err + } + account, err = account.Add(amount) + if err != nil { + return err + } + account, err = account.Remove(0, tx.Nonce) + if err != nil { + return err + } + if err := tx.vm.putAccount(db, account); err != nil { + return err + } + + smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm) + defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm) + + state := ava.NewPrefixedState(smDB, Codec) + + for i, in := range tx.Ins { + cred := tx.Creds[i] + + utxoID := in.UTXOID.InputID() + utxo, err := state.AVMUTXO(utxoID) + if err != nil { + return err + } + utxoAssetID := utxo.AssetID() + inAssetID := in.AssetID() + if !utxoAssetID.Equals(inAssetID) { + return errAssetIDMismatch + } + + if err := tx.vm.fx.VerifyTransfer(tx, in.In, cred, utxo.Out); err != nil { + return err + } + } + + return nil +} + +// Accept this transaction. 
+func (tx *ImportTx) Accept(batch database.Batch) error { + smDB := tx.vm.Ctx.SharedMemory.GetDatabase(tx.vm.avm) + defer tx.vm.Ctx.SharedMemory.ReleaseDatabase(tx.vm.avm) + + vsmDB := versiondb.New(smDB) + + state := ava.NewPrefixedState(vsmDB, Codec) + for _, in := range tx.Ins { + utxoID := in.UTXOID.InputID() + if err := state.SpendAVMUTXO(utxoID); err != nil { + return err + } + } + + sharedBatch, err := vsmDB.CommitBatch() + if err != nil { + return err + } + + return atomic.WriteAll(batch, sharedBatch) +} + +func (vm *VM) newImportTx(nonce uint64, networkID uint32, ins []*ava.TransferableInput, from [][]*crypto.PrivateKeySECP256K1R, to *crypto.PrivateKeySECP256K1R) (*ImportTx, error) { + ava.SortTransferableInputsWithSigners(ins, from) + + tx := &ImportTx{UnsignedImportTx: UnsignedImportTx{ + NetworkID: networkID, + Nonce: nonce, + Account: to.PublicKey().Address(), + Ins: ins, + }} + + unsignedIntf := interface{}(&tx.UnsignedImportTx) + unsignedBytes, err := Codec.Marshal(&unsignedIntf) // Byte repr. 
of unsigned transaction + if err != nil { + return nil, err + } + + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range from { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return nil, fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + sig, err := to.SignHash(hash) + if err != nil { + return nil, err + } + copy(tx.Sig[:], sig) + + return tx, tx.initialize(vm) +} diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 4842ff1..b911c2a 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -8,46 +8,34 @@ import ( "errors" "fmt" "net/http" - "net/http/httptest" - - "github.com/gorilla/rpc/v2/json2" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/json" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/secp256k1fx" ) var ( - errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") - errParsingID = errors.New("error parsing ID") - errGetAccount = errors.New("error retrieving account information") - errGetAccounts = errors.New("error getting accounts controlled by specified user") - errGetUser = errors.New("error while getting user. 
Does user exist?") - errNoMethodWithGenesis = errors.New("no method was provided but genesis data was provided") - errCreatingTransaction = errors.New("problem while creating transaction") - errNoDestination = errors.New("call is missing field 'stakeDestination'") - errNoSource = errors.New("call is missing field 'stakeSource'") - errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'") + errMissingDecisionBlock = errors.New("should have a decision block within the past two blocks") + errParsingID = errors.New("error parsing ID") + errGetAccount = errors.New("error retrieving account information") + errGetAccounts = errors.New("error getting accounts controlled by specified user") + errGetUser = errors.New("error while getting user. Does user exist?") + errNoMethodWithGenesis = errors.New("no method was provided but genesis data was provided") + errCreatingTransaction = errors.New("problem while creating transaction") + errNoDestination = errors.New("call is missing field 'stakeDestination'") + errNoSource = errors.New("call is missing field 'stakeSource'") + errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'") + errNoBlockchainWithAlias = errors.New("there is no blockchain with the specified alias") + errDSCantValidate = errors.New("new blockchain can't be validated by default Subnet") ) -var key *crypto.PrivateKeySECP256K1R - -func init() { - cb58 := formatting.CB58{} - err := cb58.FromString("24jUJ9vZexUM6expyMcT48LBx27k1m7xpraoV62oSQAHdziao5") - if err != nil { - panic(err) - } - factory := crypto.FactorySECP256K1R{} - pk, err := factory.ToPrivateKey(cb58.Bytes) - if err != nil { - panic(err) - } - key = pk.(*crypto.PrivateKeySECP256K1R) -} - // Service defines the API calls that can be made to the platform chain type Service struct{ vm *VM } @@ -97,7 +85,7 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon response.Subnets = make([]APISubnet, len(subnets)) for i, subnet := 
range subnets { response.Subnets[i] = APISubnet{ - ID: subnet.ID, + ID: subnet.id, ControlKeys: subnet.ControlKeys, Threshold: json.Uint16(subnet.Threshold), } @@ -108,10 +96,10 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon idsSet := ids.Set{} idsSet.Add(args.IDs...) for _, subnet := range subnets { - if idsSet.Contains(subnet.ID) { + if idsSet.Contains(subnet.id) { response.Subnets = append(response.Subnets, APISubnet{ - ID: subnet.ID, + ID: subnet.id, ControlKeys: subnet.ControlKeys, Threshold: json.Uint16(subnet.Threshold), }, @@ -248,7 +236,7 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators args.SubnetID = DefaultSubnetID } - validators, ok := service.vm.Validators.GetValidatorSet(args.SubnetID) + validators, ok := service.vm.validators.GetValidatorSet(args.SubnetID) if !ok { return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID) } @@ -315,7 +303,7 @@ type ListAccountsReply struct { // ListAccounts lists all of the accounts controlled by [args.Username] func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, reply *ListAccountsReply) error { - service.vm.Ctx.Log.Debug("platform.listAccounts called for user '%s'", args.Username) + service.vm.Ctx.Log.Debug("listAccounts called for user '%s'", args.Username) // db holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -378,7 +366,7 @@ type CreateAccountReply struct { // The account's ID is [privKey].PublicKey().Address(), where [privKey] is a // private key controlled by the user. 
func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs, reply *CreateAccountReply) error { - service.vm.Ctx.Log.Debug("platform.createAccount called for user '%s'", args.Username) + service.vm.Ctx.Log.Debug("createAccount called for user '%s'", args.Username) // userDB holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -432,6 +420,11 @@ type genericTx struct { ****************************************************** */ +// CreateTxResponse is the response from calls to create a transaction +type CreateTxResponse struct { + UnsignedTx formatting.CB58 `json:"unsignedTx"` +} + // AddDefaultSubnetValidatorArgs are the arguments to AddDefaultSubnetValidator type AddDefaultSubnetValidatorArgs struct { APIDefaultSubnetValidator @@ -440,16 +433,10 @@ type AddDefaultSubnetValidatorArgs struct { PayerNonce json.Uint64 `json:"payerNonce"` } -// AddDefaultSubnetValidatorResponse is the response from a call to AddDefaultSubnetValidator -type AddDefaultSubnetValidatorResponse struct { - // The unsigned transaction - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - // AddDefaultSubnetValidator returns an unsigned transaction to add a validator to the default subnet // The returned unsigned transaction should be signed using Sign() -func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *AddDefaultSubnetValidatorResponse) error { - service.vm.Ctx.Log.Debug("platform.AddDefaultSubnetValidator called") +func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("AddDefaultSubnetValidator called") if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID args.ID = service.vm.Ctx.NodeID @@ -490,17 +477,11 @@ type AddDefaultSubnetDelegatorArgs struct { PayerNonce json.Uint64 `json:"payerNonce"` 
} -// AddDefaultSubnetDelegatorResponse is the response from a call to AddDefaultSubnetDelegator -type AddDefaultSubnetDelegatorResponse struct { - // The unsigned transaction - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - // AddDefaultSubnetDelegator returns an unsigned transaction to add a delegator // to the default subnet // The returned unsigned transaction should be signed using Sign() -func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *AddDefaultSubnetDelegatorResponse) error { - service.vm.Ctx.Log.Debug("platform.AddDefaultSubnetDelegator called") +func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("AddDefaultSubnetDelegator called") if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID args.ID = service.vm.Ctx.NodeID @@ -541,15 +522,9 @@ type AddNonDefaultSubnetValidatorArgs struct { PayerNonce json.Uint64 `json:"payerNonce"` } -// AddNonDefaultSubnetValidatorResponse is the response from a call to AddNonDefaultSubnetValidator -type AddNonDefaultSubnetValidatorResponse struct { - // The unsigned transaction - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - // AddNonDefaultSubnetValidator adds a validator to a subnet other than the default subnet // Returns the unsigned transaction, which must be signed using Sign -func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *AddNonDefaultSubnetValidatorResponse) error { +func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *CreateTxResponse) error { tx := addNonDefaultSubnetValidatorTx{ UnsignedAddNonDefaultSubnetValidatorTx: UnsignedAddNonDefaultSubnetValidatorTx{ SubnetValidator: SubnetValidator{ @@ -583,6 +558,86 @@ func (service *Service) AddNonDefaultSubnetValidator(_ 
*http.Request, args *AddN return nil } +// CreateSubnetArgs are the arguments to CreateSubnet +type CreateSubnetArgs struct { + // The ID member of APISubnet is ignored + APISubnet + + // Nonce of the account that pays the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` +} + +// CreateSubnet returns an unsigned transaction to create a new subnet. +// The unsigned transaction must be signed with the key of [args.Payer] +func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("platform.createSubnet called") + + // Create the transaction + tx := CreateSubnetTx{ + UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + ControlKeys: args.ControlKeys, + Threshold: uint16(args.Threshold), + }, + key: nil, + Sig: [65]byte{}, + bytes: nil, + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.UnsignedTx.Bytes = txBytes + return nil +} + +// ExportAVAArgs are the arguments to ExportAVA +type ExportAVAArgs struct { + // X-Chain address (without prepended X-) that will receive the exported AVA + To ids.ShortID `json:"to"` + + // Nonce of the account that pays the transaction fee and provides the export AVA + PayerNonce json.Uint64 `json:"payerNonce"` + + // Amount of nAVA to send + Amount json.Uint64 `json:"amount"` +} + +// ExportAVA returns an unsigned transaction to export AVA from the P-Chain to the X-Chain. +// After this tx is accepted, the AVA must be imported on the X-Chain side. 
+// The unsigned transaction must be signed with the key of the account exporting the AVA +// and paying the transaction fee +func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, response *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("platform.ExportAVA called") + + // Create the transaction + tx := ExportTx{UnsignedExportTx: UnsignedExportTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: service.vm.ava}, + Out: &secp256k1fx.TransferOutput{ + Amt: uint64(args.Amount), + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{args.To}, + }, + }, + }}, + }} + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.UnsignedTx.Bytes = txBytes + return nil +} + /* ****************************************************** **************** Sign/Issue Txs ********************** @@ -606,12 +661,12 @@ type SignArgs struct { // SignResponse is the response from Sign type SignResponse struct { // The signed bytes - Tx formatting.CB58 + Tx formatting.CB58 `json:"tx"` } // Sign [args.bytes] func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { - service.vm.Ctx.Log.Debug("platform.sign called") + service.vm.Ctx.Log.Debug("sign called") // Get the key of the Signer db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -642,8 +697,12 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons genTx.Tx, err = service.signAddNonDefaultSubnetValidatorTx(tx, key) case *CreateSubnetTx: genTx.Tx, err = service.signCreateSubnetTx(tx, key) + case *CreateChainTx: + genTx.Tx, err = service.signCreateChainTx(tx, key) + case *ExportTx: + genTx.Tx, err = service.signExportTx(tx, key) default: - err = errors.New("Could not parse given tx. 
Must be one of: addDefaultSubnetValidatorTx, addNonDefaultSubnetValidatorTx, createSubnetTx") + err = errors.New("Could not parse given tx") } if err != nil { return err @@ -655,7 +714,7 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) @@ -678,7 +737,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetDelegatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) @@ -701,7 +760,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele // Sign [xt] with [key] func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signCreateSubnetTx called") // TODO: Should we check if tx is already signed? 
unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) @@ -722,6 +781,29 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva return tx, nil } +// Sign [tx] with [key] +func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { + service.vm.Ctx.Log.Debug("signExportTx called") + + // TODO: Should we check if tx is already signed? + unsignedIntf := interface{}(&tx.UnsignedExportTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, fmt.Errorf("error serializing unsigned tx: %v", err) + } + + sig, err := key.Sign(unsignedTxBytes) + if err != nil { + return nil, errors.New("error while signing") + } + if len(sig) != crypto.SECP256K1RSigLen { + return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig)) + } + copy(tx.Sig[:], sig) + + return tx, nil +} + // Signs an unsigned or partially signed addNonDefaultSubnetValidatorTx with [key] // If [key] is a control key for the subnet and there is an empty spot in tx.ControlSigs, signs there // If [key] is a control key for the subnet and there is no empty spot in tx.ControlSigs, signs as payer @@ -729,7 +811,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddNonDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddNonDefaultSubnetValidatorTx called") // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) @@ -770,6 +852,183 @@ func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubn return nil, errors.New("no place for key to sign") } + crypto.SortSECP2561RSigs(tx.ControlSigs) + + return tx, nil +} + +// ImportAVAArgs are the arguments to ImportAVA +type ImportAVAArgs struct { + // ID of the account that will receive the imported funds, and pay the transaction fee + To ids.ShortID `json:"to"` + + // Next unused nonce of the account + PayerNonce json.Uint64 `json:"payerNonce"` + + // User that controls the account + Username string `json:"username"` + Password string `json:"password"` +} + +// ImportAVA returns an unsigned transaction to import AVA from the X-Chain. +// The AVA must have already been exported from the X-Chain. +// The unsigned transaction must be signed with the key of the tx fee payer. +func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response *SignResponse) error { + service.vm.Ctx.Log.Debug("platform.ImportAVA called") + + // Get the key of the Signer + db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) + if err != nil { + return fmt.Errorf("couldn't get data for user '%s'. 
Does user exist?", args.Username) + } + user := user{db: db} + + kc := secp256k1fx.NewKeychain() + key, err := user.getKey(args.To) + if err != nil { + return errDB + } + kc.Add(key) + + addrSet := ids.Set{} + addrSet.Add(ids.NewID(hashing.ComputeHash256Array(args.To.Bytes()))) + + utxos, err := service.vm.GetAtomicUTXOs(addrSet) + if err != nil { + return fmt.Errorf("problem retrieving user's atomic UTXOs: %w", err) + } + + amount := uint64(0) + time := service.vm.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := range utxos { + if !utxo.AssetID().Equals(service.vm.ava) { + continue + } + inputIntf, signers, err := kc.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amount, input.Amount()) + if err != nil { + return err + } + amount = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: service.vm.ava}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + // Create the transaction + tx := ImportTx{UnsignedImportTx: UnsignedImportTx{ + NetworkID: service.vm.Ctx.NetworkID, + Nonce: uint64(args.PayerNonce), + Account: args.To, + Ins: ins, + }} + + // TODO: Should we check if tx is already signed? 
+ unsignedIntf := interface{}(&tx.UnsignedImportTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return fmt.Errorf("error serializing unsigned tx: %w", err) + } + hash := hashing.ComputeHash256(unsignedTxBytes) + + sig, err := key.SignHash(hash) + if err != nil { + return errors.New("error while signing") + } + copy(tx.Sig[:], sig) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return fmt.Errorf("problem creating transaction: %w", err) + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, cred) + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) + if err != nil { + return errCreatingTransaction + } + + response.Tx.Bytes = txBytes + return nil +} + +// Signs an unsigned or partially signed CreateChainTx with [key] +// If [key] is a control key for the subnet and there is an empty spot in tx.ControlSigs, signs there +// If [key] is a control key for the subnet and there is no empty spot in tx.ControlSigs, signs as payer +// If [key] is not a control key, sign as payer (account controlled by [key] pays the tx fee) +// Sorts tx.ControlSigs before returning +// Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes +func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { + service.vm.Ctx.Log.Debug("signCreateChainTx called") + + // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it + unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) + unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) + if err != nil { + return nil, fmt.Errorf("error serializing unsigned tx: %v", err) + } + sig, err := key.Sign(unsignedTxBytes) + if err != nil { + return nil, errors.New("error while signing") + } + if len(sig) != crypto.SECP256K1RSigLen { + return nil, fmt.Errorf("expected signature to be length %d but was length %d", crypto.SECP256K1RSigLen, len(sig)) + } + + // Get information about the subnet + subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID) + if err != nil { + return nil, fmt.Errorf("problem getting subnet information: %v", err) + } + + // Find the location at which [key] should put its signature. + // If [key] is a control key for this subnet and there is an empty spot in tx.ControlSigs, sign there + // If [key] is a control key for this subnet and there is no empty spot in tx.ControlSigs, sign as payer + // If [key] is not a control key, sign as payer (account controlled by [key] pays the tx fee) + controlKeySet := ids.ShortSet{} + controlKeySet.Add(subnet.ControlKeys...) 
+ isControlKey := controlKeySet.Contains(key.PublicKey().Address()) + + payerSigEmpty := tx.PayerSig == [crypto.SECP256K1RSigLen]byte{} // true if no key has signed to pay the tx fee + + if isControlKey && len(tx.ControlSigs) != int(subnet.Threshold) { // Sign as controlSig + tx.ControlSigs = append(tx.ControlSigs, [crypto.SECP256K1RSigLen]byte{}) + copy(tx.ControlSigs[len(tx.ControlSigs)-1][:], sig) + } else if payerSigEmpty { // sign as payer + copy(tx.PayerSig[:], sig) + } else { + return nil, errors.New("no place for key to sign") + } + + crypto.SortSECP2561RSigs(tx.ControlSigs) + return tx, nil } @@ -787,6 +1046,8 @@ type IssueTxResponse struct { // IssueTx issues the transaction [args.Tx] to the network func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *IssueTxResponse) error { + service.vm.Ctx.Log.Debug("issueTx called") + genTx := genericTx{} if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { return err @@ -798,69 +1059,25 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is return fmt.Errorf("error initializing tx: %s", err) } service.vm.unissuedEvents.Push(tx) - defer service.vm.resetTimer() response.TxID = tx.ID() - return nil - case *CreateSubnetTx: + case DecisionTx: if err := tx.initialize(service.vm); err != nil { return fmt.Errorf("error initializing tx: %s", err) } service.vm.unissuedDecisionTxs = append(service.vm.unissuedDecisionTxs, tx) - defer service.vm.resetTimer() - response.TxID = tx.ID - return nil + response.TxID = tx.ID() + case AtomicTx: + if err := tx.initialize(service.vm); err != nil { + return fmt.Errorf("error initializing tx: %s", err) + } + service.vm.unissuedAtomicTxs = append(service.vm.unissuedAtomicTxs, tx) + response.TxID = tx.ID() default: - return errors.New("Could not parse given tx. 
Must be one of: addDefaultSubnetValidatorTx, addDefaultSubnetDelegatorTx, addNonDefaultSubnetValidatorTx, createSubnetTx") - } -} - -/* - ****************************************************** - **************** Create a Subnet ********************* - ****************************************************** - */ - -// CreateSubnetArgs are the arguments to CreateSubnet -type CreateSubnetArgs struct { - // The ID member of APISubnet is ignored - APISubnet - - // Nonce of the account that pays the transaction fee - PayerNonce json.Uint64 `json:"payerNonce"` -} - -// CreateSubnetResponse is the response from a call to CreateSubnet -type CreateSubnetResponse struct { - // Byte representation of the unsigned transaction to create a new subnet - UnsignedTx formatting.CB58 `json:"unsignedTx"` -} - -// CreateSubnet returns an unsigned transaction to create a new subnet. -// The unsigned transaction must be signed with the key of [args.Payer] -func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateSubnetResponse) error { - service.vm.Ctx.Log.Debug("platform.createSubnet called") - - // Create the transaction - tx := CreateSubnetTx{ - UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ - NetworkID: service.vm.Ctx.NetworkID, - Nonce: uint64(args.PayerNonce), - ControlKeys: args.ControlKeys, - Threshold: uint16(args.Threshold), - }, - key: nil, - Sig: [65]byte{}, - bytes: nil, + return errors.New("Could not parse given tx. 
Must be a TimedTx, DecisionTx, or AtomicTx") } - txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) - if err != nil { - return errCreatingTransaction - } - - response.UnsignedTx.Bytes = txBytes + service.vm.resetTimer() return nil - } /* @@ -871,6 +1088,9 @@ func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, re // CreateBlockchainArgs is the arguments for calling CreateBlockchain type CreateBlockchainArgs struct { + // ID of Subnet that validates the new blockchain + SubnetID ids.ID `json:"subnetID"` + // ID of the VM the new blockchain is running VMID string `json:"vmID"` @@ -880,81 +1100,68 @@ type CreateBlockchainArgs struct { // Human-readable name for the new blockchain, not necessarily unique Name string `json:"name"` - // To generate the byte representation of the genesis data for this blockchain, - // a POST request with body [GenesisData] is made to the API method whose name is [Method], whose - // endpoint is [Endpoint]. See Platform Chain documentation for more info and examples. 
- Method string `json:"method"` - Endpoint string `json:"endpoint"` - GenesisData interface{} `json:"genesisData"` + // Next unused nonce of the account paying the transaction fee + PayerNonce json.Uint64 `json:"payerNonce"` + + // Genesis state of the blockchain being created + GenesisData formatting.CB58 `json:"genesisData"` } -// CreateGenesisReply is the reply from a call to CreateGenesis -type CreateGenesisReply struct { - Bytes formatting.CB58 `json:"bytes"` -} +// CreateBlockchain returns an unsigned transaction to create a new blockchain +// Must be signed with the Subnet's control keys and with a key that pays the transaction fee before issuance +func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error { + service.vm.Ctx.Log.Debug("createBlockchain called") -// CreateBlockchainReply is the reply from calling CreateBlockchain -type CreateBlockchainReply struct { - BlockchainID ids.ID `json:"blockchainID"` -} - -// CreateBlockchain issues a transaction to the network to create a new blockchain -func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, reply *CreateBlockchainReply) error { - vmID, err := service.vm.ChainManager.LookupVM(args.VMID) + vmID, err := service.vm.chainManager.LookupVM(args.VMID) if err != nil { return fmt.Errorf("no VM with ID '%s' found", args.VMID) } fxIDs := []ids.ID(nil) for _, fxIDStr := range args.FxIDs { - fxID, err := service.vm.ChainManager.LookupVM(fxIDStr) + fxID, err := service.vm.chainManager.LookupVM(fxIDStr) if err != nil { return fmt.Errorf("no FX with ID '%s' found", fxIDStr) } fxIDs = append(fxIDs, fxID) } - - genesisBytes := []byte(nil) - if args.Method != "" { - buf, err := json2.EncodeClientRequest(args.Method, args.GenesisData) - if err != nil { - return fmt.Errorf("problem building blockchain genesis state: %w", err) - } - - writer := httptest.NewRecorder() - service.vm.Ctx.HTTP.Call( - /*writer=*/ writer, - /*method=*/ 
"POST", - /*base=*/ args.VMID, - /*endpoint=*/ args.Endpoint, - /*body=*/ bytes.NewBuffer(buf), - /*headers=*/ map[string]string{ - "Content-Type": "application/json", - }, - ) - - result := CreateGenesisReply{} - if err := json2.DecodeClientResponse(writer.Body, &result); err != nil { - return fmt.Errorf("problem building blockchain genesis state: %w", err) - } - genesisBytes = result.Bytes.Bytes - } else if args.GenesisData != nil { - return errNoMethodWithGenesis + // If creating AVM instance, use secp256k1fx + // TODO: Document FXs and have user specify them in API call + fxIDsSet := ids.Set{} + fxIDsSet.Add(fxIDs...) + if vmID.Equals(avm.ID) && !fxIDsSet.Contains(secp256k1fx.ID) { + fxIDs = append(fxIDs, secp256k1fx.ID) } - // TODO: Should use the key store to sign this transaction. - // TODO: Nonce shouldn't always be 0 - tx, err := service.vm.newCreateChainTx(0, genesisBytes, vmID, fxIDs, args.Name, service.vm.Ctx.NetworkID, key) + if args.SubnetID.Equals(DefaultSubnetID) { + return errDSCantValidate + } + + tx := CreateChainTx{ + UnsignedCreateChainTx: UnsignedCreateChainTx{ + NetworkID: service.vm.Ctx.NetworkID, + SubnetID: args.SubnetID, + Nonce: uint64(args.PayerNonce), + ChainName: args.Name, + VMID: vmID, + FxIDs: fxIDs, + GenesisData: args.GenesisData.Bytes, + }, + PayerAddress: ids.ShortID{}, + PayerSig: [crypto.SECP256K1RSigLen]byte{}, + ControlSigs: nil, + vm: nil, + id: ids.ID{}, + bytes: nil, + } + + txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) if err != nil { - return fmt.Errorf("problem creating transaction: %w", err) + service.vm.Ctx.Log.Error("problem marshaling createChainTx: %v", err) + return errCreatingTransaction } - // Add this tx to the set of unissued txs - service.vm.unissuedDecisionTxs = append(service.vm.unissuedDecisionTxs, tx) - service.vm.resetTimer() - - reply.BlockchainID = tx.ID() - + response.UnsignedTx.Bytes = txBytes return nil } @@ -972,7 +1179,9 @@ type GetBlockchainStatusReply struct { // GetBlockchainStatus gets 
the status of a blockchain with the ID [args.BlockchainID]. func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - _, err := service.vm.ChainManager.Lookup(args.BlockchainID) + service.vm.Ctx.Log.Debug("getBlockchainStatus called") + + _, err := service.vm.chainManager.Lookup(args.BlockchainID) if err == nil { reply.Status = Validating return nil @@ -1026,3 +1235,100 @@ func (service *Service) chainExists(blockID ids.ID, chainID ids.ID) (bool, error return false, nil } + +// ValidatedByArgs is the arguments for calling ValidatedBy +type ValidatedByArgs struct { + // ValidatedBy returns the ID of the Subnet validating the blockchain with this ID + BlockchainID ids.ID `json:"blockchainID"` +} + +// ValidatedByResponse is the reply from calling ValidatedBy +type ValidatedByResponse struct { + // ID of the Subnet validating the specified blockchain + SubnetID ids.ID `json:"subnetID"` +} + +// ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] +func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { + service.vm.Ctx.Log.Debug("validatedBy called") + + chain, err := service.vm.getChain(service.vm.DB, args.BlockchainID) + if err != nil { + return err + } + response.SubnetID = chain.SubnetID + return nil +} + +// ValidatesArgs are the arguments to Validates +type ValidatesArgs struct { + SubnetID ids.ID `json:"subnetID"` +} + +// ValidatesResponse is the response from calling Validates +type ValidatesResponse struct { + BlockchainIDs []ids.ID `json:"blockchainIDs"` +} + +// Validates returns the IDs of the blockchains validated by [args.SubnetID] +func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { + service.vm.Ctx.Log.Debug("validates called") + + // Verify that the Subnet exists + if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != 
nil { + return err + } + // Get the chains that exist + chains, err := service.vm.getChains(service.vm.DB) + if err != nil { + return err + } + // Filter to get the chains validated by the specified Subnet + for _, chain := range chains { + if chain.SubnetID.Equals(args.SubnetID) { + response.BlockchainIDs = append(response.BlockchainIDs, chain.ID()) + } + } + return nil +} + +// APIBlockchain is the representation of a blockchain used in API calls +type APIBlockchain struct { + // Blockchain's ID + ID ids.ID `json:"id"` + + // Blockchain's (non-unique) human-readable name + Name string `json:"name"` + + // Subnet that validates the blockchain + SubnetID ids.ID `json:"subnetID"` + + // Virtual Machine the blockchain runs + VMID ids.ID `json:"vmID"` +} + +// GetBlockchainsResponse is the response from a call to GetBlockchains +type GetBlockchainsResponse struct { + // blockchains that exist + Blockchains []APIBlockchain `json:"blockchains"` +} + +// GetBlockchains returns all of the blockchains that exist +func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response *GetBlockchainsResponse) error { + service.vm.Ctx.Log.Debug("getBlockchains called") + + chains, err := service.vm.getChains(service.vm.DB) + if err != nil { + return fmt.Errorf("couldn't retrieve blockchains: %v", err) + } + + for _, chain := range chains { + response.Blockchains = append(response.Blockchains, APIBlockchain{ + ID: chain.ID(), + Name: chain.ChainName, + SubnetID: chain.SubnetID, + VMID: chain.VMID, + }) + } + return nil +} diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index e1ece30..6efb8d6 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -22,7 +22,7 @@ func TestAddDefaultSubnetValidator(t *testing.T) { } func TestCreateBlockchainArgsParsing(t *testing.T) { - jsonString := `{"vmID":"lol","chainName":"awesome","genesisData":{"key":"value"}}` + jsonString := `{"vmID":"lol","fxIDs":["secp256k1"], 
"name":"awesome", "payerNonce":5, "genesisData":"SkB92YpWm4Q2iPnLGCuDPZPgUQMxajqQQuz91oi3xD984f8r"}` args := CreateBlockchainArgs{} err := json.Unmarshal([]byte(jsonString), &args) if err != nil { diff --git a/vms/platformvm/standard_block.go b/vms/platformvm/standard_block.go index 847f5c9..5f7e300 100644 --- a/vms/platformvm/standard_block.go +++ b/vms/platformvm/standard_block.go @@ -12,6 +12,8 @@ import ( // DecisionTx is an operation that can be decided without being proposed type DecisionTx interface { + ID() ids.ID + initialize(vm *VM) error // Attempt to verify this transaction with the provided state. The provided @@ -47,9 +49,10 @@ func (sb *StandardBlock) initialize(vm *VM, bytes []byte) error { // // This function also sets onAcceptDB database if the verification passes. func (sb *StandardBlock) Verify() error { + parentBlock := sb.parentBlock() // StandardBlock is not a modifier on a proposal block, so its parent must // be a decision. - parent, ok := sb.parentBlock().(decision) + parent, ok := parentBlock.(decision) if !ok { return errInvalidBlockType } diff --git a/vms/platformvm/state.go b/vms/platformvm/state.go index 9febf25..9f6d637 100644 --- a/vms/platformvm/state.go +++ b/vms/platformvm/state.go @@ -17,6 +17,7 @@ import ( var ( errEmptyAccountAddress = errors.New("account has empty address") + errNoSuchBlockchain = errors.New("there is no blockchain with the specified ID") ) // TODO: Cache prefixed IDs or use different way of keying into database @@ -146,7 +147,7 @@ func (vm *VM) putAccount(db database.Database, account Account) error { return nil } -// get the blockchains that exist +// get all the blockchains that exist func (vm *VM) getChains(db database.Database) ([]*CreateChainTx, error) { chainsInterface, err := vm.State.Get(db, chainsTypeID, chainsKey) if err != nil { @@ -154,12 +155,26 @@ func (vm *VM) getChains(db database.Database) ([]*CreateChainTx, error) { } chains, ok := chainsInterface.([]*CreateChainTx) if !ok { - 
vm.Ctx.Log.Warn("expected to retrieve []*CreateChainTx from database but got different type") + vm.Ctx.Log.Error("expected to retrieve []*CreateChainTx from database but got different type") return nil, errDBChains } return chains, nil } +// get a blockchain by its ID +func (vm *VM) getChain(db database.Database, ID ids.ID) (*CreateChainTx, error) { + chains, err := vm.getChains(db) + if err != nil { + return nil, err + } + for _, chain := range chains { + if chain.ID().Equals(ID) { + return chain, nil + } + } + return nil, errNoSuchBlockchain +} + // put the list of blockchains that exist to database func (vm *VM) putChains(db database.Database, chains createChainList) error { if err := vm.State.Put(db, chainsTypeID, chainsKey, chains); err != nil { @@ -211,18 +226,18 @@ func (vm *VM) getSubnets(db database.Database) ([]*CreateSubnetTx, error) { } // get the subnet with the specified ID -func (vm *VM) getSubnet(db database.Database, ID ids.ID) (*CreateSubnetTx, error) { +func (vm *VM) getSubnet(db database.Database, id ids.ID) (*CreateSubnetTx, error) { subnets, err := vm.getSubnets(db) if err != nil { return nil, err } for _, subnet := range subnets { - if subnet.ID.Equals(ID) { + if subnet.id.Equals(id) { return subnet, nil } } - return nil, fmt.Errorf("couldn't find subnet with ID %s", ID) + return nil, fmt.Errorf("couldn't find subnet with ID %s", id) } // register each type that we'll be storing in the database diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index cdfd1d7..80b66d2 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -9,6 +9,7 @@ import ( "net/http" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/json" ) @@ -74,11 +75,13 @@ type APIDefaultSubnetValidator struct { // [VMID] is the ID of the VM this chain runs. // [FxIDs] are the IDs of the Fxs the chain supports. 
// [Name] is a human-readable, non-unique name for the chain. +// [SubnetID] is the ID of the subnet that validates the chain type APIChain struct { GenesisData formatting.CB58 `json:"genesisData"` VMID ids.ID `json:"vmID"` FxIDs []ids.ID `json:"fxIDs"` Name string `json:"name"` + SubnetID ids.ID `json:"subnetID"` } // BuildGenesisArgs are the arguments used to create @@ -182,12 +185,15 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl tx := &CreateChainTx{ UnsignedCreateChainTx: UnsignedCreateChainTx{ NetworkID: uint32(args.NetworkID), + SubnetID: chain.SubnetID, Nonce: 0, ChainName: chain.Name, VMID: chain.VMID, FxIDs: chain.FxIDs, GenesisData: chain.GenesisData.Bytes, }, + ControlSigs: [][crypto.SECP256K1RSigLen]byte{}, + PayerSig: [crypto.SECP256K1RSigLen]byte{}, } if err := tx.initialize(nil); err != nil { return err diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go index d1bdc4e..04433ff 100644 --- a/vms/platformvm/static_service_test.go +++ b/vms/platformvm/static_service_test.go @@ -4,114 +4,12 @@ package platformvm import ( - "bytes" "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/json" ) -func TestBuildGenesis(t *testing.T) { - expected := []byte{ - 0x00, 0x00, 0x00, 0x01, 0x01, 0x5c, 0xce, 0x6c, - 0x55, 0xd6, 0xb5, 0x09, 0x84, 0x5c, 0x8c, 0x4e, - 0x30, 0xbe, 0xd9, 0x8d, 0x39, 0x1a, 0xe7, 0xf0, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x07, 0x5b, 0xcd, 0x15, - 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, - 0x05, 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, - 0x09, 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, - 0x8d, 0x39, 0x1a, 0xe7, 0xf0, 0x00, 0x00, 0x00, - 0x00, 0x3a, 0xde, 0x68, 0xb1, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x05, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x0f, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x01, 0x5c, 
0xce, 0x6c, 0x55, 0xd6, 0xb5, - 0x09, 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, - 0x8d, 0x39, 0x1a, 0xe7, 0xf0, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x13, 0x4d, 0x79, 0x20, 0x46, - 0x61, 0x76, 0x6f, 0x72, 0x69, 0x74, 0x65, 0x20, - 0x45, 0x70, 0x69, 0x73, 0x6f, 0x64, 0x65, 0x53, - 0x6f, 0x75, 0x74, 0x68, 0x20, 0x50, 0x61, 0x72, - 0x6b, 0x20, 0x65, 0x70, 0x69, 0x73, 0x6f, 0x64, - 0x65, 0x20, 0x70, 0x6c, 0x61, 0x79, 0x65, 0x72, - 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x17, 0x53, - 0x63, 0x6f, 0x74, 0x74, 0x20, 0x54, 0x65, 0x6e, - 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x20, 0x6d, 0x75, - 0x73, 0x74, 0x20, 0x64, 0x69, 0x65, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, - 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x05, - } - - addr, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") - genesisData := formatting.CB58{} - genesisData.FromString("CGgRrQ3nws7RRMGyDV59cetJBAwmsmDyCSgku") - vmID, _ := ids.FromString("dkFD29iYU9e9jah2nrnksTWJUy2VVpg5Lnqd7nQqvCJgR26H4") - - account := APIAccount{ - Address: addr, - Balance: 123456789, - } - weight := json.Uint64(987654321) - validator 
:= APIDefaultSubnetValidator{ - APIValidator: APIValidator{ - EndTime: 15, - Weight: &weight, - ID: addr, - }, - Destination: addr, - } - chains := APIChain{ - GenesisData: genesisData, - VMID: vmID, - Name: "My Favorite Episode", - } - - args := BuildGenesisArgs{ - Accounts: []APIAccount{ - account, - }, - Validators: []APIDefaultSubnetValidator{ - validator, - }, - Chains: []APIChain{ - chains, - }, - Time: 5, - } - reply := BuildGenesisReply{} - - ss := StaticService{} - if err := ss.BuildGenesis(nil, &args, &reply); err != nil { - t.Fatal(err) - } - - if !bytes.Equal(reply.Bytes.Bytes, expected) { - t.Fatalf("StaticService.BuildGenesis:\nReturned:\n%s\nExpected:\n%s", - formatting.DumpBytes{Bytes: reply.Bytes.Bytes}, - formatting.DumpBytes{Bytes: expected}) - } -} - func TestBuildGenesisInvalidAccountBalance(t *testing.T) { id, _ := ids.ShortFromString("8CrVPQZ4VSqgL8zTdvL14G8HqAfrBr4z") account := APIAccount{ diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 20eed9d..3e4bf78 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -20,12 +20,15 @@ import ( "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/math" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/codec" "github.com/ava-labs/gecko/vms/components/core" + "github.com/ava-labs/gecko/vms/secp256k1fx" ) const ( @@ -37,15 +40,19 @@ const ( subnetsTypeID // Delta is the synchrony bound used for safe decision making - Delta = 10 * time.Second // TODO change to longer period (2 minutes?) 
before release - - // InflationRate is the maximum inflation rate of AVA from staking - InflationRate = 1.04 + Delta = 10 * time.Second // BatchSize is the number of decision transaction to place into a block BatchSize = 30 - // TODO: Incorporate these constants + turn them into governable parameters + // NumberOfShares is the number of shares that a delegator is + // rewarded + NumberOfShares = 1000000 + + // TODO: Turn these constants into governable parameters + + // InflationRate is the maximum inflation rate of AVA from staking + InflationRate = 1.04 // MinimumStakeAmount is the minimum amount of $AVA one must bond to be a staker MinimumStakeAmount = 10 * units.MicroAva @@ -57,10 +64,6 @@ const ( // MaximumStakingDuration is the longest amount of time a staker can bond // their funds for. MaximumStakingDuration = 365 * 24 * time.Hour - - // NumberOfShares is the number of shares that a delegator is - // rewarded - NumberOfShares = 1000000 ) var ( @@ -108,6 +111,13 @@ func init() { Codec.RegisterType(&Abort{}), Codec.RegisterType(&Commit{}), Codec.RegisterType(&StandardBlock{}), + Codec.RegisterType(&AtomicBlock{}), + + Codec.RegisterType(&secp256k1fx.TransferInput{}), + Codec.RegisterType(&secp256k1fx.MintOutput{}), + Codec.RegisterType(&secp256k1fx.TransferOutput{}), + Codec.RegisterType(&secp256k1fx.MintOperation{}), + Codec.RegisterType(&secp256k1fx.Credential{}), Codec.RegisterType(&UnsignedAddDefaultSubnetValidatorTx{}), Codec.RegisterType(&addDefaultSubnetValidatorTx{}), @@ -124,6 +134,12 @@ func init() { Codec.RegisterType(&UnsignedCreateSubnetTx{}), Codec.RegisterType(&CreateSubnetTx{}), + Codec.RegisterType(&UnsignedImportTx{}), + Codec.RegisterType(&ImportTx{}), + + Codec.RegisterType(&UnsignedExportTx{}), + Codec.RegisterType(&ExportTx{}), + Codec.RegisterType(&advanceTimeTx{}), Codec.RegisterType(&rewardValidatorTx{}), ) @@ -136,10 +152,24 @@ func init() { type VM struct { *core.SnowmanVM - Validators validators.Manager + // Node's validator 
manager + // Maps Subnets --> nodes in the Subnet HEAD + validators validators.Manager + + // true if the node is being run with staking enabled + stakingEnabled bool // The node's chain manager - ChainManager chains.Manager + chainManager chains.Manager + + // AVA asset ID + ava ids.ID + + // AVM is the ID of the ava virtual machine + avm ids.ID + + fx secp256k1fx.Fx + codec codec.Codec // Used to create and use keys. factory crypto.FactorySECP256K1R @@ -154,6 +184,7 @@ type VM struct { // Transactions that have not been put into blocks yet unissuedEvents *EventHeap unissuedDecisionTxs []DecisionTx + unissuedAtomicTxs []AtomicTx // This timer goes off when it is time for the next validator to add/leave the validator set // When it goes off resetTimer() is called, triggering creation of a new block @@ -181,6 +212,12 @@ func (vm *VM) Initialize( return err } + vm.codec = codec.NewDefault() + if err := vm.fx.Initialize(vm); err != nil { + return err + } + vm.codec = Codec + // Register this VM's types with the database so we can get/put structs to/from it vm.registerDBTypes() @@ -265,8 +302,8 @@ func (vm *VM) Initialize( }) go ctx.Log.RecoverAndPanic(vm.timer.Dispatch) - if err := vm.updateValidators(DefaultSubnetID); err != nil { - ctx.Log.Error("failed to initialize the current validator set: %s", err) + if err := vm.initSubnets(); err != nil { + ctx.Log.Error("failed to initialize Subnets: %s", err) return err } @@ -282,29 +319,73 @@ func (vm *VM) Initialize( return nil } -// Create all of the chains that the database says should exist +// Create all chains that exist that this node validates +// Can only be called after initSubnets() func (vm *VM) initBlockchains() error { - vm.Ctx.Log.Verbo("platform chain initializing existing blockchains") - existingChains, err := vm.getChains(vm.DB) + vm.Ctx.Log.Info("initializing blockchains") + blockchains, err := vm.getChains(vm.DB) // get blockchains that exist if err != nil { return err } - for _, chain := range 
existingChains { // Create each blockchain - chainParams := chains.ChainParameters{ - ID: chain.ID(), - GenesisData: chain.GenesisData, - VMAlias: chain.VMID.String(), - } - for _, fxID := range chain.FxIDs { - chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) - } - vm.ChainManager.CreateChain(chainParams) + + for _, chain := range blockchains { + vm.createChain(chain) } return nil } +// Set the node's validator manager to be up to date +func (vm *VM) initSubnets() error { + vm.Ctx.Log.Info("initializing Subnets") + subnets, err := vm.getSubnets(vm.DB) + if err != nil { + return err + } + + if err := vm.updateValidators(DefaultSubnetID); err != nil { + return err + } + + for _, subnet := range subnets { + if err := vm.updateValidators(subnet.id); err != nil { + return err + } + } + + return nil +} + +// Create the blockchain described in [tx], but only if this node is a member of +// the Subnet that validates the chain +func (vm *VM) createChain(tx *CreateChainTx) { + // The validators that compose the Subnet that validates this chain + validators, subnetExists := vm.validators.GetValidatorSet(tx.SubnetID) + if !subnetExists { + vm.Ctx.Log.Error("blockchain %s validated by Subnet %s but couldn't get that Subnet. 
Blockchain not created") + return + } + if vm.stakingEnabled && !DefaultSubnetID.Equals(tx.SubnetID) && !validators.Contains(vm.Ctx.NodeID) { // This node doesn't validate this blockchain + return + } + + chainParams := chains.ChainParameters{ + ID: tx.ID(), + SubnetID: tx.SubnetID, + GenesisData: tx.GenesisData, + VMAlias: tx.VMID.String(), + } + for _, fxID := range tx.FxIDs { + chainParams.FxAliases = append(chainParams.FxAliases, fxID.String()) + } + vm.chainManager.CreateChain(chainParams) +} + // Shutdown this blockchain func (vm *VM) Shutdown() { + if vm.timer == nil { + return + } + vm.timer.Stop() if err := vm.DB.Close(); err != nil { vm.Ctx.Log.Error("Closing the database failed with %s", err) @@ -338,6 +419,24 @@ func (vm *VM) BuildBlock() (snowman.Block, error) { return blk, vm.DB.Commit() } + // If there is a pending atomic tx, build a block with it + if len(vm.unissuedAtomicTxs) > 0 { + tx := vm.unissuedAtomicTxs[0] + vm.unissuedAtomicTxs = vm.unissuedAtomicTxs[1:] + blk, err := vm.newAtomicBlock(preferredID, tx) + if err != nil { + return nil, err + } + if err := blk.Verify(); err != nil { + vm.resetTimer() + return nil, err + } + if err := vm.State.PutBlock(vm.DB, blk); err != nil { + return nil, err + } + return blk, vm.DB.Commit() + } + // Get the preferred block (which we want to build off) preferred, err := vm.getBlock(preferredID) vm.Ctx.Log.AssertNoError(err) @@ -502,9 +601,9 @@ func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { // Check if there is a block ready to be added to consensus // If so, notify the consensus engine func (vm *VM) resetTimer() { - // If there is a pending CreateChainTx, trigger building of a block - // with that transaction - if len(vm.unissuedDecisionTxs) > 0 { + // If there is a pending transaction, trigger building of a block with that + // transaction + if len(vm.unissuedDecisionTxs) > 0 || len(vm.unissuedAtomicTxs) > 0 { vm.SnowmanVM.NotifyBlockReady() return } @@ -584,7 +683,7 @@ func (vm *VM) 
nextValidatorChangeTime(db database.Database, start bool) time.Tim return earliest } for _, subnet := range subnets { - t := vm.nextSubnetValidatorChangeTime(db, subnet.ID, start) + t := vm.nextSubnetValidatorChangeTime(db, subnet.id, start) if t.Before(earliest) { earliest = t } @@ -614,13 +713,16 @@ func (vm *VM) nextSubnetValidatorChangeTime(db database.Database, subnetID ids.I // Returns: // 1) The validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] // 2) The pending validator set of subnet with ID [subnetID] when timestamp is advanced to [timestamp] +// 3) The IDs of the validators that start validating [subnetID] between now and [timestamp] +// 4) The IDs of the validators that stop validating [subnetID] between now and [timestamp] // Note that this method will not remove validators from the current validator set of the default subnet. // That happens in reward blocks. -func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, pending *EventHeap, err error) { +func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, subnetID ids.ID) (current, + pending *EventHeap, started, stopped ids.ShortSet, err error) { // remove validators whose end time <= [timestamp] current, err = vm.getCurrentValidators(db, subnetID) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err } if !subnetID.Equals(DefaultSubnetID) { // validators of default subnet removed in rewardValidatorTxs, not here for current.Len() > 0 { @@ -629,11 +731,12 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub break } current.Remove() + stopped.Add(next.Vdr().ID()) } } pending, err = vm.getPendingValidators(db, subnetID) if err != nil { - return nil, nil, err + return nil, nil, nil, nil, err } for pending.Len() > 0 { nextTx := pending.Peek() // pending staker with earliest start time @@ -642,8 +745,9 @@ func (vm *VM) calculateValidators(db 
database.Database, timestamp time.Time, sub } heap.Push(current, nextTx) heap.Pop(pending) + started.Add(nextTx.Vdr().ID()) } - return current, pending, nil + return current, pending, started, stopped, nil } func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { @@ -671,10 +775,12 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { return vdrList } +// update the node's validator manager to contain the current validator set of the given Subnet func (vm *VM) updateValidators(subnetID ids.ID) error { - validatorSet, ok := vm.Validators.GetValidatorSet(subnetID) - if !ok { - return fmt.Errorf("couldn't get the validator sampler of the %s subnet", subnetID) + validatorSet, subnetInitialized := vm.validators.GetValidatorSet(subnetID) + if !subnetInitialized { // validator manager doesn't know about this subnet yet + validatorSet = validators.NewSet() + vm.validators.PutValidatorSet(subnetID, validatorSet) } currentValidators, err := vm.getCurrentValidators(vm.DB, subnetID) @@ -686,3 +792,37 @@ func (vm *VM) updateValidators(subnetID ids.ID) error { validatorSet.Set(validators) return nil } + +// Codec ... +func (vm *VM) Codec() codec.Codec { return vm.codec } + +// Clock ... +func (vm *VM) Clock() *timer.Clock { return &vm.clock } + +// Logger ... +func (vm *VM) Logger() logging.Logger { return vm.Ctx.Log } + +// GetAtomicUTXOs returns the utxos that at least one of the provided addresses is +// referenced in. +func (vm *VM) GetAtomicUTXOs(addrs ids.Set) ([]*ava.UTXO, error) { + smDB := vm.Ctx.SharedMemory.GetDatabase(vm.avm) + defer vm.Ctx.SharedMemory.ReleaseDatabase(vm.avm) + + state := ava.NewPrefixedState(smDB, vm.codec) + + utxoIDs := ids.Set{} + for _, addr := range addrs.List() { + utxos, _ := state.AVMFunds(addr) + utxoIDs.Add(utxos...) 
+ } + + utxos := []*ava.UTXO{} + for _, utxoID := range utxoIDs.List() { + utxo, err := state.AVMUTXO(utxoID) + if err != nil { + return nil, err + } + utxos = append(utxos, utxo) + } + return utxos, nil +} diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 67c0084..e0af19f 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -10,6 +10,8 @@ import ( "testing" "time" + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database/memdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" @@ -18,7 +20,10 @@ import ( "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/core" + "github.com/ava-labs/gecko/vms/secp256k1fx" "github.com/ava-labs/gecko/vms/timestampvm" ) @@ -35,16 +40,17 @@ var ( // each key corresponds to an account that has $AVA and a genesis validator keys []*crypto.PrivateKeySECP256K1R - // amount all genesis validators stake + // amount all genesis validators stake in defaultVM defaultStakeAmount uint64 - // balance of accounts that exist at genesis + // balance of accounts that exist at genesis in defaultVM defaultBalance = 100 * MinimumStakeAmount // At genesis this account has AVA and is validating the default subnet defaultKey *crypto.PrivateKeySECP256K1R - // non-default subnet that exists at genesis in defaultVM + // non-default Subnet that exists at genesis in defaultVM + // Its controlKeys are keys[0], keys[1], keys[2] testSubnet1 *CreateSubnetTx testSubnet1ControlKeys []*crypto.PrivateKeySECP256K1R ) @@ -112,12 +118,13 @@ func defaultVM() *VM { } vm := &VM{ - SnowmanVM: &core.SnowmanVM{}, + SnowmanVM: &core.SnowmanVM{}, + chainManager: chains.MockManager{}, } defaultSubnet := validators.NewSet() - vm.Validators = 
validators.NewManager() - vm.Validators.PutValidatorSet(DefaultSubnetID, defaultSubnet) + vm.validators = validators.NewManager() + vm.validators.PutValidatorSet(DefaultSubnetID, defaultSubnet) vm.clock.Set(defaultGenesisTime) db := memdb.New() @@ -132,7 +139,7 @@ func defaultVM() *VM { testNetworkID, 0, []ids.ShortID{keys[0].PublicKey().Address(), keys[1].PublicKey().Address(), keys[2].PublicKey().Address()}, // control keys are keys[0], keys[1], keys[2] - 2, // 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet + 2, // threshold; 2 sigs from keys[0], keys[1], keys[2] needed to add validator to this subnet keys[0], ) if err != nil { @@ -149,7 +156,7 @@ func defaultVM() *VM { &EventHeap{ SortByStartTime: false, }, - tx.ID, + tx.id, ) if err != nil { panic(err) @@ -159,7 +166,7 @@ func defaultVM() *VM { &EventHeap{ SortByStartTime: true, }, - tx.ID, + tx.id, ) if err != nil { panic(err) @@ -433,7 +440,7 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), keys[0].PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0], @@ -478,7 +485,7 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { commit.Accept() // accept the proposal // Verify that new validator is in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID) + pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.id) if err != nil { t.Fatal(err) } @@ -506,7 +513,7 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), keys[0].PublicKey().Address(), - testSubnet1.ID, + testSubnet1.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[1], testSubnet1ControlKeys[2]}, keys[0], @@ -551,7 +558,7 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { abort.Accept() // 
reject the proposal // Verify that new validator NOT in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.ID) + pendingValidators, err := vm.getPendingValidators(vm.DB, testSubnet1.id) if err != nil { t.Fatal(err) } @@ -761,11 +768,13 @@ func TestCreateChain(t *testing.T) { tx, err := vm.newCreateChainTx( defaultNonce+1, + testSubnet1.id, nil, timestampvm.ID, nil, - "name ", + "name", testNetworkID, + []*crypto.PrivateKeySECP256K1R{testSubnet1ControlKeys[0], testSubnet1ControlKeys[1]}, keys[0], ) if err != nil { @@ -802,7 +811,7 @@ func TestCreateChain(t *testing.T) { } // Verify tx fee was deducted - account, err := vm.getAccount(vm.DB, tx.Key().Address()) + account, err := vm.getAccount(vm.DB, tx.PayerAddress) if err != nil { t.Fatal(err) } @@ -881,7 +890,7 @@ func TestCreateSubnet(t *testing.T) { uint64(startTime.Unix()), uint64(endTime.Unix()), keys[0].PublicKey().Address(), - createSubnetTx.ID, + createSubnetTx.id, testNetworkID, []*crypto.PrivateKeySECP256K1R{keys[0]}, keys[0], @@ -931,7 +940,7 @@ func TestCreateSubnet(t *testing.T) { commit.Accept() // add the validator to pending validator set // Verify validator is in pending validator set - pendingValidators, err := vm.getPendingValidators(vm.DB, createSubnetTx.ID) + pendingValidators, err := vm.getPendingValidators(vm.DB, createSubnetTx.id) if err != nil { t.Fatal(err) } @@ -985,7 +994,7 @@ func TestCreateSubnet(t *testing.T) { // Verify validator no longer in pending validator set // Verify validator is in pending validator set - pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID) + pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.id) if err != nil { t.Fatal(err) } @@ -994,7 +1003,7 @@ func TestCreateSubnet(t *testing.T) { } // Verify validator is in current validator set - currentValidators, err := vm.getCurrentValidators(vm.DB, createSubnetTx.ID) + currentValidators, err := vm.getCurrentValidators(vm.DB, 
createSubnetTx.id) if err != nil { t.Fatal(err) } @@ -1044,19 +1053,177 @@ func TestCreateSubnet(t *testing.T) { commit.Accept() // remove validator from current validator set // pending validators and current validator should be empty - pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.ID) + pendingValidators, err = vm.getPendingValidators(vm.DB, createSubnetTx.id) if err != nil { t.Fatal(err) } if pendingValidators.Len() != 0 { t.Fatal("pending validator set should be empty") } - currentValidators, err = vm.getCurrentValidators(vm.DB, createSubnetTx.ID) + currentValidators, err = vm.getCurrentValidators(vm.DB, createSubnetTx.id) if err != nil { t.Fatal(err) } if currentValidators.Len() != 0 { t.Fatal("pending validator set should be empty") } - +} + +// test asset import +func TestAtomicImport(t *testing.T) { + vm := defaultVM() + + avmID := ids.Empty.Prefix(0) + utxoID := ava.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + } + assetID := ids.Empty.Prefix(2) + amount := uint64(50000) + key := keys[0] + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + vm.Ctx.SharedMemory = sm.NewBlockchainSharedMemory(vm.Ctx.ChainID) + + tx, err := vm.newImportTx( + defaultNonce+1, + testNetworkID, + []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: utxoID, + Asset: ava.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: amount, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + [][]*crypto.PrivateKeySECP256K1R{[]*crypto.PrivateKeySECP256K1R{key}}, + key, + ) + if err != nil { + t.Fatal(err) + } + + vm.Ctx.Lock.Lock() + defer vm.Ctx.Lock.Unlock() + + vm.ava = assetID + vm.avm = avmID + + vm.unissuedAtomicTxs = append(vm.unissuedAtomicTxs, tx) + if _, err := vm.BuildBlock(); err == nil { + t.Fatalf("should have errored due to missing utxos") + } + + // Provide the avm UTXO: + + smDB := vm.Ctx.SharedMemory.GetDatabase(avmID) + + utxo := &ava.UTXO{ + UTXOID: utxoID, + Asset: ava.Asset{ID: 
assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{key.PublicKey().Address()}, + }, + }, + } + + state := ava.NewPrefixedState(smDB, Codec) + if err := state.FundAVMUTXO(utxo); err != nil { + t.Fatal(err) + } + + vm.Ctx.SharedMemory.ReleaseDatabase(avmID) + + vm.unissuedAtomicTxs = append(vm.unissuedAtomicTxs, tx) + blk, err := vm.BuildBlock() + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(); err != nil { + t.Fatal(err) + } + + blk.Accept() + + smDB = vm.Ctx.SharedMemory.GetDatabase(avmID) + defer vm.Ctx.SharedMemory.ReleaseDatabase(avmID) + + state = ava.NewPrefixedState(smDB, vm.codec) + if _, err := state.AVMUTXO(utxoID.InputID()); err == nil { + t.Fatalf("shouldn't have been able to read the utxo") + } +} + +// test optimistic asset import +func TestOptimisticAtomicImport(t *testing.T) { + vm := defaultVM() + + avmID := ids.Empty.Prefix(0) + utxoID := ava.UTXOID{ + TxID: ids.Empty.Prefix(1), + OutputIndex: 1, + } + assetID := ids.Empty.Prefix(2) + amount := uint64(50000) + key := keys[0] + + sm := &atomic.SharedMemory{} + sm.Initialize(logging.NoLog{}, memdb.New()) + + vm.Ctx.SharedMemory = sm.NewBlockchainSharedMemory(vm.Ctx.ChainID) + + tx, err := vm.newImportTx( + defaultNonce+1, + testNetworkID, + []*ava.TransferableInput{&ava.TransferableInput{ + UTXOID: utxoID, + Asset: ava.Asset{ID: assetID}, + In: &secp256k1fx.TransferInput{ + Amt: amount, + Input: secp256k1fx.Input{SigIndices: []uint32{0}}, + }, + }}, + [][]*crypto.PrivateKeySECP256K1R{[]*crypto.PrivateKeySECP256K1R{key}}, + key, + ) + if err != nil { + t.Fatal(err) + } + + vm.Ctx.Lock.Lock() + defer vm.Ctx.Lock.Unlock() + + vm.ava = assetID + vm.avm = avmID + + blk, err := vm.newAtomicBlock(vm.Preferred(), tx) + if err != nil { + t.Fatal(err) + } + + if err := blk.Verify(); err == nil { + t.Fatalf("should have errored due to an invalid atomic utxo") + } + + previousAccount, err := 
vm.getAccount(vm.DB, key.PublicKey().Address()) + if err != nil { + t.Fatal(err) + } + + blk.Accept() + + newAccount, err := vm.getAccount(vm.DB, key.PublicKey().Address()) + if err != nil { + t.Fatal(err) + } + + if newAccount.Balance != previousAccount.Balance+amount { + t.Fatalf("failed to provide funds") + } } diff --git a/vms/propertyfx/burn_operation.go b/vms/propertyfx/burn_operation.go new file mode 100644 index 0000000..c662f6e --- /dev/null +++ b/vms/propertyfx/burn_operation.go @@ -0,0 +1,14 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// BurnOperation ... +type BurnOperation struct { + secp256k1fx.Input `serialize:"true"` +} + +// Outs ... +func (op *BurnOperation) Outs() []verify.Verifiable { return nil } diff --git a/vms/propertyfx/burn_operation_test.go b/vms/propertyfx/burn_operation_test.go new file mode 100644 index 0000000..1e74833 --- /dev/null +++ b/vms/propertyfx/burn_operation_test.go @@ -0,0 +1,23 @@ +package propertyfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestBurnOperationInvalid(t *testing.T) { + op := BurnOperation{Input: secp256k1fx.Input{ + SigIndices: []uint32{1, 0}, + }} + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestBurnOperationNumberOfOutput(t *testing.T) { + op := BurnOperation{} + if outs := op.Outs(); len(outs) != 0 { + t.Fatalf("wrong number of outputs") + } +} diff --git a/vms/propertyfx/credential.go b/vms/propertyfx/credential.go new file mode 100644 index 0000000..0b468cf --- /dev/null +++ b/vms/propertyfx/credential.go @@ -0,0 +1,10 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// Credential ... 
+type Credential struct { + secp256k1fx.Credential `serialize:"true"` +} diff --git a/vms/propertyfx/factory.go b/vms/propertyfx/factory.go new file mode 100644 index 0000000..67ebb5a --- /dev/null +++ b/vms/propertyfx/factory.go @@ -0,0 +1,16 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/ids" +) + +// ID that this Fx uses when labeled +var ( + ID = ids.NewID([32]byte{'p', 'r', 'o', 'p', 'e', 'r', 't', 'y', 'f', 'x'}) +) + +// Factory ... +type Factory struct{} + +// New ... +func (f *Factory) New() (interface{}, error) { return &Fx{}, nil } diff --git a/vms/propertyfx/factory_test.go b/vms/propertyfx/factory_test.go new file mode 100644 index 0000000..d49d27c --- /dev/null +++ b/vms/propertyfx/factory_test.go @@ -0,0 +1,14 @@ +package propertyfx + +import ( + "testing" +) + +func TestFactory(t *testing.T) { + factory := Factory{} + if fx, err := factory.New(); err != nil { + t.Fatal(err) + } else if fx == nil { + t.Fatalf("Factory.New returned nil") + } +} diff --git a/vms/propertyfx/fx.go b/vms/propertyfx/fx.go new file mode 100644 index 0000000..41cd225 --- /dev/null +++ b/vms/propertyfx/fx.go @@ -0,0 +1,109 @@ +package propertyfx + +import ( + "errors" + + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errWrongTxType = errors.New("wrong tx type") + errWrongUTXOType = errors.New("wrong utxo type") + errWrongOperationType = errors.New("wrong operation type") + errWrongCredentialType = errors.New("wrong credential type") + + errNoUTXOs = errors.New("an operation must consume at least one UTXO") + errWrongNumberOfUTXOs = errors.New("wrong number of UTXOs for the operation") + errWrongNumberOfCreds = errors.New("wrong number of credentials for the operation") + + errWrongMintOutput = errors.New("wrong mint output provided") + + errCantTransfer = errors.New("cant transfer with this fx") +) + +// Fx ... 
+type Fx struct{ secp256k1fx.Fx } + +// Initialize ... +func (fx *Fx) Initialize(vmIntf interface{}) error { + if err := fx.InitializeVM(vmIntf); err != nil { + return err + } + + log := fx.VM.Logger() + log.Debug("Initializing nft fx") + + c := fx.VM.Codec() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&MintOutput{}), + c.RegisterType(&OwnedOutput{}), + c.RegisterType(&MintOperation{}), + c.RegisterType(&BurnOperation{}), + c.RegisterType(&Credential{}), + ) + return errs.Err +} + +// VerifyOperation ... +func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { + tx, ok := txIntf.(secp256k1fx.Tx) + switch { + case !ok: + return errWrongTxType + case len(utxosIntf) != 1: + return errWrongNumberOfUTXOs + } + + cred, ok := credIntf.(*Credential) + if !ok { + return errWrongCredentialType + } + + switch op := opIntf.(type) { + case *MintOperation: + return fx.VerifyMintOperation(tx, op, cred, utxosIntf[0]) + case *BurnOperation: + return fx.VerifyTransferOperation(tx, op, cred, utxosIntf[0]) + default: + return errWrongOperationType + } +} + +// VerifyMintOperation ... +func (fx *Fx) VerifyMintOperation(tx secp256k1fx.Tx, op *MintOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*MintOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + switch { + case !out.OutputOwners.Equals(&op.MintOutput.OutputOwners): + return errWrongMintOutput + default: + return fx.Fx.VerifyCredentials(tx, &op.MintInput, &cred.Credential, &out.OutputOwners) + } +} + +// VerifyTransferOperation ... 
+func (fx *Fx) VerifyTransferOperation(tx secp256k1fx.Tx, op *BurnOperation, cred *Credential, utxoIntf interface{}) error { + out, ok := utxoIntf.(*OwnedOutput) + if !ok { + return errWrongUTXOType + } + + if err := verify.All(op, cred, out); err != nil { + return err + } + + return fx.VerifyCredentials(tx, &op.Input, &cred.Credential, &out.OutputOwners) +} + +// VerifyTransfer ... +func (fx *Fx) VerifyTransfer(_, _, _, _ interface{}) error { return errCantTransfer } diff --git a/vms/propertyfx/fx_test.go b/vms/propertyfx/fx_test.go new file mode 100644 index 0000000..cfdf5c9 --- /dev/null +++ b/vms/propertyfx/fx_test.go @@ -0,0 +1,473 @@ +package propertyfx + +import ( + "testing" + "time" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + txBytes = []byte{0, 1, 2, 3, 4, 5} + sigBytes = [crypto.SECP256K1RSigLen]byte{ + 0x0e, 0x33, 0x4e, 0xbc, 0x67, 0xa7, 0x3f, 0xe8, + 0x24, 0x33, 0xac, 0xa3, 0x47, 0x88, 0xa6, 0x3d, + 0x58, 0xe5, 0x8e, 0xf0, 0x3a, 0xd5, 0x84, 0xf1, + 0xbc, 0xa3, 0xb2, 0xd2, 0x5d, 0x51, 0xd6, 0x9b, + 0x0f, 0x28, 0x5d, 0xcd, 0x3f, 0x71, 0x17, 0x0a, + 0xf9, 0xbf, 0x2d, 0xb1, 0x10, 0x26, 0x5c, 0xe9, + 0xdc, 0xc3, 0x9d, 0x7a, 0x01, 0x50, 0x9d, 0xe8, + 0x35, 0xbd, 0xcb, 0x29, 0x3a, 0xd1, 0x49, 0x32, + 0x00, + } + addrBytes = [hashing.AddrLen]byte{ + 0x01, 0x5c, 0xce, 0x6c, 0x55, 0xd6, 0xb5, 0x09, + 0x84, 0x5c, 0x8c, 0x4e, 0x30, 0xbe, 0xd9, 0x8d, + 0x39, 0x1a, 0xe7, 0xf0, + } +) + +func TestFxInitialize(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + fx := Fx{} + err := fx.Initialize(&vm) + if err != nil { + t.Fatal(err) + } +} + +func TestFxInitializeInvalid(t *testing.T) { + fx := Fx{} + err := 
fx.Initialize(nil) + if err == nil { + t.Fatalf("Should have returned an error") + } +} + +func TestFxVerifyMintOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + MintOutput: MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }}, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyMintOperationWrongTx(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(nil, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid tx") + } +} + +func 
TestFxVerifyMintOperationWrongNumberUTXOs(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to not enough utxos") + } +} + +func TestFxVerifyMintOperationWrongCredential(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, nil, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to a bad credential") + } +} + +func TestFxVerifyMintOperationInvalidUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: 
secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyMintOperationFailingVerification(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + ids.ShortEmpty, + }, + }} + op := &MintOperation{ + MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") + } +} + +func TestFxVerifyMintOperationInvalidGroupID(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &MintOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &MintOperation{ + 
MintInput: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }, + } + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid mint output") + } +} + +func TestFxVerifyTransferOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &OwnedOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &BurnOperation{Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }} + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err != nil { + t.Fatal(err) + } +} + +func TestFxVerifyTransferOperationWrongUTXO(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + op := &BurnOperation{Input: secp256k1fx.Input{ + SigIndices: []uint32{0}, + }} + + utxos := []interface{}{nil} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo") + } +} + +func TestFxVerifyTransferOperationFailedVerify(t *testing.T) { + + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: 
codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &OwnedOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + op := &BurnOperation{Input: secp256k1fx.Input{ + SigIndices: []uint32{1, 0}, + }} + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, op, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an invalid utxo output") + } +} + +func TestFxVerifyOperationUnknownOperation(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + tx := &secp256k1fx.TestTx{ + Bytes: txBytes, + } + cred := &Credential{Credential: secp256k1fx.Credential{ + Sigs: [][crypto.SECP256K1RSigLen]byte{ + sigBytes, + }, + }} + utxo := &OwnedOutput{OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }} + + utxos := []interface{}{utxo} + if err := fx.VerifyOperation(tx, nil, cred, utxos); err == nil { + t.Fatalf("VerifyOperation should have errored due to an unknown operation") + } +} + +func TestFxVerifyTransfer(t *testing.T) { + vm := secp256k1fx.TestVM{ + CLK: new(timer.Clock), + Code: codec.NewDefault(), + Log: logging.NoLog{}, + } + date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) + vm.CLK.Set(date) + + fx := Fx{} + if err := fx.Initialize(&vm); err != nil { + t.Fatal(err) + } + if err := fx.VerifyTransfer(nil, nil, 
nil, nil); err == nil { + t.Fatalf("this Fx doesn't support transfers") + } +} diff --git a/vms/propertyfx/mint_operation.go b/vms/propertyfx/mint_operation.go new file mode 100644 index 0000000..af7b920 --- /dev/null +++ b/vms/propertyfx/mint_operation.go @@ -0,0 +1,37 @@ +package propertyfx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +var ( + errNilMintOperation = errors.New("nil mint operation") +) + +// MintOperation ... +type MintOperation struct { + MintInput secp256k1fx.Input `serialize:"true" json:"mintInput"` + MintOutput MintOutput `serialize:"true" json:"mintOutput"` + OwnedOutput OwnedOutput `serialize:"true" json:"ownedOutput"` +} + +// Outs ... +func (op *MintOperation) Outs() []verify.Verifiable { + return []verify.Verifiable{ + &op.MintOutput, + &op.OwnedOutput, + } +} + +// Verify ... +func (op *MintOperation) Verify() error { + switch { + case op == nil: + return errNilMintOperation + default: + return verify.All(&op.MintInput, &op.MintOutput, &op.OwnedOutput) + } +} diff --git a/vms/propertyfx/mint_operation_test.go b/vms/propertyfx/mint_operation_test.go new file mode 100644 index 0000000..dc2f350 --- /dev/null +++ b/vms/propertyfx/mint_operation_test.go @@ -0,0 +1,34 @@ +package propertyfx + +import ( + "testing" + + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestMintOperationVerifyNil(t *testing.T) { + op := (*MintOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("nil operation should have failed verification") + } +} + +func TestMintOperationVerifyInvalidOutput(t *testing.T) { + op := MintOperation{ + OwnedOutput: OwnedOutput{ + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + }, + }, + } + if err := op.Verify(); err == nil { + t.Fatalf("operation should have failed verification") + } +} + +func TestMintOperationOuts(t *testing.T) { + op := MintOperation{} + if outs := op.Outs(); len(outs) != 2 { + t.Fatalf("Wrong number of 
outputs returned") + } +} diff --git a/vms/propertyfx/mint_output.go b/vms/propertyfx/mint_output.go new file mode 100644 index 0000000..46042da --- /dev/null +++ b/vms/propertyfx/mint_output.go @@ -0,0 +1,10 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// MintOutput ... +type MintOutput struct { + secp256k1fx.OutputOwners `serialize:"true"` +} diff --git a/vms/propertyfx/owned_output.go b/vms/propertyfx/owned_output.go new file mode 100644 index 0000000..2ddb81c --- /dev/null +++ b/vms/propertyfx/owned_output.go @@ -0,0 +1,10 @@ +package propertyfx + +import ( + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// OwnedOutput ... +type OwnedOutput struct { + secp256k1fx.OutputOwners `serialize:"true"` +} diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go new file mode 100644 index 0000000..78e1ead --- /dev/null +++ b/vms/rpcchainvm/factory.go @@ -0,0 +1,54 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package rpcchainvm + +import ( + "errors" + "os/exec" + + "github.com/hashicorp/go-plugin" +) + +var ( + errWrongVM = errors.New("wrong vm type") +) + +// Factory ... +type Factory struct { + Path string +} + +// New ... 
+func (f *Factory) New() (interface{}, error) { + client := plugin.NewClient(&plugin.ClientConfig{ + HandshakeConfig: Handshake, + Plugins: PluginMap, + Cmd: exec.Command("sh", "-c", f.Path), + AllowedProtocols: []plugin.Protocol{ + plugin.ProtocolNetRPC, + plugin.ProtocolGRPC, + }, + }) + + rpcClient, err := client.Client() + if err != nil { + client.Kill() + return nil, err + } + + raw, err := rpcClient.Dispense("vm") + if err != nil { + client.Kill() + return nil, err + } + + vm, ok := raw.(*VMClient) + if !ok { + client.Kill() + return nil, errWrongVM + } + + vm.SetProcess(client) + return vm, nil +} diff --git a/vms/rpcchainvm/ghttp/gconn/conn_client.go b/vms/rpcchainvm/ghttp/gconn/conn_client.go new file mode 100644 index 0000000..d35e1a4 --- /dev/null +++ b/vms/rpcchainvm/ghttp/gconn/conn_client.go @@ -0,0 +1,118 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gconn + +import ( + "context" + "errors" + "io" + "net" + "time" + + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gconn/proto" +) + +// Client is an implementation of a messenger channel that talks over RPC. +type Client struct { + client proto.ConnClient + local net.Addr + remote net.Addr + toClose []io.Closer +} + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client proto.ConnClient, local, remote net.Addr, toClose ...io.Closer) *Client { + return &Client{ + client: client, + local: local, + remote: remote, + toClose: toClose, + } +} + +// Read ... +func (c *Client) Read(p []byte) (int, error) { + resp, err := c.client.Read(context.Background(), &proto.ReadRequest{ + Length: int32(len(p)), + }) + if err != nil { + return 0, err + } + + copy(p, resp.Read) + + if resp.Errored { + err = errors.New(resp.Error) + } + return len(resp.Read), err +} + +// Write ... 
+func (c *Client) Write(b []byte) (int, error) { + resp, err := c.client.Write(context.Background(), &proto.WriteRequest{ + Payload: b, + }) + if err != nil { + return 0, err + } + + if resp.Errored { + err = errors.New(resp.Error) + } + return int(resp.Length), err +} + +// Close ... +func (c *Client) Close() error { + _, err := c.client.Close(context.Background(), &proto.CloseRequest{}) + errs := wrappers.Errs{} + errs.Add(err) + for _, toClose := range c.toClose { + errs.Add(toClose.Close()) + } + return errs.Err +} + +// LocalAddr ... +func (c *Client) LocalAddr() net.Addr { return c.local } + +// RemoteAddr ... +func (c *Client) RemoteAddr() net.Addr { return c.remote } + +// SetDeadline ... +func (c *Client) SetDeadline(t time.Time) error { + bytes, err := t.MarshalBinary() + if err != nil { + return err + } + _, err = c.client.SetDeadline(context.Background(), &proto.SetDeadlineRequest{ + Time: bytes, + }) + return err +} + +// SetReadDeadline ... +func (c *Client) SetReadDeadline(t time.Time) error { + bytes, err := t.MarshalBinary() + if err != nil { + return err + } + _, err = c.client.SetReadDeadline(context.Background(), &proto.SetReadDeadlineRequest{ + Time: bytes, + }) + return err +} + +// SetWriteDeadline ... +func (c *Client) SetWriteDeadline(t time.Time) error { + bytes, err := t.MarshalBinary() + if err != nil { + return err + } + _, err = c.client.SetWriteDeadline(context.Background(), &proto.SetWriteDeadlineRequest{ + Time: bytes, + }) + return err +} diff --git a/vms/rpcchainvm/ghttp/gconn/conn_server.go b/vms/rpcchainvm/ghttp/gconn/conn_server.go new file mode 100644 index 0000000..287463d --- /dev/null +++ b/vms/rpcchainvm/ghttp/gconn/conn_server.go @@ -0,0 +1,80 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package gconn + +import ( + "context" + "net" + "time" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gconn/proto" +) + +// Server is a http.Handler that is managed over RPC. +type Server struct{ conn net.Conn } + +// NewServer returns a http.Handler instance manage remotely +func NewServer(conn net.Conn) *Server { + return &Server{conn: conn} +} + +// Read ... +func (s *Server) Read(ctx context.Context, req *proto.ReadRequest) (*proto.ReadResponse, error) { + buf := make([]byte, int(req.Length)) + n, err := s.conn.Read(buf) + resp := &proto.ReadResponse{ + Read: buf[:n], + } + if err != nil { + resp.Errored = true + resp.Error = err.Error() + } + return resp, nil +} + +// Write ... +func (s *Server) Write(ctx context.Context, req *proto.WriteRequest) (*proto.WriteResponse, error) { + n, err := s.conn.Write(req.Payload) + if err != nil { + return nil, err + } + return &proto.WriteResponse{ + Length: int32(n), + }, nil +} + +// Close ... +func (s *Server) Close(ctx context.Context, req *proto.CloseRequest) (*proto.CloseResponse, error) { + return &proto.CloseResponse{}, s.conn.Close() +} + +// SetDeadline ... +func (s *Server) SetDeadline(ctx context.Context, req *proto.SetDeadlineRequest) (*proto.SetDeadlineResponse, error) { + deadline := time.Time{} + err := deadline.UnmarshalBinary(req.Time) + if err != nil { + return nil, err + } + return &proto.SetDeadlineResponse{}, s.conn.SetDeadline(deadline) +} + +// SetReadDeadline ... +func (s *Server) SetReadDeadline(ctx context.Context, req *proto.SetReadDeadlineRequest) (*proto.SetReadDeadlineResponse, error) { + deadline := time.Time{} + err := deadline.UnmarshalBinary(req.Time) + if err != nil { + return nil, err + } + return &proto.SetReadDeadlineResponse{}, s.conn.SetReadDeadline(deadline) +} + +// SetWriteDeadline ... 
+func (s *Server) SetWriteDeadline(ctx context.Context, req *proto.SetWriteDeadlineRequest) (*proto.SetWriteDeadlineResponse, error) { + deadline := time.Time{} + err := deadline.UnmarshalBinary(req.Time) + if err != nil { + return nil, err + } + return &proto.SetWriteDeadlineResponse{}, s.conn.SetWriteDeadline(deadline) +} diff --git a/vms/rpcchainvm/ghttp/gconn/proto/conn.pb.go b/vms/rpcchainvm/ghttp/gconn/proto/conn.pb.go new file mode 100644 index 0000000..6a4e46f --- /dev/null +++ b/vms/rpcchainvm/ghttp/gconn/proto/conn.pb.go @@ -0,0 +1,788 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: conn.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ReadRequest struct { + Length int32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{0} +} + +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadRequest.Unmarshal(m, b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return xxx_messageInfo_ReadRequest.Size(m) +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetLength() int32 { + if m != nil { + return m.Length + } + return 0 +} + +type ReadResponse struct { + Read []byte `protobuf:"bytes,1,opt,name=read,proto3" json:"read,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{1} +} + +func (m *ReadResponse) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResponse.Unmarshal(m, b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) +} +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return xxx_messageInfo_ReadResponse.Size(m) +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetRead() []byte { + if m != nil { + return m.Read + } + return nil +} + +func (m *ReadResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ReadResponse) GetErrored() bool { + if m != nil { + return m.Errored + } + return false +} + +type WriteRequest struct { + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{2} +} + +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteRequest.Unmarshal(m, b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return xxx_messageInfo_WriteRequest.Size(m) +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var 
xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +type WriteResponse struct { + Length int32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (m *WriteResponse) String() string { return proto.CompactTextString(m) } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{3} +} + +func (m *WriteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteResponse.Unmarshal(m, b) +} +func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) +} +func (m *WriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponse.Merge(m, src) +} +func (m *WriteResponse) XXX_Size() int { + return xxx_messageInfo_WriteResponse.Size(m) +} +func (m *WriteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteResponse proto.InternalMessageInfo + +func (m *WriteResponse) GetLength() int32 { + if m != nil { + return m.Length + } + return 0 +} + +func (m *WriteResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *WriteResponse) GetErrored() bool { + if m != nil { + return m.Errored + } + return false +} + +type CloseRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m 
*CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{4} +} + +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (m *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(m, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +type CloseResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseResponse) Reset() { *m = CloseResponse{} } +func (m *CloseResponse) String() string { return proto.CompactTextString(m) } +func (*CloseResponse) ProtoMessage() {} +func (*CloseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{5} +} + +func (m *CloseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseResponse.Unmarshal(m, b) +} +func (m *CloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseResponse.Marshal(b, m, deterministic) +} +func (m *CloseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseResponse.Merge(m, src) +} +func (m *CloseResponse) XXX_Size() int { + return xxx_messageInfo_CloseResponse.Size(m) +} +func (m *CloseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CloseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseResponse proto.InternalMessageInfo + +type SetDeadlineRequest struct { + Time []byte `protobuf:"bytes,1,opt,name=time,proto3" 
json:"time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetDeadlineRequest) Reset() { *m = SetDeadlineRequest{} } +func (m *SetDeadlineRequest) String() string { return proto.CompactTextString(m) } +func (*SetDeadlineRequest) ProtoMessage() {} +func (*SetDeadlineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{6} +} + +func (m *SetDeadlineRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetDeadlineRequest.Unmarshal(m, b) +} +func (m *SetDeadlineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetDeadlineRequest.Marshal(b, m, deterministic) +} +func (m *SetDeadlineRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetDeadlineRequest.Merge(m, src) +} +func (m *SetDeadlineRequest) XXX_Size() int { + return xxx_messageInfo_SetDeadlineRequest.Size(m) +} +func (m *SetDeadlineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetDeadlineRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetDeadlineRequest proto.InternalMessageInfo + +func (m *SetDeadlineRequest) GetTime() []byte { + if m != nil { + return m.Time + } + return nil +} + +type SetDeadlineResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetDeadlineResponse) Reset() { *m = SetDeadlineResponse{} } +func (m *SetDeadlineResponse) String() string { return proto.CompactTextString(m) } +func (*SetDeadlineResponse) ProtoMessage() {} +func (*SetDeadlineResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{7} +} + +func (m *SetDeadlineResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetDeadlineResponse.Unmarshal(m, b) +} +func (m *SetDeadlineResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetDeadlineResponse.Marshal(b, m, 
deterministic) +} +func (m *SetDeadlineResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetDeadlineResponse.Merge(m, src) +} +func (m *SetDeadlineResponse) XXX_Size() int { + return xxx_messageInfo_SetDeadlineResponse.Size(m) +} +func (m *SetDeadlineResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetDeadlineResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetDeadlineResponse proto.InternalMessageInfo + +type SetReadDeadlineRequest struct { + Time []byte `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetReadDeadlineRequest) Reset() { *m = SetReadDeadlineRequest{} } +func (m *SetReadDeadlineRequest) String() string { return proto.CompactTextString(m) } +func (*SetReadDeadlineRequest) ProtoMessage() {} +func (*SetReadDeadlineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{8} +} + +func (m *SetReadDeadlineRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetReadDeadlineRequest.Unmarshal(m, b) +} +func (m *SetReadDeadlineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetReadDeadlineRequest.Marshal(b, m, deterministic) +} +func (m *SetReadDeadlineRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetReadDeadlineRequest.Merge(m, src) +} +func (m *SetReadDeadlineRequest) XXX_Size() int { + return xxx_messageInfo_SetReadDeadlineRequest.Size(m) +} +func (m *SetReadDeadlineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetReadDeadlineRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetReadDeadlineRequest proto.InternalMessageInfo + +func (m *SetReadDeadlineRequest) GetTime() []byte { + if m != nil { + return m.Time + } + return nil +} + +type SetReadDeadlineResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*SetReadDeadlineResponse) Reset() { *m = SetReadDeadlineResponse{} } +func (m *SetReadDeadlineResponse) String() string { return proto.CompactTextString(m) } +func (*SetReadDeadlineResponse) ProtoMessage() {} +func (*SetReadDeadlineResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{9} +} + +func (m *SetReadDeadlineResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetReadDeadlineResponse.Unmarshal(m, b) +} +func (m *SetReadDeadlineResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetReadDeadlineResponse.Marshal(b, m, deterministic) +} +func (m *SetReadDeadlineResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetReadDeadlineResponse.Merge(m, src) +} +func (m *SetReadDeadlineResponse) XXX_Size() int { + return xxx_messageInfo_SetReadDeadlineResponse.Size(m) +} +func (m *SetReadDeadlineResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetReadDeadlineResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetReadDeadlineResponse proto.InternalMessageInfo + +type SetWriteDeadlineRequest struct { + Time []byte `protobuf:"bytes,1,opt,name=time,proto3" json:"time,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetWriteDeadlineRequest) Reset() { *m = SetWriteDeadlineRequest{} } +func (m *SetWriteDeadlineRequest) String() string { return proto.CompactTextString(m) } +func (*SetWriteDeadlineRequest) ProtoMessage() {} +func (*SetWriteDeadlineRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{10} +} + +func (m *SetWriteDeadlineRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetWriteDeadlineRequest.Unmarshal(m, b) +} +func (m *SetWriteDeadlineRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetWriteDeadlineRequest.Marshal(b, m, deterministic) +} +func (m *SetWriteDeadlineRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_SetWriteDeadlineRequest.Merge(m, src) +} +func (m *SetWriteDeadlineRequest) XXX_Size() int { + return xxx_messageInfo_SetWriteDeadlineRequest.Size(m) +} +func (m *SetWriteDeadlineRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetWriteDeadlineRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetWriteDeadlineRequest proto.InternalMessageInfo + +func (m *SetWriteDeadlineRequest) GetTime() []byte { + if m != nil { + return m.Time + } + return nil +} + +type SetWriteDeadlineResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetWriteDeadlineResponse) Reset() { *m = SetWriteDeadlineResponse{} } +func (m *SetWriteDeadlineResponse) String() string { return proto.CompactTextString(m) } +func (*SetWriteDeadlineResponse) ProtoMessage() {} +func (*SetWriteDeadlineResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f401a58c1fc7ceef, []int{11} +} + +func (m *SetWriteDeadlineResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetWriteDeadlineResponse.Unmarshal(m, b) +} +func (m *SetWriteDeadlineResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetWriteDeadlineResponse.Marshal(b, m, deterministic) +} +func (m *SetWriteDeadlineResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetWriteDeadlineResponse.Merge(m, src) +} +func (m *SetWriteDeadlineResponse) XXX_Size() int { + return xxx_messageInfo_SetWriteDeadlineResponse.Size(m) +} +func (m *SetWriteDeadlineResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetWriteDeadlineResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetWriteDeadlineResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ReadRequest)(nil), "proto.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "proto.ReadResponse") + proto.RegisterType((*WriteRequest)(nil), "proto.WriteRequest") + 
proto.RegisterType((*WriteResponse)(nil), "proto.WriteResponse") + proto.RegisterType((*CloseRequest)(nil), "proto.CloseRequest") + proto.RegisterType((*CloseResponse)(nil), "proto.CloseResponse") + proto.RegisterType((*SetDeadlineRequest)(nil), "proto.SetDeadlineRequest") + proto.RegisterType((*SetDeadlineResponse)(nil), "proto.SetDeadlineResponse") + proto.RegisterType((*SetReadDeadlineRequest)(nil), "proto.SetReadDeadlineRequest") + proto.RegisterType((*SetReadDeadlineResponse)(nil), "proto.SetReadDeadlineResponse") + proto.RegisterType((*SetWriteDeadlineRequest)(nil), "proto.SetWriteDeadlineRequest") + proto.RegisterType((*SetWriteDeadlineResponse)(nil), "proto.SetWriteDeadlineResponse") +} + +func init() { proto.RegisterFile("conn.proto", fileDescriptor_f401a58c1fc7ceef) } + +var fileDescriptor_f401a58c1fc7ceef = []byte{ + // 351 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x52, 0xd1, 0x4e, 0xea, 0x40, + 0x10, 0x4d, 0x2f, 0x2d, 0x5c, 0x87, 0x22, 0x66, 0x40, 0x2c, 0x9b, 0xa8, 0x64, 0x13, 0x93, 0x3e, + 0x28, 0x26, 0xf8, 0x09, 0xf0, 0x01, 0x66, 0x79, 0xe0, 0xb9, 0xda, 0x89, 0x92, 0xd4, 0x5d, 0x6c, + 0xd7, 0x07, 0xff, 0xc1, 0x8f, 0x36, 0x6c, 0xb7, 0xa5, 0x05, 0x9a, 0xe0, 0x53, 0xf7, 0xec, 0x39, + 0x73, 0x66, 0x7a, 0x66, 0x01, 0x5e, 0x95, 0x94, 0xd3, 0x4d, 0xaa, 0xb4, 0x42, 0xcf, 0x7c, 0xf8, + 0x1d, 0x74, 0x05, 0x45, 0xb1, 0xa0, 0xcf, 0x2f, 0xca, 0x34, 0x8e, 0xa0, 0x9d, 0x90, 0x7c, 0xd3, + 0xef, 0x81, 0x33, 0x71, 0x42, 0x4f, 0x58, 0xc4, 0x05, 0xf8, 0xb9, 0x2c, 0xdb, 0x28, 0x99, 0x11, + 0x22, 0xb8, 0x29, 0x45, 0xb1, 0x51, 0xf9, 0xc2, 0x9c, 0x71, 0x08, 0x1e, 0xa5, 0xa9, 0x4a, 0x83, + 0x7f, 0x13, 0x27, 0x3c, 0x13, 0x39, 0xc0, 0x00, 0x3a, 0xe6, 0x40, 0x71, 0xd0, 0x9a, 0x38, 0xe1, + 0x7f, 0x51, 0x40, 0x1e, 0x82, 0xbf, 0x4a, 0xd7, 0x9a, 0x8a, 0xde, 0x01, 0x74, 0x36, 0xd1, 0x77, + 0xa2, 0x4a, 0xdb, 0x02, 0xf2, 0x15, 0xf4, 0xac, 0xd2, 0xb6, 0x6f, 0x18, 0xf3, 0xcf, 0x23, 0x9c, + 0x83, 0x3f, 0x4f, 0x54, 0x56, 
0x8c, 0xc0, 0xfb, 0xd0, 0xb3, 0x38, 0x6f, 0xc4, 0x43, 0xc0, 0x25, + 0xe9, 0x05, 0x45, 0x71, 0xb2, 0x96, 0xe5, 0xa4, 0x08, 0xae, 0x5e, 0x7f, 0x50, 0xf1, 0xf7, 0xdb, + 0x33, 0xbf, 0x84, 0x41, 0x4d, 0x69, 0x0d, 0xee, 0x61, 0xb4, 0x24, 0xbd, 0xcd, 0xee, 0x14, 0x93, + 0x31, 0x5c, 0x1d, 0xa8, 0xad, 0xd1, 0x83, 0xa1, 0x4c, 0x0c, 0xa7, 0x38, 0x31, 0x08, 0x0e, 0xe5, + 0xb9, 0xd5, 0xec, 0xa7, 0x05, 0xee, 0x5c, 0x49, 0x89, 0x8f, 0xe0, 0x6e, 0x7b, 0x21, 0xe6, 0x6f, + 0x62, 0x5a, 0x79, 0x09, 0x6c, 0x50, 0xbb, 0xb3, 0xb9, 0xcf, 0xc0, 0x33, 0x96, 0x58, 0xb0, 0xd5, + 0x05, 0xb2, 0x61, 0xfd, 0x72, 0x57, 0x63, 0x32, 0x2d, 0x6b, 0xaa, 0x89, 0x97, 0x35, 0xb5, 0xd8, + 0x71, 0x01, 0xdd, 0x4a, 0x98, 0x38, 0xb6, 0xa2, 0xc3, 0x55, 0x30, 0x76, 0x8c, 0xb2, 0x2e, 0xcf, + 0xd0, 0xdf, 0x4b, 0x13, 0xaf, 0x77, 0xf2, 0x23, 0x3b, 0x61, 0x37, 0x4d, 0xb4, 0x75, 0x5c, 0xc2, + 0xc5, 0x7e, 0xaa, 0x58, 0xa9, 0x39, 0xb6, 0x1d, 0x76, 0xdb, 0xc8, 0xe7, 0xa6, 0x2f, 0x6d, 0xc3, + 0x3f, 0xfd, 0x06, 0x00, 0x00, 0xff, 0xff, 0xe3, 0x73, 0x4d, 0x43, 0x9e, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ConnClient is the client API for Conn service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
+type ConnClient interface { + Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) + Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) + Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) + SetDeadline(ctx context.Context, in *SetDeadlineRequest, opts ...grpc.CallOption) (*SetDeadlineResponse, error) + SetReadDeadline(ctx context.Context, in *SetReadDeadlineRequest, opts ...grpc.CallOption) (*SetReadDeadlineResponse, error) + SetWriteDeadline(ctx context.Context, in *SetWriteDeadlineRequest, opts ...grpc.CallOption) (*SetWriteDeadlineResponse, error) +} + +type connClient struct { + cc grpc.ClientConnInterface +} + +func NewConnClient(cc grpc.ClientConnInterface) ConnClient { + return &connClient{cc} +} + +func (c *connClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) { + out := new(ReadResponse) + err := c.cc.Invoke(ctx, "/proto.Conn/Read", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *connClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { + out := new(WriteResponse) + err := c.cc.Invoke(ctx, "/proto.Conn/Write", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *connClient) Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) { + out := new(CloseResponse) + err := c.cc.Invoke(ctx, "/proto.Conn/Close", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *connClient) SetDeadline(ctx context.Context, in *SetDeadlineRequest, opts ...grpc.CallOption) (*SetDeadlineResponse, error) { + out := new(SetDeadlineResponse) + err := c.cc.Invoke(ctx, "/proto.Conn/SetDeadline", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *connClient) SetReadDeadline(ctx context.Context, in *SetReadDeadlineRequest, opts ...grpc.CallOption) (*SetReadDeadlineResponse, error) { + out := new(SetReadDeadlineResponse) + err := c.cc.Invoke(ctx, "/proto.Conn/SetReadDeadline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *connClient) SetWriteDeadline(ctx context.Context, in *SetWriteDeadlineRequest, opts ...grpc.CallOption) (*SetWriteDeadlineResponse, error) { + out := new(SetWriteDeadlineResponse) + err := c.cc.Invoke(ctx, "/proto.Conn/SetWriteDeadline", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ConnServer is the server API for Conn service. +type ConnServer interface { + Read(context.Context, *ReadRequest) (*ReadResponse, error) + Write(context.Context, *WriteRequest) (*WriteResponse, error) + Close(context.Context, *CloseRequest) (*CloseResponse, error) + SetDeadline(context.Context, *SetDeadlineRequest) (*SetDeadlineResponse, error) + SetReadDeadline(context.Context, *SetReadDeadlineRequest) (*SetReadDeadlineResponse, error) + SetWriteDeadline(context.Context, *SetWriteDeadlineRequest) (*SetWriteDeadlineResponse, error) +} + +// UnimplementedConnServer can be embedded to have forward compatible implementations. 
+type UnimplementedConnServer struct { +} + +func (*UnimplementedConnServer) Read(ctx context.Context, req *ReadRequest) (*ReadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Read not implemented") +} +func (*UnimplementedConnServer) Write(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Write not implemented") +} +func (*UnimplementedConnServer) Close(ctx context.Context, req *CloseRequest) (*CloseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") +} +func (*UnimplementedConnServer) SetDeadline(ctx context.Context, req *SetDeadlineRequest) (*SetDeadlineResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetDeadline not implemented") +} +func (*UnimplementedConnServer) SetReadDeadline(ctx context.Context, req *SetReadDeadlineRequest) (*SetReadDeadlineResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetReadDeadline not implemented") +} +func (*UnimplementedConnServer) SetWriteDeadline(ctx context.Context, req *SetWriteDeadlineRequest) (*SetWriteDeadlineResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetWriteDeadline not implemented") +} + +func RegisterConnServer(s *grpc.Server, srv ConnServer) { + s.RegisterService(&_Conn_serviceDesc, srv) +} + +func _Conn_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnServer).Read(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Conn/Read", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnServer).Read(ctx, req.(*ReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_Conn_Write_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnServer).Write(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Conn/Write", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnServer).Write(ctx, req.(*WriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Conn_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnServer).Close(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Conn/Close", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnServer).Close(ctx, req.(*CloseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Conn_SetDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetDeadlineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnServer).SetDeadline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Conn/SetDeadline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnServer).SetDeadline(ctx, req.(*SetDeadlineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Conn_SetReadDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetReadDeadlineRequest) + 
if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnServer).SetReadDeadline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Conn/SetReadDeadline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnServer).SetReadDeadline(ctx, req.(*SetReadDeadlineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Conn_SetWriteDeadline_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetWriteDeadlineRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ConnServer).SetWriteDeadline(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Conn/SetWriteDeadline", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ConnServer).SetWriteDeadline(ctx, req.(*SetWriteDeadlineRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Conn_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Conn", + HandlerType: (*ConnServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Read", + Handler: _Conn_Read_Handler, + }, + { + MethodName: "Write", + Handler: _Conn_Write_Handler, + }, + { + MethodName: "Close", + Handler: _Conn_Close_Handler, + }, + { + MethodName: "SetDeadline", + Handler: _Conn_SetDeadline_Handler, + }, + { + MethodName: "SetReadDeadline", + Handler: _Conn_SetReadDeadline_Handler, + }, + { + MethodName: "SetWriteDeadline", + Handler: _Conn_SetWriteDeadline_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "conn.proto", +} diff --git a/vms/rpcchainvm/ghttp/gconn/proto/conn.proto b/vms/rpcchainvm/ghttp/gconn/proto/conn.proto new file mode 100644 index 0000000..cc5d150 --- /dev/null +++ b/vms/rpcchainvm/ghttp/gconn/proto/conn.proto @@ -0,0 +1,53 @@ +syntax = "proto3"; +package proto; 
+ +message ReadRequest { + int32 length = 1; +} + +message ReadResponse { + bytes read = 1; + string error = 2; + bool errored = 3; +} + +message WriteRequest { + bytes payload = 1; +} + +message WriteResponse { + int32 length = 1; + string error = 2; + bool errored = 3; +} + +message CloseRequest {} + +message CloseResponse {} + +message SetDeadlineRequest { + bytes time = 1; +} + +message SetDeadlineResponse {} + +message SetReadDeadlineRequest { + bytes time = 1; +} + +message SetReadDeadlineResponse {} + +message SetWriteDeadlineRequest { + bytes time = 1; +} + +message SetWriteDeadlineResponse {} + +service Conn { + rpc Read(ReadRequest) returns (ReadResponse); + rpc Write(WriteRequest) returns (WriteResponse); + rpc Close(CloseRequest) returns (CloseResponse); + rpc SetDeadline(SetDeadlineRequest) returns (SetDeadlineResponse); + rpc SetReadDeadline(SetReadDeadlineRequest) returns (SetReadDeadlineResponse); + rpc SetWriteDeadline(SetWriteDeadlineRequest) returns (SetWriteDeadlineResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/ghttp/greadcloser/proto/reader.pb.go b/vms/rpcchainvm/ghttp/greadcloser/proto/reader.pb.go new file mode 100644 index 0000000..682a2e1 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greadcloser/proto/reader.pb.go @@ -0,0 +1,323 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reader.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ReadRequest struct { + Length int32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f534e48276761a43, []int{0} +} + +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadRequest.Unmarshal(m, b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return xxx_messageInfo_ReadRequest.Size(m) +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetLength() int32 { + if m != nil { + return m.Length + } + return 0 +} + +type ReadResponse struct { + Read []byte `protobuf:"bytes,1,opt,name=read,proto3" json:"read,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f534e48276761a43, []int{1} +} + +func (m *ReadResponse) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResponse.Unmarshal(m, b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) +} +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return xxx_messageInfo_ReadResponse.Size(m) +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetRead() []byte { + if m != nil { + return m.Read + } + return nil +} + +func (m *ReadResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ReadResponse) GetErrored() bool { + if m != nil { + return m.Errored + } + return false +} + +type CloseRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseRequest) Reset() { *m = CloseRequest{} } +func (m *CloseRequest) String() string { return proto.CompactTextString(m) } +func (*CloseRequest) ProtoMessage() {} +func (*CloseRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f534e48276761a43, []int{2} +} + +func (m *CloseRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseRequest.Unmarshal(m, b) +} +func (m *CloseRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseRequest.Marshal(b, m, deterministic) +} +func (m *CloseRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseRequest.Merge(m, src) +} +func (m *CloseRequest) XXX_Size() int { + return xxx_messageInfo_CloseRequest.Size(m) +} +func (m *CloseRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CloseRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseRequest proto.InternalMessageInfo + +type CloseResponse struct { + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CloseResponse) Reset() { *m = CloseResponse{} } +func (m *CloseResponse) String() string { return proto.CompactTextString(m) } +func (*CloseResponse) ProtoMessage() {} +func (*CloseResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f534e48276761a43, []int{3} +} + +func (m *CloseResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CloseResponse.Unmarshal(m, b) +} +func (m *CloseResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CloseResponse.Marshal(b, m, deterministic) +} +func (m *CloseResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CloseResponse.Merge(m, src) +} +func (m *CloseResponse) XXX_Size() int { + return xxx_messageInfo_CloseResponse.Size(m) +} +func (m *CloseResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CloseResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CloseResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ReadRequest)(nil), "proto.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "proto.ReadResponse") + proto.RegisterType((*CloseRequest)(nil), "proto.CloseRequest") + proto.RegisterType((*CloseResponse)(nil), "proto.CloseResponse") +} + +func init() { proto.RegisterFile("reader.proto", fileDescriptor_f534e48276761a43) } + +var fileDescriptor_f534e48276761a43 = []byte{ + // 195 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8e, 0xd1, 0x6a, 0x84, 0x30, + 0x10, 0x45, 0x49, 0x6b, 0x6c, 0x3b, 0x4d, 0x5b, 0x98, 0x4a, 0x09, 0x3e, 0x49, 0xa0, 0xe0, 0x93, + 0x0b, 0xee, 0x27, 0xec, 0x1f, 0xcc, 0x1f, 0xb8, 0x38, 0xec, 0x3e, 0xb8, 0xc6, 0x4d, 0xdc, 0xff, + 0x5f, 0x4c, 0x22, 0xe8, 0x53, 0xee, 0xbd, 0x99, 0x99, 0x7b, 0x40, 0x39, 0xee, 0x7a, 0x76, 0xcd, + 0xe4, 0xec, 0x6c, 0x51, 0x86, 0xc7, 0xfc, 0xc3, 0x27, 0x71, 0xd7, 0x13, 0xdf, 0x1f, 0xec, 0x67, + 0xfc, 
0x83, 0x7c, 0xe0, 0xf1, 0x32, 0x5f, 0xb5, 0xa8, 0x44, 0x2d, 0x29, 0x39, 0x43, 0xa0, 0xe2, + 0x98, 0x9f, 0xec, 0xe8, 0x19, 0x11, 0xb2, 0xe5, 0x5a, 0x98, 0x52, 0x14, 0x34, 0x16, 0x20, 0xd9, + 0x39, 0xeb, 0xf4, 0x4b, 0x25, 0xea, 0x0f, 0x8a, 0x06, 0x35, 0xbc, 0x05, 0xc1, 0xbd, 0x7e, 0xad, + 0x44, 0xfd, 0x4e, 0xab, 0x35, 0xdf, 0xa0, 0x4e, 0x83, 0xf5, 0x9c, 0xba, 0xcd, 0x0f, 0x7c, 0x25, + 0x1f, 0x4b, 0xda, 0x1b, 0xe4, 0x14, 0x90, 0xf1, 0x00, 0xd9, 0xa2, 0x10, 0x23, 0x7c, 0xb3, 0x41, + 0x2e, 0x7f, 0x77, 0x59, 0xe2, 0x6b, 0x41, 0x86, 0x5b, 0xb8, 0xfe, 0x6e, 0x9b, 0xca, 0x62, 0x1f, + 0xc6, 0x9d, 0x73, 0x1e, 0xc2, 0xe3, 0x33, 0x00, 0x00, 0xff, 0xff, 0x47, 0x74, 0x81, 0x61, 0x28, + 0x01, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ReaderClient is the client API for Reader service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ReaderClient interface { + Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) + Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) +} + +type readerClient struct { + cc grpc.ClientConnInterface +} + +func NewReaderClient(cc grpc.ClientConnInterface) ReaderClient { + return &readerClient{cc} +} + +func (c *readerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) { + out := new(ReadResponse) + err := c.cc.Invoke(ctx, "/proto.Reader/Read", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *readerClient) Close(ctx context.Context, in *CloseRequest, opts ...grpc.CallOption) (*CloseResponse, error) { + out := new(CloseResponse) + err := c.cc.Invoke(ctx, "/proto.Reader/Close", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ReaderServer is the server API for Reader service. +type ReaderServer interface { + Read(context.Context, *ReadRequest) (*ReadResponse, error) + Close(context.Context, *CloseRequest) (*CloseResponse, error) +} + +// UnimplementedReaderServer can be embedded to have forward compatible implementations. +type UnimplementedReaderServer struct { +} + +func (*UnimplementedReaderServer) Read(ctx context.Context, req *ReadRequest) (*ReadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Read not implemented") +} +func (*UnimplementedReaderServer) Close(ctx context.Context, req *CloseRequest) (*CloseResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Close not implemented") +} + +func RegisterReaderServer(s *grpc.Server, srv ReaderServer) { + s.RegisterService(&_Reader_serviceDesc, srv) +} + +func _Reader_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReaderServer).Read(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Reader/Read", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReaderServer).Read(ctx, req.(*ReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Reader_Close_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CloseRequest) + if err := dec(in); err != nil { + 
return nil, err + } + if interceptor == nil { + return srv.(ReaderServer).Close(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Reader/Close", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReaderServer).Close(ctx, req.(*CloseRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Reader_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Reader", + HandlerType: (*ReaderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Read", + Handler: _Reader_Read_Handler, + }, + { + MethodName: "Close", + Handler: _Reader_Close_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "reader.proto", +} diff --git a/vms/rpcchainvm/ghttp/greadcloser/proto/reader.proto b/vms/rpcchainvm/ghttp/greadcloser/proto/reader.proto new file mode 100644 index 0000000..0b300b3 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greadcloser/proto/reader.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; +package proto; + +message ReadRequest { + int32 length = 1; +} + +message ReadResponse { + bytes read = 1; + string error = 2; + bool errored = 3; +} + +message CloseRequest {} + +message CloseResponse {} + +service Reader { + rpc Read(ReadRequest) returns (ReadResponse); + rpc Close(CloseRequest) returns (CloseResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/ghttp/greadcloser/reader_client.go b/vms/rpcchainvm/ghttp/greadcloser/reader_client.go new file mode 100644 index 0000000..d5d139e --- /dev/null +++ b/vms/rpcchainvm/ghttp/greadcloser/reader_client.go @@ -0,0 +1,42 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package greadcloser + +import ( + "context" + "errors" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greadcloser/proto" +) + +// Client is an implementation of a messenger channel that talks over RPC. 
+type Client struct{ client proto.ReaderClient } + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client proto.ReaderClient) *Client { + return &Client{client: client} +} + +// Read ... +func (c *Client) Read(p []byte) (int, error) { + resp, err := c.client.Read(context.Background(), &proto.ReadRequest{ + Length: int32(len(p)), + }) + if err != nil { + return 0, err + } + + copy(p, resp.Read) + + if resp.Errored { + err = errors.New(resp.Error) + } + return len(resp.Read), err +} + +// Close ... +func (c *Client) Close() error { + _, err := c.client.Close(context.Background(), &proto.CloseRequest{}) + return err +} diff --git a/vms/rpcchainvm/ghttp/greadcloser/reader_server.go b/vms/rpcchainvm/ghttp/greadcloser/reader_server.go new file mode 100644 index 0000000..5dd7602 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greadcloser/reader_server.go @@ -0,0 +1,38 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package greadcloser + +import ( + "context" + "io" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greadcloser/proto" +) + +// Server is a http.Handler that is managed over RPC. +type Server struct{ readCloser io.ReadCloser } + +// NewServer returns a http.Handler instance manage remotely +func NewServer(readCloser io.ReadCloser) *Server { + return &Server{readCloser: readCloser} +} + +// Read ... +func (s *Server) Read(ctx context.Context, req *proto.ReadRequest) (*proto.ReadResponse, error) { + buf := make([]byte, int(req.Length)) + n, err := s.readCloser.Read(buf) + resp := &proto.ReadResponse{ + Read: buf[:n], + } + if err != nil { + resp.Errored = true + resp.Error = err.Error() + } + return resp, nil +} + +// Close ... 
+func (s *Server) Close(ctx context.Context, req *proto.CloseRequest) (*proto.CloseResponse, error) { + return &proto.CloseResponse{}, s.readCloser.Close() +} diff --git a/vms/rpcchainvm/ghttp/greader/proto/reader.pb.go b/vms/rpcchainvm/ghttp/greader/proto/reader.pb.go new file mode 100644 index 0000000..5bdefa0 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greader/proto/reader.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: reader.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type ReadRequest struct { + Length int32 `protobuf:"varint,1,opt,name=length,proto3" json:"length,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f534e48276761a43, []int{0} +} + +func (m *ReadRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadRequest.Unmarshal(m, b) +} +func (m *ReadRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadRequest.Marshal(b, m, deterministic) +} +func (m *ReadRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadRequest.Merge(m, src) +} +func (m *ReadRequest) XXX_Size() int { + return xxx_messageInfo_ReadRequest.Size(m) +} +func (m *ReadRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ReadRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadRequest proto.InternalMessageInfo + +func (m *ReadRequest) GetLength() int32 { + if m != nil { + return m.Length + } + return 0 +} + +type ReadResponse struct { + Read []byte `protobuf:"bytes,1,opt,name=read,proto3" json:"read,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f534e48276761a43, []int{1} +} + +func (m *ReadResponse) 
XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ReadResponse.Unmarshal(m, b) +} +func (m *ReadResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ReadResponse.Marshal(b, m, deterministic) +} +func (m *ReadResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ReadResponse.Merge(m, src) +} +func (m *ReadResponse) XXX_Size() int { + return xxx_messageInfo_ReadResponse.Size(m) +} +func (m *ReadResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ReadResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ReadResponse proto.InternalMessageInfo + +func (m *ReadResponse) GetRead() []byte { + if m != nil { + return m.Read + } + return nil +} + +func (m *ReadResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ReadResponse) GetErrored() bool { + if m != nil { + return m.Errored + } + return false +} + +func init() { + proto.RegisterType((*ReadRequest)(nil), "proto.ReadRequest") + proto.RegisterType((*ReadResponse)(nil), "proto.ReadResponse") +} + +func init() { proto.RegisterFile("reader.proto", fileDescriptor_f534e48276761a43) } + +var fileDescriptor_f534e48276761a43 = []byte{ + // 162 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x54, 0x8e, 0x41, 0x0a, 0xc2, 0x30, + 0x10, 0x45, 0x89, 0xb6, 0x55, 0xc7, 0xae, 0x46, 0x91, 0xe0, 0xaa, 0x14, 0x84, 0xac, 0x2a, 0xe8, + 0xca, 0x6b, 0xcc, 0x0d, 0x2a, 0x19, 0x74, 0x21, 0x4d, 0x9d, 0xc4, 0xfb, 0x4b, 0x27, 0x0a, 0xba, + 0xca, 0x7f, 0x9f, 0x4f, 0xe6, 0x41, 0x2d, 0xdc, 0x7b, 0x96, 0x6e, 0x94, 0x90, 0x02, 0x96, 0xfa, + 0xb4, 0x07, 0x58, 0x13, 0xf7, 0x9e, 0xf8, 0xf9, 0xe2, 0x98, 0x70, 0x07, 0xd5, 0x83, 0x87, 0x5b, + 0xba, 0x5b, 0xd3, 0x18, 0x57, 0xd2, 0x87, 0x5a, 0x82, 0x3a, 0xcf, 0xe2, 0x18, 0x86, 0xc8, 0x88, + 0x50, 0x4c, 0xbf, 0xe9, 0xaa, 0x26, 0xcd, 0xb8, 0x85, 0x92, 0x45, 0x82, 0xd8, 0x59, 0x63, 0xdc, + 0x8a, 0x32, 0xa0, 0x85, 0x85, 0x06, 0xf6, 0x76, 0xde, 0x18, 0xb7, 0xa4, 0x2f, 0x9e, 
0x2e, 0x50, + 0x91, 0x1a, 0xe1, 0x11, 0x8a, 0x29, 0x21, 0x66, 0xb7, 0xee, 0xc7, 0x68, 0xbf, 0xf9, 0xeb, 0xf2, + 0xf9, 0x6b, 0xa5, 0xdd, 0xf9, 0x1d, 0x00, 0x00, 0xff, 0xff, 0x06, 0x11, 0x6f, 0x5d, 0xd3, 0x00, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// ReaderClient is the client API for Reader service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type ReaderClient interface { + Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) +} + +type readerClient struct { + cc grpc.ClientConnInterface +} + +func NewReaderClient(cc grpc.ClientConnInterface) ReaderClient { + return &readerClient{cc} +} + +func (c *readerClient) Read(ctx context.Context, in *ReadRequest, opts ...grpc.CallOption) (*ReadResponse, error) { + out := new(ReadResponse) + err := c.cc.Invoke(ctx, "/proto.Reader/Read", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// ReaderServer is the server API for Reader service. +type ReaderServer interface { + Read(context.Context, *ReadRequest) (*ReadResponse, error) +} + +// UnimplementedReaderServer can be embedded to have forward compatible implementations. 
+type UnimplementedReaderServer struct { +} + +func (*UnimplementedReaderServer) Read(ctx context.Context, req *ReadRequest) (*ReadResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Read not implemented") +} + +func RegisterReaderServer(s *grpc.Server, srv ReaderServer) { + s.RegisterService(&_Reader_serviceDesc, srv) +} + +func _Reader_Read_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ReadRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ReaderServer).Read(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Reader/Read", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ReaderServer).Read(ctx, req.(*ReadRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Reader_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Reader", + HandlerType: (*ReaderServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Read", + Handler: _Reader_Read_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "reader.proto", +} diff --git a/vms/rpcchainvm/ghttp/greader/proto/reader.proto b/vms/rpcchainvm/ghttp/greader/proto/reader.proto new file mode 100644 index 0000000..80142d3 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greader/proto/reader.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package proto; + +message ReadRequest { + int32 length = 1; +} + +message ReadResponse { + bytes read = 1; + string error = 2; + bool errored = 3; +} + +service Reader { + rpc Read(ReadRequest) returns (ReadResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/ghttp/greader/reader_client.go b/vms/rpcchainvm/ghttp/greader/reader_client.go new file mode 100644 index 0000000..0505d71 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greader/reader_client.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. 
All rights reserved. +// See the file LICENSE for licensing terms. + +package greader + +import ( + "context" + "errors" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greader/proto" +) + +// Client is an implementation of a messenger channel that talks over RPC. +type Client struct{ client proto.ReaderClient } + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client proto.ReaderClient) *Client { + return &Client{client: client} +} + +// Read ... +func (c *Client) Read(p []byte) (int, error) { + resp, err := c.client.Read(context.Background(), &proto.ReadRequest{ + Length: int32(len(p)), + }) + if err != nil { + return 0, err + } + + copy(p, resp.Read) + + if resp.Errored { + err = errors.New(resp.Error) + } + return len(resp.Read), err +} diff --git a/vms/rpcchainvm/ghttp/greader/reader_server.go b/vms/rpcchainvm/ghttp/greader/reader_server.go new file mode 100644 index 0000000..840bd66 --- /dev/null +++ b/vms/rpcchainvm/ghttp/greader/reader_server.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package greader + +import ( + "context" + "io" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greader/proto" +) + +// Server is a http.Handler that is managed over RPC. +type Server struct{ reader io.Reader } + +// NewServer returns a http.Handler instance manage remotely +func NewServer(reader io.Reader) *Server { + return &Server{reader: reader} +} + +// Read ... 
+func (s *Server) Read(ctx context.Context, req *proto.ReadRequest) (*proto.ReadResponse, error) { + buf := make([]byte, int(req.Length)) + n, err := s.reader.Read(buf) + resp := &proto.ReadResponse{ + Read: buf[:n], + } + if err != nil { + resp.Errored = true + resp.Error = err.Error() + } + return resp, nil +} diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/proto/writer.pb.go b/vms/rpcchainvm/ghttp/gresponsewriter/proto/writer.pb.go new file mode 100644 index 0000000..a392667 --- /dev/null +++ b/vms/rpcchainvm/ghttp/gresponsewriter/proto/writer.pb.go @@ -0,0 +1,648 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: writer.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Header struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Header) Reset() { *m = Header{} } +func (m *Header) String() string { return proto.CompactTextString(m) } +func (*Header) ProtoMessage() {} +func (*Header) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{0} +} + +func (m *Header) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Header.Unmarshal(m, b) +} +func (m *Header) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Header.Marshal(b, m, deterministic) +} +func (m *Header) XXX_Merge(src proto.Message) { + xxx_messageInfo_Header.Merge(m, src) +} +func (m *Header) XXX_Size() int { + return xxx_messageInfo_Header.Size(m) +} +func (m *Header) XXX_DiscardUnknown() { + xxx_messageInfo_Header.DiscardUnknown(m) +} + +var xxx_messageInfo_Header proto.InternalMessageInfo + +func (m *Header) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Header) GetValues() []string { + if m != nil { + return m.Values + } + return nil +} + +type WriteRequest struct { + Headers []*Header `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + Payload []byte `protobuf:"bytes,2,opt,name=payload,proto3" json:"payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{1} +} + +func (m 
*WriteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteRequest.Unmarshal(m, b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return xxx_messageInfo_WriteRequest.Size(m) +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetHeaders() []*Header { + if m != nil { + return m.Headers + } + return nil +} + +func (m *WriteRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +type WriteResponse struct { + Written int32 `protobuf:"varint,1,opt,name=written,proto3" json:"written,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (m *WriteResponse) String() string { return proto.CompactTextString(m) } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{2} +} + +func (m *WriteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteResponse.Unmarshal(m, b) +} +func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) +} +func (m *WriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponse.Merge(m, src) +} +func (m *WriteResponse) XXX_Size() int { + return xxx_messageInfo_WriteResponse.Size(m) +} +func (m *WriteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteResponse proto.InternalMessageInfo + +func (m 
*WriteResponse) GetWritten() int32 { + if m != nil { + return m.Written + } + return 0 +} + +type WriteHeaderRequest struct { + Headers []*Header `protobuf:"bytes,1,rep,name=headers,proto3" json:"headers,omitempty"` + StatusCode int32 `protobuf:"varint,2,opt,name=statusCode,proto3" json:"statusCode,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteHeaderRequest) Reset() { *m = WriteHeaderRequest{} } +func (m *WriteHeaderRequest) String() string { return proto.CompactTextString(m) } +func (*WriteHeaderRequest) ProtoMessage() {} +func (*WriteHeaderRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{3} +} + +func (m *WriteHeaderRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteHeaderRequest.Unmarshal(m, b) +} +func (m *WriteHeaderRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteHeaderRequest.Marshal(b, m, deterministic) +} +func (m *WriteHeaderRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteHeaderRequest.Merge(m, src) +} +func (m *WriteHeaderRequest) XXX_Size() int { + return xxx_messageInfo_WriteHeaderRequest.Size(m) +} +func (m *WriteHeaderRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteHeaderRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteHeaderRequest proto.InternalMessageInfo + +func (m *WriteHeaderRequest) GetHeaders() []*Header { + if m != nil { + return m.Headers + } + return nil +} + +func (m *WriteHeaderRequest) GetStatusCode() int32 { + if m != nil { + return m.StatusCode + } + return 0 +} + +type WriteHeaderResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteHeaderResponse) Reset() { *m = WriteHeaderResponse{} } +func (m *WriteHeaderResponse) String() string { return proto.CompactTextString(m) } +func (*WriteHeaderResponse) 
ProtoMessage() {} +func (*WriteHeaderResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{4} +} + +func (m *WriteHeaderResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteHeaderResponse.Unmarshal(m, b) +} +func (m *WriteHeaderResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteHeaderResponse.Marshal(b, m, deterministic) +} +func (m *WriteHeaderResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteHeaderResponse.Merge(m, src) +} +func (m *WriteHeaderResponse) XXX_Size() int { + return xxx_messageInfo_WriteHeaderResponse.Size(m) +} +func (m *WriteHeaderResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteHeaderResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteHeaderResponse proto.InternalMessageInfo + +type FlushRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *FlushRequest) Reset() { *m = FlushRequest{} } +func (m *FlushRequest) String() string { return proto.CompactTextString(m) } +func (*FlushRequest) ProtoMessage() {} +func (*FlushRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{5} +} + +func (m *FlushRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FlushRequest.Unmarshal(m, b) +} +func (m *FlushRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FlushRequest.Marshal(b, m, deterministic) +} +func (m *FlushRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushRequest.Merge(m, src) +} +func (m *FlushRequest) XXX_Size() int { + return xxx_messageInfo_FlushRequest.Size(m) +} +func (m *FlushRequest) XXX_DiscardUnknown() { + xxx_messageInfo_FlushRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_FlushRequest proto.InternalMessageInfo + +type FlushResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` +} + +func (m *FlushResponse) Reset() { *m = FlushResponse{} } +func (m *FlushResponse) String() string { return proto.CompactTextString(m) } +func (*FlushResponse) ProtoMessage() {} +func (*FlushResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{6} +} + +func (m *FlushResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_FlushResponse.Unmarshal(m, b) +} +func (m *FlushResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_FlushResponse.Marshal(b, m, deterministic) +} +func (m *FlushResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_FlushResponse.Merge(m, src) +} +func (m *FlushResponse) XXX_Size() int { + return xxx_messageInfo_FlushResponse.Size(m) +} +func (m *FlushResponse) XXX_DiscardUnknown() { + xxx_messageInfo_FlushResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_FlushResponse proto.InternalMessageInfo + +type HijackRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HijackRequest) Reset() { *m = HijackRequest{} } +func (m *HijackRequest) String() string { return proto.CompactTextString(m) } +func (*HijackRequest) ProtoMessage() {} +func (*HijackRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{7} +} + +func (m *HijackRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HijackRequest.Unmarshal(m, b) +} +func (m *HijackRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HijackRequest.Marshal(b, m, deterministic) +} +func (m *HijackRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HijackRequest.Merge(m, src) +} +func (m *HijackRequest) XXX_Size() int { + return xxx_messageInfo_HijackRequest.Size(m) +} +func (m *HijackRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HijackRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HijackRequest 
proto.InternalMessageInfo + +type HijackResponse struct { + ConnServer uint32 `protobuf:"varint,1,opt,name=connServer,proto3" json:"connServer,omitempty"` + LocalNetwork string `protobuf:"bytes,2,opt,name=localNetwork,proto3" json:"localNetwork,omitempty"` + LocalString string `protobuf:"bytes,3,opt,name=localString,proto3" json:"localString,omitempty"` + RemoteNetwork string `protobuf:"bytes,4,opt,name=remoteNetwork,proto3" json:"remoteNetwork,omitempty"` + RemoteString string `protobuf:"bytes,5,opt,name=remoteString,proto3" json:"remoteString,omitempty"` + ReaderServer uint32 `protobuf:"varint,6,opt,name=readerServer,proto3" json:"readerServer,omitempty"` + WriterServer uint32 `protobuf:"varint,7,opt,name=writerServer,proto3" json:"writerServer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HijackResponse) Reset() { *m = HijackResponse{} } +func (m *HijackResponse) String() string { return proto.CompactTextString(m) } +func (*HijackResponse) ProtoMessage() {} +func (*HijackResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{8} +} + +func (m *HijackResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HijackResponse.Unmarshal(m, b) +} +func (m *HijackResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HijackResponse.Marshal(b, m, deterministic) +} +func (m *HijackResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HijackResponse.Merge(m, src) +} +func (m *HijackResponse) XXX_Size() int { + return xxx_messageInfo_HijackResponse.Size(m) +} +func (m *HijackResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HijackResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_HijackResponse proto.InternalMessageInfo + +func (m *HijackResponse) GetConnServer() uint32 { + if m != nil { + return m.ConnServer + } + return 0 +} + +func (m *HijackResponse) GetLocalNetwork() string { + if m != 
nil { + return m.LocalNetwork + } + return "" +} + +func (m *HijackResponse) GetLocalString() string { + if m != nil { + return m.LocalString + } + return "" +} + +func (m *HijackResponse) GetRemoteNetwork() string { + if m != nil { + return m.RemoteNetwork + } + return "" +} + +func (m *HijackResponse) GetRemoteString() string { + if m != nil { + return m.RemoteString + } + return "" +} + +func (m *HijackResponse) GetReaderServer() uint32 { + if m != nil { + return m.ReaderServer + } + return 0 +} + +func (m *HijackResponse) GetWriterServer() uint32 { + if m != nil { + return m.WriterServer + } + return 0 +} + +func init() { + proto.RegisterType((*Header)(nil), "proto.Header") + proto.RegisterType((*WriteRequest)(nil), "proto.WriteRequest") + proto.RegisterType((*WriteResponse)(nil), "proto.WriteResponse") + proto.RegisterType((*WriteHeaderRequest)(nil), "proto.WriteHeaderRequest") + proto.RegisterType((*WriteHeaderResponse)(nil), "proto.WriteHeaderResponse") + proto.RegisterType((*FlushRequest)(nil), "proto.FlushRequest") + proto.RegisterType((*FlushResponse)(nil), "proto.FlushResponse") + proto.RegisterType((*HijackRequest)(nil), "proto.HijackRequest") + proto.RegisterType((*HijackResponse)(nil), "proto.HijackResponse") +} + +func init() { proto.RegisterFile("writer.proto", fileDescriptor_ea6fbe89c42e6759) } + +var fileDescriptor_ea6fbe89c42e6759 = []byte{ + // 408 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x92, 0x41, 0x6e, 0xe2, 0x30, + 0x14, 0x86, 0x15, 0x32, 0x09, 0xe2, 0x91, 0xc0, 0xc8, 0x0c, 0xa3, 0x4c, 0x16, 0x28, 0xb2, 0x46, + 0x9a, 0xcc, 0x86, 0x45, 0xaa, 0x9e, 0xa0, 0x55, 0xc5, 0xaa, 0x52, 0xcd, 0xa2, 0xab, 0x2e, 0x52, + 0xb0, 0x0a, 0x25, 0x8d, 0xa9, 0xed, 0x80, 0xb8, 0x41, 0x2f, 0xdb, 0x3b, 0x54, 0xb1, 0x1d, 0x64, + 0x57, 0xdd, 0x74, 0x15, 0xbf, 0xcf, 0x7f, 0xfe, 0xf7, 0xdb, 0xcf, 0x10, 0x1d, 0xf9, 0x56, 0x52, + 0x3e, 0xdf, 0x73, 0x26, 0x19, 0x0a, 0xd4, 0x07, 0x17, 0x10, 0x2e, 0x68, 0xb9, 
0xa6, 0x1c, 0xfd, + 0x04, 0x7f, 0x47, 0x4f, 0x89, 0x97, 0x79, 0xf9, 0x80, 0xb4, 0x4b, 0xf4, 0x1b, 0xc2, 0x43, 0x59, + 0x35, 0x54, 0x24, 0xbd, 0xcc, 0xcf, 0x07, 0xc4, 0x54, 0xf8, 0x0e, 0xa2, 0xfb, 0xd6, 0x8a, 0xd0, + 0xd7, 0x86, 0x0a, 0x89, 0xfe, 0x41, 0x7f, 0xa3, 0x3c, 0x44, 0xe2, 0x65, 0x7e, 0x3e, 0x2c, 0x62, + 0xdd, 0x63, 0xae, 0x9d, 0x49, 0xb7, 0x8b, 0x12, 0xe8, 0xef, 0xcb, 0x53, 0xc5, 0xca, 0x75, 0xd2, + 0xcb, 0xbc, 0x3c, 0x22, 0x5d, 0x89, 0xff, 0x43, 0x6c, 0x2c, 0xc5, 0x9e, 0xd5, 0x82, 0xb6, 0xd2, + 0x36, 0xae, 0xa4, 0xb5, 0x4a, 0x14, 0x90, 0xae, 0xc4, 0x0f, 0x80, 0x94, 0xd4, 0x98, 0x7f, 0x37, + 0xc3, 0x0c, 0x40, 0xc8, 0x52, 0x36, 0xe2, 0x8a, 0xad, 0xa9, 0x8a, 0x11, 0x10, 0x8b, 0xe0, 0x29, + 0x4c, 0x1c, 0x7b, 0x9d, 0x07, 0x8f, 0x20, 0xba, 0xa9, 0x1a, 0xb1, 0x31, 0xfd, 0xf0, 0x18, 0x62, + 0x53, 0x1b, 0xc1, 0x18, 0xe2, 0xc5, 0xf6, 0xb9, 0x5c, 0xed, 0x3a, 0xc5, 0x5b, 0x0f, 0x46, 0x1d, + 0x31, 0x87, 0x9a, 0x01, 0xac, 0x58, 0x5d, 0x2f, 0x29, 0x3f, 0x50, 0xae, 0xce, 0x15, 0x13, 0x8b, + 0x20, 0x0c, 0x51, 0xc5, 0x56, 0x65, 0x75, 0x4b, 0xe5, 0x91, 0xf1, 0x9d, 0x4a, 0x37, 0x20, 0x0e, + 0x43, 0x19, 0x0c, 0x55, 0xbd, 0x94, 0x7c, 0x5b, 0x3f, 0x25, 0xbe, 0x92, 0xd8, 0x08, 0xfd, 0x85, + 0x98, 0xd3, 0x17, 0x26, 0x69, 0x67, 0xf3, 0x43, 0x69, 0x5c, 0xd8, 0xf6, 0xd2, 0xc0, 0x18, 0x05, + 0xba, 0x97, 0xcd, 0xb4, 0xa6, 0xbd, 0x06, 0x93, 0x38, 0x54, 0x89, 0x1d, 0xd6, 0x6a, 0xf4, 0xbb, + 0x32, 0x9a, 0xbe, 0xd6, 0xd8, 0xac, 0x78, 0xf7, 0x20, 0x54, 0x97, 0xca, 0x51, 0x01, 0x81, 0x5a, + 0xa1, 0x89, 0x99, 0x8f, 0xfd, 0x92, 0xd2, 0x5f, 0x2e, 0x34, 0xd7, 0x76, 0x0d, 0x43, 0x6b, 0x24, + 0xe8, 0x8f, 0x2d, 0x72, 0x5e, 0x41, 0x9a, 0x7e, 0xb5, 0x65, 0x5c, 0x0a, 0x08, 0xd4, 0xc4, 0xce, + 0x9d, 0xed, 0x79, 0x9e, 0x3b, 0x3b, 0x43, 0x45, 0x97, 0x10, 0xea, 0x11, 0xa2, 0x6e, 0xdf, 0x99, + 0x71, 0x3a, 0xfd, 0x44, 0xf5, 0x6f, 0x8f, 0xa1, 0xa2, 0x17, 0x1f, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x6f, 0xbe, 0x4a, 0x4f, 0x72, 0x03, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not 
otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// WriterClient is the client API for Writer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type WriterClient interface { + Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) + WriteHeader(ctx context.Context, in *WriteHeaderRequest, opts ...grpc.CallOption) (*WriteHeaderResponse, error) + Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error) + Hijack(ctx context.Context, in *HijackRequest, opts ...grpc.CallOption) (*HijackResponse, error) +} + +type writerClient struct { + cc grpc.ClientConnInterface +} + +func NewWriterClient(cc grpc.ClientConnInterface) WriterClient { + return &writerClient{cc} +} + +func (c *writerClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { + out := new(WriteResponse) + err := c.cc.Invoke(ctx, "/proto.Writer/Write", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *writerClient) WriteHeader(ctx context.Context, in *WriteHeaderRequest, opts ...grpc.CallOption) (*WriteHeaderResponse, error) { + out := new(WriteHeaderResponse) + err := c.cc.Invoke(ctx, "/proto.Writer/WriteHeader", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *writerClient) Flush(ctx context.Context, in *FlushRequest, opts ...grpc.CallOption) (*FlushResponse, error) { + out := new(FlushResponse) + err := c.cc.Invoke(ctx, "/proto.Writer/Flush", in, out, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *writerClient) Hijack(ctx context.Context, in *HijackRequest, opts ...grpc.CallOption) (*HijackResponse, error) { + out := new(HijackResponse) + err := c.cc.Invoke(ctx, "/proto.Writer/Hijack", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// WriterServer is the server API for Writer service. +type WriterServer interface { + Write(context.Context, *WriteRequest) (*WriteResponse, error) + WriteHeader(context.Context, *WriteHeaderRequest) (*WriteHeaderResponse, error) + Flush(context.Context, *FlushRequest) (*FlushResponse, error) + Hijack(context.Context, *HijackRequest) (*HijackResponse, error) +} + +// UnimplementedWriterServer can be embedded to have forward compatible implementations. +type UnimplementedWriterServer struct { +} + +func (*UnimplementedWriterServer) Write(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Write not implemented") +} +func (*UnimplementedWriterServer) WriteHeader(ctx context.Context, req *WriteHeaderRequest) (*WriteHeaderResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method WriteHeader not implemented") +} +func (*UnimplementedWriterServer) Flush(ctx context.Context, req *FlushRequest) (*FlushResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Flush not implemented") +} +func (*UnimplementedWriterServer) Hijack(ctx context.Context, req *HijackRequest) (*HijackResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Hijack not implemented") +} + +func RegisterWriterServer(s *grpc.Server, srv WriterServer) { + s.RegisterService(&_Writer_serviceDesc, srv) +} + +func _Writer_Write_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteRequest) + if err := dec(in); err != nil { + return nil, err + } 
+ if interceptor == nil { + return srv.(WriterServer).Write(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Writer/Write", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WriterServer).Write(ctx, req.(*WriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Writer_WriteHeader_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteHeaderRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WriterServer).WriteHeader(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Writer/WriteHeader", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WriterServer).WriteHeader(ctx, req.(*WriteHeaderRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Writer_Flush_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(FlushRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WriterServer).Flush(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Writer/Flush", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WriterServer).Flush(ctx, req.(*FlushRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _Writer_Hijack_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HijackRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WriterServer).Hijack(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Writer/Hijack", + } + handler 
:= func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WriterServer).Hijack(ctx, req.(*HijackRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Writer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Writer", + HandlerType: (*WriterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Write", + Handler: _Writer_Write_Handler, + }, + { + MethodName: "WriteHeader", + Handler: _Writer_WriteHeader_Handler, + }, + { + MethodName: "Flush", + Handler: _Writer_Flush_Handler, + }, + { + MethodName: "Hijack", + Handler: _Writer_Hijack_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "writer.proto", +} diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/proto/writer.proto b/vms/rpcchainvm/ghttp/gresponsewriter/proto/writer.proto new file mode 100644 index 0000000..121c661 --- /dev/null +++ b/vms/rpcchainvm/ghttp/gresponsewriter/proto/writer.proto @@ -0,0 +1,46 @@ +syntax = "proto3"; +package proto; + +message Header { + string key = 1; + repeated string values = 2; +} + +message WriteRequest { + repeated Header headers = 1; + bytes payload = 2; +} + +message WriteResponse { + int32 written = 1; +} + +message WriteHeaderRequest { + repeated Header headers = 1; + int32 statusCode = 2; +} + +message WriteHeaderResponse {} + +message FlushRequest {} + +message FlushResponse {} + +message HijackRequest {} + +message HijackResponse { + uint32 connServer = 1; + string localNetwork = 2; + string localString = 3; + string remoteNetwork = 4; + string remoteString = 5; + uint32 readerServer = 6; + uint32 writerServer = 7; +} + +service Writer { + rpc Write(WriteRequest) returns (WriteResponse); + rpc WriteHeader(WriteHeaderRequest) returns (WriteHeaderResponse); + rpc Flush(FlushRequest) returns (FlushResponse); + rpc Hijack(HijackRequest) returns (HijackResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go 
b/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go new file mode 100644 index 0000000..bcd880b --- /dev/null +++ b/vms/rpcchainvm/ghttp/gresponsewriter/writer_client.go @@ -0,0 +1,134 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gresponsewriter + +import ( + "bufio" + "context" + "net" + "net/http" + + "github.com/hashicorp/go-plugin" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gconn" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greader" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gwriter" + + connproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gconn/proto" + readerproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greader/proto" + responsewriterproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gresponsewriter/proto" + writerproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gwriter/proto" +) + +// Client is an implementation of a messenger channel that talks over RPC. +type Client struct { + client responsewriterproto.WriterClient + header http.Header + broker *plugin.GRPCBroker +} + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client responsewriterproto.WriterClient, broker *plugin.GRPCBroker) *Client { + return &Client{ + client: client, + header: make(http.Header), + broker: broker, + } +} + +// Header ... +func (c *Client) Header() http.Header { return c.header } + +// Write ... +func (c *Client) Write(payload []byte) (int, error) { + req := &responsewriterproto.WriteRequest{ + Headers: make([]*responsewriterproto.Header, 0, len(c.header)), + Payload: payload, + } + for key, values := range c.header { + req.Headers = append(req.Headers, &responsewriterproto.Header{ + Key: key, + Values: values, + }) + } + resp, err := c.client.Write(context.Background(), req) + if err != nil { + return 0, err + } + return int(resp.Written), nil +} + +// WriteHeader ... 
+func (c *Client) WriteHeader(statusCode int) { + req := &responsewriterproto.WriteHeaderRequest{ + Headers: make([]*responsewriterproto.Header, 0, len(c.header)), + StatusCode: int32(statusCode), + } + for key, values := range c.header { + req.Headers = append(req.Headers, &responsewriterproto.Header{ + Key: key, + Values: values, + }) + } + // TODO: How should we handle an error here? + c.client.WriteHeader(context.Background(), req) +} + +// Flush ... +func (c *Client) Flush() { + // TODO: How should we handle an error here? + c.client.Flush(context.Background(), &responsewriterproto.FlushRequest{}) +} + +type addr struct { + network string + str string +} + +func (a *addr) Network() string { return a.network } +func (a *addr) String() string { return a.str } + +// Hijack ... +func (c *Client) Hijack() (net.Conn, *bufio.ReadWriter, error) { + resp, err := c.client.Hijack(context.Background(), &responsewriterproto.HijackRequest{}) + if err != nil { + return nil, nil, err + } + + connConn, err := c.broker.Dial(resp.ConnServer) + if err != nil { + return nil, nil, err + } + + readerConn, err := c.broker.Dial(resp.ReaderServer) + if err != nil { + connConn.Close() + return nil, nil, err + } + + writerConn, err := c.broker.Dial(resp.WriterServer) + if err != nil { + connConn.Close() + readerConn.Close() + return nil, nil, err + } + + conn := gconn.NewClient(connproto.NewConnClient(connConn), &addr{ + network: resp.LocalNetwork, + str: resp.LocalString, + }, &addr{ + network: resp.RemoteNetwork, + str: resp.RemoteString, + }, connConn, readerConn, writerConn) + + reader := greader.NewClient(readerproto.NewReaderClient(readerConn)) + writer := gwriter.NewClient(writerproto.NewWriterClient(writerConn)) + + readWriter := bufio.NewReadWriter( + bufio.NewReader(reader), + bufio.NewWriter(writer), + ) + + return conn, readWriter, nil +} diff --git a/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go b/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go new file mode 
100644 index 0000000..296fad2 --- /dev/null +++ b/vms/rpcchainvm/ghttp/gresponsewriter/writer_server.go @@ -0,0 +1,123 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package gresponsewriter + +import ( + "context" + "errors" + "net/http" + + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gconn" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greader" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gwriter" + + "github.com/hashicorp/go-plugin" + "google.golang.org/grpc" + + connproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gconn/proto" + readerproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greader/proto" + responsewriterproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gresponsewriter/proto" + writerproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gwriter/proto" +) + +// Server is a http.Handler that is managed over RPC. +type Server struct { + writer http.ResponseWriter + broker *plugin.GRPCBroker +} + +// NewServer returns a http.Handler instance manage remotely +func NewServer(writer http.ResponseWriter, broker *plugin.GRPCBroker) *Server { + return &Server{ + writer: writer, + broker: broker, + } +} + +// Write ... +func (s *Server) Write(ctx context.Context, req *responsewriterproto.WriteRequest) (*responsewriterproto.WriteResponse, error) { + headers := s.writer.Header() + for key := range headers { + delete(headers, key) + } + for _, header := range req.Headers { + headers[header.Key] = header.Values + } + + n, err := s.writer.Write(req.Payload) + if err != nil { + return nil, err + } + return &responsewriterproto.WriteResponse{ + Written: int32(n), + }, nil +} + +// WriteHeader ... 
+func (s *Server) WriteHeader(ctx context.Context, req *responsewriterproto.WriteHeaderRequest) (*responsewriterproto.WriteHeaderResponse, error) { + headers := s.writer.Header() + for key := range headers { + delete(headers, key) + } + for _, header := range req.Headers { + headers[header.Key] = header.Values + } + s.writer.WriteHeader(int(req.StatusCode)) + return &responsewriterproto.WriteHeaderResponse{}, nil +} + +// Flush ... +func (s *Server) Flush(ctx context.Context, req *responsewriterproto.FlushRequest) (*responsewriterproto.FlushResponse, error) { + flusher, ok := s.writer.(http.Flusher) + if !ok { + return nil, errors.New("response writer doesn't support flushing") + } + flusher.Flush() + return &responsewriterproto.FlushResponse{}, nil +} + +// Hijack ... +func (s *Server) Hijack(ctx context.Context, req *responsewriterproto.HijackRequest) (*responsewriterproto.HijackResponse, error) { + hijacker, ok := s.writer.(http.Hijacker) + if !ok { + return nil, errors.New("response writer doesn't support hijacking") + } + conn, readWriter, err := hijacker.Hijack() + if err != nil { + return nil, err + } + + connID := s.broker.NextId() + readerID := s.broker.NextId() + writerID := s.broker.NextId() + + go s.broker.AcceptAndServe(connID, func(opts []grpc.ServerOption) *grpc.Server { + connServer := grpc.NewServer(opts...) + connproto.RegisterConnServer(connServer, gconn.NewServer(conn)) + return connServer + }) + go s.broker.AcceptAndServe(readerID, func(opts []grpc.ServerOption) *grpc.Server { + readerServer := grpc.NewServer(opts...) + readerproto.RegisterReaderServer(readerServer, greader.NewServer(readWriter)) + return readerServer + }) + go s.broker.AcceptAndServe(writerID, func(opts []grpc.ServerOption) *grpc.Server { + writerServer := grpc.NewServer(opts...) 
+ writerproto.RegisterWriterServer(writerServer, gwriter.NewServer(readWriter)) + return writerServer + }) + + local := conn.LocalAddr() + remote := conn.RemoteAddr() + + return &responsewriterproto.HijackResponse{ + ConnServer: connID, + LocalNetwork: local.Network(), + LocalString: local.String(), + RemoteNetwork: remote.Network(), + RemoteString: remote.String(), + ReaderServer: readerID, + WriterServer: writerID, + }, nil +} diff --git a/vms/rpcchainvm/ghttp/gwriter/proto/writer.pb.go b/vms/rpcchainvm/ghttp/gwriter/proto/writer.pb.go new file mode 100644 index 0000000..fe0e7bf --- /dev/null +++ b/vms/rpcchainvm/ghttp/gwriter/proto/writer.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: writer.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type WriteRequest struct { + Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteRequest) Reset() { *m = WriteRequest{} } +func (m *WriteRequest) String() string { return proto.CompactTextString(m) } +func (*WriteRequest) ProtoMessage() {} +func (*WriteRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, []int{0} +} + +func (m *WriteRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteRequest.Unmarshal(m, b) +} +func (m *WriteRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteRequest.Marshal(b, m, deterministic) +} +func (m *WriteRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteRequest.Merge(m, src) +} +func (m *WriteRequest) XXX_Size() int { + return xxx_messageInfo_WriteRequest.Size(m) +} +func (m *WriteRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WriteRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteRequest proto.InternalMessageInfo + +func (m *WriteRequest) GetPayload() []byte { + if m != nil { + return m.Payload + } + return nil +} + +type WriteResponse struct { + Written int32 `protobuf:"varint,1,opt,name=written,proto3" json:"written,omitempty"` + Error string `protobuf:"bytes,2,opt,name=error,proto3" json:"error,omitempty"` + Errored bool `protobuf:"varint,3,opt,name=errored,proto3" json:"errored,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WriteResponse) Reset() { *m = WriteResponse{} } +func (m *WriteResponse) String() string { return proto.CompactTextString(m) } +func (*WriteResponse) ProtoMessage() {} +func (*WriteResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_ea6fbe89c42e6759, 
[]int{1} +} + +func (m *WriteResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_WriteResponse.Unmarshal(m, b) +} +func (m *WriteResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_WriteResponse.Marshal(b, m, deterministic) +} +func (m *WriteResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WriteResponse.Merge(m, src) +} +func (m *WriteResponse) XXX_Size() int { + return xxx_messageInfo_WriteResponse.Size(m) +} +func (m *WriteResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WriteResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WriteResponse proto.InternalMessageInfo + +func (m *WriteResponse) GetWritten() int32 { + if m != nil { + return m.Written + } + return 0 +} + +func (m *WriteResponse) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *WriteResponse) GetErrored() bool { + if m != nil { + return m.Errored + } + return false +} + +func init() { + proto.RegisterType((*WriteRequest)(nil), "proto.WriteRequest") + proto.RegisterType((*WriteResponse)(nil), "proto.WriteResponse") +} + +func init() { proto.RegisterFile("writer.proto", fileDescriptor_ea6fbe89c42e6759) } + +var fileDescriptor_ea6fbe89c42e6759 = []byte{ + // 164 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x29, 0x2f, 0xca, 0x2c, + 0x49, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x53, 0x4a, 0x1a, 0x5c, 0x3c, + 0xe1, 0x20, 0xe1, 0xa0, 0xd4, 0xc2, 0xd2, 0xd4, 0xe2, 0x12, 0x21, 0x09, 0x2e, 0xf6, 0x82, 0xc4, + 0xca, 0x9c, 0xfc, 0xc4, 0x14, 0x09, 0x46, 0x05, 0x46, 0x0d, 0x9e, 0x20, 0x18, 0x57, 0x29, 0x92, + 0x8b, 0x17, 0xaa, 0xb2, 0xb8, 0x20, 0x3f, 0xaf, 0x38, 0x15, 0xa4, 0x14, 0x64, 0x62, 0x49, 0x6a, + 0x1e, 0x58, 0x29, 0x6b, 0x10, 0x8c, 0x2b, 0x24, 0xc2, 0xc5, 0x9a, 0x5a, 0x54, 0x94, 0x5f, 0x24, + 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe1, 0x80, 0xd4, 0x83, 0x19, 0xa9, 0x29, 0x12, 0xcc, + 0x0a, 0x8c, 0x1a, 0x1c, 
0x41, 0x30, 0xae, 0x91, 0x0d, 0x17, 0x1b, 0xd8, 0xe8, 0x22, 0x21, 0x23, + 0x2e, 0x56, 0x30, 0x4b, 0x48, 0x18, 0xe2, 0x4c, 0x3d, 0x64, 0xc7, 0x49, 0x89, 0xa0, 0x0a, 0x42, + 0xdc, 0x91, 0xc4, 0x06, 0x16, 0x34, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x3e, 0x3f, 0xbb, 0x2f, + 0xe0, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// WriterClient is the client API for Writer service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type WriterClient interface { + Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) +} + +type writerClient struct { + cc grpc.ClientConnInterface +} + +func NewWriterClient(cc grpc.ClientConnInterface) WriterClient { + return &writerClient{cc} +} + +func (c *writerClient) Write(ctx context.Context, in *WriteRequest, opts ...grpc.CallOption) (*WriteResponse, error) { + out := new(WriteResponse) + err := c.cc.Invoke(ctx, "/proto.Writer/Write", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// WriterServer is the server API for Writer service. +type WriterServer interface { + Write(context.Context, *WriteRequest) (*WriteResponse, error) +} + +// UnimplementedWriterServer can be embedded to have forward compatible implementations. 
+type UnimplementedWriterServer struct { +} + +func (*UnimplementedWriterServer) Write(ctx context.Context, req *WriteRequest) (*WriteResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Write not implemented") +} + +func RegisterWriterServer(s *grpc.Server, srv WriterServer) { + s.RegisterService(&_Writer_serviceDesc, srv) +} + +func _Writer_Write_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WriteRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WriterServer).Write(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Writer/Write", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WriterServer).Write(ctx, req.(*WriteRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Writer_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Writer", + HandlerType: (*WriterServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Write", + Handler: _Writer_Write_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "writer.proto", +} diff --git a/vms/rpcchainvm/ghttp/gwriter/proto/writer.proto b/vms/rpcchainvm/ghttp/gwriter/proto/writer.proto new file mode 100644 index 0000000..0baa7d6 --- /dev/null +++ b/vms/rpcchainvm/ghttp/gwriter/proto/writer.proto @@ -0,0 +1,16 @@ +syntax = "proto3"; +package proto; + +message WriteRequest { + bytes payload = 1; +} + +message WriteResponse { + int32 written = 1; + string error = 2; + bool errored = 3; +} + +service Writer { + rpc Write(WriteRequest) returns (WriteResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/ghttp/gwriter/writer_client.go b/vms/rpcchainvm/ghttp/gwriter/writer_client.go new file mode 100644 index 0000000..f229a3d --- /dev/null +++ b/vms/rpcchainvm/ghttp/gwriter/writer_client.go @@ -0,0 +1,34 @@ +// (c) 2019-2020, 
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package gwriter

import (
	"context"
	"errors"

	"github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gwriter/proto"
)

// Client is an io.Writer whose writes are forwarded to a remote writer over
// RPC. (The original comment described this as a "messenger channel", which
// was a copy/paste error from another package.)
type Client struct{ client proto.WriterClient }

// NewClient returns an io.Writer implementation backed by the remote writer
// service reachable through client.
func NewClient(client proto.WriterClient) *Client {
	return &Client{client: client}
}

// Write implements io.Writer. It ships p to the remote side and reports the
// number of bytes the remote writer consumed.
//
// Go errors cannot cross the RPC boundary, so the remote write error travels
// in the response (Errored/Error fields) and is reconstructed here as a new
// error value; errors.Is/As matching against the original error is therefore
// not possible.
func (c *Client) Write(p []byte) (int, error) {
	resp, err := c.client.Write(context.Background(), &proto.WriteRequest{
		Payload: p,
	})
	if err != nil {
		// Transport-level failure: nothing was (verifiably) written.
		return 0, err
	}

	if resp.Errored {
		err = errors.New(resp.Error)
	}
	return int(resp.Written), err
}

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package gwriter

import (
	"context"
	"io"

	"github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gwriter/proto"
)

// Server exposes a local io.Writer to a remote Client over RPC. (The
// original comment described this as an "http.Handler", which was a
// copy/paste error.)
type Server struct{ writer io.Writer }

// NewServer returns a Server that writes into the given io.Writer on behalf
// of remote callers.
func NewServer(writer io.Writer) *Server {
	return &Server{writer: writer}
}

// Write ...
// Write performs the local write and marshals the result — including any
// write error, which cannot cross the RPC boundary as a Go error — into the
// response message. The RPC itself always succeeds (nil error) when the
// local write ran, even if that write failed.
func (s *Server) Write(ctx context.Context, req *proto.WriteRequest) (*proto.WriteResponse, error) {
	n, err := s.writer.Write(req.Payload)
	resp := &proto.WriteResponse{
		Written: int32(n),
	}
	if err != nil {
		resp.Errored = true
		resp.Error = err.Error()
	}
	return resp, nil
}

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package ghttp

import (
	"net/http"

	"google.golang.org/grpc"

	"github.com/hashicorp/go-plugin"

	"github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greadcloser"
	"github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gresponsewriter"

	readcloserproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greadcloser/proto"
	responsewriterproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gresponsewriter/proto"
	httpproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/proto"
)

// Client is an http.Handler whose requests are served by a remote handler
// over RPC.
type Client struct {
	client httpproto.HTTPClient
	broker *plugin.GRPCBroker
}

// NewClient returns an http.Handler backed by the remote HTTP service
// reachable through client, using broker to exchange per-request streams.
func NewClient(client httpproto.HTTPClient, broker *plugin.GRPCBroker) *Client {
	return &Client{
		client: client,
		broker: broker,
	}
}

// ServeHTTP implements http.Handler. It exports the request body (as a
// reader service) and the response writer (as a writer service) over the
// broker, marshals the request metadata into a proto message, and asks the
// remote side to handle the request.
//
// NOTE(review): reader/writer are assigned inside the AcceptAndServe
// callbacks, which run on separate goroutines; if the remote side never
// dials, the Stop() calls at the bottom can observe a nil server — verify
// the broker guarantees the callback ran before Handle returns.
func (c *Client) ServeHTTP(w http.ResponseWriter, r *http.Request) {
	var reader *grpc.Server
	var writer *grpc.Server

	readerID := c.broker.NextId()
	go c.broker.AcceptAndServe(readerID, func(opts []grpc.ServerOption) *grpc.Server {
		reader = grpc.NewServer(opts...)
		readcloserproto.RegisterReaderServer(reader, greadcloser.NewServer(r.Body))

		return reader
	})
	writerID := c.broker.NextId()
	go c.broker.AcceptAndServe(writerID, func(opts []grpc.ServerOption) *grpc.Server {
		writer = grpc.NewServer(opts...)
		responsewriterproto.RegisterWriterServer(writer, gresponsewriter.NewServer(w, c.broker))

		return writer
	})

	// Body and ResponseWriter carry broker stream IDs, not data; the remote
	// side dials them back.
	req := &httpproto.HTTPRequest{
		ResponseWriter: writerID,
		Request: &httpproto.Request{
			Method:           r.Method,
			Proto:            r.Proto,
			ProtoMajor:       int32(r.ProtoMajor),
			ProtoMinor:       int32(r.ProtoMinor),
			Body:             readerID,
			ContentLength:    r.ContentLength,
			TransferEncoding: r.TransferEncoding,
			Host:             r.Host,
			RemoteAddr:       r.RemoteAddr,
			RequestURI:       r.RequestURI,
		},
	}
	// NOTE(review): r.Trailer is never marshalled even though the proto has
	// a TrailerKeys field — trailers are silently dropped end to end.
	req.Request.Header = make([]*httpproto.Element, 0, len(r.Header))
	for key, values := range r.Header {
		req.Request.Header = append(req.Request.Header, &httpproto.Element{
			Key:    key,
			Values: values,
		})
	}

	req.Request.Form = make([]*httpproto.Element, 0, len(r.Form))
	for key, values := range r.Form {
		req.Request.Form = append(req.Request.Form, &httpproto.Element{
			Key:    key,
			Values: values,
		})
	}

	req.Request.PostForm = make([]*httpproto.Element, 0, len(r.PostForm))
	for key, values := range r.PostForm {
		req.Request.PostForm = append(req.Request.PostForm, &httpproto.Element{
			Key:    key,
			Values: values,
		})
	}

	if r.URL != nil {
		req.Request.Url = &httpproto.URL{
			Scheme:     r.URL.Scheme,
			Opaque:     r.URL.Opaque,
			Host:       r.URL.Host,
			Path:       r.URL.Path,
			RawPath:    r.URL.RawPath,
			ForceQuery: r.URL.ForceQuery,
			RawQuery:   r.URL.RawQuery,
			Fragment:   r.URL.Fragment,
		}

		if r.URL.User != nil {
			req.Request.Url.User = &httpproto.Userinfo{
				Username: r.URL.User.Username(),
			}
			pwd, set := r.URL.User.Password()
			req.Request.Url.User.Password = pwd
			req.Request.Url.User.PasswordSet = set
		}
	}

	if r.TLS != nil {
		// Only the raw (DER) certificate bytes cross the wire; the server
		// re-parses them.
		req.Request.Tls = &httpproto.ConnectionState{
			Version:                     uint32(r.TLS.Version),
			HandshakeComplete:           r.TLS.HandshakeComplete,
			DidResume:                   r.TLS.DidResume,
			CipherSuite:                 uint32(r.TLS.CipherSuite),
			NegotiatedProtocol:          r.TLS.NegotiatedProtocol,
			NegotiatedProtocolIsMutual:  r.TLS.NegotiatedProtocolIsMutual,
			ServerName:                  r.TLS.ServerName,
			SignedCertificateTimestamps: r.TLS.SignedCertificateTimestamps,
			OcspResponse:                r.TLS.OCSPResponse,
			TlsUnique:                   r.TLS.TLSUnique,
		}

		req.Request.Tls.PeerCertificates = &httpproto.Certificates{
			Cert: make([][]byte, len(r.TLS.PeerCertificates)),
		}
		for i, cert := range r.TLS.PeerCertificates {
			req.Request.Tls.PeerCertificates.Cert[i] = cert.Raw
		}

		req.Request.Tls.VerifiedChains = make([]*httpproto.Certificates, len(r.TLS.VerifiedChains))
		for i, chain := range r.TLS.VerifiedChains {
			req.Request.Tls.VerifiedChains[i] = &httpproto.Certificates{
				Cert: make([][]byte, len(chain)),
			}
			for j, cert := range chain {
				req.Request.Tls.VerifiedChains[i].Cert[j] = cert.Raw
			}
		}
	}

	// TODO(review): the error from Handle is silently discarded, so a remote
	// failure produces an empty 200 response with no logging.
	c.client.Handle(r.Context(), req)

	reader.Stop()
	writer.Stop()
}

// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package ghttp

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"net/http"
	"net/url"

	"github.com/hashicorp/go-plugin"

	"github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greadcloser"
	"github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gresponsewriter"

	readcloserproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/greadcloser/proto"
	responsewriterproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/gresponsewriter/proto"
	httpproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/proto"
)

// Server is a http.Handler that is managed over RPC.
// Server wraps a local http.Handler so a remote ghttp.Client can drive it
// over RPC.
type Server struct {
	handler http.Handler
	broker  *plugin.GRPCBroker
}

// NewServer returns a Server that serves requests arriving over RPC with the
// given handler, dialing response/body streams through broker.
func NewServer(handler http.Handler, broker *plugin.GRPCBroker) *Server {
	return &Server{
		handler: handler,
		broker:  broker,
	}
}

// Handle reconstructs an *http.Request from the proto message — dialing back
// the brokered body reader and response writer streams — and dispatches it
// to the wrapped handler. The returned HTTPResponse is always empty; all
// response data flows through the brokered writer stream instead.
func (s *Server) Handle(ctx context.Context, req *httpproto.HTTPRequest) (*httpproto.HTTPResponse, error) {
	writerConn, err := s.broker.Dial(req.ResponseWriter)
	if err != nil {
		return nil, err
	}
	defer writerConn.Close()

	readerConn, err := s.broker.Dial(req.Request.Body)
	if err != nil {
		return nil, err
	}
	defer readerConn.Close()

	writer := gresponsewriter.NewClient(responsewriterproto.NewWriterClient(writerConn), s.broker)
	reader := greadcloser.NewClient(readcloserproto.NewReaderClient(readerConn))

	// create the request with the current context
	// NOTE(review): RequestURI is passed as the URL here and must therefore
	// parse cleanly; the parsed URL is then overwritten below when
	// req.Request.Url is set — confirm behavior for requests with no URL.
	request, err := http.NewRequestWithContext(
		ctx,
		req.Request.Method,
		req.Request.RequestURI,
		reader,
	)
	if err != nil {
		return nil, err
	}

	if req.Request.Url != nil {
		request.URL = &url.URL{
			Scheme:     req.Request.Url.Scheme,
			Opaque:     req.Request.Url.Opaque,
			Host:       req.Request.Url.Host,
			Path:       req.Request.Url.Path,
			RawPath:    req.Request.Url.RawPath,
			ForceQuery: req.Request.Url.ForceQuery,
			RawQuery:   req.Request.Url.RawQuery,
			Fragment:   req.Request.Url.Fragment,
		}
		if req.Request.Url.User != nil {
			if req.Request.Url.User.PasswordSet {
				request.URL.User = url.UserPassword(req.Request.Url.User.Username, req.Request.Url.User.Password)
			} else {
				request.URL.User = url.User(req.Request.Url.User.Username)
			}
		}
	}

	request.Proto = req.Request.Proto
	request.ProtoMajor = int(req.Request.ProtoMajor)
	request.ProtoMinor = int(req.Request.ProtoMinor)
	request.Header = make(http.Header, len(req.Request.Header))
	for _, elem := range req.Request.Header {
		request.Header[elem.Key] = elem.Values
	}
	request.ContentLength = req.Request.ContentLength
	request.TransferEncoding = req.Request.TransferEncoding
	request.Host = req.Request.Host
	request.Form = make(url.Values, len(req.Request.Form))
	for _, elem := range req.Request.Form {
		request.Form[elem.Key] = elem.Values
	}
	request.PostForm = make(url.Values, len(req.Request.PostForm))
	for _, elem := range req.Request.PostForm {
		request.PostForm[elem.Key] = elem.Values
	}
	// NOTE(review): the proto carries TrailerKeys, but they are never copied
	// into this (empty) Trailer map — trailers are dropped here.
	request.Trailer = make(http.Header)
	request.RemoteAddr = req.Request.RemoteAddr
	request.RequestURI = req.Request.RequestURI

	if req.Request.Tls != nil {
		// Rebuild the TLS connection state; certificates arrive as raw DER
		// bytes and are re-parsed into x509 structures.
		request.TLS = &tls.ConnectionState{
			Version:                     uint16(req.Request.Tls.Version),
			HandshakeComplete:           req.Request.Tls.HandshakeComplete,
			DidResume:                   req.Request.Tls.DidResume,
			CipherSuite:                 uint16(req.Request.Tls.CipherSuite),
			NegotiatedProtocol:          req.Request.Tls.NegotiatedProtocol,
			NegotiatedProtocolIsMutual:  req.Request.Tls.NegotiatedProtocolIsMutual,
			ServerName:                  req.Request.Tls.ServerName,
			SignedCertificateTimestamps: req.Request.Tls.SignedCertificateTimestamps,
			OCSPResponse:                req.Request.Tls.OcspResponse,
			TLSUnique:                   req.Request.Tls.TlsUnique,
		}

		request.TLS.PeerCertificates = make([]*x509.Certificate, len(req.Request.Tls.PeerCertificates.Cert))
		for i, certBytes := range req.Request.Tls.PeerCertificates.Cert {
			cert, err := x509.ParseCertificate(certBytes)
			if err != nil {
				return nil, err
			}
			request.TLS.PeerCertificates[i] = cert
		}

		request.TLS.VerifiedChains = make([][]*x509.Certificate, len(req.Request.Tls.VerifiedChains))
		for i, chain := range req.Request.Tls.VerifiedChains {
			request.TLS.VerifiedChains[i] = make([]*x509.Certificate, len(chain.Cert))
			for j, certBytes := range chain.Cert {
				cert, err := x509.ParseCertificate(certBytes)
				if err != nil {
					return nil, err
				}
				request.TLS.VerifiedChains[i][j] = cert
			}
		}
	}

	s.handler.ServeHTTP(writer, request)

	// return the response
	return &httpproto.HTTPResponse{}, nil
}
a/vms/rpcchainvm/ghttp/proto/http.pb.go b/vms/rpcchainvm/ghttp/proto/http.pb.go new file mode 100644 index 0000000..013c3b4 --- /dev/null +++ b/vms/rpcchainvm/ghttp/proto/http.pb.go @@ -0,0 +1,781 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: http.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type Userinfo struct { + Username string `protobuf:"bytes,1,opt,name=username,proto3" json:"username,omitempty"` + Password string `protobuf:"bytes,2,opt,name=password,proto3" json:"password,omitempty"` + PasswordSet bool `protobuf:"varint,3,opt,name=passwordSet,proto3" json:"passwordSet,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Userinfo) Reset() { *m = Userinfo{} } +func (m *Userinfo) String() string { return proto.CompactTextString(m) } +func (*Userinfo) ProtoMessage() {} +func (*Userinfo) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{0} +} + +func (m *Userinfo) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Userinfo.Unmarshal(m, b) +} +func (m *Userinfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Userinfo.Marshal(b, m, deterministic) +} +func (m *Userinfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_Userinfo.Merge(m, src) +} 
+func (m *Userinfo) XXX_Size() int { + return xxx_messageInfo_Userinfo.Size(m) +} +func (m *Userinfo) XXX_DiscardUnknown() { + xxx_messageInfo_Userinfo.DiscardUnknown(m) +} + +var xxx_messageInfo_Userinfo proto.InternalMessageInfo + +func (m *Userinfo) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *Userinfo) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +func (m *Userinfo) GetPasswordSet() bool { + if m != nil { + return m.PasswordSet + } + return false +} + +type URL struct { + Scheme string `protobuf:"bytes,1,opt,name=scheme,proto3" json:"scheme,omitempty"` + Opaque string `protobuf:"bytes,2,opt,name=opaque,proto3" json:"opaque,omitempty"` + User *Userinfo `protobuf:"bytes,3,opt,name=user,proto3" json:"user,omitempty"` + Host string `protobuf:"bytes,4,opt,name=host,proto3" json:"host,omitempty"` + Path string `protobuf:"bytes,5,opt,name=path,proto3" json:"path,omitempty"` + RawPath string `protobuf:"bytes,6,opt,name=rawPath,proto3" json:"rawPath,omitempty"` + ForceQuery bool `protobuf:"varint,7,opt,name=forceQuery,proto3" json:"forceQuery,omitempty"` + RawQuery string `protobuf:"bytes,8,opt,name=rawQuery,proto3" json:"rawQuery,omitempty"` + Fragment string `protobuf:"bytes,9,opt,name=fragment,proto3" json:"fragment,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *URL) Reset() { *m = URL{} } +func (m *URL) String() string { return proto.CompactTextString(m) } +func (*URL) ProtoMessage() {} +func (*URL) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{1} +} + +func (m *URL) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_URL.Unmarshal(m, b) +} +func (m *URL) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_URL.Marshal(b, m, deterministic) +} +func (m *URL) XXX_Merge(src proto.Message) { + xxx_messageInfo_URL.Merge(m, src) +} 
+func (m *URL) XXX_Size() int { + return xxx_messageInfo_URL.Size(m) +} +func (m *URL) XXX_DiscardUnknown() { + xxx_messageInfo_URL.DiscardUnknown(m) +} + +var xxx_messageInfo_URL proto.InternalMessageInfo + +func (m *URL) GetScheme() string { + if m != nil { + return m.Scheme + } + return "" +} + +func (m *URL) GetOpaque() string { + if m != nil { + return m.Opaque + } + return "" +} + +func (m *URL) GetUser() *Userinfo { + if m != nil { + return m.User + } + return nil +} + +func (m *URL) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *URL) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *URL) GetRawPath() string { + if m != nil { + return m.RawPath + } + return "" +} + +func (m *URL) GetForceQuery() bool { + if m != nil { + return m.ForceQuery + } + return false +} + +func (m *URL) GetRawQuery() string { + if m != nil { + return m.RawQuery + } + return "" +} + +func (m *URL) GetFragment() string { + if m != nil { + return m.Fragment + } + return "" +} + +type Element struct { + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Values []string `protobuf:"bytes,2,rep,name=values,proto3" json:"values,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Element) Reset() { *m = Element{} } +func (m *Element) String() string { return proto.CompactTextString(m) } +func (*Element) ProtoMessage() {} +func (*Element) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{2} +} + +func (m *Element) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Element.Unmarshal(m, b) +} +func (m *Element) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Element.Marshal(b, m, deterministic) +} +func (m *Element) XXX_Merge(src proto.Message) { + xxx_messageInfo_Element.Merge(m, src) +} +func (m *Element) XXX_Size() int { + return 
xxx_messageInfo_Element.Size(m) +} +func (m *Element) XXX_DiscardUnknown() { + xxx_messageInfo_Element.DiscardUnknown(m) +} + +var xxx_messageInfo_Element proto.InternalMessageInfo + +func (m *Element) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +func (m *Element) GetValues() []string { + if m != nil { + return m.Values + } + return nil +} + +type Certificates struct { + Cert [][]byte `protobuf:"bytes,1,rep,name=cert,proto3" json:"cert,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Certificates) Reset() { *m = Certificates{} } +func (m *Certificates) String() string { return proto.CompactTextString(m) } +func (*Certificates) ProtoMessage() {} +func (*Certificates) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{3} +} + +func (m *Certificates) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Certificates.Unmarshal(m, b) +} +func (m *Certificates) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Certificates.Marshal(b, m, deterministic) +} +func (m *Certificates) XXX_Merge(src proto.Message) { + xxx_messageInfo_Certificates.Merge(m, src) +} +func (m *Certificates) XXX_Size() int { + return xxx_messageInfo_Certificates.Size(m) +} +func (m *Certificates) XXX_DiscardUnknown() { + xxx_messageInfo_Certificates.DiscardUnknown(m) +} + +var xxx_messageInfo_Certificates proto.InternalMessageInfo + +func (m *Certificates) GetCert() [][]byte { + if m != nil { + return m.Cert + } + return nil +} + +type ConnectionState struct { + Version uint32 `protobuf:"varint,1,opt,name=version,proto3" json:"version,omitempty"` + HandshakeComplete bool `protobuf:"varint,2,opt,name=handshakeComplete,proto3" json:"handshakeComplete,omitempty"` + DidResume bool `protobuf:"varint,3,opt,name=didResume,proto3" json:"didResume,omitempty"` + CipherSuite uint32 `protobuf:"varint,4,opt,name=cipherSuite,proto3" 
json:"cipherSuite,omitempty"` + NegotiatedProtocol string `protobuf:"bytes,5,opt,name=negotiatedProtocol,proto3" json:"negotiatedProtocol,omitempty"` + NegotiatedProtocolIsMutual bool `protobuf:"varint,6,opt,name=negotiatedProtocolIsMutual,proto3" json:"negotiatedProtocolIsMutual,omitempty"` + ServerName string `protobuf:"bytes,7,opt,name=serverName,proto3" json:"serverName,omitempty"` + PeerCertificates *Certificates `protobuf:"bytes,8,opt,name=peerCertificates,proto3" json:"peerCertificates,omitempty"` + VerifiedChains []*Certificates `protobuf:"bytes,9,rep,name=verifiedChains,proto3" json:"verifiedChains,omitempty"` + SignedCertificateTimestamps [][]byte `protobuf:"bytes,10,rep,name=signedCertificateTimestamps,proto3" json:"signedCertificateTimestamps,omitempty"` + OcspResponse []byte `protobuf:"bytes,11,opt,name=ocspResponse,proto3" json:"ocspResponse,omitempty"` + TlsUnique []byte `protobuf:"bytes,12,opt,name=tlsUnique,proto3" json:"tlsUnique,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ConnectionState) Reset() { *m = ConnectionState{} } +func (m *ConnectionState) String() string { return proto.CompactTextString(m) } +func (*ConnectionState) ProtoMessage() {} +func (*ConnectionState) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{4} +} + +func (m *ConnectionState) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ConnectionState.Unmarshal(m, b) +} +func (m *ConnectionState) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ConnectionState.Marshal(b, m, deterministic) +} +func (m *ConnectionState) XXX_Merge(src proto.Message) { + xxx_messageInfo_ConnectionState.Merge(m, src) +} +func (m *ConnectionState) XXX_Size() int { + return xxx_messageInfo_ConnectionState.Size(m) +} +func (m *ConnectionState) XXX_DiscardUnknown() { + xxx_messageInfo_ConnectionState.DiscardUnknown(m) +} + +var 
xxx_messageInfo_ConnectionState proto.InternalMessageInfo + +func (m *ConnectionState) GetVersion() uint32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *ConnectionState) GetHandshakeComplete() bool { + if m != nil { + return m.HandshakeComplete + } + return false +} + +func (m *ConnectionState) GetDidResume() bool { + if m != nil { + return m.DidResume + } + return false +} + +func (m *ConnectionState) GetCipherSuite() uint32 { + if m != nil { + return m.CipherSuite + } + return 0 +} + +func (m *ConnectionState) GetNegotiatedProtocol() string { + if m != nil { + return m.NegotiatedProtocol + } + return "" +} + +func (m *ConnectionState) GetNegotiatedProtocolIsMutual() bool { + if m != nil { + return m.NegotiatedProtocolIsMutual + } + return false +} + +func (m *ConnectionState) GetServerName() string { + if m != nil { + return m.ServerName + } + return "" +} + +func (m *ConnectionState) GetPeerCertificates() *Certificates { + if m != nil { + return m.PeerCertificates + } + return nil +} + +func (m *ConnectionState) GetVerifiedChains() []*Certificates { + if m != nil { + return m.VerifiedChains + } + return nil +} + +func (m *ConnectionState) GetSignedCertificateTimestamps() [][]byte { + if m != nil { + return m.SignedCertificateTimestamps + } + return nil +} + +func (m *ConnectionState) GetOcspResponse() []byte { + if m != nil { + return m.OcspResponse + } + return nil +} + +func (m *ConnectionState) GetTlsUnique() []byte { + if m != nil { + return m.TlsUnique + } + return nil +} + +type Request struct { + Method string `protobuf:"bytes,1,opt,name=method,proto3" json:"method,omitempty"` + Url *URL `protobuf:"bytes,2,opt,name=url,proto3" json:"url,omitempty"` + Proto string `protobuf:"bytes,3,opt,name=proto,proto3" json:"proto,omitempty"` + ProtoMajor int32 `protobuf:"varint,4,opt,name=protoMajor,proto3" json:"protoMajor,omitempty"` + ProtoMinor int32 `protobuf:"varint,5,opt,name=protoMinor,proto3" json:"protoMinor,omitempty"` + Header 
[]*Element `protobuf:"bytes,6,rep,name=header,proto3" json:"header,omitempty"` + Body uint32 `protobuf:"varint,7,opt,name=body,proto3" json:"body,omitempty"` + ContentLength int64 `protobuf:"varint,8,opt,name=contentLength,proto3" json:"contentLength,omitempty"` + TransferEncoding []string `protobuf:"bytes,9,rep,name=transferEncoding,proto3" json:"transferEncoding,omitempty"` + Host string `protobuf:"bytes,10,opt,name=host,proto3" json:"host,omitempty"` + Form []*Element `protobuf:"bytes,11,rep,name=form,proto3" json:"form,omitempty"` + PostForm []*Element `protobuf:"bytes,12,rep,name=postForm,proto3" json:"postForm,omitempty"` + TrailerKeys []string `protobuf:"bytes,13,rep,name=trailerKeys,proto3" json:"trailerKeys,omitempty"` + RemoteAddr string `protobuf:"bytes,14,opt,name=remoteAddr,proto3" json:"remoteAddr,omitempty"` + RequestURI string `protobuf:"bytes,15,opt,name=requestURI,proto3" json:"requestURI,omitempty"` + Tls *ConnectionState `protobuf:"bytes,16,opt,name=tls,proto3" json:"tls,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Request) Reset() { *m = Request{} } +func (m *Request) String() string { return proto.CompactTextString(m) } +func (*Request) ProtoMessage() {} +func (*Request) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{5} +} + +func (m *Request) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Request.Unmarshal(m, b) +} +func (m *Request) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Request.Marshal(b, m, deterministic) +} +func (m *Request) XXX_Merge(src proto.Message) { + xxx_messageInfo_Request.Merge(m, src) +} +func (m *Request) XXX_Size() int { + return xxx_messageInfo_Request.Size(m) +} +func (m *Request) XXX_DiscardUnknown() { + xxx_messageInfo_Request.DiscardUnknown(m) +} + +var xxx_messageInfo_Request proto.InternalMessageInfo + +func (m *Request) GetMethod() string 
{ + if m != nil { + return m.Method + } + return "" +} + +func (m *Request) GetUrl() *URL { + if m != nil { + return m.Url + } + return nil +} + +func (m *Request) GetProto() string { + if m != nil { + return m.Proto + } + return "" +} + +func (m *Request) GetProtoMajor() int32 { + if m != nil { + return m.ProtoMajor + } + return 0 +} + +func (m *Request) GetProtoMinor() int32 { + if m != nil { + return m.ProtoMinor + } + return 0 +} + +func (m *Request) GetHeader() []*Element { + if m != nil { + return m.Header + } + return nil +} + +func (m *Request) GetBody() uint32 { + if m != nil { + return m.Body + } + return 0 +} + +func (m *Request) GetContentLength() int64 { + if m != nil { + return m.ContentLength + } + return 0 +} + +func (m *Request) GetTransferEncoding() []string { + if m != nil { + return m.TransferEncoding + } + return nil +} + +func (m *Request) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *Request) GetForm() []*Element { + if m != nil { + return m.Form + } + return nil +} + +func (m *Request) GetPostForm() []*Element { + if m != nil { + return m.PostForm + } + return nil +} + +func (m *Request) GetTrailerKeys() []string { + if m != nil { + return m.TrailerKeys + } + return nil +} + +func (m *Request) GetRemoteAddr() string { + if m != nil { + return m.RemoteAddr + } + return "" +} + +func (m *Request) GetRequestURI() string { + if m != nil { + return m.RequestURI + } + return "" +} + +func (m *Request) GetTls() *ConnectionState { + if m != nil { + return m.Tls + } + return nil +} + +type HTTPRequest struct { + ResponseWriter uint32 `protobuf:"varint,1,opt,name=responseWriter,proto3" json:"responseWriter,omitempty"` + Request *Request `protobuf:"bytes,2,opt,name=request,proto3" json:"request,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPRequest) Reset() { *m = HTTPRequest{} } +func (m *HTTPRequest) String() string { 
return proto.CompactTextString(m) } +func (*HTTPRequest) ProtoMessage() {} +func (*HTTPRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{6} +} + +func (m *HTTPRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HTTPRequest.Unmarshal(m, b) +} +func (m *HTTPRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HTTPRequest.Marshal(b, m, deterministic) +} +func (m *HTTPRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPRequest.Merge(m, src) +} +func (m *HTTPRequest) XXX_Size() int { + return xxx_messageInfo_HTTPRequest.Size(m) +} +func (m *HTTPRequest) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_HTTPRequest proto.InternalMessageInfo + +func (m *HTTPRequest) GetResponseWriter() uint32 { + if m != nil { + return m.ResponseWriter + } + return 0 +} + +func (m *HTTPRequest) GetRequest() *Request { + if m != nil { + return m.Request + } + return nil +} + +type HTTPResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *HTTPResponse) Reset() { *m = HTTPResponse{} } +func (m *HTTPResponse) String() string { return proto.CompactTextString(m) } +func (*HTTPResponse) ProtoMessage() {} +func (*HTTPResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_11b04836674e6f94, []int{7} +} + +func (m *HTTPResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_HTTPResponse.Unmarshal(m, b) +} +func (m *HTTPResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_HTTPResponse.Marshal(b, m, deterministic) +} +func (m *HTTPResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_HTTPResponse.Merge(m, src) +} +func (m *HTTPResponse) XXX_Size() int { + return xxx_messageInfo_HTTPResponse.Size(m) +} +func (m *HTTPResponse) XXX_DiscardUnknown() { + xxx_messageInfo_HTTPResponse.DiscardUnknown(m) 
+} + +var xxx_messageInfo_HTTPResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*Userinfo)(nil), "proto.Userinfo") + proto.RegisterType((*URL)(nil), "proto.URL") + proto.RegisterType((*Element)(nil), "proto.Element") + proto.RegisterType((*Certificates)(nil), "proto.Certificates") + proto.RegisterType((*ConnectionState)(nil), "proto.ConnectionState") + proto.RegisterType((*Request)(nil), "proto.Request") + proto.RegisterType((*HTTPRequest)(nil), "proto.HTTPRequest") + proto.RegisterType((*HTTPResponse)(nil), "proto.HTTPResponse") +} + +func init() { proto.RegisterFile("http.proto", fileDescriptor_11b04836674e6f94) } + +var fileDescriptor_11b04836674e6f94 = []byte{ + // 816 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x7c, 0x54, 0xdb, 0x6e, 0x1b, 0x37, + 0x10, 0x85, 0xb2, 0xb2, 0x2e, 0xa3, 0x8b, 0x5d, 0xa6, 0x08, 0x08, 0x37, 0x28, 0x84, 0x6d, 0x11, + 0x08, 0x41, 0x61, 0xa0, 0xce, 0x53, 0x51, 0xa0, 0x17, 0x18, 0x29, 0x12, 0xd4, 0x29, 0x5c, 0xda, + 0x46, 0x1f, 0x0b, 0x66, 0x77, 0xa4, 0x65, 0xb3, 0x4b, 0x6e, 0x48, 0xae, 0x0c, 0x7f, 0x47, 0x3f, + 0xa3, 0xbf, 0xd7, 0x0f, 0x28, 0x78, 0x59, 0x69, 0x15, 0xb9, 0x7e, 0xda, 0x99, 0x73, 0x86, 0xcb, + 0x99, 0x39, 0x33, 0x04, 0x28, 0xac, 0xad, 0xcf, 0x6a, 0xad, 0xac, 0x22, 0x47, 0xfe, 0x93, 0xe6, + 0x30, 0xba, 0x35, 0xa8, 0x85, 0x5c, 0x29, 0x72, 0x0a, 0xa3, 0xc6, 0xa0, 0x96, 0xbc, 0x42, 0xda, + 0x5b, 0xf4, 0x96, 0x63, 0xb6, 0xf5, 0x1d, 0x57, 0x73, 0x63, 0xee, 0x94, 0xce, 0xe9, 0x93, 0xc0, + 0xb5, 0x3e, 0x59, 0xc0, 0xa4, 0xb5, 0xaf, 0xd1, 0xd2, 0x64, 0xd1, 0x5b, 0x8e, 0x58, 0x17, 0x4a, + 0xff, 0xed, 0x41, 0x72, 0xcb, 0x2e, 0xc9, 0x33, 0x18, 0x98, 0xac, 0xc0, 0xed, 0xff, 0xa3, 0xe7, + 0x70, 0x55, 0xf3, 0x8f, 0x0d, 0xc6, 0x7f, 0x47, 0x8f, 0x7c, 0x05, 0x7d, 0x97, 0x81, 0xff, 0xe5, + 0xe4, 0xfc, 0x38, 0xa4, 0x7e, 0xd6, 0x26, 0xcc, 0x3c, 0x49, 0x08, 0xf4, 0x0b, 0x65, 0x2c, 0xed, + 0xfb, 0xa3, 0xde, 0x76, 0x58, 0xcd, 0x6d, 0x41, 0x8f, 0x02, 0xe6, 0x6c, 
0x42, 0x61, 0xa8, 0xf9, + 0xdd, 0x95, 0x83, 0x07, 0x1e, 0x6e, 0x5d, 0xf2, 0x25, 0xc0, 0x4a, 0xe9, 0x0c, 0x7f, 0x6f, 0x50, + 0xdf, 0xd3, 0xa1, 0xcf, 0xbf, 0x83, 0xb8, 0xe2, 0x35, 0xbf, 0x0b, 0xec, 0x28, 0x14, 0xdf, 0xfa, + 0x8e, 0x5b, 0x69, 0xbe, 0xae, 0x50, 0x5a, 0x3a, 0x0e, 0x5c, 0xeb, 0xa7, 0xaf, 0x60, 0xf8, 0xba, + 0x44, 0x67, 0x92, 0x13, 0x48, 0x3e, 0xe0, 0x7d, 0x2c, 0xdb, 0x99, 0xae, 0xe6, 0x0d, 0x2f, 0x1b, + 0x34, 0xf4, 0xc9, 0x22, 0x71, 0x35, 0x07, 0x2f, 0x4d, 0x61, 0x7a, 0x81, 0xda, 0x8a, 0x95, 0xc8, + 0xb8, 0x45, 0xe3, 0x4a, 0xc9, 0x50, 0x5b, 0xda, 0x5b, 0x24, 0xcb, 0x29, 0xf3, 0x76, 0xfa, 0x4f, + 0x1f, 0x8e, 0x2f, 0x94, 0x94, 0x98, 0x59, 0xa1, 0xe4, 0xb5, 0xe5, 0x16, 0x5d, 0x79, 0x1b, 0xd4, + 0x46, 0x28, 0xe9, 0x6f, 0x99, 0xb1, 0xd6, 0x25, 0xdf, 0xc0, 0x67, 0x05, 0x97, 0xb9, 0x29, 0xf8, + 0x07, 0xbc, 0x50, 0x55, 0x5d, 0xa2, 0x0d, 0x8d, 0x1e, 0xb1, 0x43, 0x82, 0x3c, 0x87, 0x71, 0x2e, + 0x72, 0x86, 0xa6, 0xa9, 0x30, 0x6a, 0xb9, 0x03, 0x9c, 0xd6, 0x99, 0xa8, 0x0b, 0xd4, 0xd7, 0x8d, + 0xb0, 0xe8, 0x7b, 0x3e, 0x63, 0x5d, 0x88, 0x9c, 0x01, 0x91, 0xb8, 0x56, 0x56, 0x70, 0x8b, 0xf9, + 0x95, 0x13, 0x2c, 0x53, 0x65, 0x14, 0xe2, 0x01, 0x86, 0xfc, 0x00, 0xa7, 0x87, 0xe8, 0x5b, 0xf3, + 0xae, 0xb1, 0x0d, 0x2f, 0xbd, 0x52, 0x23, 0xf6, 0x48, 0x84, 0x13, 0xcf, 0xa0, 0xde, 0xa0, 0xfe, + 0xcd, 0xcd, 0xed, 0xd0, 0xdf, 0xd3, 0x41, 0xc8, 0x8f, 0x70, 0x52, 0x23, 0xea, 0x6e, 0x4f, 0xbd, + 0x88, 0x93, 0xf3, 0xa7, 0x71, 0x9e, 0xba, 0x14, 0x3b, 0x08, 0x26, 0xdf, 0xc3, 0x7c, 0x83, 0x5a, + 0xac, 0x04, 0xe6, 0x17, 0x05, 0x17, 0xd2, 0xd0, 0xf1, 0x22, 0xf9, 0xbf, 0xe3, 0x9f, 0x84, 0x92, + 0x9f, 0xe0, 0x0b, 0x23, 0xd6, 0x12, 0xf3, 0x4e, 0xd4, 0x8d, 0xa8, 0xd0, 0x58, 0x5e, 0xd5, 0x86, + 0x82, 0x17, 0xf5, 0xb1, 0x10, 0x92, 0xc2, 0x54, 0x65, 0xa6, 0x66, 0x68, 0x6a, 0x25, 0x0d, 0xd2, + 0xc9, 0xa2, 0xb7, 0x9c, 0xb2, 0x3d, 0xcc, 0x69, 0x66, 0x4b, 0x73, 0x2b, 0x85, 0x5b, 0xa1, 0xa9, + 0x0f, 0xd8, 0x01, 0xe9, 0xdf, 0x7d, 0x18, 0x32, 0xfc, 0xd8, 0xa0, 0xb1, 0x6e, 0xea, 0x2a, 0xb4, + 0x85, 0xca, 
0xdb, 0x0d, 0x0c, 0x1e, 0x79, 0x0e, 0x49, 0xa3, 0x4b, 0x3f, 0x15, 0x93, 0x73, 0x68, + 0x17, 0x8d, 0x5d, 0x32, 0x07, 0x93, 0xcf, 0x21, 0x3c, 0x17, 0x7e, 0x1e, 0xc6, 0x2c, 0x38, 0xae, + 0xf3, 0xde, 0x78, 0xc7, 0xff, 0x52, 0xda, 0x8f, 0xc2, 0x11, 0xeb, 0x20, 0x3b, 0x5e, 0x48, 0xa5, + 0xfd, 0x04, 0x6c, 0x79, 0x87, 0x90, 0x17, 0x30, 0x28, 0x90, 0xe7, 0xa8, 0xe9, 0xc0, 0x37, 0x74, + 0x1e, 0xaf, 0x8d, 0x3b, 0xc3, 0x22, 0xeb, 0x36, 0xe0, 0xbd, 0xca, 0xc3, 0x62, 0xce, 0x98, 0xb7, + 0xc9, 0xd7, 0x30, 0xcb, 0x94, 0xb4, 0x28, 0xed, 0x25, 0xca, 0xb5, 0x2d, 0xbc, 0xa4, 0x09, 0xdb, + 0x07, 0xc9, 0x4b, 0x38, 0xb1, 0x9a, 0x4b, 0xb3, 0x42, 0xfd, 0x5a, 0x66, 0x2a, 0x17, 0x72, 0xed, + 0xc5, 0x1b, 0xb3, 0x03, 0x7c, 0xfb, 0x8c, 0x40, 0xe7, 0x19, 0x49, 0xa1, 0xbf, 0x52, 0xba, 0xa2, + 0x93, 0x07, 0xf3, 0xf3, 0x1c, 0x79, 0x09, 0xa3, 0x5a, 0x19, 0xfb, 0x8b, 0x8b, 0x9b, 0x3e, 0x18, + 0xb7, 0xe5, 0xdd, 0xf6, 0x58, 0xcd, 0x45, 0x89, 0xfa, 0x57, 0xbc, 0x37, 0x74, 0xe6, 0x53, 0xe9, + 0x42, 0xae, 0x67, 0x1a, 0x2b, 0x65, 0xf1, 0xe7, 0x3c, 0xd7, 0x74, 0x1e, 0xa6, 0x79, 0x87, 0x04, + 0xde, 0x4b, 0x79, 0xcb, 0xde, 0xd2, 0xe3, 0x96, 0x6f, 0x11, 0xb2, 0x84, 0xc4, 0x96, 0x86, 0x9e, + 0x78, 0x1d, 0x9f, 0xb5, 0x13, 0xba, 0xff, 0x54, 0x30, 0x17, 0x92, 0xfe, 0x09, 0x93, 0x37, 0x37, + 0x37, 0x57, 0xed, 0x60, 0xbc, 0x80, 0xb9, 0x8e, 0xe3, 0xf4, 0x87, 0x16, 0x16, 0x75, 0x7c, 0x45, + 0x3e, 0x41, 0xc9, 0x12, 0x86, 0xf1, 0xba, 0x38, 0x2c, 0x6d, 0xb5, 0xf1, 0x47, 0xac, 0xa5, 0xd3, + 0x39, 0x4c, 0xc3, 0x05, 0xe1, 0xfc, 0xf9, 0x77, 0xd0, 0x77, 0x3e, 0xf9, 0x16, 0x06, 0x6f, 0xb8, + 0xcc, 0x4b, 0x24, 0x24, 0x1e, 0xed, 0xe4, 0x71, 0xfa, 0x74, 0x0f, 0x0b, 0x47, 0xdf, 0x0f, 0x3c, + 0xf6, 0xea, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0xbf, 0x29, 0xb2, 0xd4, 0xc1, 0x06, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// HTTPClient is the client API for HTTP service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type HTTPClient interface { + Handle(ctx context.Context, in *HTTPRequest, opts ...grpc.CallOption) (*HTTPResponse, error) +} + +type hTTPClient struct { + cc grpc.ClientConnInterface +} + +func NewHTTPClient(cc grpc.ClientConnInterface) HTTPClient { + return &hTTPClient{cc} +} + +func (c *hTTPClient) Handle(ctx context.Context, in *HTTPRequest, opts ...grpc.CallOption) (*HTTPResponse, error) { + out := new(HTTPResponse) + err := c.cc.Invoke(ctx, "/proto.HTTP/Handle", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// HTTPServer is the server API for HTTP service. +type HTTPServer interface { + Handle(context.Context, *HTTPRequest) (*HTTPResponse, error) +} + +// UnimplementedHTTPServer can be embedded to have forward compatible implementations. 
+type UnimplementedHTTPServer struct { +} + +func (*UnimplementedHTTPServer) Handle(ctx context.Context, req *HTTPRequest) (*HTTPResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Handle not implemented") +} + +func RegisterHTTPServer(s *grpc.Server, srv HTTPServer) { + s.RegisterService(&_HTTP_serviceDesc, srv) +} + +func _HTTP_Handle_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(HTTPRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(HTTPServer).Handle(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.HTTP/Handle", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(HTTPServer).Handle(ctx, req.(*HTTPRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _HTTP_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.HTTP", + HandlerType: (*HTTPServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Handle", + Handler: _HTTP_Handle_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "http.proto", +} diff --git a/vms/rpcchainvm/ghttp/proto/http.proto b/vms/rpcchainvm/ghttp/proto/http.proto new file mode 100644 index 0000000..f5e9c71 --- /dev/null +++ b/vms/rpcchainvm/ghttp/proto/http.proto @@ -0,0 +1,75 @@ +syntax = "proto3"; +package proto; + + +message Userinfo { + string username = 1; + string password = 2; + bool passwordSet = 3; +} + +message URL { + string scheme = 1; + string opaque = 2; + Userinfo user = 3; + string host = 4; + string path = 5; + string rawPath = 6; + bool forceQuery = 7; + string rawQuery = 8; + string fragment = 9; +} + +message Element { + string key = 1; + repeated string values = 2; +} + +message Certificates { + repeated bytes cert = 1; +} + +message ConnectionState { + uint32 version = 1; + bool handshakeComplete = 2; + bool didResume = 3; + uint32 
cipherSuite = 4; + string negotiatedProtocol = 5; + bool negotiatedProtocolIsMutual = 6; + string serverName = 7; + Certificates peerCertificates = 8; + repeated Certificates verifiedChains = 9; + repeated bytes signedCertificateTimestamps = 10; + bytes ocspResponse = 11; + bytes tlsUnique = 12; +} + +message Request { + string method = 1; + URL url = 2; + string proto = 3; + int32 protoMajor = 4; + int32 protoMinor = 5; + repeated Element header = 6; + uint32 body = 7; // server ID + int64 contentLength = 8; + repeated string transferEncoding = 9; + string host = 10; + repeated Element form = 11; + repeated Element postForm = 12; + repeated string trailerKeys = 13; + string remoteAddr = 14; + string requestURI = 15; + ConnectionState tls = 16; +} + +message HTTPRequest { + uint32 responseWriter = 1; // server ID + Request request = 2; +} + +message HTTPResponse {} + +service HTTP { + rpc Handle(HTTPRequest) returns (HTTPResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/messenger/messenger_client.go b/vms/rpcchainvm/messenger/messenger_client.go new file mode 100644 index 0000000..f8f8033 --- /dev/null +++ b/vms/rpcchainvm/messenger/messenger_client.go @@ -0,0 +1,27 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package messenger + +import ( + "context" + + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/vms/rpcchainvm/messenger/proto" +) + +// Client is an implementation of a messenger channel that talks over RPC. +type Client struct{ client proto.MessengerClient } + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client proto.MessengerClient) *Client { + return &Client{client: client} +} + +// Notify ... 
+func (c *Client) Notify(msg common.Message) error { + _, err := c.client.Notify(context.Background(), &proto.NotifyRequest{ + Message: uint32(msg), + }) + return err +} diff --git a/vms/rpcchainvm/messenger/messenger_server.go b/vms/rpcchainvm/messenger/messenger_server.go new file mode 100644 index 0000000..a051d7b --- /dev/null +++ b/vms/rpcchainvm/messenger/messenger_server.go @@ -0,0 +1,37 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package messenger + +import ( + "context" + "errors" + + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/vms/rpcchainvm/messenger/proto" +) + +var ( + errFullQueue = errors.New("full message queue") +) + +// Server is a messenger that is managed over RPC. +type Server struct { + messenger chan<- common.Message +} + +// NewServer returns a vm instance connected to a remote vm instance +func NewServer(messenger chan<- common.Message) *Server { + return &Server{messenger: messenger} +} + +// Notify ... +func (s *Server) Notify(_ context.Context, req *proto.NotifyRequest) (*proto.NotifyResponse, error) { + msg := common.Message(req.Message) + select { + case s.messenger <- msg: + return &proto.NotifyResponse{}, nil + default: + return nil, errFullQueue + } +} diff --git a/vms/rpcchainvm/messenger/proto/messenger.pb.go b/vms/rpcchainvm/messenger/proto/messenger.pb.go new file mode 100644 index 0000000..35dd59c --- /dev/null +++ b/vms/rpcchainvm/messenger/proto/messenger.pb.go @@ -0,0 +1,194 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: messenger.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type NotifyRequest struct { + Message uint32 `protobuf:"varint,1,opt,name=message,proto3" json:"message,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotifyRequest) Reset() { *m = NotifyRequest{} } +func (m *NotifyRequest) String() string { return proto.CompactTextString(m) } +func (*NotifyRequest) ProtoMessage() {} +func (*NotifyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_b99aba0cbf4e4b91, []int{0} +} + +func (m *NotifyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotifyRequest.Unmarshal(m, b) +} +func (m *NotifyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotifyRequest.Marshal(b, m, deterministic) +} +func (m *NotifyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotifyRequest.Merge(m, src) +} +func (m *NotifyRequest) XXX_Size() int { + return xxx_messageInfo_NotifyRequest.Size(m) +} +func (m *NotifyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_NotifyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_NotifyRequest proto.InternalMessageInfo + +func (m *NotifyRequest) GetMessage() uint32 { + if m != nil { + return m.Message + } + return 0 +} + +type NotifyResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *NotifyResponse) Reset() { *m = NotifyResponse{} } +func (m *NotifyResponse) String() string { return proto.CompactTextString(m) } +func (*NotifyResponse) ProtoMessage() {} +func 
(*NotifyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_b99aba0cbf4e4b91, []int{1} +} + +func (m *NotifyResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_NotifyResponse.Unmarshal(m, b) +} +func (m *NotifyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_NotifyResponse.Marshal(b, m, deterministic) +} +func (m *NotifyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_NotifyResponse.Merge(m, src) +} +func (m *NotifyResponse) XXX_Size() int { + return xxx_messageInfo_NotifyResponse.Size(m) +} +func (m *NotifyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_NotifyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_NotifyResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*NotifyRequest)(nil), "proto.NotifyRequest") + proto.RegisterType((*NotifyResponse)(nil), "proto.NotifyResponse") +} + +func init() { proto.RegisterFile("messenger.proto", fileDescriptor_b99aba0cbf4e4b91) } + +var fileDescriptor_b99aba0cbf4e4b91 = []byte{ + // 123 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0xcf, 0x4d, 0x2d, 0x2e, + 0x4e, 0xcd, 0x4b, 0x4f, 0x2d, 0xd2, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x62, 0x05, 0x53, 0x4a, + 0x9a, 0x5c, 0xbc, 0x7e, 0xf9, 0x25, 0x99, 0x69, 0x95, 0x41, 0xa9, 0x85, 0xa5, 0xa9, 0xc5, 0x25, + 0x42, 0x12, 0x5c, 0xec, 0x20, 0xa5, 0x89, 0xe9, 0xa9, 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xbc, 0x41, + 0x30, 0xae, 0x92, 0x00, 0x17, 0x1f, 0x4c, 0x69, 0x71, 0x41, 0x7e, 0x5e, 0x71, 0xaa, 0x91, 0x13, + 0x17, 0xa7, 0x2f, 0xcc, 0x58, 0x21, 0x53, 0x2e, 0x36, 0x88, 0xb4, 0x90, 0x08, 0xc4, 0x0a, 0x3d, + 0x14, 0x83, 0xa5, 0x44, 0xd1, 0x44, 0x21, 0x66, 0x24, 0xb1, 0x81, 0x45, 0x8d, 0x01, 0x01, 0x00, + 0x00, 0xff, 0xff, 0x39, 0x03, 0x19, 0x97, 0xa1, 0x00, 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// MessengerClient is the client API for Messenger service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. +type MessengerClient interface { + Notify(ctx context.Context, in *NotifyRequest, opts ...grpc.CallOption) (*NotifyResponse, error) +} + +type messengerClient struct { + cc grpc.ClientConnInterface +} + +func NewMessengerClient(cc grpc.ClientConnInterface) MessengerClient { + return &messengerClient{cc} +} + +func (c *messengerClient) Notify(ctx context.Context, in *NotifyRequest, opts ...grpc.CallOption) (*NotifyResponse, error) { + out := new(NotifyResponse) + err := c.cc.Invoke(ctx, "/proto.Messenger/Notify", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// MessengerServer is the server API for Messenger service. +type MessengerServer interface { + Notify(context.Context, *NotifyRequest) (*NotifyResponse, error) +} + +// UnimplementedMessengerServer can be embedded to have forward compatible implementations. 
+type UnimplementedMessengerServer struct { +} + +func (*UnimplementedMessengerServer) Notify(ctx context.Context, req *NotifyRequest) (*NotifyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Notify not implemented") +} + +func RegisterMessengerServer(s *grpc.Server, srv MessengerServer) { + s.RegisterService(&_Messenger_serviceDesc, srv) +} + +func _Messenger_Notify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(NotifyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(MessengerServer).Notify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.Messenger/Notify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(MessengerServer).Notify(ctx, req.(*NotifyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _Messenger_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.Messenger", + HandlerType: (*MessengerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Notify", + Handler: _Messenger_Notify_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "messenger.proto", +} diff --git a/vms/rpcchainvm/messenger/proto/messenger.proto b/vms/rpcchainvm/messenger/proto/messenger.proto new file mode 100644 index 0000000..b1cb02b --- /dev/null +++ b/vms/rpcchainvm/messenger/proto/messenger.proto @@ -0,0 +1,12 @@ +syntax = "proto3"; +package proto; + +message NotifyRequest { + uint32 message = 1; +} + +message NotifyResponse {} + +service Messenger { + rpc Notify(NotifyRequest) returns (NotifyResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/proto/vm.pb.go b/vms/rpcchainvm/proto/vm.pb.go new file mode 100644 index 0000000..f39e89d --- /dev/null +++ b/vms/rpcchainvm/proto/vm.pb.go @@ -0,0 +1,1433 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// source: vm.proto + +package proto + +import ( + context "context" + fmt "fmt" + proto "github.com/golang/protobuf/proto" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +type InitializeRequest struct { + DbServer uint32 `protobuf:"varint,1,opt,name=dbServer,proto3" json:"dbServer,omitempty"` + GenesisBytes []byte `protobuf:"bytes,2,opt,name=genesisBytes,proto3" json:"genesisBytes,omitempty"` + EngineServer uint32 `protobuf:"varint,3,opt,name=engineServer,proto3" json:"engineServer,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitializeRequest) Reset() { *m = InitializeRequest{} } +func (m *InitializeRequest) String() string { return proto.CompactTextString(m) } +func (*InitializeRequest) ProtoMessage() {} +func (*InitializeRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{0} +} + +func (m *InitializeRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitializeRequest.Unmarshal(m, b) +} +func (m *InitializeRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitializeRequest.Marshal(b, m, deterministic) +} +func (m *InitializeRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitializeRequest.Merge(m, src) +} +func (m *InitializeRequest) XXX_Size() int { + return xxx_messageInfo_InitializeRequest.Size(m) +} +func (m 
*InitializeRequest) XXX_DiscardUnknown() { + xxx_messageInfo_InitializeRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_InitializeRequest proto.InternalMessageInfo + +func (m *InitializeRequest) GetDbServer() uint32 { + if m != nil { + return m.DbServer + } + return 0 +} + +func (m *InitializeRequest) GetGenesisBytes() []byte { + if m != nil { + return m.GenesisBytes + } + return nil +} + +func (m *InitializeRequest) GetEngineServer() uint32 { + if m != nil { + return m.EngineServer + } + return 0 +} + +type InitializeResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *InitializeResponse) Reset() { *m = InitializeResponse{} } +func (m *InitializeResponse) String() string { return proto.CompactTextString(m) } +func (*InitializeResponse) ProtoMessage() {} +func (*InitializeResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{1} +} + +func (m *InitializeResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_InitializeResponse.Unmarshal(m, b) +} +func (m *InitializeResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_InitializeResponse.Marshal(b, m, deterministic) +} +func (m *InitializeResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_InitializeResponse.Merge(m, src) +} +func (m *InitializeResponse) XXX_Size() int { + return xxx_messageInfo_InitializeResponse.Size(m) +} +func (m *InitializeResponse) XXX_DiscardUnknown() { + xxx_messageInfo_InitializeResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_InitializeResponse proto.InternalMessageInfo + +type ShutdownRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} } +func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) } +func (*ShutdownRequest) ProtoMessage() {} 
+func (*ShutdownRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{2} +} + +func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutdownRequest.Unmarshal(m, b) +} +func (m *ShutdownRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutdownRequest.Marshal(b, m, deterministic) +} +func (m *ShutdownRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutdownRequest.Merge(m, src) +} +func (m *ShutdownRequest) XXX_Size() int { + return xxx_messageInfo_ShutdownRequest.Size(m) +} +func (m *ShutdownRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ShutdownRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutdownRequest proto.InternalMessageInfo + +type ShutdownResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} } +func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) } +func (*ShutdownResponse) ProtoMessage() {} +func (*ShutdownResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{3} +} + +func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ShutdownResponse.Unmarshal(m, b) +} +func (m *ShutdownResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ShutdownResponse.Marshal(b, m, deterministic) +} +func (m *ShutdownResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ShutdownResponse.Merge(m, src) +} +func (m *ShutdownResponse) XXX_Size() int { + return xxx_messageInfo_ShutdownResponse.Size(m) +} +func (m *ShutdownResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ShutdownResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ShutdownResponse proto.InternalMessageInfo + +type CreateHandlersRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte 
`json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateHandlersRequest) Reset() { *m = CreateHandlersRequest{} } +func (m *CreateHandlersRequest) String() string { return proto.CompactTextString(m) } +func (*CreateHandlersRequest) ProtoMessage() {} +func (*CreateHandlersRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{4} +} + +func (m *CreateHandlersRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateHandlersRequest.Unmarshal(m, b) +} +func (m *CreateHandlersRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateHandlersRequest.Marshal(b, m, deterministic) +} +func (m *CreateHandlersRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateHandlersRequest.Merge(m, src) +} +func (m *CreateHandlersRequest) XXX_Size() int { + return xxx_messageInfo_CreateHandlersRequest.Size(m) +} +func (m *CreateHandlersRequest) XXX_DiscardUnknown() { + xxx_messageInfo_CreateHandlersRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateHandlersRequest proto.InternalMessageInfo + +type CreateHandlersResponse struct { + Handlers []*Handler `protobuf:"bytes,1,rep,name=handlers,proto3" json:"handlers,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *CreateHandlersResponse) Reset() { *m = CreateHandlersResponse{} } +func (m *CreateHandlersResponse) String() string { return proto.CompactTextString(m) } +func (*CreateHandlersResponse) ProtoMessage() {} +func (*CreateHandlersResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{5} +} + +func (m *CreateHandlersResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_CreateHandlersResponse.Unmarshal(m, b) +} +func (m *CreateHandlersResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_CreateHandlersResponse.Marshal(b, m, deterministic) +} +func (m 
*CreateHandlersResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_CreateHandlersResponse.Merge(m, src) +} +func (m *CreateHandlersResponse) XXX_Size() int { + return xxx_messageInfo_CreateHandlersResponse.Size(m) +} +func (m *CreateHandlersResponse) XXX_DiscardUnknown() { + xxx_messageInfo_CreateHandlersResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_CreateHandlersResponse proto.InternalMessageInfo + +func (m *CreateHandlersResponse) GetHandlers() []*Handler { + if m != nil { + return m.Handlers + } + return nil +} + +type Handler struct { + Prefix string `protobuf:"bytes,1,opt,name=prefix,proto3" json:"prefix,omitempty"` + LockOptions uint32 `protobuf:"varint,2,opt,name=lockOptions,proto3" json:"lockOptions,omitempty"` + Server uint32 `protobuf:"varint,3,opt,name=server,proto3" json:"server,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Handler) Reset() { *m = Handler{} } +func (m *Handler) String() string { return proto.CompactTextString(m) } +func (*Handler) ProtoMessage() {} +func (*Handler) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{6} +} + +func (m *Handler) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Handler.Unmarshal(m, b) +} +func (m *Handler) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Handler.Marshal(b, m, deterministic) +} +func (m *Handler) XXX_Merge(src proto.Message) { + xxx_messageInfo_Handler.Merge(m, src) +} +func (m *Handler) XXX_Size() int { + return xxx_messageInfo_Handler.Size(m) +} +func (m *Handler) XXX_DiscardUnknown() { + xxx_messageInfo_Handler.DiscardUnknown(m) +} + +var xxx_messageInfo_Handler proto.InternalMessageInfo + +func (m *Handler) GetPrefix() string { + if m != nil { + return m.Prefix + } + return "" +} + +func (m *Handler) GetLockOptions() uint32 { + if m != nil { + return m.LockOptions + } + return 0 +} + +func (m *Handler) 
GetServer() uint32 { + if m != nil { + return m.Server + } + return 0 +} + +type BuildBlockRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildBlockRequest) Reset() { *m = BuildBlockRequest{} } +func (m *BuildBlockRequest) String() string { return proto.CompactTextString(m) } +func (*BuildBlockRequest) ProtoMessage() {} +func (*BuildBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{7} +} + +func (m *BuildBlockRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BuildBlockRequest.Unmarshal(m, b) +} +func (m *BuildBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BuildBlockRequest.Marshal(b, m, deterministic) +} +func (m *BuildBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildBlockRequest.Merge(m, src) +} +func (m *BuildBlockRequest) XXX_Size() int { + return xxx_messageInfo_BuildBlockRequest.Size(m) +} +func (m *BuildBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BuildBlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildBlockRequest proto.InternalMessageInfo + +type BuildBlockResponse struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ParentID []byte `protobuf:"bytes,2,opt,name=parentID,proto3" json:"parentID,omitempty"` + Bytes []byte `protobuf:"bytes,3,opt,name=bytes,proto3" json:"bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BuildBlockResponse) Reset() { *m = BuildBlockResponse{} } +func (m *BuildBlockResponse) String() string { return proto.CompactTextString(m) } +func (*BuildBlockResponse) ProtoMessage() {} +func (*BuildBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{8} +} + +func (m *BuildBlockResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_BuildBlockResponse.Unmarshal(m, b) +} +func (m *BuildBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BuildBlockResponse.Marshal(b, m, deterministic) +} +func (m *BuildBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BuildBlockResponse.Merge(m, src) +} +func (m *BuildBlockResponse) XXX_Size() int { + return xxx_messageInfo_BuildBlockResponse.Size(m) +} +func (m *BuildBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BuildBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BuildBlockResponse proto.InternalMessageInfo + +func (m *BuildBlockResponse) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +func (m *BuildBlockResponse) GetParentID() []byte { + if m != nil { + return m.ParentID + } + return nil +} + +func (m *BuildBlockResponse) GetBytes() []byte { + if m != nil { + return m.Bytes + } + return nil +} + +type ParseBlockRequest struct { + Bytes []byte `protobuf:"bytes,1,opt,name=bytes,proto3" json:"bytes,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ParseBlockRequest) Reset() { *m = ParseBlockRequest{} } +func (m *ParseBlockRequest) String() string { return proto.CompactTextString(m) } +func (*ParseBlockRequest) ProtoMessage() {} +func (*ParseBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{9} +} + +func (m *ParseBlockRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ParseBlockRequest.Unmarshal(m, b) +} +func (m *ParseBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ParseBlockRequest.Marshal(b, m, deterministic) +} +func (m *ParseBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParseBlockRequest.Merge(m, src) +} +func (m *ParseBlockRequest) XXX_Size() int { + return xxx_messageInfo_ParseBlockRequest.Size(m) +} +func (m 
*ParseBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_ParseBlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_ParseBlockRequest proto.InternalMessageInfo + +func (m *ParseBlockRequest) GetBytes() []byte { + if m != nil { + return m.Bytes + } + return nil +} + +type ParseBlockResponse struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + ParentID []byte `protobuf:"bytes,2,opt,name=parentID,proto3" json:"parentID,omitempty"` + Status uint32 `protobuf:"varint,3,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ParseBlockResponse) Reset() { *m = ParseBlockResponse{} } +func (m *ParseBlockResponse) String() string { return proto.CompactTextString(m) } +func (*ParseBlockResponse) ProtoMessage() {} +func (*ParseBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{10} +} + +func (m *ParseBlockResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_ParseBlockResponse.Unmarshal(m, b) +} +func (m *ParseBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_ParseBlockResponse.Marshal(b, m, deterministic) +} +func (m *ParseBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_ParseBlockResponse.Merge(m, src) +} +func (m *ParseBlockResponse) XXX_Size() int { + return xxx_messageInfo_ParseBlockResponse.Size(m) +} +func (m *ParseBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_ParseBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_ParseBlockResponse proto.InternalMessageInfo + +func (m *ParseBlockResponse) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +func (m *ParseBlockResponse) GetParentID() []byte { + if m != nil { + return m.ParentID + } + return nil +} + +func (m *ParseBlockResponse) GetStatus() uint32 { + if m != nil { + return m.Status + } + return 0 +} + +type 
GetBlockRequest struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBlockRequest) Reset() { *m = GetBlockRequest{} } +func (m *GetBlockRequest) String() string { return proto.CompactTextString(m) } +func (*GetBlockRequest) ProtoMessage() {} +func (*GetBlockRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{11} +} + +func (m *GetBlockRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetBlockRequest.Unmarshal(m, b) +} +func (m *GetBlockRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetBlockRequest.Marshal(b, m, deterministic) +} +func (m *GetBlockRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBlockRequest.Merge(m, src) +} +func (m *GetBlockRequest) XXX_Size() int { + return xxx_messageInfo_GetBlockRequest.Size(m) +} +func (m *GetBlockRequest) XXX_DiscardUnknown() { + xxx_messageInfo_GetBlockRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBlockRequest proto.InternalMessageInfo + +func (m *GetBlockRequest) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +type GetBlockResponse struct { + ParentID []byte `protobuf:"bytes,1,opt,name=parentID,proto3" json:"parentID,omitempty"` + Bytes []byte `protobuf:"bytes,2,opt,name=bytes,proto3" json:"bytes,omitempty"` + Status uint32 `protobuf:"varint,3,opt,name=status,proto3" json:"status,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *GetBlockResponse) Reset() { *m = GetBlockResponse{} } +func (m *GetBlockResponse) String() string { return proto.CompactTextString(m) } +func (*GetBlockResponse) ProtoMessage() {} +func (*GetBlockResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{12} +} + +func (m 
*GetBlockResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_GetBlockResponse.Unmarshal(m, b) +} +func (m *GetBlockResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_GetBlockResponse.Marshal(b, m, deterministic) +} +func (m *GetBlockResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_GetBlockResponse.Merge(m, src) +} +func (m *GetBlockResponse) XXX_Size() int { + return xxx_messageInfo_GetBlockResponse.Size(m) +} +func (m *GetBlockResponse) XXX_DiscardUnknown() { + xxx_messageInfo_GetBlockResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_GetBlockResponse proto.InternalMessageInfo + +func (m *GetBlockResponse) GetParentID() []byte { + if m != nil { + return m.ParentID + } + return nil +} + +func (m *GetBlockResponse) GetBytes() []byte { + if m != nil { + return m.Bytes + } + return nil +} + +func (m *GetBlockResponse) GetStatus() uint32 { + if m != nil { + return m.Status + } + return 0 +} + +type SetPreferenceRequest struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetPreferenceRequest) Reset() { *m = SetPreferenceRequest{} } +func (m *SetPreferenceRequest) String() string { return proto.CompactTextString(m) } +func (*SetPreferenceRequest) ProtoMessage() {} +func (*SetPreferenceRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{13} +} + +func (m *SetPreferenceRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetPreferenceRequest.Unmarshal(m, b) +} +func (m *SetPreferenceRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetPreferenceRequest.Marshal(b, m, deterministic) +} +func (m *SetPreferenceRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetPreferenceRequest.Merge(m, src) +} +func (m *SetPreferenceRequest) XXX_Size() int { + return 
xxx_messageInfo_SetPreferenceRequest.Size(m) +} +func (m *SetPreferenceRequest) XXX_DiscardUnknown() { + xxx_messageInfo_SetPreferenceRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_SetPreferenceRequest proto.InternalMessageInfo + +func (m *SetPreferenceRequest) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +type SetPreferenceResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SetPreferenceResponse) Reset() { *m = SetPreferenceResponse{} } +func (m *SetPreferenceResponse) String() string { return proto.CompactTextString(m) } +func (*SetPreferenceResponse) ProtoMessage() {} +func (*SetPreferenceResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{14} +} + +func (m *SetPreferenceResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_SetPreferenceResponse.Unmarshal(m, b) +} +func (m *SetPreferenceResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_SetPreferenceResponse.Marshal(b, m, deterministic) +} +func (m *SetPreferenceResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_SetPreferenceResponse.Merge(m, src) +} +func (m *SetPreferenceResponse) XXX_Size() int { + return xxx_messageInfo_SetPreferenceResponse.Size(m) +} +func (m *SetPreferenceResponse) XXX_DiscardUnknown() { + xxx_messageInfo_SetPreferenceResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_SetPreferenceResponse proto.InternalMessageInfo + +type LastAcceptedRequest struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LastAcceptedRequest) Reset() { *m = LastAcceptedRequest{} } +func (m *LastAcceptedRequest) String() string { return proto.CompactTextString(m) } +func (*LastAcceptedRequest) ProtoMessage() {} +func (*LastAcceptedRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, 
[]int{15} +} + +func (m *LastAcceptedRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LastAcceptedRequest.Unmarshal(m, b) +} +func (m *LastAcceptedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LastAcceptedRequest.Marshal(b, m, deterministic) +} +func (m *LastAcceptedRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastAcceptedRequest.Merge(m, src) +} +func (m *LastAcceptedRequest) XXX_Size() int { + return xxx_messageInfo_LastAcceptedRequest.Size(m) +} +func (m *LastAcceptedRequest) XXX_DiscardUnknown() { + xxx_messageInfo_LastAcceptedRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_LastAcceptedRequest proto.InternalMessageInfo + +type LastAcceptedResponse struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *LastAcceptedResponse) Reset() { *m = LastAcceptedResponse{} } +func (m *LastAcceptedResponse) String() string { return proto.CompactTextString(m) } +func (*LastAcceptedResponse) ProtoMessage() {} +func (*LastAcceptedResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{16} +} + +func (m *LastAcceptedResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_LastAcceptedResponse.Unmarshal(m, b) +} +func (m *LastAcceptedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_LastAcceptedResponse.Marshal(b, m, deterministic) +} +func (m *LastAcceptedResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_LastAcceptedResponse.Merge(m, src) +} +func (m *LastAcceptedResponse) XXX_Size() int { + return xxx_messageInfo_LastAcceptedResponse.Size(m) +} +func (m *LastAcceptedResponse) XXX_DiscardUnknown() { + xxx_messageInfo_LastAcceptedResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_LastAcceptedResponse proto.InternalMessageInfo + +func (m 
*LastAcceptedResponse) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +type BlockVerifyRequest struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockVerifyRequest) Reset() { *m = BlockVerifyRequest{} } +func (m *BlockVerifyRequest) String() string { return proto.CompactTextString(m) } +func (*BlockVerifyRequest) ProtoMessage() {} +func (*BlockVerifyRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{17} +} + +func (m *BlockVerifyRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockVerifyRequest.Unmarshal(m, b) +} +func (m *BlockVerifyRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockVerifyRequest.Marshal(b, m, deterministic) +} +func (m *BlockVerifyRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockVerifyRequest.Merge(m, src) +} +func (m *BlockVerifyRequest) XXX_Size() int { + return xxx_messageInfo_BlockVerifyRequest.Size(m) +} +func (m *BlockVerifyRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockVerifyRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockVerifyRequest proto.InternalMessageInfo + +func (m *BlockVerifyRequest) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +type BlockVerifyResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockVerifyResponse) Reset() { *m = BlockVerifyResponse{} } +func (m *BlockVerifyResponse) String() string { return proto.CompactTextString(m) } +func (*BlockVerifyResponse) ProtoMessage() {} +func (*BlockVerifyResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{18} +} + +func (m *BlockVerifyResponse) XXX_Unmarshal(b []byte) error { + return 
xxx_messageInfo_BlockVerifyResponse.Unmarshal(m, b) +} +func (m *BlockVerifyResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockVerifyResponse.Marshal(b, m, deterministic) +} +func (m *BlockVerifyResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockVerifyResponse.Merge(m, src) +} +func (m *BlockVerifyResponse) XXX_Size() int { + return xxx_messageInfo_BlockVerifyResponse.Size(m) +} +func (m *BlockVerifyResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockVerifyResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockVerifyResponse proto.InternalMessageInfo + +type BlockAcceptRequest struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockAcceptRequest) Reset() { *m = BlockAcceptRequest{} } +func (m *BlockAcceptRequest) String() string { return proto.CompactTextString(m) } +func (*BlockAcceptRequest) ProtoMessage() {} +func (*BlockAcceptRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{19} +} + +func (m *BlockAcceptRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockAcceptRequest.Unmarshal(m, b) +} +func (m *BlockAcceptRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockAcceptRequest.Marshal(b, m, deterministic) +} +func (m *BlockAcceptRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockAcceptRequest.Merge(m, src) +} +func (m *BlockAcceptRequest) XXX_Size() int { + return xxx_messageInfo_BlockAcceptRequest.Size(m) +} +func (m *BlockAcceptRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockAcceptRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockAcceptRequest proto.InternalMessageInfo + +func (m *BlockAcceptRequest) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +type BlockAcceptResponse struct { + 
XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockAcceptResponse) Reset() { *m = BlockAcceptResponse{} } +func (m *BlockAcceptResponse) String() string { return proto.CompactTextString(m) } +func (*BlockAcceptResponse) ProtoMessage() {} +func (*BlockAcceptResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{20} +} + +func (m *BlockAcceptResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockAcceptResponse.Unmarshal(m, b) +} +func (m *BlockAcceptResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockAcceptResponse.Marshal(b, m, deterministic) +} +func (m *BlockAcceptResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockAcceptResponse.Merge(m, src) +} +func (m *BlockAcceptResponse) XXX_Size() int { + return xxx_messageInfo_BlockAcceptResponse.Size(m) +} +func (m *BlockAcceptResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockAcceptResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockAcceptResponse proto.InternalMessageInfo + +type BlockRejectRequest struct { + Id []byte `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockRejectRequest) Reset() { *m = BlockRejectRequest{} } +func (m *BlockRejectRequest) String() string { return proto.CompactTextString(m) } +func (*BlockRejectRequest) ProtoMessage() {} +func (*BlockRejectRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{21} +} + +func (m *BlockRejectRequest) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockRejectRequest.Unmarshal(m, b) +} +func (m *BlockRejectRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockRejectRequest.Marshal(b, m, deterministic) +} +func (m *BlockRejectRequest) 
XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockRejectRequest.Merge(m, src) +} +func (m *BlockRejectRequest) XXX_Size() int { + return xxx_messageInfo_BlockRejectRequest.Size(m) +} +func (m *BlockRejectRequest) XXX_DiscardUnknown() { + xxx_messageInfo_BlockRejectRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockRejectRequest proto.InternalMessageInfo + +func (m *BlockRejectRequest) GetId() []byte { + if m != nil { + return m.Id + } + return nil +} + +type BlockRejectResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *BlockRejectResponse) Reset() { *m = BlockRejectResponse{} } +func (m *BlockRejectResponse) String() string { return proto.CompactTextString(m) } +func (*BlockRejectResponse) ProtoMessage() {} +func (*BlockRejectResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_cab246c8c7c5372d, []int{22} +} + +func (m *BlockRejectResponse) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_BlockRejectResponse.Unmarshal(m, b) +} +func (m *BlockRejectResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_BlockRejectResponse.Marshal(b, m, deterministic) +} +func (m *BlockRejectResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_BlockRejectResponse.Merge(m, src) +} +func (m *BlockRejectResponse) XXX_Size() int { + return xxx_messageInfo_BlockRejectResponse.Size(m) +} +func (m *BlockRejectResponse) XXX_DiscardUnknown() { + xxx_messageInfo_BlockRejectResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_BlockRejectResponse proto.InternalMessageInfo + +func init() { + proto.RegisterType((*InitializeRequest)(nil), "proto.InitializeRequest") + proto.RegisterType((*InitializeResponse)(nil), "proto.InitializeResponse") + proto.RegisterType((*ShutdownRequest)(nil), "proto.ShutdownRequest") + proto.RegisterType((*ShutdownResponse)(nil), "proto.ShutdownResponse") + 
proto.RegisterType((*CreateHandlersRequest)(nil), "proto.CreateHandlersRequest") + proto.RegisterType((*CreateHandlersResponse)(nil), "proto.CreateHandlersResponse") + proto.RegisterType((*Handler)(nil), "proto.Handler") + proto.RegisterType((*BuildBlockRequest)(nil), "proto.BuildBlockRequest") + proto.RegisterType((*BuildBlockResponse)(nil), "proto.BuildBlockResponse") + proto.RegisterType((*ParseBlockRequest)(nil), "proto.ParseBlockRequest") + proto.RegisterType((*ParseBlockResponse)(nil), "proto.ParseBlockResponse") + proto.RegisterType((*GetBlockRequest)(nil), "proto.GetBlockRequest") + proto.RegisterType((*GetBlockResponse)(nil), "proto.GetBlockResponse") + proto.RegisterType((*SetPreferenceRequest)(nil), "proto.SetPreferenceRequest") + proto.RegisterType((*SetPreferenceResponse)(nil), "proto.SetPreferenceResponse") + proto.RegisterType((*LastAcceptedRequest)(nil), "proto.LastAcceptedRequest") + proto.RegisterType((*LastAcceptedResponse)(nil), "proto.LastAcceptedResponse") + proto.RegisterType((*BlockVerifyRequest)(nil), "proto.BlockVerifyRequest") + proto.RegisterType((*BlockVerifyResponse)(nil), "proto.BlockVerifyResponse") + proto.RegisterType((*BlockAcceptRequest)(nil), "proto.BlockAcceptRequest") + proto.RegisterType((*BlockAcceptResponse)(nil), "proto.BlockAcceptResponse") + proto.RegisterType((*BlockRejectRequest)(nil), "proto.BlockRejectRequest") + proto.RegisterType((*BlockRejectResponse)(nil), "proto.BlockRejectResponse") +} + +func init() { proto.RegisterFile("vm.proto", fileDescriptor_cab246c8c7c5372d) } + +var fileDescriptor_cab246c8c7c5372d = []byte{ + // 610 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x4b, 0x6f, 0xd3, 0x40, + 0x10, 0x96, 0x53, 0xb5, 0x84, 0xc9, 0xa3, 0xcd, 0xe6, 0x89, 0x5b, 0xa4, 0x60, 0xa1, 0x2a, 0x70, + 0xe8, 0xa1, 0x1c, 0x7b, 0x4a, 0x88, 0x54, 0x8a, 0xa8, 0xa8, 0x1c, 0x29, 0x42, 0x82, 0x8b, 0x13, + 0x4f, 0x5a, 0x43, 0xb0, 0x8d, 0x77, 0xd3, 0x52, 0xfe, 0x3b, 
0x12, 0x8a, 0x3d, 0x5e, 0xef, 0xda, + 0x8e, 0x2a, 0x71, 0xb2, 0x66, 0xe6, 0x9b, 0x6f, 0x1e, 0x3b, 0x9f, 0xa1, 0x7a, 0xff, 0xf3, 0x2c, + 0x8c, 0x02, 0x11, 0xb0, 0xfd, 0xf8, 0x63, 0x3d, 0x40, 0xeb, 0xca, 0xf7, 0x84, 0xe7, 0xac, 0xbd, + 0x3f, 0x68, 0xe3, 0xaf, 0x0d, 0x72, 0xc1, 0x4c, 0xa8, 0xba, 0x8b, 0x19, 0x46, 0xf7, 0x18, 0x0d, + 0x8c, 0xa1, 0x31, 0x6a, 0xd8, 0xd2, 0x66, 0x16, 0xd4, 0x6f, 0xd1, 0x47, 0xee, 0xf1, 0xc9, 0xa3, + 0x40, 0x3e, 0xa8, 0x0c, 0x8d, 0x51, 0xdd, 0xd6, 0x7c, 0x5b, 0x0c, 0xfa, 0xb7, 0x9e, 0x8f, 0xc4, + 0xb1, 0x17, 0x73, 0x68, 0x3e, 0xab, 0x03, 0x4c, 0x2d, 0xcc, 0xc3, 0xc0, 0xe7, 0x68, 0xb5, 0xe0, + 0x70, 0x76, 0xb7, 0x11, 0x6e, 0xf0, 0xe0, 0x53, 0x33, 0x16, 0x83, 0xa3, 0xcc, 0x45, 0xb0, 0x3e, + 0x74, 0xdf, 0x47, 0xe8, 0x08, 0xfc, 0xe0, 0xf8, 0xee, 0x1a, 0x23, 0x9e, 0x82, 0xa7, 0xd0, 0xcb, + 0x07, 0x92, 0x14, 0xf6, 0x16, 0xaa, 0x77, 0xe4, 0x1b, 0x18, 0xc3, 0xbd, 0x51, 0xed, 0xbc, 0x99, + 0x6c, 0xe2, 0x8c, 0xa0, 0xb6, 0x8c, 0x5b, 0x5f, 0xe1, 0x19, 0x39, 0x59, 0x0f, 0x0e, 0xc2, 0x08, + 0x57, 0xde, 0xef, 0x78, 0x11, 0xcf, 0x6d, 0xb2, 0xd8, 0x10, 0x6a, 0xeb, 0x60, 0xf9, 0xe3, 0x73, + 0x28, 0xbc, 0xc0, 0x4f, 0xb6, 0xd0, 0xb0, 0x55, 0xd7, 0x36, 0x93, 0xab, 0xe3, 0x93, 0x65, 0xb5, + 0xa1, 0x35, 0xd9, 0x78, 0x6b, 0x77, 0xb2, 0x05, 0xa7, 0x7d, 0xcf, 0x81, 0xa9, 0x4e, 0xea, 0xb9, + 0x09, 0x15, 0xcf, 0x8d, 0x0b, 0xd7, 0xed, 0x8a, 0xe7, 0x6e, 0xdf, 0x25, 0x74, 0x22, 0xf4, 0xc5, + 0xd5, 0x94, 0xf6, 0x2e, 0x6d, 0xd6, 0x81, 0xfd, 0x45, 0xfc, 0x20, 0x7b, 0x71, 0x20, 0x31, 0xac, + 0x37, 0xd0, 0xba, 0x71, 0x22, 0x8e, 0x6a, 0xb1, 0x0c, 0x6a, 0xa8, 0xd0, 0x2f, 0xc0, 0x54, 0xe8, + 0x7f, 0xb4, 0xb0, 0x9d, 0x58, 0x38, 0x62, 0xc3, 0xe5, 0xc4, 0xb1, 0x65, 0xbd, 0x82, 0xc3, 0x4b, + 0x14, 0x5a, 0x0b, 0x39, 0x5a, 0xeb, 0x1b, 0x1c, 0x65, 0x10, 0x2a, 0xad, 0x96, 0x32, 0x76, 0x4d, + 0x5b, 0x51, 0x46, 0xd8, 0xd9, 0xc0, 0x29, 0x74, 0x66, 0x28, 0x6e, 0x22, 0x5c, 0x61, 0x84, 0xfe, + 0x12, 0x77, 0x75, 0xd1, 0x87, 0x6e, 0x0e, 0x47, 0xf7, 0xd6, 0x85, 0xf6, 0x27, 0x87, 0x8b, 0xf1, + 
0x72, 0x89, 0xa1, 0x40, 0x37, 0x7d, 0xb5, 0x53, 0xe8, 0xe8, 0xee, 0xf2, 0xa5, 0x59, 0xaf, 0x81, + 0xc5, 0xa3, 0xcd, 0x31, 0xf2, 0x56, 0x8f, 0xbb, 0xaa, 0x77, 0xa1, 0xad, 0xa1, 0xa8, 0x76, 0x9a, + 0x9c, 0x54, 0x79, 0x2a, 0x39, 0x45, 0xe5, 0x92, 0x6d, 0xfc, 0x8e, 0xcb, 0x27, 0x93, 0x53, 0x54, + 0x92, 0x7c, 0xfe, 0x77, 0x1f, 0x2a, 0xf3, 0x6b, 0x36, 0x06, 0xc8, 0x94, 0xca, 0x06, 0xa4, 0x9a, + 0xc2, 0x5f, 0xc3, 0x7c, 0x51, 0x12, 0xa1, 0x85, 0x5c, 0x40, 0x35, 0xd5, 0x30, 0xeb, 0x11, 0x2c, + 0xa7, 0x73, 0xb3, 0x5f, 0xf0, 0x53, 0xf2, 0x35, 0x34, 0x75, 0x4d, 0xb3, 0x13, 0x82, 0x96, 0xfe, + 0x03, 0xcc, 0x97, 0x3b, 0xa2, 0x44, 0x37, 0x06, 0xc8, 0xa4, 0x26, 0xc7, 0x29, 0x48, 0x52, 0x8e, + 0x53, 0xa2, 0xcb, 0x31, 0x40, 0x26, 0x15, 0x49, 0x51, 0x10, 0x9a, 0xa4, 0x28, 0xd1, 0xd5, 0x05, + 0x54, 0xd3, 0x83, 0x97, 0x1b, 0xc9, 0x89, 0x44, 0x6e, 0xa4, 0xa0, 0x8c, 0x8f, 0xd0, 0xd0, 0xee, + 0x94, 0x1d, 0xa7, 0xbb, 0x2b, 0xb9, 0x72, 0xf3, 0xa4, 0x3c, 0x48, 0x5c, 0x97, 0x50, 0x57, 0x6f, + 0x98, 0x99, 0x84, 0x2e, 0xb9, 0x77, 0xf3, 0xb8, 0x34, 0x46, 0x44, 0x53, 0xa8, 0x29, 0xe7, 0xcb, + 0xe4, 0xfa, 0x0a, 0x87, 0x6f, 0x9a, 0x65, 0xa1, 0x1c, 0x4b, 0x42, 0xaf, 0xb3, 0x68, 0x0a, 0xd0, + 0x59, 0xf4, 0xb3, 0x97, 0x2c, 0xc9, 0x41, 0xeb, 0x2c, 0x9a, 0x14, 0x74, 0x16, 0xfd, 0xfe, 0x17, + 0x07, 0x71, 0xe8, 0xdd, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x10, 0x57, 0x73, 0x31, 0x35, 0x07, + 0x00, 0x00, +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConnInterface + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion6 + +// VMClient is the client API for VM service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. 
// NOTE: The definitions below are protoc-generated gRPC bindings for the VM
// service declared in vm.proto. Regenerate them with protoc-gen-go instead
// of editing by hand.

// VMClient is the client API for the VM service. Each method performs one
// unary RPC against the matching "/proto.VM/<Method>" endpoint.
type VMClient interface {
	Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error)
	Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error)
	CreateHandlers(ctx context.Context, in *CreateHandlersRequest, opts ...grpc.CallOption) (*CreateHandlersResponse, error)
	BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error)
	ParseBlock(ctx context.Context, in *ParseBlockRequest, opts ...grpc.CallOption) (*ParseBlockResponse, error)
	GetBlock(ctx context.Context, in *GetBlockRequest, opts ...grpc.CallOption) (*GetBlockResponse, error)
	SetPreference(ctx context.Context, in *SetPreferenceRequest, opts ...grpc.CallOption) (*SetPreferenceResponse, error)
	LastAccepted(ctx context.Context, in *LastAcceptedRequest, opts ...grpc.CallOption) (*LastAcceptedResponse, error)
	BlockVerify(ctx context.Context, in *BlockVerifyRequest, opts ...grpc.CallOption) (*BlockVerifyResponse, error)
	BlockAccept(ctx context.Context, in *BlockAcceptRequest, opts ...grpc.CallOption) (*BlockAcceptResponse, error)
	BlockReject(ctx context.Context, in *BlockRejectRequest, opts ...grpc.CallOption) (*BlockRejectResponse, error)
}

// vMClient is the generated, unexported implementation of VMClient backed by
// a gRPC client connection.
type vMClient struct {
	cc grpc.ClientConnInterface
}

// NewVMClient returns a VMClient that issues its RPCs over cc.
func NewVMClient(cc grpc.ClientConnInterface) VMClient {
	return &vMClient{cc}
}

// Each method below allocates the response message, invokes the matching
// unary RPC, and returns either the populated response or the
// transport/server error (never both).

func (c *vMClient) Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error) {
	out := new(InitializeResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/Initialize", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) {
	out := new(ShutdownResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/Shutdown", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) CreateHandlers(ctx context.Context, in *CreateHandlersRequest, opts ...grpc.CallOption) (*CreateHandlersResponse, error) {
	out := new(CreateHandlersResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/CreateHandlers", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error) {
	out := new(BuildBlockResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/BuildBlock", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) ParseBlock(ctx context.Context, in *ParseBlockRequest, opts ...grpc.CallOption) (*ParseBlockResponse, error) {
	out := new(ParseBlockResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/ParseBlock", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) GetBlock(ctx context.Context, in *GetBlockRequest, opts ...grpc.CallOption) (*GetBlockResponse, error) {
	out := new(GetBlockResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/GetBlock", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) SetPreference(ctx context.Context, in *SetPreferenceRequest, opts ...grpc.CallOption) (*SetPreferenceResponse, error) {
	out := new(SetPreferenceResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/SetPreference", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) LastAccepted(ctx context.Context, in *LastAcceptedRequest, opts ...grpc.CallOption) (*LastAcceptedResponse, error) {
	out := new(LastAcceptedResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/LastAccepted", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) BlockVerify(ctx context.Context, in *BlockVerifyRequest, opts ...grpc.CallOption) (*BlockVerifyResponse, error) {
	out := new(BlockVerifyResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/BlockVerify", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) BlockAccept(ctx context.Context, in *BlockAcceptRequest, opts ...grpc.CallOption) (*BlockAcceptResponse, error) {
	out := new(BlockAcceptResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/BlockAccept", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) BlockReject(ctx context.Context, in *BlockRejectRequest, opts ...grpc.CallOption) (*BlockRejectResponse, error) {
	out := new(BlockRejectResponse)
	err := c.cc.Invoke(ctx, "/proto.VM/BlockReject", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

// VMServer is the server API for VM service.
+type VMServer interface { + Initialize(context.Context, *InitializeRequest) (*InitializeResponse, error) + Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error) + CreateHandlers(context.Context, *CreateHandlersRequest) (*CreateHandlersResponse, error) + BuildBlock(context.Context, *BuildBlockRequest) (*BuildBlockResponse, error) + ParseBlock(context.Context, *ParseBlockRequest) (*ParseBlockResponse, error) + GetBlock(context.Context, *GetBlockRequest) (*GetBlockResponse, error) + SetPreference(context.Context, *SetPreferenceRequest) (*SetPreferenceResponse, error) + LastAccepted(context.Context, *LastAcceptedRequest) (*LastAcceptedResponse, error) + BlockVerify(context.Context, *BlockVerifyRequest) (*BlockVerifyResponse, error) + BlockAccept(context.Context, *BlockAcceptRequest) (*BlockAcceptResponse, error) + BlockReject(context.Context, *BlockRejectRequest) (*BlockRejectResponse, error) +} + +// UnimplementedVMServer can be embedded to have forward compatible implementations. 
+type UnimplementedVMServer struct { +} + +func (*UnimplementedVMServer) Initialize(ctx context.Context, req *InitializeRequest) (*InitializeResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented") +} +func (*UnimplementedVMServer) Shutdown(ctx context.Context, req *ShutdownRequest) (*ShutdownResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented") +} +func (*UnimplementedVMServer) CreateHandlers(ctx context.Context, req *CreateHandlersRequest) (*CreateHandlersResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method CreateHandlers not implemented") +} +func (*UnimplementedVMServer) BuildBlock(ctx context.Context, req *BuildBlockRequest) (*BuildBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BuildBlock not implemented") +} +func (*UnimplementedVMServer) ParseBlock(ctx context.Context, req *ParseBlockRequest) (*ParseBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ParseBlock not implemented") +} +func (*UnimplementedVMServer) GetBlock(ctx context.Context, req *GetBlockRequest) (*GetBlockResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method GetBlock not implemented") +} +func (*UnimplementedVMServer) SetPreference(ctx context.Context, req *SetPreferenceRequest) (*SetPreferenceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method SetPreference not implemented") +} +func (*UnimplementedVMServer) LastAccepted(ctx context.Context, req *LastAcceptedRequest) (*LastAcceptedResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method LastAccepted not implemented") +} +func (*UnimplementedVMServer) BlockVerify(ctx context.Context, req *BlockVerifyRequest) (*BlockVerifyResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BlockVerify not implemented") +} +func (*UnimplementedVMServer) BlockAccept(ctx context.Context, req 
*BlockAcceptRequest) (*BlockAcceptResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BlockAccept not implemented") +} +func (*UnimplementedVMServer) BlockReject(ctx context.Context, req *BlockRejectRequest) (*BlockRejectResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method BlockReject not implemented") +} + +func RegisterVMServer(s *grpc.Server, srv VMServer) { + s.RegisterService(&_VM_serviceDesc, srv) +} + +func _VM_Initialize_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InitializeRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).Initialize(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/Initialize", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).Initialize(ctx, req.(*InitializeRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ShutdownRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).Shutdown(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/Shutdown", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).Shutdown(ctx, req.(*ShutdownRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_CreateHandlers_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateHandlersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).CreateHandlers(ctx, in) + } 
+ info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/CreateHandlers", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).CreateHandlers(ctx, req.(*CreateHandlersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_BuildBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BuildBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).BuildBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/BuildBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).BuildBlock(ctx, req.(*BuildBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_ParseBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ParseBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).ParseBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/ParseBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).ParseBlock(ctx, req.(*ParseBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_GetBlock_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetBlockRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).GetBlock(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/GetBlock", + } + handler := func(ctx context.Context, req interface{}) (interface{}, 
error) { + return srv.(VMServer).GetBlock(ctx, req.(*GetBlockRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_SetPreference_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SetPreferenceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).SetPreference(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/SetPreference", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).SetPreference(ctx, req.(*SetPreferenceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_LastAccepted_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(LastAcceptedRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).LastAccepted(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/LastAccepted", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).LastAccepted(ctx, req.(*LastAcceptedRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_BlockVerify_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BlockVerifyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).BlockVerify(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/BlockVerify", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).BlockVerify(ctx, req.(*BlockVerifyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func 
_VM_BlockAccept_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BlockAcceptRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).BlockAccept(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/BlockAccept", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).BlockAccept(ctx, req.(*BlockAcceptRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _VM_BlockReject_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(BlockRejectRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(VMServer).BlockReject(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/proto.VM/BlockReject", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(VMServer).BlockReject(ctx, req.(*BlockRejectRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _VM_serviceDesc = grpc.ServiceDesc{ + ServiceName: "proto.VM", + HandlerType: (*VMServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Initialize", + Handler: _VM_Initialize_Handler, + }, + { + MethodName: "Shutdown", + Handler: _VM_Shutdown_Handler, + }, + { + MethodName: "CreateHandlers", + Handler: _VM_CreateHandlers_Handler, + }, + { + MethodName: "BuildBlock", + Handler: _VM_BuildBlock_Handler, + }, + { + MethodName: "ParseBlock", + Handler: _VM_ParseBlock_Handler, + }, + { + MethodName: "GetBlock", + Handler: _VM_GetBlock_Handler, + }, + { + MethodName: "SetPreference", + Handler: _VM_SetPreference_Handler, + }, + { + MethodName: "LastAccepted", + Handler: _VM_LastAccepted_Handler, + }, + { + MethodName: "BlockVerify", + Handler: 
_VM_BlockVerify_Handler, + }, + { + MethodName: "BlockAccept", + Handler: _VM_BlockAccept_Handler, + }, + { + MethodName: "BlockReject", + Handler: _VM_BlockReject_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "vm.proto", +} diff --git a/vms/rpcchainvm/proto/vm.proto b/vms/rpcchainvm/proto/vm.proto new file mode 100644 index 0000000..649c66b --- /dev/null +++ b/vms/rpcchainvm/proto/vm.proto @@ -0,0 +1,100 @@ +syntax = "proto3"; +package proto; + +message InitializeRequest { + uint32 dbServer = 1; + bytes genesisBytes = 2; + uint32 engineServer = 3; +} + +message InitializeResponse {} + +message ShutdownRequest {} + +message ShutdownResponse {} + +message CreateHandlersRequest {} + +message CreateHandlersResponse { + repeated Handler handlers = 1; +} + +message Handler { + string prefix = 1; + uint32 lockOptions = 2; + uint32 server = 3; +} + +message BuildBlockRequest {} + +message BuildBlockResponse { + bytes id = 1; + bytes parentID = 2; + bytes bytes = 3; + // status is always processing +} + +message ParseBlockRequest { + bytes bytes = 1; +} + +message ParseBlockResponse { + bytes id = 1; + bytes parentID = 2; + uint32 status = 3; +} + +message GetBlockRequest { + bytes id = 1; +} + +message GetBlockResponse { + bytes parentID = 1; + bytes bytes = 2; + uint32 status = 3; +} + +message SetPreferenceRequest { + bytes id = 1; +} + +message SetPreferenceResponse {} + +message LastAcceptedRequest {} + +message LastAcceptedResponse { + bytes id = 1; +} + +message BlockVerifyRequest { + bytes id = 1; +} + +message BlockVerifyResponse {} + +message BlockAcceptRequest { + bytes id = 1; +} + +message BlockAcceptResponse {} + +message BlockRejectRequest { + bytes id = 1; +} + +message BlockRejectResponse {} + +service VM { + rpc Initialize(InitializeRequest) returns (InitializeResponse); + rpc Shutdown(ShutdownRequest) returns (ShutdownResponse); + rpc CreateHandlers(CreateHandlersRequest) returns (CreateHandlersResponse); + rpc 
BuildBlock(BuildBlockRequest) returns (BuildBlockResponse); + rpc ParseBlock(ParseBlockRequest) returns (ParseBlockResponse); + rpc GetBlock(GetBlockRequest) returns (GetBlockResponse); + rpc SetPreference(SetPreferenceRequest) returns (SetPreferenceResponse); + rpc LastAccepted(LastAcceptedRequest) returns (LastAcceptedResponse); + + rpc BlockVerify(BlockVerifyRequest) returns (BlockVerifyResponse); + rpc BlockAccept(BlockAcceptRequest) returns (BlockAcceptResponse); + rpc BlockReject(BlockRejectRequest) returns (BlockRejectResponse); +} \ No newline at end of file diff --git a/vms/rpcchainvm/vm.go b/vms/rpcchainvm/vm.go new file mode 100644 index 0000000..083fbd3 --- /dev/null +++ b/vms/rpcchainvm/vm.go @@ -0,0 +1,50 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package rpcchainvm + +import ( + "golang.org/x/net/context" + + "google.golang.org/grpc" + + "github.com/hashicorp/go-plugin" + + "github.com/ava-labs/gecko/snow/engine/snowman" + "github.com/ava-labs/gecko/vms/rpcchainvm/proto" +) + +// Handshake is a common handshake that is shared by plugin and host. +var Handshake = plugin.HandshakeConfig{ + ProtocolVersion: 1, + MagicCookieKey: "VM_PLUGIN", + MagicCookieValue: "dynamic", +} + +// PluginMap is the map of plugins we can dispense. +var PluginMap = map[string]plugin.Plugin{ + "vm": &Plugin{}, +} + +// Plugin is the implementation of plugin.Plugin so we can serve/consume this. +// We also implement GRPCPlugin so that this plugin can be served over gRPC. +type Plugin struct { + plugin.NetRPCUnsupportedPlugin + // Concrete implementation, written in Go. This is only used for plugins + // that are written in Go. + vm snowman.ChainVM +} + +// New ... +func New(vm snowman.ChainVM) *Plugin { return &Plugin{vm: vm} } + +// GRPCServer ... 
+func (p *Plugin) GRPCServer(broker *plugin.GRPCBroker, s *grpc.Server) error { + proto.RegisterVMServer(s, NewServer(p.vm, broker)) + return nil +} + +// GRPCClient ... +func (p *Plugin) GRPCClient(ctx context.Context, broker *plugin.GRPCBroker, c *grpc.ClientConn) (interface{}, error) { + return NewClient(proto.NewVMClient(c), broker), nil +} diff --git a/vms/rpcchainvm/vm_client.go b/vms/rpcchainvm/vm_client.go new file mode 100644 index 0000000..098241e --- /dev/null +++ b/vms/rpcchainvm/vm_client.go @@ -0,0 +1,336 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package rpcchainvm + +import ( + "context" + "errors" + "sync" + + "google.golang.org/grpc" + + "github.com/hashicorp/go-plugin" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/database/rpcdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/vms/components/missing" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp" + "github.com/ava-labs/gecko/vms/rpcchainvm/messenger" + + dbproto "github.com/ava-labs/gecko/database/rpcdb/proto" + httpproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/proto" + msgproto "github.com/ava-labs/gecko/vms/rpcchainvm/messenger/proto" + vmproto "github.com/ava-labs/gecko/vms/rpcchainvm/proto" +) + +var ( + errUnsupportedFXs = errors.New("unsupported feature extensions") +) + +// VMClient is an implementation of VM that talks over RPC. 
+type VMClient struct { + client vmproto.VMClient + broker *plugin.GRPCBroker + proc *plugin.Client + + db *rpcdb.DatabaseServer + messenger *messenger.Server + + lock sync.Mutex + closed bool + servers []*grpc.Server + conns []*grpc.ClientConn + + ctx *snow.Context + blks map[[32]byte]*BlockClient +} + +// NewClient returns a database instance connected to a remote database instance +func NewClient(client vmproto.VMClient, broker *plugin.GRPCBroker) *VMClient { + return &VMClient{ + client: client, + broker: broker, + } +} + +// SetProcess ... +func (vm *VMClient) SetProcess(proc *plugin.Client) { + vm.proc = proc +} + +// Initialize ... +func (vm *VMClient) Initialize( + ctx *snow.Context, + db database.Database, + genesisBytes []byte, + toEngine chan<- common.Message, + fxs []*common.Fx, +) error { + if len(fxs) != 0 { + return errUnsupportedFXs + } + + vm.ctx = ctx + + vm.db = rpcdb.NewServer(db) + vm.messenger = messenger.NewServer(toEngine) + + // start the db server + dbBrokerID := vm.broker.NextId() + go vm.broker.AcceptAndServe(dbBrokerID, vm.startDBServer) + + // start the messenger server + messengerBrokerID := vm.broker.NextId() + go vm.broker.AcceptAndServe(messengerBrokerID, vm.startMessengerServer) + + _, err := vm.client.Initialize(context.Background(), &vmproto.InitializeRequest{ + DbServer: dbBrokerID, + GenesisBytes: genesisBytes, + EngineServer: messengerBrokerID, + }) + return err +} + +func (vm *VMClient) startDBServer(opts []grpc.ServerOption) *grpc.Server { + vm.lock.Lock() + defer vm.lock.Unlock() + + server := grpc.NewServer(opts...) + + if vm.closed { + server.Stop() + } else { + vm.servers = append(vm.servers, server) + } + + dbproto.RegisterDatabaseServer(server, vm.db) + return server +} + +func (vm *VMClient) startMessengerServer(opts []grpc.ServerOption) *grpc.Server { + vm.lock.Lock() + defer vm.lock.Unlock() + + server := grpc.NewServer(opts...) 
+ + if vm.closed { + server.Stop() + } else { + vm.servers = append(vm.servers, server) + } + + msgproto.RegisterMessengerServer(server, vm.messenger) + return server +} + +// Shutdown ... +func (vm *VMClient) Shutdown() { + vm.lock.Lock() + defer vm.lock.Unlock() + + if vm.closed { + return + } + + vm.closed = true + + vm.client.Shutdown(context.Background(), &vmproto.ShutdownRequest{}) + + for _, server := range vm.servers { + server.Stop() + } + for _, conn := range vm.conns { + conn.Close() + } + + vm.proc.Kill() +} + +// CreateHandlers ... +func (vm *VMClient) CreateHandlers() map[string]*common.HTTPHandler { + vm.lock.Lock() + defer vm.lock.Unlock() + + if vm.closed { + return nil + } + + resp, err := vm.client.CreateHandlers(context.Background(), &vmproto.CreateHandlersRequest{}) + vm.ctx.Log.AssertNoError(err) + + handlers := make(map[string]*common.HTTPHandler, len(resp.Handlers)) + for _, handler := range resp.Handlers { + conn, err := vm.broker.Dial(handler.Server) + vm.ctx.Log.AssertNoError(err) + + vm.conns = append(vm.conns, conn) + handlers[handler.Prefix] = &common.HTTPHandler{ + LockOptions: common.LockOption(handler.LockOptions), + Handler: ghttp.NewClient(httpproto.NewHTTPClient(conn), vm.broker), + } + } + return handlers +} + +// BuildBlock ... +func (vm *VMClient) BuildBlock() (snowman.Block, error) { + resp, err := vm.client.BuildBlock(context.Background(), &vmproto.BuildBlockRequest{}) + if err != nil { + return nil, err + } + + id, err := ids.ToID(resp.Id) + vm.ctx.Log.AssertNoError(err) + parentID, err := ids.ToID(resp.ParentID) + vm.ctx.Log.AssertNoError(err) + + return &BlockClient{ + vm: vm, + id: id, + parentID: parentID, + status: choices.Processing, + bytes: resp.Bytes, + }, nil +} + +// ParseBlock ... 
+func (vm *VMClient) ParseBlock(bytes []byte) (snowman.Block, error) { + resp, err := vm.client.ParseBlock(context.Background(), &vmproto.ParseBlockRequest{ + Bytes: bytes, + }) + if err != nil { + return nil, err + } + + id, err := ids.ToID(resp.Id) + vm.ctx.Log.AssertNoError(err) + + if blk, cached := vm.blks[id.Key()]; cached { + return blk, nil + } + + parentID, err := ids.ToID(resp.ParentID) + vm.ctx.Log.AssertNoError(err) + status := choices.Status(resp.Status) + vm.ctx.Log.AssertDeferredNoError(status.Valid) + + return &BlockClient{ + vm: vm, + id: id, + parentID: parentID, + status: status, + bytes: bytes, + }, nil +} + +// GetBlock ... +func (vm *VMClient) GetBlock(id ids.ID) (snowman.Block, error) { + if blk, cached := vm.blks[id.Key()]; cached { + return blk, nil + } + + resp, err := vm.client.GetBlock(context.Background(), &vmproto.GetBlockRequest{ + Id: id.Bytes(), + }) + if err != nil { + return nil, err + } + + parentID, err := ids.ToID(resp.ParentID) + vm.ctx.Log.AssertNoError(err) + status := choices.Status(resp.Status) + vm.ctx.Log.AssertDeferredNoError(status.Valid) + + return &BlockClient{ + vm: vm, + id: id, + parentID: parentID, + status: status, + bytes: resp.Bytes, + }, nil +} + +// SetPreference ... +func (vm *VMClient) SetPreference(id ids.ID) { + _, err := vm.client.SetPreference(context.Background(), &vmproto.SetPreferenceRequest{ + Id: id.Bytes(), + }) + vm.ctx.Log.AssertNoError(err) +} + +// LastAccepted ... +func (vm *VMClient) LastAccepted() ids.ID { + resp, err := vm.client.LastAccepted(context.Background(), &vmproto.LastAcceptedRequest{}) + vm.ctx.Log.AssertNoError(err) + + id, err := ids.ToID(resp.Id) + vm.ctx.Log.AssertNoError(err) + + return id +} + +// BlockClient is an implementation of Block that talks over RPC. +type BlockClient struct { + vm *VMClient + + id ids.ID + parentID ids.ID + status choices.Status + bytes []byte +} + +// ID ... +func (b *BlockClient) ID() ids.ID { return b.id } + +// Accept ... 
+func (b *BlockClient) Accept() { + delete(b.vm.blks, b.id.Key()) + b.status = choices.Accepted + _, err := b.vm.client.BlockAccept(context.Background(), &vmproto.BlockAcceptRequest{ + Id: b.id.Bytes(), + }) + b.vm.ctx.Log.AssertNoError(err) +} + +// Reject ... +func (b *BlockClient) Reject() { + delete(b.vm.blks, b.id.Key()) + b.status = choices.Rejected + _, err := b.vm.client.BlockReject(context.Background(), &vmproto.BlockRejectRequest{ + Id: b.id.Bytes(), + }) + b.vm.ctx.Log.AssertNoError(err) +} + +// Status ... +func (b *BlockClient) Status() choices.Status { return b.status } + +// Parent ... +func (b *BlockClient) Parent() snowman.Block { + if parent, err := b.vm.GetBlock(b.parentID); err == nil { + return parent + } + return &missing.Block{BlkID: b.parentID} +} + +// Verify ... +func (b *BlockClient) Verify() error { + _, err := b.vm.client.BlockVerify(context.Background(), &vmproto.BlockVerifyRequest{ + Id: b.id.Bytes(), + }) + if err != nil { + return err + } + + b.vm.blks[b.id.Key()] = b + return nil +} + +// Bytes ... +func (b *BlockClient) Bytes() []byte { return b.bytes } diff --git a/vms/rpcchainvm/vm_server.go b/vms/rpcchainvm/vm_server.go new file mode 100644 index 0000000..e83a957 --- /dev/null +++ b/vms/rpcchainvm/vm_server.go @@ -0,0 +1,240 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package rpcchainvm + +import ( + "context" + "sync" + + "google.golang.org/grpc" + + "github.com/hashicorp/go-plugin" + + "github.com/ava-labs/gecko/database/rpcdb" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/snowman" + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp" + "github.com/ava-labs/gecko/vms/rpcchainvm/messenger" + + dbproto "github.com/ava-labs/gecko/database/rpcdb/proto" + httpproto "github.com/ava-labs/gecko/vms/rpcchainvm/ghttp/proto" + msgproto "github.com/ava-labs/gecko/vms/rpcchainvm/messenger/proto" + vmproto "github.com/ava-labs/gecko/vms/rpcchainvm/proto" +) + +// VMServer is a VM that is managed over RPC. +type VMServer struct { + vm snowman.ChainVM + broker *plugin.GRPCBroker + + lock sync.Mutex + closed bool + servers []*grpc.Server + conns []*grpc.ClientConn + + toEngine chan common.Message +} + +// NewServer returns a vm instance connected to a remote vm instance +func NewServer(vm snowman.ChainVM, broker *plugin.GRPCBroker) *VMServer { + return &VMServer{ + vm: vm, + broker: broker, + } +} + +// Initialize ... 
func (vm *VMServer) Initialize(_ context.Context, req *vmproto.InitializeRequest) (*vmproto.InitializeResponse, error) {
	// Dial back to the database and messenger servers the client started.
	dbConn, err := vm.broker.Dial(req.DbServer)
	if err != nil {
		return nil, err
	}
	msgConn, err := vm.broker.Dial(req.EngineServer)
	if err != nil {
		dbConn.Close()
		return nil, err
	}

	dbClient := rpcdb.NewClient(dbproto.NewDatabaseClient(dbConn))
	msgClient := messenger.NewClient(msgproto.NewMessengerClient(msgConn))

	// Bridge the VM's local toEngine channel to the remote engine: forward
	// every message over the messenger connection. The goroutine exits when
	// Shutdown closes toEngine. NOTE(review): Notify's result is dropped —
	// confirm best-effort delivery is intended here.
	toEngine := make(chan common.Message, 1)
	go func() {
		for msg := range toEngine {
			msgClient.Notify(msg)
		}
	}()

	// TODO: Needs to populate a real context
	ctx := snow.DefaultContextTest()

	// On failure, unwind everything created above before reporting the error.
	if err := vm.vm.Initialize(ctx, dbClient, req.GenesisBytes, toEngine, nil); err != nil {
		dbConn.Close()
		msgConn.Close()
		close(toEngine)
		return nil, err
	}

	// NOTE(review): these appends happen without vm.lock held, while
	// Shutdown reads vm.conns under the lock — confirm Initialize can never
	// race with Shutdown.
	vm.conns = append(vm.conns, dbConn)
	vm.conns = append(vm.conns, msgConn)
	vm.toEngine = toEngine
	return &vmproto.InitializeResponse{}, nil
}

// Shutdown shuts the wrapped VM down, stops the toEngine forwarding
// goroutine, and closes every dialed-back connection, accumulating any close
// errors. A no-op if already closed or never initialized.
func (vm *VMServer) Shutdown(_ context.Context, _ *vmproto.ShutdownRequest) (*vmproto.ShutdownResponse, error) {
	vm.lock.Lock()
	defer vm.lock.Unlock()

	if vm.closed || vm.toEngine == nil {
		return &vmproto.ShutdownResponse{}, nil
	}

	vm.closed = true

	vm.vm.Shutdown()
	close(vm.toEngine)

	errs := wrappers.Errs{}
	for _, conn := range vm.conns {
		errs.Add(conn.Close())
	}
	return &vmproto.ShutdownResponse{}, errs.Err
}

// CreateHandlers collects the wrapped VM's HTTP handlers and, for each one,
// starts a gRPC server the client can dial to reach that handler. The
// response maps each URL prefix to its lock options and server ID.
func (vm *VMServer) CreateHandlers(_ context.Context, req *vmproto.CreateHandlersRequest) (*vmproto.CreateHandlersResponse, error) {
	handlers := vm.vm.CreateHandlers()
	resp := &vmproto.CreateHandlersResponse{}
	for prefix, h := range handlers {
		handler := h // capture a per-iteration copy for the closure below

		// start a server to proxy this handler's HTTP traffic
		serverID := vm.broker.NextId()
		go vm.broker.AcceptAndServe(serverID, func(opts []grpc.ServerOption) *grpc.Server {
			vm.lock.Lock()
			defer vm.lock.Unlock()

			server := grpc.NewServer(opts...)

			// If Shutdown already ran, stop the server immediately;
			// otherwise track it so it can be stopped later.
			if vm.closed {
				server.Stop()
			} else {
				vm.servers = append(vm.servers, server)
			}

			httpproto.RegisterHTTPServer(server, ghttp.NewServer(handler.Handler, vm.broker))
			return server
		})

		resp.Handlers = append(resp.Handlers, &vmproto.Handler{
			Prefix:      prefix,
			LockOptions: uint32(handler.LockOptions),
			Server:      serverID,
		})
	}
	return resp, nil
}

// BuildBlock forwards a block-build request to the wrapped VM and returns the
// new block's ID, parent, and bytes (status is implicitly Processing).
func (vm *VMServer) BuildBlock(_ context.Context, _ *vmproto.BuildBlockRequest) (*vmproto.BuildBlockResponse, error) {
	blk, err := vm.vm.BuildBlock()
	if err != nil {
		return nil, err
	}
	return &vmproto.BuildBlockResponse{
		Id:       blk.ID().Bytes(),
		ParentID: blk.Parent().ID().Bytes(),
		Bytes:    blk.Bytes(),
	}, nil
}

// ParseBlock parses the given bytes with the wrapped VM and returns the
// resulting block's ID, parent, and status.
func (vm *VMServer) ParseBlock(_ context.Context, req *vmproto.ParseBlockRequest) (*vmproto.ParseBlockResponse, error) {
	blk, err := vm.vm.ParseBlock(req.Bytes)
	if err != nil {
		return nil, err
	}
	return &vmproto.ParseBlockResponse{
		Id:       blk.ID().Bytes(),
		ParentID: blk.Parent().ID().Bytes(),
		Status:   uint32(blk.Status()),
	}, nil
}

// GetBlock looks up the block with the requested ID and returns its parent,
// bytes, and status.
func (vm *VMServer) GetBlock(_ context.Context, req *vmproto.GetBlockRequest) (*vmproto.GetBlockResponse, error) {
	id, err := ids.ToID(req.Id)
	if err != nil {
		return nil, err
	}
	blk, err := vm.vm.GetBlock(id)
	if err != nil {
		return nil, err
	}
	return &vmproto.GetBlockResponse{
		ParentID: blk.Parent().ID().Bytes(),
		Bytes:    blk.Bytes(),
		Status:   uint32(blk.Status()),
	}, nil
}

// SetPreference tells the wrapped VM which block ID is currently preferred.
func (vm *VMServer) SetPreference(_ context.Context, req *vmproto.SetPreferenceRequest) (*vmproto.SetPreferenceResponse, error) {
	id, err := ids.ToID(req.Id)
	if err != nil {
		return nil, err
	}
	vm.vm.SetPreference(id)
	return &vmproto.SetPreferenceResponse{}, nil
}

// LastAccepted ...
+func (vm *VMServer) LastAccepted(_ context.Context, _ *vmproto.LastAcceptedRequest) (*vmproto.LastAcceptedResponse, error) { + return &vmproto.LastAcceptedResponse{Id: vm.vm.LastAccepted().Bytes()}, nil +} + +// BlockVerify ... +func (vm *VMServer) BlockVerify(_ context.Context, req *vmproto.BlockVerifyRequest) (*vmproto.BlockVerifyResponse, error) { + id, err := ids.ToID(req.Id) + if err != nil { + return nil, err + } + blk, err := vm.vm.GetBlock(id) + if err != nil { + return nil, err + } + return &vmproto.BlockVerifyResponse{}, blk.Verify() +} + +// BlockAccept ... +func (vm *VMServer) BlockAccept(_ context.Context, req *vmproto.BlockAcceptRequest) (*vmproto.BlockAcceptResponse, error) { + id, err := ids.ToID(req.Id) + if err != nil { + return nil, err + } + blk, err := vm.vm.GetBlock(id) + if err != nil { + return nil, err + } + blk.Accept() + return &vmproto.BlockAcceptResponse{}, nil +} + +// BlockReject ... +func (vm *VMServer) BlockReject(_ context.Context, req *vmproto.BlockRejectRequest) (*vmproto.BlockRejectResponse, error) { + id, err := ids.ToID(req.Id) + if err != nil { + return nil, err + } + blk, err := vm.vm.GetBlock(id) + if err != nil { + return nil, err + } + blk.Reject() + return &vmproto.BlockRejectResponse{}, nil +} diff --git a/vms/secp256k1fx/credential.go b/vms/secp256k1fx/credential.go index 2b1cfc7..6420836 100644 --- a/vms/secp256k1fx/credential.go +++ b/vms/secp256k1fx/credential.go @@ -15,7 +15,7 @@ var ( // Credential ... type Credential struct { - Sigs [][crypto.SECP256K1RSigLen]byte `serialize:"true"` + Sigs [][crypto.SECP256K1RSigLen]byte `serialize:"true" json:"signatures"` } // Verify ... diff --git a/vms/secp256k1fx/factory.go b/vms/secp256k1fx/factory.go index da2e022..cdcb200 100644 --- a/vms/secp256k1fx/factory.go +++ b/vms/secp256k1fx/factory.go @@ -16,4 +16,4 @@ var ( type Factory struct{} // New ... 
-func (f *Factory) New() interface{} { return &Fx{} } +func (f *Factory) New() (interface{}, error) { return &Fx{}, nil } diff --git a/vms/secp256k1fx/factory_test.go b/vms/secp256k1fx/factory_test.go new file mode 100644 index 0000000..1c92b6a --- /dev/null +++ b/vms/secp256k1fx/factory_test.go @@ -0,0 +1,17 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "testing" +) + +func TestFactory(t *testing.T) { + factory := Factory{} + if fx, err := factory.New(); err != nil { + t.Fatal(err) + } else if fx == nil { + t.Fatalf("Factory.New returned nil") + } +} diff --git a/vms/secp256k1fx/fx.go b/vms/secp256k1fx/fx.go index ff54b91..e7571b5 100644 --- a/vms/secp256k1fx/fx.go +++ b/vms/secp256k1fx/fx.go @@ -8,23 +8,22 @@ import ( "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/verify" ) var ( errWrongVMType = errors.New("wrong vm type") errWrongTxType = errors.New("wrong tx type") + errWrongOpType = errors.New("wrong operation type") errWrongUTXOType = errors.New("wrong utxo type") errWrongOutputType = errors.New("wrong output type") errWrongInputType = errors.New("wrong input type") errWrongCredentialType = errors.New("wrong credential type") - errWrongNumberOfOutputs = errors.New("wrong number of outputs for an operation") - errWrongNumberOfInputs = errors.New("wrong number of inputs for an operation") - errWrongNumberOfCredentials = errors.New("wrong number of credentials for an operation") - - errWrongMintCreated = errors.New("wrong mint output created from the operation") + errWrongNumberOfUTXOs = errors.New("wrong number of utxos for the operation") + errWrongMintCreated = errors.New("wrong mint output created from the operation") errWrongAmounts = errors.New("input is consuming a different amount than expected") errTimelocked = errors.New("output is time 
locked") errTooManySigners = errors.New("input has more signers than expected") @@ -33,93 +32,85 @@ var ( errWrongSigner = errors.New("credential does not produce expected signer") ) -// Fx ... +// Fx describes the secp256k1 feature extension type Fx struct { - vm VM - secpFactory crypto.FactorySECP256K1R + VM VM + SECPFactory crypto.FactorySECP256K1R } // Initialize ... func (fx *Fx) Initialize(vmIntf interface{}) error { + if err := fx.InitializeVM(vmIntf); err != nil { + return err + } + + log := fx.VM.Logger() + log.Debug("Initializing secp561k1 fx") + + c := fx.VM.Codec() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&TransferInput{}), + c.RegisterType(&MintOutput{}), + c.RegisterType(&TransferOutput{}), + c.RegisterType(&MintOperation{}), + c.RegisterType(&Credential{}), + ) + return errs.Err +} + +// InitializeVM ... +func (fx *Fx) InitializeVM(vmIntf interface{}) error { vm, ok := vmIntf.(VM) if !ok { return errWrongVMType } - - c := vm.Codec() - c.RegisterType(&MintOutput{}) - c.RegisterType(&TransferOutput{}) - c.RegisterType(&MintInput{}) - c.RegisterType(&TransferInput{}) - c.RegisterType(&Credential{}) - - fx.vm = vm + fx.VM = vm return nil } // VerifyOperation ... 
-func (fx *Fx) VerifyOperation(txIntf interface{}, utxosIntf, insIntf, credsIntf, outsIntf []interface{}) error { +func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error { tx, ok := txIntf.(Tx) if !ok { return errWrongTxType } - - if len(outsIntf) != 2 { - return errWrongNumberOfOutputs - } - if len(utxosIntf) != 1 || len(insIntf) != 1 { - return errWrongNumberOfInputs - } - if len(credsIntf) != 1 { - return errWrongNumberOfCredentials - } - - utxo, ok := utxosIntf[0].(*MintOutput) + op, ok := opIntf.(*MintOperation) if !ok { - return errWrongUTXOType + return errWrongOpType } - in, ok := insIntf[0].(*MintInput) - if !ok { - return errWrongInputType - } - cred, ok := credsIntf[0].(*Credential) + cred, ok := credIntf.(*Credential) if !ok { return errWrongCredentialType } - newMint, ok := outsIntf[0].(*MintOutput) - if !ok { - return errWrongOutputType + if len(utxosIntf) != 1 { + return errWrongNumberOfUTXOs } - newOutput, ok := outsIntf[1].(*TransferOutput) + out, ok := utxosIntf[0].(*MintOutput) if !ok { - return errWrongOutputType + return errWrongUTXOType } - - return fx.verifyOperation(tx, utxo, in, cred, newMint, newOutput) + return fx.verifyOperation(tx, op, cred, out) } -func (fx *Fx) verifyOperation(tx Tx, utxo *MintOutput, in *MintInput, cred *Credential, newMint *MintOutput, newOutput *TransferOutput) error { - if err := verify.All(utxo, in, cred, newMint, newOutput); err != nil { +func (fx *Fx) verifyOperation(tx Tx, op *MintOperation, cred *Credential, utxo *MintOutput) error { + if err := verify.All(op, cred, utxo); err != nil { return err } - if !utxo.Equals(&newMint.OutputOwners) { + if !utxo.Equals(&op.MintOutput.OutputOwners) { return errWrongMintCreated } - return fx.verifyCredentials(tx, &utxo.OutputOwners, &in.Input, cred) + return fx.VerifyCredentials(tx, &op.MintInput, cred, &utxo.OutputOwners) } // VerifyTransfer ... 
-func (fx *Fx) VerifyTransfer(txIntf, utxoIntf, inIntf, credIntf interface{}) error { +func (fx *Fx) VerifyTransfer(txIntf, inIntf, credIntf, utxoIntf interface{}) error { tx, ok := txIntf.(Tx) if !ok { return errWrongTxType } - utxo, ok := utxoIntf.(*TransferOutput) - if !ok { - return errWrongUTXOType - } in, ok := inIntf.(*TransferInput) if !ok { return errWrongInputType @@ -128,15 +119,20 @@ func (fx *Fx) VerifyTransfer(txIntf, utxoIntf, inIntf, credIntf interface{}) err if !ok { return errWrongCredentialType } - return fx.verifyTransfer(tx, utxo, in, cred) + out, ok := utxoIntf.(*TransferOutput) + if !ok { + return errWrongUTXOType + } + return fx.VerifySpend(tx, in, cred, out) } -func (fx *Fx) verifyTransfer(tx Tx, utxo *TransferOutput, in *TransferInput, cred *Credential) error { +// VerifySpend ensures that the utxo can be sent to any address +func (fx *Fx) VerifySpend(tx Tx, in *TransferInput, cred *Credential, utxo *TransferOutput) error { if err := verify.All(utxo, in, cred); err != nil { return err } - clock := fx.vm.Clock() + clock := fx.VM.Clock() switch { case utxo.Amt != in.Amt: return errWrongAmounts @@ -144,10 +140,12 @@ func (fx *Fx) verifyTransfer(tx Tx, utxo *TransferOutput, in *TransferInput, cre return errTimelocked } - return fx.verifyCredentials(tx, &utxo.OutputOwners, &in.Input, cred) + return fx.VerifyCredentials(tx, &in.Input, cred, &utxo.OutputOwners) } -func (fx *Fx) verifyCredentials(tx Tx, out *OutputOwners, in *Input, cred *Credential) error { +// VerifyCredentials ensures that the output can be spent by the input with the +// credential. A nil return values means the output can be spent. 
+func (fx *Fx) VerifyCredentials(tx Tx, in *Input, cred *Credential, out *OutputOwners) error { numSigs := len(in.SigIndices) switch { case out.Threshold < uint32(numSigs): @@ -164,7 +162,7 @@ func (fx *Fx) verifyCredentials(tx Tx, out *OutputOwners, in *Input, cred *Crede for i, index := range in.SigIndices { sig := cred.Sigs[i] - pk, err := fx.secpFactory.RecoverHashPublicKey(txHash, sig[:]) + pk, err := fx.SECPFactory.RecoverHashPublicKey(txHash, sig[:]) if err != nil { return err } diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index 0d0e9d0..18cd7aa 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -10,6 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/components/codec" ) @@ -40,6 +41,8 @@ func (vm *testVM) Codec() codec.Codec { return codec.NewDefault() } func (vm *testVM) Clock() *timer.Clock { return &vm.clock } +func (vm *testVM) Logger() logging.Logger { return logging.NoLog{} } + type testCodec struct{} func (c *testCodec) RegisterStruct(interface{}) {} @@ -98,8 +101,7 @@ func TestFxVerifyTransfer(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err != nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err != nil { t.Fatal(err) } } @@ -134,8 +136,7 @@ func TestFxVerifyTransferNilTx(t *testing.T) { }, } - err := fx.VerifyTransfer(nil, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(nil, in, cred, out); err == nil { t.Fatalf("Should have failed verification due to a nil tx") } } @@ -163,8 +164,7 @@ func TestFxVerifyTransferNilOutput(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, nil, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, nil); err == nil { t.Fatalf("Should have failed verification due to a nil output") } } @@ -196,8 +196,7 @@ func 
TestFxVerifyTransferNilInput(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, nil, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, nil, cred, out); err == nil { t.Fatalf("Should have failed verification due to a nil input") } } @@ -230,8 +229,7 @@ func TestFxVerifyTransferNilCredential(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, nil) - if err == nil { + if err := fx.VerifyTransfer(tx, in, nil, out); err == nil { t.Fatalf("Should have failed verification due to a nil credential") } } @@ -269,8 +267,7 @@ func TestFxVerifyTransferInvalidOutput(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to an invalid output") } } @@ -308,8 +305,7 @@ func TestFxVerifyTransferWrongAmounts(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to different amounts") } } @@ -347,8 +343,7 @@ func TestFxVerifyTransferTimelocked(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to a timelocked output") } } @@ -387,8 +382,7 @@ func TestFxVerifyTransferTooManySigners(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to too many signers") } } @@ -424,8 +418,7 @@ func TestFxVerifyTransferTooFewSigners(t *testing.T) { Sigs: [][crypto.SECP256K1RSigLen]byte{}, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to too few signers") } } @@ -464,8 +457,7 @@ func TestFxVerifyTransferMismatchedSigners(t *testing.T) { }, } - err := 
fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to too mismatched signers") } } @@ -503,8 +495,7 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to an invalid signature") } } @@ -542,8 +533,7 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) { }, } - err := fx.VerifyTransfer(tx, out, in, cred) - if err == nil { + if err := fx.VerifyTransfer(tx, in, cred, out); err == nil { t.Fatalf("Should have errored due to a wrong signer") } } @@ -567,40 +557,37 @@ func TestFxVerifyOperation(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + err := fx.VerifyOperation(tx, op, cred, utxos) if err != nil { t.Fatal(err) } @@ -622,94 +609,43 @@ func TestFxVerifyOperationUnknownTx(t *testing.T) { }, }, } - in := &MintInput{ - Input: 
Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(nil, utxos, ins, creds, outs) + err := fx.VerifyOperation(nil, op, cred, utxos) if err == nil { t.Fatalf("Should have errored due to an invalid tx type") } } -func TestFxVerifyOperationWrongNumberOfOutputs(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } - cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - sigBytes, - }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput} - err := fx.VerifyOperation(tx, utxos, 
ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to a wrong number of outputs") - } -} - -func TestFxVerifyOperationWrongNumberOfInputs(t *testing.T) { +func TestFxVerifyOperationUnknownOperation(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -733,35 +669,15 @@ func TestFxVerifyOperationWrongNumberOfInputs(t *testing.T) { sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, nil, creds, outs) + err := fx.VerifyOperation(tx, nil, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong number of inputs") + t.Fatalf("Should have errored due to an invalid operation type") } } -func TestFxVerifyOperationWrongNumberOfCredentials(t *testing.T) { +func TestFxVerifyOperationUnknownCredential(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -780,40 +696,38 @@ func TestFxVerifyOperationWrongNumberOfCredentials(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + 
TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, } utxos := []interface{}{utxo} - ins := []interface{}{in} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, nil, outs) + err := fx.VerifyOperation(tx, op, nil, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong number of credentials") + t.Fatalf("Should have errored due to an invalid credential type") } } -func TestFxVerifyOperationWrongUTXOType(t *testing.T) { +func TestFxVerifyOperationWrongNumberOfUTXOs(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -824,9 +738,7 @@ func TestFxVerifyOperationWrongUTXOType(t *testing.T) { tx := &testTx{ bytes: txBytes, } - utxo := &TransferOutput{ - Amt: 1, - Locktime: 0, + utxo := &MintOutput{ OutputOwners: OutputOwners{ Threshold: 1, Addrs: []ids.ShortID{ @@ -834,46 +746,43 @@ func TestFxVerifyOperationWrongUTXOType(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - 
outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + utxos := []interface{}{utxo, utxo} + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong utxo type") + t.Fatalf("Should have errored due to a wrong number of utxos") } } -func TestFxVerifyOperationWrongInputType(t *testing.T) { +func TestFxVerifyOperationUnknownUTXOType(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -884,18 +793,27 @@ func TestFxVerifyOperationWrongInputType(t *testing.T) { tx := &testTx{ bytes: txBytes, } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + op := &MintOperation{ + MintInput: Input{ + SigIndices: []uint32{0}, + }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, - } - in := &TransferInput{ - Amt: 1, - Input: Input{ - SigIndices: []uint32{0}, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, }, } cred := &Credential{ @@ -903,36 +821,15 @@ func TestFxVerifyOperationWrongInputType(t *testing.T) { sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + utxos := []interface{}{nil} + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should 
have errored due to a wrong input type") + t.Fatalf("Should have errored due to an invalid utxo type") } } -func TestFxVerifyOperationWrongCredentialType(t *testing.T) { +func TestFxVerifyOperationInvalidOperationVerify(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -951,101 +848,40 @@ func TestFxVerifyOperationWrongCredentialType(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, }, }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, }, }, } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{nil} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to a wrong credential type") - } -} - -func TestFxVerifyOperationWrongMintType(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - 
OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a wrong output type") + t.Fatalf("Should have errored due to a failed verify") } } -func TestFxVerifyOperationWrongTransferType(t *testing.T) { +func TestFxVerifyOperationMismatchedMintOutputs(t *testing.T) { vm := testVM{} date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) vm.clock.Set(date) @@ -1064,152 +900,33 @@ func TestFxVerifyOperationWrongTransferType(t *testing.T) { }, }, } - in := &MintInput{ - Input: Input{ + op := &MintOperation{ + MintInput: Input{ SigIndices: []uint32{0}, }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{}, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, } cred := &Credential{ Sigs: [][crypto.SECP256K1RSigLen]byte{ sigBytes, }, } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) + err := fx.VerifyOperation(tx, op, cred, utxos) if err == nil { - t.Fatalf("Should have errored due to a 
wrong output type") - } -} - -func TestFxVerifyOperationInvalid(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } - cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - sigBytes, - }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 0, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to an invalid output") - } -} - -func TestFxVerifyOperationMismatchedMintOutput(t *testing.T) { - vm := testVM{} - date := time.Date(2019, time.January, 19, 16, 25, 17, 3, time.UTC) - vm.clock.Set(date) - fx := Fx{} - if err := fx.Initialize(&vm); err != nil { - t.Fatal(err) - } - tx := &testTx{ - bytes: txBytes, - } - utxo := &MintOutput{ - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: []ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - in := &MintInput{ - Input: Input{ - SigIndices: []uint32{0}, - }, - } - cred := &Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - sigBytes, - }, - } - mintOutput := &MintOutput{ - OutputOwners: OutputOwners{ - Addrs: []ids.ShortID{}, - }, - } - transferOutput := &TransferOutput{ - Amt: 1, - Locktime: 0, - OutputOwners: OutputOwners{ - Threshold: 1, - Addrs: 
[]ids.ShortID{ - ids.NewShortID(addrBytes), - }, - }, - } - - utxos := []interface{}{utxo} - ins := []interface{}{in} - creds := []interface{}{cred} - outs := []interface{}{mintOutput, transferOutput} - err := fx.VerifyOperation(tx, utxos, ins, creds, outs) - if err == nil { - t.Fatalf("Should have errored due to a mismatched mint output") + t.Fatalf("Should have errored due to the wrong MintOutput being created") } } diff --git a/vms/secp256k1fx/input.go b/vms/secp256k1fx/input.go index 0a6ce66..1727c2d 100644 --- a/vms/secp256k1fx/input.go +++ b/vms/secp256k1fx/input.go @@ -16,7 +16,7 @@ var ( // Input ... type Input struct { - SigIndices []uint32 `serialize:"true"` + SigIndices []uint32 `serialize:"true" json:"signatureIndices"` } // Verify this input is syntactically valid diff --git a/vms/secp256k1fx/mint_operation.go b/vms/secp256k1fx/mint_operation.go new file mode 100644 index 0000000..2f612f6 --- /dev/null +++ b/vms/secp256k1fx/mint_operation.go @@ -0,0 +1,36 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "errors" + + "github.com/ava-labs/gecko/vms/components/verify" +) + +var ( + errNilMintOperation = errors.New("nil mint operation") +) + +// MintOperation ... +type MintOperation struct { + MintInput Input `serialize:"true" json:"mintInput"` + MintOutput MintOutput `serialize:"true" json:"mintOutput"` + TransferOutput TransferOutput `serialize:"true" json:"transferOutput"` +} + +// Outs ... +func (op *MintOperation) Outs() []verify.Verifiable { + return []verify.Verifiable{&op.MintOutput, &op.TransferOutput} +} + +// Verify ... 
+func (op *MintOperation) Verify() error { + switch { + case op == nil: + return errNilMintOperation + default: + return verify.All(&op.MintInput, &op.MintOutput, &op.TransferOutput) + } +} diff --git a/vms/secp256k1fx/mint_operation_test.go b/vms/secp256k1fx/mint_operation_test.go new file mode 100644 index 0000000..a0a1f20 --- /dev/null +++ b/vms/secp256k1fx/mint_operation_test.go @@ -0,0 +1,45 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package secp256k1fx + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestMintOperationVerifyNil(t *testing.T) { + op := (*MintOperation)(nil) + if err := op.Verify(); err == nil { + t.Fatalf("MintOperation.Verify should have returned an error due to an nil operation") + } +} + +func TestMintOperationOuts(t *testing.T) { + op := &MintOperation{ + MintInput: Input{ + SigIndices: []uint32{0}, + }, + MintOutput: MintOutput{ + OutputOwners: OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{ + ids.NewShortID(addrBytes), + }, + }, + }, + TransferOutput: TransferOutput{ + Amt: 1, + Locktime: 0, + OutputOwners: OutputOwners{ + Threshold: 1, + }, + }, + } + + outs := op.Outs() + if len(outs) != 2 { + t.Fatalf("Wrong number of outputs") + } +} diff --git a/vms/secp256k1fx/output_owners.go b/vms/secp256k1fx/output_owners.go index 104a7a4..9cefecb 100644 --- a/vms/secp256k1fx/output_owners.go +++ b/vms/secp256k1fx/output_owners.go @@ -18,8 +18,8 @@ var ( // OutputOwners ... 
type OutputOwners struct { - Threshold uint32 `serialize:"true"` - Addrs []ids.ShortID `serialize:"true"` + Threshold uint32 `serialize:"true" json:"threshold"` + Addrs []ids.ShortID `serialize:"true" json:"addresses"` } // Addresses returns the addresses that manage this output diff --git a/vms/secp256k1fx/transfer_input.go b/vms/secp256k1fx/transfer_input.go index 5e44f76..0c2a104 100644 --- a/vms/secp256k1fx/transfer_input.go +++ b/vms/secp256k1fx/transfer_input.go @@ -13,7 +13,7 @@ var ( // TransferInput ... type TransferInput struct { - Amt uint64 `serialize:"true"` + Amt uint64 `serialize:"true" json:"amount"` Input `serialize:"true"` } diff --git a/vms/secp256k1fx/transfer_output.go b/vms/secp256k1fx/transfer_output.go index 69f2f20..6e31ce0 100644 --- a/vms/secp256k1fx/transfer_output.go +++ b/vms/secp256k1fx/transfer_output.go @@ -13,8 +13,8 @@ var ( // TransferOutput ... type TransferOutput struct { - Amt uint64 `serialize:"true"` - Locktime uint64 `serialize:"true"` + Amt uint64 `serialize:"true" json:"amount"` + Locktime uint64 `serialize:"true" json:"locktime"` OutputOwners `serialize:"true"` } diff --git a/vms/secp256k1fx/tx.go b/vms/secp256k1fx/tx.go index e2ac0f7..7ee304b 100644 --- a/vms/secp256k1fx/tx.go +++ b/vms/secp256k1fx/tx.go @@ -7,3 +7,9 @@ package secp256k1fx type Tx interface { UnsignedBytes() []byte } + +// TestTx is a minimal implementation of a Tx +type TestTx struct{ Bytes []byte } + +// UnsignedBytes returns Bytes +func (tx *TestTx) UnsignedBytes() []byte { return tx.Bytes } diff --git a/vms/secp256k1fx/vm.go b/vms/secp256k1fx/vm.go index 1083af7..bb59166 100644 --- a/vms/secp256k1fx/vm.go +++ b/vms/secp256k1fx/vm.go @@ -4,6 +4,7 @@ package secp256k1fx import ( + "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/vms/components/codec" ) @@ -12,4 +13,21 @@ import ( type VM interface { Codec() codec.Codec Clock() *timer.Clock + Logger() logging.Logger } + +// TestVM is a minimal 
implementation of a VM +type TestVM struct { + CLK *timer.Clock + Code codec.Codec + Log logging.Logger +} + +// Clock returns CLK +func (vm *TestVM) Clock() *timer.Clock { return vm.CLK } + +// Codec returns Code +func (vm *TestVM) Codec() codec.Codec { return vm.Code } + +// Logger returns Log +func (vm *TestVM) Logger() logging.Logger { return vm.Log } diff --git a/vms/spchainvm/consensus_benchmark_test.go b/vms/spchainvm/consensus_benchmark_test.go index 08c63ab..aa80e6d 100644 --- a/vms/spchainvm/consensus_benchmark_test.go +++ b/vms/spchainvm/consensus_benchmark_test.go @@ -86,18 +86,19 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) { Context: ctx, Validators: vdrs, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: uint64(beacons.Len()/2 + 1), Sender: &sender, }, Blocked: blocked, VM: vm, }, Params: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 20, - BetaRogue: 20, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 20, + BetaRogue: 20, + ConcurrentRepolls: 1, }, Consensus: &smcon.Topological{}, }) @@ -217,18 +218,19 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) { Context: ctx, Validators: vdrs, Beacons: beacons, - Alpha: (beacons.Len() + 1) / 2, + Alpha: uint64(beacons.Len()/2 + 1), Sender: &sender, }, Blocked: blocked, VM: vm, }, Params: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 20, - BetaRogue: 20, + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 20, + BetaRogue: 20, + ConcurrentRepolls: 1, }, Consensus: &smcon.Topological{}, }) diff --git a/vms/spchainvm/factory.go b/vms/spchainvm/factory.go index 6cb6fe2..0b8fa98 100644 --- a/vms/spchainvm/factory.go +++ b/vms/spchainvm/factory.go @@ -16,4 +16,4 @@ var ( type Factory struct{} // New ... 
-func (f *Factory) New() interface{} { return &VM{} } +func (f *Factory) New() (interface{}, error) { return &VM{}, nil } diff --git a/vms/spchainvm/key_chain.go b/vms/spchainvm/keychain.go similarity index 58% rename from vms/spchainvm/key_chain.go rename to vms/spchainvm/keychain.go index 03c1879..00eee64 100644 --- a/vms/spchainvm/key_chain.go +++ b/vms/spchainvm/keychain.go @@ -18,37 +18,48 @@ var ( errUnknownAccount = errors.New("unknown account") ) -// KeyChain is a collection of keys that can be used to spend utxos -type KeyChain struct { +// Keychain is a collection of keys that can be used to spend utxos +type Keychain struct { + factory crypto.FactorySECP256K1R networkID uint32 chainID ids.ID - // This can be used to iterate over. However, it should not be modified externally. + + // Key: The id of a private key (namely, [privKey].PublicKey().Address().Key()) + // Value: The index in Keys of that private key keyMap map[[20]byte]int - Addrs ids.ShortSet - Keys []*crypto.PrivateKeySECP256K1R + + // Each element is an address controlled by a key in [Keys] + // This can be used to iterate over. It should not be modified externally. + Addrs ids.ShortSet + + // List of keys this keychain manages + // This can be used to iterate over. It should not be modified externally. 
+ Keys []*crypto.PrivateKeySECP256K1R } -// NewKeyChain creates a new keychain for a chain -func NewKeyChain(networkID uint32, chainID ids.ID) *KeyChain { - return &KeyChain{ - chainID: chainID, - keyMap: make(map[[20]byte]int), +// NewKeychain creates a new keychain for a chain +func NewKeychain(networkID uint32, chainID ids.ID) *Keychain { + return &Keychain{ + networkID: networkID, + chainID: chainID, + keyMap: make(map[[20]byte]int), } } // New returns a newly generated private key -func (kc *KeyChain) New() *crypto.PrivateKeySECP256K1R { - factory := &crypto.FactorySECP256K1R{} - - skGen, _ := factory.NewPrivateKey() +func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { + skGen, err := kc.factory.NewPrivateKey() + if err != nil { + return nil, err + } sk := skGen.(*crypto.PrivateKeySECP256K1R) kc.Add(sk) - return sk + return sk, nil } // Add a new key to the key chain -func (kc *KeyChain) Add(key *crypto.PrivateKeySECP256K1R) { +func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { addr := key.PublicKey().Address() addrHash := addr.Key() if _, ok := kc.keyMap[addrHash]; !ok { @@ -59,7 +70,7 @@ func (kc *KeyChain) Add(key *crypto.PrivateKeySECP256K1R) { } // Get a key from the keychain. 
If the key is unknown, the -func (kc *KeyChain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { +func (kc *Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { if i, ok := kc.keyMap[id.Key()]; ok { return kc.Keys[i], true } @@ -67,10 +78,10 @@ func (kc *KeyChain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { } // Addresses returns a list of addresses this keychain manages -func (kc *KeyChain) Addresses() ids.ShortSet { return kc.Addrs } +func (kc *Keychain) Addresses() ids.ShortSet { return kc.Addrs } // Spend attempts to create a new transaction -func (kc *KeyChain) Spend(account Account, amount uint64, destination ids.ShortID) (*Tx, Account, error) { +func (kc *Keychain) Spend(account Account, amount uint64, destination ids.ShortID) (*Tx, Account, error) { key, exists := kc.Get(account.ID()) if !exists { return nil, Account{}, errUnknownAccount @@ -83,7 +94,7 @@ func (kc *KeyChain) Spend(account Account, amount uint64, destination ids.ShortI // PrefixedString returns a string representation of this keychain with each // line prepended with [prefix] -func (kc *KeyChain) PrefixedString(prefix string) string { +func (kc *Keychain) PrefixedString(prefix string) string { s := strings.Builder{} format := fmt.Sprintf("%%sKey[%s]: Key: %%s Address: %%s\n", @@ -99,6 +110,6 @@ func (kc *KeyChain) PrefixedString(prefix string) string { return strings.TrimSuffix(s.String(), "\n") } -func (kc *KeyChain) String() string { +func (kc *Keychain) String() string { return kc.PrefixedString("") } diff --git a/vms/spchainvm/vm.go b/vms/spchainvm/vm.go index 6b71ce6..8956097 100644 --- a/vms/spchainvm/vm.go +++ b/vms/spchainvm/vm.go @@ -118,6 +118,10 @@ func (vm *VM) Initialize( // Shutdown implements the snowman.ChainVM interface func (vm *VM) Shutdown() { + if vm.timer == nil { + return + } + vm.timer.Stop() if err := vm.baseDB.Close(); err != nil { vm.ctx.Log.Error("Closing the database failed with %s", err) diff --git 
a/vms/spdagvm/builder.go b/vms/spdagvm/builder.go index 5087b36..4dd5707 100644 --- a/vms/spdagvm/builder.go +++ b/vms/spdagvm/builder.go @@ -88,8 +88,8 @@ func (b Builder) NewSig(index uint32) *Sig { return &Sig{index: index} } // * This output can't be spent until at least [locktime]. // * If there is any "change" there is another output controlled by [changeAddr] with the change. // * The UTXOs consumed to make this transaction are a subset of [utxos]. -// * The keys controlling [utxos] are in [keyChain] -func (b Builder) NewTxFromUTXOs(keyChain *KeyChain, utxos []*UTXO, amount, txFee, locktime uint64, +// * The keys controlling [utxos] are in [keychain] +func (b Builder) NewTxFromUTXOs(keychain *Keychain, utxos []*UTXO, amount, txFee, locktime uint64, threshold uint32, toAddrs []ids.ShortID, changeAddr ids.ShortID, currentTime uint64) (*Tx, error) { ins := []Input{} // Consumed by this transaction @@ -103,7 +103,7 @@ func (b Builder) NewTxFromUTXOs(keyChain *KeyChain, utxos []*UTXO, amount, txFee spent := uint64(0) // The sum of the UTXOs consumed in this transaction for i := 0; i < len(utxos) && amountPlusTxFee > spent; i++ { utxo := utxos[i] - if in, signer, err := keyChain.Spend(utxo, currentTime); err == nil { + if in, signer, err := keychain.Spend(utxo, currentTime); err == nil { ins = append(ins, in) amount := in.(*InputPayment).Amount() spent += amount diff --git a/vms/spdagvm/factory.go b/vms/spdagvm/factory.go index 7e6e263..04e20ab 100644 --- a/vms/spdagvm/factory.go +++ b/vms/spdagvm/factory.go @@ -16,6 +16,6 @@ var ( type Factory struct{ TxFee uint64 } // New ... 
-func (f *Factory) New() interface{} { - return &VM{TxFee: f.TxFee} // Use the tx fee from the config +func (f *Factory) New() (interface{}, error) { + return &VM{TxFee: f.TxFee}, nil } diff --git a/vms/spdagvm/keychain.go b/vms/spdagvm/keychain.go index 5740cfa..7142a9d 100644 --- a/vms/spdagvm/keychain.go +++ b/vms/spdagvm/keychain.go @@ -18,31 +18,37 @@ var ( errCantSpend = errors.New("utxo couldn't be spent") ) -// KeyChain is a collection of keys that can be used to spend utxos -type KeyChain struct { - // This can be used to iterate over. However, it should not be modified externally. +// Keychain is a collection of keys that can be used to spend utxos +type Keychain struct { + factory crypto.FactorySECP256K1R + networkID uint32 + chainID ids.ID + // Key: The id of a private key (namely, [privKey].PublicKey().Address().Key()) // Value: The index in Keys of that private key keyMap map[[20]byte]int // Each element is an address controlled by a key in [Keys] + // This can be used to iterate over. It should not be modified externally. Addrs ids.ShortSet // List of keys this keychain manages + // This can be used to iterate over. It should not be modified externally. Keys []*crypto.PrivateKeySECP256K1R } -func (kc *KeyChain) init() { - if kc.keyMap == nil { - kc.keyMap = make(map[[20]byte]int) +// NewKeychain creates a new keychain for a chain +func NewKeychain(networkID uint32, chainID ids.ID) *Keychain { + return &Keychain{ + networkID: networkID, + chainID: chainID, + keyMap: make(map[[20]byte]int), } } // Add a new key to the key chain. // If [key] is already in the keychain, does nothing. 
-func (kc *KeyChain) Add(key *crypto.PrivateKeySECP256K1R) { - kc.init() - +func (kc *Keychain) Add(key *crypto.PrivateKeySECP256K1R) { addr := key.PublicKey().Address() // The address controlled by [key] addrHash := addr.Key() if _, ok := kc.keyMap[addrHash]; !ok { @@ -53,9 +59,7 @@ func (kc *KeyChain) Add(key *crypto.PrivateKeySECP256K1R) { } // Get a key from the keychain. If the key is unknown, the second return value is false. -func (kc KeyChain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { - kc.init() - +func (kc *Keychain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { if i, ok := kc.keyMap[id.Key()]; ok { return kc.Keys[i], true } @@ -63,15 +67,13 @@ func (kc KeyChain) Get(id ids.ShortID) (*crypto.PrivateKeySECP256K1R, bool) { } // Addresses returns a list of addresses this keychain manages -func (kc KeyChain) Addresses() ids.ShortSet { return kc.Addrs } +func (kc *Keychain) Addresses() ids.ShortSet { return kc.Addrs } // New returns a newly generated private key. 
// The key and the address it controls are added to // [kc.Keys] and [kc.Addrs], respectively -func (kc *KeyChain) New() (*crypto.PrivateKeySECP256K1R, error) { - factory := crypto.FactorySECP256K1R{} - - skGen, err := factory.NewPrivateKey() +func (kc *Keychain) New() (*crypto.PrivateKeySECP256K1R, error) { + skGen, err := kc.factory.NewPrivateKey() if err != nil { return nil, err } @@ -82,10 +84,10 @@ func (kc *KeyChain) New() (*crypto.PrivateKeySECP256K1R, error) { } // Spend attempts to create an input -func (kc *KeyChain) Spend(utxo *UTXO, time uint64) (Input, *InputSigner, error) { +func (kc *Keychain) Spend(utxo *UTXO, time uint64) (Input, *InputSigner, error) { builder := Builder{ - NetworkID: 0, - ChainID: ids.Empty, + NetworkID: kc.networkID, + ChainID: kc.chainID, } switch out := utxo.Out().(type) { @@ -144,12 +146,12 @@ func (kc *KeyChain) Spend(utxo *UTXO, time uint64) (Input, *InputSigner, error) // 2) A list of private keys such that each key controls an address in [addresses] // 3) true iff this keychain contains at least [threshold] keys that control an address // in [addresses] -func (kc *KeyChain) GetSigsAndKeys(addresses []ids.ShortID, threshold int) ([]*Sig, []*crypto.PrivateKeySECP256K1R, bool) { +func (kc *Keychain) GetSigsAndKeys(addresses []ids.ShortID, threshold int) ([]*Sig, []*crypto.PrivateKeySECP256K1R, bool) { sigs := []*Sig{} keys := []*crypto.PrivateKeySECP256K1R{} builder := Builder{ - NetworkID: 0, - ChainID: ids.Empty, + NetworkID: kc.networkID, + ChainID: kc.chainID, } for i := uint32(0); i < uint32(len(addresses)) && len(keys) < threshold; i++ { if key, exists := kc.Get(addresses[i]); exists { @@ -162,7 +164,7 @@ func (kc *KeyChain) GetSigsAndKeys(addresses []ids.ShortID, threshold int) ([]*S // PrefixedString returns the key chain as a string representation with [prefix] // added before every line. 
-func (kc *KeyChain) PrefixedString(prefix string) string { +func (kc *Keychain) PrefixedString(prefix string) string { s := strings.Builder{} format := fmt.Sprintf("%%sKey[%s]: Key: %%s Address: %%s\n", @@ -178,4 +180,4 @@ func (kc *KeyChain) PrefixedString(prefix string) string { return strings.TrimSuffix(s.String(), "\n") } -func (kc *KeyChain) String() string { return kc.PrefixedString("") } +func (kc *Keychain) String() string { return kc.PrefixedString("") } diff --git a/vms/spdagvm/output.go b/vms/spdagvm/output.go index 3997428..6118a9b 100644 --- a/vms/spdagvm/output.go +++ b/vms/spdagvm/output.go @@ -84,7 +84,7 @@ func (op *OutputPayment) Verify() error { return errOutputUnspendable case op.threshold == 0 && len(op.addresses) > 0: return errOutputUnoptimized - case !ids.IsSortedAndUniqueShortIDs(op.addresses): // TODO: Should we allow duplicated addresses + case !ids.IsSortedAndUniqueShortIDs(op.addresses): return errAddrsNotSortedUnique default: return nil @@ -216,7 +216,7 @@ func (otol *OutputTakeOrLeave) Verify() error { return errOutputUnoptimized case otol.locktime1 >= otol.locktime2: return errTimesNotSortedUnique - case !ids.IsSortedAndUniqueShortIDs(otol.addresses1) || // TODO: Should we allow duplicated addresses + case !ids.IsSortedAndUniqueShortIDs(otol.addresses1) || !ids.IsSortedAndUniqueShortIDs(otol.addresses2): return errAddrsNotSortedUnique default: diff --git a/vms/spdagvm/prefixed_state.go b/vms/spdagvm/prefixed_state.go index 01d37b9..b51e4cc 100644 --- a/vms/spdagvm/prefixed_state.go +++ b/vms/spdagvm/prefixed_state.go @@ -107,7 +107,6 @@ func (s *prefixedState) SpendUTXO(utxoID ids.ID) error { } // Update funds - // TODO: Clean this up. More into the output object? 
switch out := utxo.Out().(type) { case *OutputPayment: return s.removeUTXO(out.Addresses(), utxoID) diff --git a/vms/spdagvm/unique_tx.go b/vms/spdagvm/unique_tx.go index f775377..cb64d3a 100644 --- a/vms/spdagvm/unique_tx.go +++ b/vms/spdagvm/unique_tx.go @@ -241,7 +241,6 @@ func (tx *UniqueTx) VerifyState() error { txID: inputTx, } - // TODO: Replace with a switch? if err := parent.Verify(); err != nil { tx.t.validity = errMissingUTXO } else if status := parent.Status(); status.Decided() { diff --git a/vms/spdagvm/vm.go b/vms/spdagvm/vm.go index c5a78d8..f23b2ca 100644 --- a/vms/spdagvm/vm.go +++ b/vms/spdagvm/vm.go @@ -130,6 +130,10 @@ func (vm *VM) Initialize( // Shutdown implements the avalanche.DAGVM interface func (vm *VM) Shutdown() { + if vm.timer == nil { + return + } + vm.timer.Stop() if err := vm.baseDB.Close(); err != nil { vm.ctx.Log.Error("Closing the database failed with %s", err) @@ -315,7 +319,7 @@ func (vm *VM) Send(amount uint64, assetID, toAddrStr string, fromPKs []string) ( } // Add all of the keys in [fromPKs] to a keychain - keychain := KeyChain{} + keychain := NewKeychain(vm.ctx.NetworkID, vm.ctx.ChainID) factory := crypto.FactorySECP256K1R{} cb58 := formatting.CB58{} for _, fpk := range fromPKs { @@ -359,7 +363,7 @@ func (vm *VM) Send(amount uint64, assetID, toAddrStr string, fromPKs []string) ( ChainID: vm.ctx.ChainID, } currentTime := vm.clock.Unix() - tx, err := builder.NewTxFromUTXOs(&keychain, utxos, amount, vm.TxFee, 0, 1, toAddrs, outAddr, currentTime) + tx, err := builder.NewTxFromUTXOs(keychain, utxos, amount, vm.TxFee, 0, 1, toAddrs, outAddr, currentTime) if err != nil { return "", err } diff --git a/vms/spdagvm/vm_test.go b/vms/spdagvm/vm_test.go index 03a3b8e..e12b15e 100644 --- a/vms/spdagvm/vm_test.go +++ b/vms/spdagvm/vm_test.go @@ -4,7 +4,6 @@ package spdagvm import ( - "math" "testing" "github.com/ava-labs/gecko/database/memdb" @@ -452,7 +451,7 @@ func TestRPCAPI(t *testing.T) { // it had (from genesis) minus the 2 amounts 
it sent to [addr1] minus 2 tx fees if testbal, err := vm.GetBalance(pkToAddr[pks[0]], ""); err != nil { t.Fatalf("GetBalance(%q): %s", pkToAddr[pks[0]], err) - } else if testbal != defaultInitBalance-send1Amt-send2Amt-2*txFeeTest { // TODO generalize + } else if testbal != defaultInitBalance-send1Amt-send2Amt-2*txFeeTest { t.Fatalf("GetBalance(%q): returned wrong balance - expected: %d; returned: %d", pkToAddr[pks[0]], defaultInitBalance-send1Amt-send2Amt-2*txFeeTest, testbal) // Send [send3Amt] from [addr1] to the address controlled by [pks[0]] } else if _, err = vm.Send(send3Amt, "", pkToAddr[pks[0]], []string{addr1PrivKey}); err != nil { @@ -724,87 +723,3 @@ func TestIssuePendingDependency(t *testing.T) { ctx.Lock.Unlock() } - -// Ensure that an error is returned if an address will have more than -// math.MaxUint64 NanoAva -func TestTxOutputOverflow(t *testing.T) { - // Modify the genesis tx so the address controlled by [keys[0]] - // has math.MaxUint64 NanoAva - initBalances := map[string]uint64{ - keys[0].PublicKey().Address().String(): math.MaxUint64, - keys[1].PublicKey().Address().String(): defaultInitBalance, - keys[2].PublicKey().Address().String(): defaultInitBalance, - } - genesisTx := GenesisTx(initBalances) - - // Initialize vm - vmDB := memdb.New() - msgChan := make(chan common.Message, 1) - ctx.Lock.Lock() - vm := &VM{} - vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil) - vm.batchTimeout = 0 - - // Create a new private key - testPK, err := vm.CreateKey() - if err != nil { - t.Fatalf("CreateKey(): %s", err) - } - // Get the address controlled by the new private key - testAddr, err := vm.GetAddress(testPK) - if err != nil { - t.Fatalf("GetAddress(%q): %s", testPK, err) - } - - // Get string repr. 
of keys[0] - cb58 := formatting.CB58{Bytes: keys[0].Bytes()} - privKey0 := cb58.String() - - // Send [math.MaxUint64 - txFeeTest] NanoAva from [privKey0] to [testAddr] - _, err = vm.Send(math.MaxUint64-txFeeTest, "", testAddr, []string{privKey0}) - if err != nil { - t.Fatalf("Send(%d,%q,%q,%v): failed with error - %s", uint64(math.MaxUint64-txFeeTest), "", testAddr, []string{privKey0}, err) - } - ctx.Lock.Unlock() - - if msg := <-msgChan; msg != common.PendingTxs { - t.Fatalf("Wrong message") - } - - // Accept the transaction - ctx.Lock.Lock() - if txs := vm.PendingTxs(); len(txs) != 1 { - t.Fatalf("PendingTxs(): returned wrong number of transactions - expected: %d; returned: %d", 1, len(txs)) - } else { - txs[0].Accept() - } - if txs := vm.PendingTxs(); len(txs) != 0 { - t.Fatalf("PendingTxs(): there should not have been any pending transactions") - } - - // Ensure that [testAddr] has balance [math.MaxUint64 - txFeeTest] - if testbal, err := vm.GetBalance(testAddr, ""); err != nil { - t.Fatalf("GetBalance(%q): %s", testAddr, err) - } else if testbal != math.MaxUint64-txFeeTest { - t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", testAddr, "", uint64(math.MaxUint64-txFeeTest), testbal) - } - - // Ensure that the address controlled by [keys[0]] has balance 0 - if testbal, err := vm.GetBalance(keys[0].PublicKey().Address().String(), ""); err != nil { - t.Fatalf("GetBalance(%q): %s", keys[0].PublicKey().Address().String(), err) - } else if testbal != 0 { - // Balance of new address should be 0 - t.Fatalf("GetBalance(%q,%q): Balance Not Equal(%d,%d)", keys[0].PublicKey().Address().String(), "", 0, testbal) - } - - cb58.Bytes = keys[1].Bytes() - privKey1 := cb58.String() - - // Send [2*txFeeTest+1] NanoAva from [key1Str] to [testAddr] - // Should overflow [testAddr] by 1 - _, err = vm.Send(2*txFeeTest+1, "", testAddr, []string{privKey1}) - if err == errOutputOverflow { - t.Fatalf("Expected output to overflow but it did not") - } - ctx.Lock.Unlock() -} diff --git 
a/vms/timestampvm/factory.go b/vms/timestampvm/factory.go index 0800395..fb9a9ae 100644 --- a/vms/timestampvm/factory.go +++ b/vms/timestampvm/factory.go @@ -14,4 +14,4 @@ var ( type Factory struct{} // New ... -func (f *Factory) New() interface{} { return &VM{} } +func (f *Factory) New() (interface{}, error) { return &VM{}, nil } diff --git a/vms/timestampvm/service.go b/vms/timestampvm/service.go index e872d80..ec6c5c2 100644 --- a/vms/timestampvm/service.go +++ b/vms/timestampvm/service.go @@ -8,6 +8,7 @@ import ( "net/http" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/json" "github.com/ava-labs/gecko/utils/formatting" ) @@ -50,10 +51,10 @@ func (s *Service) ProposeBlock(_ *http.Request, args *ProposeBlockArgs, reply *P // APIBlock is the API representation of a block type APIBlock struct { - Timestamp int64 `json:"timestamp"` // Timestamp of most recent block - Data string `json:"data"` // Data in the most recent block. Base 58 repr. of 5 bytes. - ID string `json:"id"` // String repr. of ID of the most recent block - ParentID string `json:"parentID"` // String repr. of ID of the most recent block's parent + Timestamp json.Uint64 `json:"timestamp"` // Timestamp of most recent block + Data string `json:"data"` // Data in the most recent block. Base 58 repr. of 5 bytes. + ID string `json:"id"` // String repr. of ID of the most recent block + ParentID string `json:"parentID"` // String repr. 
of ID of the most recent block's parent } // GetBlockArgs are the arguments to GetBlock @@ -93,7 +94,7 @@ func (s *Service) GetBlock(_ *http.Request, args *GetBlockArgs, reply *GetBlockR } reply.APIBlock.ID = block.ID().String() - reply.APIBlock.Timestamp = block.Timestamp + reply.APIBlock.Timestamp = json.Uint64(block.Timestamp) reply.APIBlock.ParentID = block.ParentID().String() byteFormatter := formatting.CB58{Bytes: block.Data[:]} reply.Data = byteFormatter.String() diff --git a/xputtest/README.md b/xputtest/README.md new file mode 100644 index 0000000..3b42682 --- /dev/null +++ b/xputtest/README.md @@ -0,0 +1,17 @@ +# Throughput testing + +A throughput test is run in two parts. First a network must be running with at least one of the nodes running a throughput server. To start a throughput server when running a node the `--xput-server-enabled=true` flag should be passed. + +An example single node network can be started with: + +```sh +./build/ava --public-ip=127.0.0.1 --xput-server-port=9652 --xput-server-enabled=true --db-enabled=false --staking-tls-enabled=false --snow-sample-size=1 --snow-quorum-size=1 +``` + +The thoughput node can be started with: + +```sh +./build/xputtest --ip=127.0.0.1 --port=9652 --sp-chain +``` + +The above example with run a throughput test on the simple payment chain. Tests can be run with `--sp-dag` to run throughput tests on the simple payment dag. Tests can be run with `--avm` to run throughput tests on the AVA virtual machine. diff --git a/xputtest/avm.go b/xputtest/avm.go new file mode 100644 index 0000000..16d0315 --- /dev/null +++ b/xputtest/avm.go @@ -0,0 +1,108 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package main + +import ( + "time" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/xputtest/avmwallet" +) + +// benchmark an instance of the avm +func (n *network) benchmarkAVM(chain *platformvm.CreateChainTx) { + genesisBytes := chain.GenesisData + wallet, err := avmwallet.NewWallet(n.log, n.networkID, chain.ID(), config.AvaTxFee) + n.log.AssertNoError(err) + + factory := crypto.FactorySECP256K1R{} + sk, err := factory.ToPrivateKey(config.Key) + n.log.AssertNoError(err) + wallet.ImportKey(sk.(*crypto.PrivateKeySECP256K1R)) + + codec := wallet.Codec() + + genesis := avm.Genesis{} + n.log.AssertNoError(codec.Unmarshal(genesisBytes, &genesis)) + + genesisTx := genesis.Txs[0] + tx := avm.Tx{ + UnsignedTx: &genesisTx.CreateAssetTx, + } + txBytes, err := codec.Marshal(&tx) + n.log.AssertNoError(err) + tx.Initialize(txBytes) + + for _, utxo := range tx.UTXOs() { + wallet.AddUTXO(utxo) + } + + assetID := genesisTx.ID() + + n.log.AssertNoError(wallet.GenerateTxs(config.NumTxs, assetID)) + + go n.log.RecoverAndPanic(func() { n.IssueAVM(chain.ID(), assetID, wallet) }) +} + +// issue transactions to the instance of the avm funded by the provided wallet +func (n *network) IssueAVM(chainID ids.ID, assetID ids.ID, wallet *avmwallet.Wallet) { + n.log.Debug("Issuing with %d", wallet.Balance(assetID)) + numAccepted := 0 + numPending := 0 + + n.decided <- ids.ID{} + + // track the last second of transactions + meter := timer.TimedMeter{Duration: time.Second} + for d := range n.decided { + // display the TPS every 1000 txs + if numAccepted%1000 == 0 { + n.log.Info("TPS: %d", meter.Ticks()) + } + + // d is the ID of the tx that was accepted + if !d.IsZero() { + meter.Tick() + n.log.Debug("Finalized %s", d) + numAccepted++ 
+ numPending-- + } + + // Issue all the txs that we can right now + for numPending < config.MaxOutstandingTxs && wallet.Balance(assetID) > 0 && numAccepted+numPending < config.NumTxs { + tx := wallet.NextTx() + n.log.AssertTrue(tx != nil, "Tx creation failed") + + // send the IssueTx message + it, err := n.build.IssueTx(chainID, tx.Bytes()) + n.log.AssertNoError(err) + ds := it.DataStream() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) + + n.conn.GetNet().SendMsg(newMsg, n.conn) + + ds.Free() + ba.Free() + newMsg.Free() + + numPending++ + n.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted) + } + + // If we are done issuing txs, return from the function + if numAccepted+numPending >= config.NumTxs { + n.log.Info("done with test") + net.ec.Stop() + return + } + } +} diff --git a/xputtest/avmwallet/utxo_set.go b/xputtest/avmwallet/utxo_set.go new file mode 100644 index 0000000..c346969 --- /dev/null +++ b/xputtest/avmwallet/utxo_set.go @@ -0,0 +1,92 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package avmwallet + +import ( + "fmt" + "strings" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/vms/components/ava" +) + +// UTXOSet ... +type UTXOSet struct { + // Key: The id of a UTXO + // Value: The index in UTXOs of that UTXO + utxoMap map[[32]byte]int + + // List of UTXOs in this set + // This can be used to iterate over. It should not be modified externally. + UTXOs []*ava.UTXO +} + +// Put ... +func (us *UTXOSet) Put(utxo *ava.UTXO) { + if us.utxoMap == nil { + us.utxoMap = make(map[[32]byte]int) + } + utxoID := utxo.InputID() + utxoKey := utxoID.Key() + if _, ok := us.utxoMap[utxoKey]; !ok { + us.utxoMap[utxoKey] = len(us.UTXOs) + us.UTXOs = append(us.UTXOs, utxo) + } +} + +// Get ... 
+func (us *UTXOSet) Get(id ids.ID) *ava.UTXO { + if us.utxoMap == nil { + return nil + } + if i, ok := us.utxoMap[id.Key()]; ok { + utxo := us.UTXOs[i] + return utxo + } + return nil +} + +// Remove ... +func (us *UTXOSet) Remove(id ids.ID) *ava.UTXO { + i, ok := us.utxoMap[id.Key()] + if !ok { + return nil + } + utxoI := us.UTXOs[i] + + j := len(us.UTXOs) - 1 + utxoJ := us.UTXOs[j] + + us.UTXOs[i] = us.UTXOs[j] + us.UTXOs = us.UTXOs[:j] + + us.utxoMap[utxoJ.InputID().Key()] = i + delete(us.utxoMap, utxoI.InputID().Key()) + + return utxoI +} + +// PrefixedString returns a string with each new line prefixed with [prefix] +func (us *UTXOSet) PrefixedString(prefix string) string { + s := strings.Builder{} + + s.WriteString(fmt.Sprintf("UTXOs (length=%d):", len(us.UTXOs))) + for i, utxo := range us.UTXOs { + utxoID := utxo.InputID() + txID, txIndex := utxo.InputSource() + + s.WriteString(fmt.Sprintf("\n%sUTXO[%d]:"+ + "\n%s UTXOID: %s"+ + "\n%s TxID: %s"+ + "\n%s TxIndex: %d", + prefix, i, + prefix, utxoID, + prefix, txID, + prefix, txIndex)) + } + + return s.String() +} + +func (us *UTXOSet) String() string { return us.PrefixedString(" ") } diff --git a/xputtest/avmwallet/wallet.go b/xputtest/avmwallet/wallet.go new file mode 100644 index 0000000..ef01eb0 --- /dev/null +++ b/xputtest/avmwallet/wallet.go @@ -0,0 +1,315 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avmwallet + +import ( + "errors" + "fmt" + + stdmath "math" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/snow" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/hashing" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/math" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/utils/wrappers" + "github.com/ava-labs/gecko/vms/avm" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +// Wallet is a holder for keys and UTXOs for the Ava DAG. +type Wallet struct { + networkID uint32 + chainID ids.ID + + clock timer.Clock + codec codec.Codec + log logging.Logger + + keychain *secp256k1fx.Keychain // Mapping from public address to the SigningKeys + utxoSet *UTXOSet // Mapping from utxoIDs to UTXOs + + balance map[[32]byte]uint64 + txFee uint64 + + txsSent int32 + txs []*avm.Tx +} + +// NewWallet returns a new Wallet +func NewWallet(log logging.Logger, networkID uint32, chainID ids.ID, txFee uint64) (*Wallet, error) { + c := codec.NewDefault() + errs := wrappers.Errs{} + errs.Add( + c.RegisterType(&avm.BaseTx{}), + c.RegisterType(&avm.CreateAssetTx{}), + c.RegisterType(&avm.OperationTx{}), + c.RegisterType(&avm.ImportTx{}), + c.RegisterType(&avm.ExportTx{}), + c.RegisterType(&secp256k1fx.TransferInput{}), + c.RegisterType(&secp256k1fx.MintOutput{}), + c.RegisterType(&secp256k1fx.TransferOutput{}), + c.RegisterType(&secp256k1fx.MintOperation{}), + c.RegisterType(&secp256k1fx.Credential{}), + ) + return &Wallet{ + networkID: networkID, + chainID: chainID, + codec: c, + log: log, + keychain: secp256k1fx.NewKeychain(), + utxoSet: &UTXOSet{}, + balance: make(map[[32]byte]uint64), + txFee: txFee, + }, errs.Err +} + +// Codec returns the codec used for serialization +func (w *Wallet) Codec() codec.Codec { return w.codec } + +// GetAddress returns one of the addresses this 
wallet manages. If no address +// exists, one will be created. +func (w *Wallet) GetAddress() (ids.ShortID, error) { + if w.keychain.Addrs.Len() == 0 { + return w.CreateAddress() + } + return w.keychain.Addrs.CappedList(1)[0], nil +} + +// CreateAddress returns a new address. +// It also saves the address and the private key that controls it +// so the address can be used later +func (w *Wallet) CreateAddress() (ids.ShortID, error) { + privKey, err := w.keychain.New() + return privKey.PublicKey().Address(), err +} + +// ImportKey imports a private key into this wallet +func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keychain.Add(sk) } + +// AddUTXO adds a new UTXO to this wallet if this wallet may spend it +// The UTXO's output must be an OutputPayment +func (w *Wallet) AddUTXO(utxo *ava.UTXO) { + out, ok := utxo.Out.(ava.Transferable) + if !ok { + return + } + + if _, _, err := w.keychain.Spend(out, stdmath.MaxUint64); err == nil { + w.utxoSet.Put(utxo) + w.balance[utxo.AssetID().Key()] += out.Amount() + } +} + +// RemoveUTXO from this wallet +func (w *Wallet) RemoveUTXO(utxoID ids.ID) { + utxo := w.utxoSet.Get(utxoID) + if utxo == nil { + return + } + + assetID := utxo.AssetID() + assetKey := assetID.Key() + newBalance := w.balance[assetKey] - utxo.Out.(ava.Transferable).Amount() + if newBalance == 0 { + delete(w.balance, assetKey) + } else { + w.balance[assetKey] = newBalance + } + + w.utxoSet.Remove(utxoID) +} + +// Balance returns the amount of the assets in this wallet +func (w *Wallet) Balance(assetID ids.ID) uint64 { return w.balance[assetID.Key()] } + +// CreateTx returns a tx that sends [amount] of [assetID] to [destAddr] +func (w *Wallet) CreateTx(assetID ids.ID, amount uint64, destAddr ids.ShortID) (*avm.Tx, error) { + if amount == 0 { + return nil, errors.New("invalid amount") + } + + amountSpent := uint64(0) + time := w.clock.Unix() + + ins := []*ava.TransferableInput{} + keys := [][]*crypto.PrivateKeySECP256K1R{} + for _, utxo := 
range w.utxoSet.UTXOs { + if !utxo.AssetID().Equals(assetID) { + continue + } + inputIntf, signers, err := w.keychain.Spend(utxo.Out, time) + if err != nil { + continue + } + input, ok := inputIntf.(ava.Transferable) + if !ok { + continue + } + spent, err := math.Add64(amountSpent, input.Amount()) + if err != nil { + return nil, err + } + amountSpent = spent + + in := &ava.TransferableInput{ + UTXOID: utxo.UTXOID, + Asset: ava.Asset{ID: assetID}, + In: input, + } + + ins = append(ins, in) + keys = append(keys, signers) + + if amountSpent >= amount { + break + } + } + + if amountSpent < amount { + return nil, errors.New("insufficient funds") + } + + ava.SortTransferableInputsWithSigners(ins, keys) + + outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amount, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{destAddr}, + }, + }, + }} + + if amountSpent > amount { + changeAddr, err := w.GetAddress() + if err != nil { + return nil, err + } + outs = append(outs, &ava.TransferableOutput{ + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: amountSpent - amount, + Locktime: 0, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{changeAddr}, + }, + }, + }) + } + + ava.SortTransferableOutputs(outs, w.codec) + + tx := &avm.Tx{ + UnsignedTx: &avm.BaseTx{ + NetID: w.networkID, + BCID: w.chainID, + Outs: outs, + Ins: ins, + }, + } + + unsignedBytes, err := w.codec.Marshal(&tx.UnsignedTx) + if err != nil { + return nil, err + } + hash := hashing.ComputeHash256(unsignedBytes) + + for _, credKeys := range keys { + cred := &secp256k1fx.Credential{} + for _, key := range credKeys { + sig, err := key.SignHash(hash) + if err != nil { + return nil, err + } + fixedSig := [crypto.SECP256K1RSigLen]byte{} + copy(fixedSig[:], sig) + + cred.Sigs = append(cred.Sigs, fixedSig) + } + tx.Creds = append(tx.Creds, 
cred) + } + + b, err := w.codec.Marshal(tx) + if err != nil { + return nil, err + } + tx.Initialize(b) + + return tx, nil +} + +// GenerateTxs generates the transactions that will be sent +// during the test +// Generate them all on test initialization so tx generation is not bottleneck +// in testing +func (w *Wallet) GenerateTxs(numTxs int, assetID ids.ID) error { + w.log.Info("Generating %d transactions", numTxs) + + ctx := snow.DefaultContextTest() + ctx.NetworkID = w.networkID + ctx.ChainID = w.chainID + + frequency := numTxs / 50 + if frequency > 1000 { + frequency = 1000 + } + + w.txs = make([]*avm.Tx, numTxs) + for i := 0; i < numTxs; i++ { + addr, err := w.CreateAddress() + if err != nil { + return err + } + tx, err := w.CreateTx(assetID, 1, addr) + if err != nil { + return err + } + + for _, utxoID := range tx.InputUTXOs() { + w.RemoveUTXO(utxoID.InputID()) + } + for _, utxo := range tx.UTXOs() { + w.AddUTXO(utxo) + } + + if numGenerated := i + 1; numGenerated%frequency == 0 { + w.log.Info("Generated %d out of %d transactions", numGenerated, numTxs) + } + + w.txs[i] = tx + } + + w.log.Info("Finished generating %d transactions", numTxs) + return nil +} + +// NextTx returns the next tx to be sent as part of xput test +func (w *Wallet) NextTx() *avm.Tx { + if len(w.txs) == 0 { + return nil + } + tx := w.txs[0] + w.txs = w.txs[1:] + return tx +} + +func (w *Wallet) String() string { + return fmt.Sprintf( + "Keychain:\n"+ + "%s\n"+ + "%s", + w.keychain.PrefixedString(" "), + w.utxoSet.PrefixedString(" "), + ) +} diff --git a/xputtest/avmwallet/wallet_test.go b/xputtest/avmwallet/wallet_test.go new file mode 100644 index 0000000..ddd8bc3 --- /dev/null +++ b/xputtest/avmwallet/wallet_test.go @@ -0,0 +1,218 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package avmwallet + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/vms/components/ava" + "github.com/ava-labs/gecko/vms/secp256k1fx" +) + +func TestNewWallet(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + if w == nil { + t.Fatalf("failed to create the new wallet") + } +} + +func TestWalletGetAddress(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + addr0, err := w.GetAddress() + if err != nil { + t.Fatal(err) + } + if addr0.IsZero() || addr0.Equals(ids.ShortEmpty) { + t.Fatalf("expected new address but got %s", addr0) + } +} + +func TestWalletGetMultipleAddresses(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + addr0, err := w.GetAddress() + if err != nil { + t.Fatal(err) + } + addr1, err := w.GetAddress() + if err != nil { + t.Fatal(err) + } + if !addr0.Equals(addr1) { + t.Fatalf("Should have returned the same address from multiple Get Address calls") + } +} + +func TestWalletEmptyBalance(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + if balance := w.Balance(ids.Empty); balance != 0 { + t.Fatalf("expected balance to be 0, was %d", balance) + } +} + +func TestWalletAddUTXO(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{TxID: ids.Empty.Prefix(0)}, + Asset: 
ava.Asset{ID: ids.Empty.Prefix(1)}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + }, + } + + w.AddUTXO(utxo) + + if balance := w.Balance(utxo.AssetID()); balance != 1000 { + t.Fatalf("expected balance to be 1000, was %d", balance) + } +} + +func TestWalletAddInvalidUTXO(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{TxID: ids.Empty.Prefix(0)}, + Asset: ava.Asset{ID: ids.Empty.Prefix(1)}, + } + + w.AddUTXO(utxo) + + if balance := w.Balance(utxo.AssetID()); balance != 0 { + t.Fatalf("expected balance to be 0, was %d", balance) + } +} + +func TestWalletCreateTx(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + assetID := ids.Empty.Prefix(0) + + addr, err := w.GetAddress() + if err != nil { + t.Fatal(err) + } + utxo := &ava.UTXO{ + UTXOID: ava.UTXOID{TxID: ids.Empty.Prefix(1)}, + Asset: ava.Asset{ID: assetID}, + Out: &secp256k1fx.TransferOutput{ + Amt: 1000, + OutputOwners: secp256k1fx.OutputOwners{ + Threshold: 1, + Addrs: []ids.ShortID{addr}, + }, + }, + } + + w.AddUTXO(utxo) + + destAddr, err := w.CreateAddress() + if err != nil { + t.Fatal(err) + } + + tx, err := w.CreateTx(assetID, 1000, destAddr) + if err != nil { + t.Fatal(err) + } + + if balance := w.Balance(utxo.AssetID()); balance != 1000 { + t.Fatalf("expected balance to be 1000, was %d", balance) + } + + for _, utxo := range tx.InputUTXOs() { + w.RemoveUTXO(utxo.InputID()) + } + + if balance := w.Balance(utxo.AssetID()); balance != 0 { + t.Fatalf("expected balance to be 0, was %d", balance) + } +} + +func TestWalletImportKey(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + 
factory := crypto.FactorySECP256K1R{} + sk, err := factory.NewPrivateKey() + if err != nil { + t.Fatal(err) + } + + w.ImportKey(sk.(*crypto.PrivateKeySECP256K1R)) + + addr0 := sk.PublicKey().Address() + addr1, err := w.GetAddress() + if err != nil { + t.Fatal(err) + } + if !addr0.Equals(addr1) { + t.Fatalf("Should have returned the same address from the Get Address call") + } +} + +func TestWalletString(t *testing.T) { + chainID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}) + w, err := NewWallet(logging.NoLog{}, 12345, chainID, 0) + if err != nil { + t.Fatal(err) + } + + skBytes := []byte{ + 0x4a, 0x99, 0x82, 0x98, 0x5c, 0x39, 0xa8, 0x04, + 0x87, 0x4c, 0x62, 0x3c, 0xd4, 0x9e, 0xa7, 0x7d, + 0x63, 0x5f, 0x92, 0x7c, 0xb9, 0x6b, 0x3f, 0xb7, + 0x3b, 0x93, 0x59, 0xa2, 0x4f, 0xb4, 0x0c, 0x9e, + } + factory := crypto.FactorySECP256K1R{} + sk, err := factory.ToPrivateKey(skBytes) + if err != nil { + t.Fatal(err) + } + + w.ImportKey(sk.(*crypto.PrivateKeySECP256K1R)) + + expected := "Keychain:" + + "\n Key[0]: Key: ZrYnAmArnk97JGzkq3kxTmFuKQnmajc86Xyd3JXC29meZ7znH Address: EHQiyKpq1VxkyNzt9bj1BLn5tzQ6Vt96q" + + "\nUTXOs (length=0):" + if str := w.String(); str != expected { + t.Fatalf("got:\n%s\n\nexpected:\n%s", str, expected) + } +} diff --git a/xputtest/subnets.go b/xputtest/chains.go similarity index 76% rename from xputtest/subnets.go rename to xputtest/chains.go index aeb01dc..e2b57df 100644 --- a/xputtest/subnets.go +++ b/xputtest/chains.go @@ -8,7 +8,8 @@ type ChainType int // Chain types const ( - UnknownChain ChainType = iota - ChainChain - DagChain + unknown ChainType = iota + spChain + spDAG + avmDAG ) diff --git a/xputtest/chainwallet/wallet.go b/xputtest/chainwallet/wallet.go index 9f4b92b..c96e115 100644 --- a/xputtest/chainwallet/wallet.go +++ b/xputtest/chainwallet/wallet.go @@ -4,47 +4,52 @@ package chainwallet import ( + "errors" "fmt" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/utils/crypto" + 
"github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/vms/spchainvm" ) -// The max number of transactions this wallet can send as part of the throughput tests -// lower --> low startup time but test has shorter duration -// higher --> high startup time but test has longer duration -const ( - MaxNumTxs = 25000 -) - // Wallet is a holder for keys and UTXOs. type Wallet struct { - networkID uint32 - chainID ids.ID - keyChain *spchainvm.KeyChain // Mapping from public address to the SigningKeys + networkID uint32 + chainID ids.ID + + log logging.Logger + + keychain *spchainvm.Keychain // Mapping from public address to the SigningKeys accountSet map[[20]byte]spchainvm.Account // Mapping from addresses to accounts balance uint64 - TxsSent int32 - txs [MaxNumTxs]*spchainvm.Tx + + txs []*spchainvm.Tx } // NewWallet ... -func NewWallet(networkID uint32, chainID ids.ID) Wallet { - return Wallet{ +func NewWallet(log logging.Logger, networkID uint32, chainID ids.ID) *Wallet { + return &Wallet{ networkID: networkID, chainID: chainID, - keyChain: spchainvm.NewKeyChain(networkID, chainID), + log: log, + keychain: spchainvm.NewKeychain(networkID, chainID), accountSet: make(map[[20]byte]spchainvm.Account), } } // CreateAddress returns a brand new address! Ready to receive funds! -func (w *Wallet) CreateAddress() ids.ShortID { return w.keyChain.New().PublicKey().Address() } +func (w *Wallet) CreateAddress() (ids.ShortID, error) { + sk, err := w.keychain.New() + if err != nil { + return ids.ShortID{}, err + } + return sk.PublicKey().Address(), nil +} // ImportKey imports a private key into this wallet -func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keyChain.Add(sk) } +func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keychain.Add(sk) } // AddAccount adds a new account to this wallet, if this wallet can spend it. 
func (w *Wallet) AddAccount(account spchainvm.Account) { @@ -61,73 +66,79 @@ func (w *Wallet) Balance() uint64 { return w.balance } // during the test // Generate them all on test initialization so tx generation is not bottleneck // in testing -func (w *Wallet) GenerateTxs() { +func (w *Wallet) GenerateTxs(numTxs int) error { + w.log.Info("Generating %d transactions", numTxs) + ctx := snow.DefaultContextTest() ctx.NetworkID = w.networkID ctx.ChainID = w.chainID - for i := 0; i < MaxNumTxs; i++ { - if i%1000 == 0 { - fmt.Printf("generated %d transactions\n", i) - } - for _, account := range w.accountSet { - accountID := account.ID() - if key, exists := w.keyChain.Get(accountID); exists { - amount := uint64(1) - if tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key); err == nil { - newAccount, err := sendAccount.Receive(tx, ctx) - if err != nil { - panic("shouldn't error") - } - w.accountSet[accountID.Key()] = newAccount - w.txs[i] = tx - continue - } else { - panic("shouldn't error here either: " + err.Error()) - } - } else { - panic("shouldn't not exist") - } - } + frequency := numTxs / 50 + if frequency > 1000 { + frequency = 1000 } + + w.txs = make([]*spchainvm.Tx, numTxs) + for i := range w.txs { + tx, err := w.MakeTx() + if err != nil { + return err + } + + if numGenerated := i + 1; numGenerated%frequency == 0 { + w.log.Info("Generated %d out of %d transactions", numGenerated, numTxs) + } + + w.txs[i] = tx + } + + w.log.Info("Finished generating %d transactions", numTxs) + + return nil } -/* -// Send a new transaction -func (w *Wallet) Send() *spchainvm.Tx { +// NextTx returns the next tx to be sent as part of xput test +func (w *Wallet) NextTx() *spchainvm.Tx { + if len(w.txs) == 0 { + return nil + } + tx := w.txs[0] + w.txs = w.txs[1:] + return tx +} + +// MakeTx creates a new transaction and update the state to after the tx is accepted +func (w *Wallet) MakeTx() (*spchainvm.Tx, error) { ctx := snow.DefaultContextTest() ctx.NetworkID = 
w.networkID ctx.ChainID = w.chainID for _, account := range w.accountSet { accountID := account.ID() - if key, exists := w.keyChain.Get(accountID); exists { - amount := uint64(1) - if tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key); err == nil { - newAccount, err := sendAccount.Receive(tx, ctx) - if err == nil { - w.accountSet[accountID.Key()] = newAccount - return tx - } - } + key, exists := w.keychain.Get(accountID) + if !exists { + return nil, errors.New("missing account") } - } - return nil -} -*/ -// NextTx returns the next tx to be sent as part of xput test -func (w *Wallet) NextTx() *spchainvm.Tx { - if w.TxsSent >= MaxNumTxs { - return nil + amount := uint64(1) + tx, sendAccount, err := account.CreateTx(amount, accountID, ctx, key) + if err != nil { + continue + } + + newAccount, err := sendAccount.Receive(tx, ctx) + if err != nil { + return nil, err + } + w.accountSet[accountID.Key()] = newAccount + return tx, nil } - w.TxsSent++ - return w.txs[w.TxsSent-1] + return nil, errors.New("empty") } func (w Wallet) String() string { return fmt.Sprintf( - "KeyChain:\n"+ + "Keychain:\n"+ "%s", - w.keyChain.PrefixedString(" ")) + w.keychain.PrefixedString(" ")) } diff --git a/xputtest/config.go b/xputtest/config.go index 5066069..0efe70e 100644 --- a/xputtest/config.go +++ b/xputtest/config.go @@ -23,7 +23,10 @@ type Config struct { LoggingConfig logging.Config // Key describes which key to use to issue transactions + Key []byte + + // NumTxs describes the number of transactions to issue // MaxOutstandingTxs describes how many txs to pipeline - Key, MaxOutstandingTxs int - Chain ChainType + NumTxs, MaxOutstandingTxs int + Chain ChainType } diff --git a/xputtest/dagwallet/utxo_set.go b/xputtest/dagwallet/utxo_set.go index ae513e8..142a47c 100644 --- a/xputtest/dagwallet/utxo_set.go +++ b/xputtest/dagwallet/utxo_set.go @@ -11,49 +11,53 @@ import ( "github.com/ava-labs/gecko/vms/spdagvm" ) -// UtxoSet ... 
-type UtxoSet struct { - // This can be used to iterate over. However, it should not be modified externally. +// UTXOSet ... +type UTXOSet struct { + // Key: The id of a UTXO + // Value: The index in UTXOs of that UTXO utxoMap map[[32]byte]int - Utxos []*spdagvm.UTXO + + // List of UTXOs in this set + // This can be used to iterate over. It should not be modified externally. + UTXOs []*spdagvm.UTXO } // Put ... -func (us *UtxoSet) Put(utxo *spdagvm.UTXO) { +func (us *UTXOSet) Put(utxo *spdagvm.UTXO) { if us.utxoMap == nil { us.utxoMap = make(map[[32]byte]int) } if _, ok := us.utxoMap[utxo.ID().Key()]; !ok { - us.utxoMap[utxo.ID().Key()] = len(us.Utxos) - us.Utxos = append(us.Utxos, utxo) + us.utxoMap[utxo.ID().Key()] = len(us.UTXOs) + us.UTXOs = append(us.UTXOs, utxo) } } // Get ... -func (us *UtxoSet) Get(id ids.ID) *spdagvm.UTXO { +func (us *UTXOSet) Get(id ids.ID) *spdagvm.UTXO { if us.utxoMap == nil { return nil } if i, ok := us.utxoMap[id.Key()]; ok { - utxo := us.Utxos[i] + utxo := us.UTXOs[i] return utxo } return nil } // Remove ... 
-func (us *UtxoSet) Remove(id ids.ID) *spdagvm.UTXO { +func (us *UTXOSet) Remove(id ids.ID) *spdagvm.UTXO { i, ok := us.utxoMap[id.Key()] if !ok { return nil } - utxoI := us.Utxos[i] + utxoI := us.UTXOs[i] - j := len(us.Utxos) - 1 - utxoJ := us.Utxos[j] + j := len(us.UTXOs) - 1 + utxoJ := us.UTXOs[j] - us.Utxos[i] = us.Utxos[j] - us.Utxos = us.Utxos[:j] + us.UTXOs[i] = us.UTXOs[j] + us.UTXOs = us.UTXOs[:j] us.utxoMap[utxoJ.ID().Key()] = i delete(us.utxoMap, utxoI.ID().Key()) @@ -61,14 +65,14 @@ func (us *UtxoSet) Remove(id ids.ID) *spdagvm.UTXO { return utxoI } -func (us *UtxoSet) string(prefix string) string { +func (us *UTXOSet) string(prefix string) string { s := strings.Builder{} - for i, utxo := range us.Utxos { + for i, utxo := range us.UTXOs { out := utxo.Out().(*spdagvm.OutputPayment) sourceID, sourceIndex := utxo.Source() - s.WriteString(fmt.Sprintf("%sUtxo[%d]:"+ + s.WriteString(fmt.Sprintf("%sUTXO[%d]:"+ "\n%s InputID: %s"+ "\n%s InputIndex: %d"+ "\n%s Locktime: %d"+ @@ -83,6 +87,6 @@ func (us *UtxoSet) string(prefix string) string { return strings.TrimSuffix(s.String(), "\n") } -func (us *UtxoSet) String() string { +func (us *UTXOSet) String() string { return us.string("") } diff --git a/xputtest/dagwallet/wallet.go b/xputtest/dagwallet/wallet.go index df308a2..a10be8f 100644 --- a/xputtest/dagwallet/wallet.go +++ b/xputtest/dagwallet/wallet.go @@ -18,19 +18,19 @@ type Wallet struct { networkID uint32 chainID ids.ID clock timer.Clock - keyChain *spdagvm.KeyChain // Mapping from public address to the SigningKeys - utxoSet *UtxoSet // Mapping from utxoIDs to Utxos + keychain *spdagvm.Keychain // Mapping from public address to the SigningKeys + utxoSet *UTXOSet // Mapping from utxoIDs to UTXOs balance uint64 txFee uint64 } // NewWallet returns a new Wallet -func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) Wallet { - return Wallet{ +func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) *Wallet { + return &Wallet{ networkID: networkID, 
chainID: chainID, - keyChain: &spdagvm.KeyChain{}, - utxoSet: &UtxoSet{}, + keychain: spdagvm.NewKeychain(networkID, chainID), + utxoSet: &UTXOSet{}, txFee: txFee, } } @@ -38,32 +38,32 @@ func NewWallet(networkID uint32, chainID ids.ID, txFee uint64) Wallet { // GetAddress returns one of the addresses this wallet manages. If no address // exists, one will be created. func (w *Wallet) GetAddress() ids.ShortID { - if w.keyChain.Addrs.Len() == 0 { + if w.keychain.Addrs.Len() == 0 { return w.CreateAddress() } - return w.keyChain.Addrs.CappedList(1)[0] + return w.keychain.Addrs.CappedList(1)[0] } // CreateAddress returns a new address. // It also saves the address and the private key that controls it // so the address can be used later func (w *Wallet) CreateAddress() ids.ShortID { - privKey, _ := w.keyChain.New() + privKey, _ := w.keychain.New() return privKey.PublicKey().Address() } // ImportKey imports a private key into this wallet -func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keyChain.Add(sk) } +func (w *Wallet) ImportKey(sk *crypto.PrivateKeySECP256K1R) { w.keychain.Add(sk) } -// AddUtxo adds a new UTXO to this wallet if this wallet may spend it +// AddUTXO adds a new UTXO to this wallet if this wallet may spend it // The UTXO's output must be an OutputPayment -func (w *Wallet) AddUtxo(utxo *spdagvm.UTXO) { +func (w *Wallet) AddUTXO(utxo *spdagvm.UTXO) { out, ok := utxo.Out().(*spdagvm.OutputPayment) if !ok { return } - if _, _, err := w.keyChain.Spend(utxo, math.MaxUint64); err == nil { + if _, _, err := w.keychain.Spend(utxo, math.MaxUint64); err == nil { w.utxoSet.Put(utxo) w.balance += out.Amount() } @@ -83,18 +83,18 @@ func (w *Wallet) Send(amount uint64, locktime uint64, destAddr ids.ShortID) *spd // Send any change to an address this wallet controls changeAddr := ids.ShortID{} - if w.keyChain.Addrs.Len() < 1000 { + if w.keychain.Addrs.Len() < 1000 { changeAddr = w.CreateAddress() } else { changeAddr = w.GetAddress() } - utxoList := 
w.utxoSet.Utxos // List of UTXOs this wallet may spend + utxoList := w.utxoSet.UTXOs // List of UTXOs this wallet may spend destAddrs := []ids.ShortID{destAddr} // Build the transaction - tx, err := builder.NewTxFromUTXOs(w.keyChain, utxoList, amount, w.txFee, locktime, 1, destAddrs, changeAddr, currentTime) + tx, err := builder.NewTxFromUTXOs(w.keychain, utxoList, amount, w.txFee, locktime, 1, destAddrs, changeAddr, currentTime) if err != nil { panic(err) } @@ -102,8 +102,8 @@ func (w *Wallet) Send(amount uint64, locktime uint64, destAddr ids.ShortID) *spd // Remove from [w.utxoSet] any UTXOs used to fund [tx] for _, in := range tx.Ins() { if in, ok := in.(*spdagvm.InputPayment); ok { - inUtxoID := in.InputID() - w.utxoSet.Remove(inUtxoID) + inUTXOID := in.InputID() + w.utxoSet.Remove(inUTXOID) w.balance -= in.Amount() // Deduct from [w.balance] the amount sent } } @@ -113,10 +113,10 @@ func (w *Wallet) Send(amount uint64, locktime uint64, destAddr ids.ShortID) *spd func (w Wallet) String() string { return fmt.Sprintf( - "KeyChain:\n"+ + "Keychain:\n"+ "%s\n"+ - "UtxoSet:\n"+ + "UTXOSet:\n"+ "%s", - w.keyChain.PrefixedString(" "), + w.keychain.PrefixedString(" "), w.utxoSet.string(" ")) } diff --git a/xputtest/main.go b/xputtest/main.go index 10b634f..3ed3236 100644 --- a/xputtest/main.go +++ b/xputtest/main.go @@ -3,80 +3,30 @@ package main -// #include "salticidae/network.h" -// void onTerm(int sig, void *); -// void decidedTx(msg_t *, msgnetwork_conn_t *, void *); -import "C" - import ( "fmt" "os" "path" "runtime" "runtime/pprof" - "time" - "unsafe" "github.com/ava-labs/salticidae-go" "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/networking" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/utils/timer" - "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/avm" 
"github.com/ava-labs/gecko/vms/spchainvm" "github.com/ava-labs/gecko/vms/spdagvm" - "github.com/ava-labs/gecko/xputtest/chainwallet" - "github.com/ava-labs/gecko/xputtest/dagwallet" ) -// tp stores the persistent data needed when running the test. -type tp struct { - ec salticidae.EventContext - build networking.Builder - - conn salticidae.MsgNetworkConn - - log logging.Logger - decided chan ids.ID - - networkID uint32 -} - -var t = tp{} - -//export onTerm -func onTerm(C.int, unsafe.Pointer) { - t.log.Info("Terminate signal received") - t.ec.Stop() -} - -// decidedTx handles the recept of a decidedTx message -//export decidedTx -func decidedTx(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { - msg := salticidae.MsgFromC(salticidae.CMsg(_msg)) - - pMsg, err := t.build.Parse(networking.DecidedTx, msg.GetPayloadByMove()) - if err != nil { - t.log.Warn("Failed to parse DecidedTx message") - return - } - - txID, err := ids.ToID(pMsg.Get(networking.TxID).([]byte)) - t.log.AssertNoError(err) // Length is checked in message parsing - - t.log.Debug("Decided %s", txID) - t.decided <- txID -} - func main() { if err != nil { fmt.Printf("Failed to parse arguments: %s\n", err) } + // set up logging config.LoggingConfig.Directory = path.Join(config.LoggingConfig.Directory, "client") log, err := logging.New(config.LoggingConfig) if err != nil { @@ -86,46 +36,32 @@ func main() { defer log.Stop() - t.log = log + // initialize state based on CLI args + net.log = log crypto.EnableCrypto = config.EnableCrypto - t.decided = make(chan ids.ID, config.MaxOutstandingTxs) + net.decided = make(chan ids.ID, config.MaxOutstandingTxs) - if config.Key >= len(genesis.Keys) || config.Key < 0 { - log.Fatal("Unknown key specified") - return - } + // Init the network + log.AssertNoError(net.Initialize()) - t.ec = salticidae.NewEventContext() - evInt := salticidae.NewSigEvent(t.ec, salticidae.SigEventCallback(C.onTerm), nil) - evInt.Add(salticidae.SIGINT) - evTerm := 
salticidae.NewSigEvent(t.ec, salticidae.SigEventCallback(C.onTerm), nil) - evTerm.Add(salticidae.SIGTERM) + net.net.Start() + defer net.net.Stop() + // connect to the node serr := salticidae.NewError() - netconfig := salticidae.NewMsgNetworkConfig() - net := salticidae.NewMsgNetwork(t.ec, netconfig, &serr) - if serr.GetCode() != 0 { - log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode())) - return - } - - net.RegHandler(networking.DecidedTx, salticidae.MsgNetworkMsgCallback(C.decidedTx), nil) - - net.Start() - defer net.Stop() - remoteIP := salticidae.NewNetAddrFromIPPortString(config.RemoteIP.String(), true, &serr) if code := serr.GetCode(); code != 0 { log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode())) return } - t.conn = net.ConnectSync(remoteIP, true, &serr) + net.conn = net.net.ConnectSync(remoteIP, true, &serr) if serr.GetCode() != 0 { log.Fatal("Sync error %s", salticidae.StrError(serr.GetCode())) return } + // start a cpu profile file, gErr := os.Create("cpu_client.profile") log.AssertNoError(gErr) gErr = pprof.StartCPUProfile(file) @@ -135,192 +71,27 @@ func main() { defer file.Close() defer pprof.StopCPUProfile() - t.networkID = config.NetworkID + net.networkID = config.NetworkID + // start the benchmark we want to run switch config.Chain { - case ChainChain: - t.benchmarkSnowman() - case DagChain: - t.benchmarkAvalanche() + case spChain: + tx, err := genesis.VMGenesis(config.NetworkID, spchainvm.ID) + log.AssertNoError(err) + net.benchmarkSPChain(tx) + case spDAG: + tx, err := genesis.VMGenesis(config.NetworkID, spdagvm.ID) + log.AssertNoError(err) + net.benchmarkSPChain(tx) + case avmDAG: + tx, err := genesis.VMGenesis(config.NetworkID, avm.ID) + log.AssertNoError(err) + net.benchmarkSPChain(tx) default: - t.log.Fatal("did not specify whether to test dag or chain. Exiting") + log.Fatal("did not specify whether to test dag or chain. 
Exiting") return } - t.ec.Dispatch() -} - -func (t *tp) benchmarkAvalanche() { - platformGenesisBytes := genesis.Genesis(t.networkID) - genesisState := &platformvm.Genesis{} - err := platformvm.Codec.Unmarshal(platformGenesisBytes, genesisState) - t.log.AssertNoError(err) - t.log.AssertNoError(genesisState.Initialize()) - - spDAGChain := genesisState.Chains[2] - if name := spDAGChain.ChainName; name != "Simple DAG Payments" { - panic("Wrong chain name") - } - genesisBytes := spDAGChain.GenesisData - - wallet := dagwallet.NewWallet(t.networkID, spDAGChain.ID(), config.AvaTxFee) - - codec := spdagvm.Codec{} - tx, err := codec.UnmarshalTx(genesisBytes) - t.log.AssertNoError(err) - - cb58 := formatting.CB58{} - keyStr := genesis.Keys[config.Key] - t.log.AssertNoError(cb58.FromString(keyStr)) - factory := crypto.FactorySECP256K1R{} - skGen, err := factory.ToPrivateKey(cb58.Bytes) - t.log.AssertNoError(err) - sk := skGen.(*crypto.PrivateKeySECP256K1R) - wallet.ImportKey(sk) - - for _, utxo := range tx.UTXOs() { - wallet.AddUtxo(utxo) - } - - go t.log.RecoverAndPanic(func() { t.IssueAvalanche(spDAGChain.ID(), wallet) }) -} - -func (t *tp) IssueAvalanche(chainID ids.ID, wallet dagwallet.Wallet) { - t.log.Info("starting avalanche benchmark") - pending := make(map[[32]byte]*spdagvm.Tx) - canAdd := []*spdagvm.Tx{} - numAccepted := 0 - - t.decided <- ids.ID{} - meter := timer.TimedMeter{Duration: time.Second} - for d := range t.decided { - if numAccepted%1000 == 0 { - t.log.Info("TPS: %d", meter.Ticks()) - } - if !d.IsZero() { - meter.Tick() - key := d.Key() - if tx := pending[key]; tx != nil { - canAdd = append(canAdd, tx) - - t.log.Debug("Finalized %s", d) - delete(pending, key) - numAccepted++ - } - } - - for len(pending) < config.MaxOutstandingTxs && (wallet.Balance() > 0 || len(canAdd) > 0) { - if wallet.Balance() == 0 { - tx := canAdd[0] - canAdd = canAdd[1:] - - for _, utxo := range tx.UTXOs() { - wallet.AddUtxo(utxo) - } - } - - tx := wallet.Send(1, 0, 
wallet.GetAddress()) - t.log.AssertTrue(tx != nil, "Tx creation failed") - - it, err := t.build.IssueTx(chainID, tx.Bytes()) - t.log.AssertNoError(err) - ds := it.DataStream() - ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) - newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) - - t.conn.GetNet().SendMsg(newMsg, t.conn) - - ds.Free() - ba.Free() - newMsg.Free() - - pending[tx.ID().Key()] = tx - t.log.Debug("Sent tx, pending = %d, accepted = %d", len(pending), numAccepted) - } - } -} - -func (t *tp) benchmarkSnowman() { - platformGenesisBytes := genesis.Genesis(t.networkID) - genesisState := &platformvm.Genesis{} - err := platformvm.Codec.Unmarshal(platformGenesisBytes, genesisState) - t.log.AssertNoError(err) - t.log.AssertNoError(genesisState.Initialize()) - - spchainChain := genesisState.Chains[3] - if name := spchainChain.ChainName; name != "Simple Chain Payments" { - panic("Wrong chain name") - } - genesisBytes := spchainChain.GenesisData - - wallet := chainwallet.NewWallet(t.networkID, spchainChain.ID()) - - codec := spchainvm.Codec{} - accounts, err := codec.UnmarshalGenesis(genesisBytes) - t.log.AssertNoError(err) - - cb58 := formatting.CB58{} - factory := crypto.FactorySECP256K1R{} - for _, keyStr := range genesis.Keys { - t.log.AssertNoError(cb58.FromString(keyStr)) - skGen, err := factory.ToPrivateKey(cb58.Bytes) - t.log.AssertNoError(err) - sk := skGen.(*crypto.PrivateKeySECP256K1R) - wallet.ImportKey(sk) - } - - for _, account := range accounts { - wallet.AddAccount(account) - break - } - - wallet.GenerateTxs() - - go t.log.RecoverAndPanic(func() { t.IssueSnowman(spchainChain.ID(), wallet) }) -} - -func (t *tp) IssueSnowman(chainID ids.ID, wallet chainwallet.Wallet) { - t.log.Debug("Issuing with %d", wallet.Balance()) - numAccepted := 0 - numPending := 0 - - t.decided <- ids.ID{} - - meter := timer.TimedMeter{Duration: time.Second} - for d := range t.decided { - if numAccepted%1000 == 0 { - t.log.Info("TPS: %d", 
meter.Ticks()) - } - if !d.IsZero() { - meter.Tick() - t.log.Debug("Finalized %s", d) - numAccepted++ - numPending-- - } - - for numPending < config.MaxOutstandingTxs && wallet.Balance() > 0 && wallet.TxsSent < chainwallet.MaxNumTxs { - tx := wallet.NextTx() - t.log.AssertTrue(tx != nil, "Tx creation failed") - - it, err := t.build.IssueTx(chainID, tx.Bytes()) - t.log.AssertNoError(err) - ds := it.DataStream() - ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) - newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) - - t.conn.GetNet().SendMsg(newMsg, t.conn) - - ds.Free() - ba.Free() - newMsg.Free() - - numPending++ - t.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted) - } - if wallet.TxsSent >= chainwallet.MaxNumTxs { - fmt.Println("done with test") - return - } - } - + // start processing network messages + net.ec.Dispatch() } diff --git a/xputtest/network.go b/xputtest/network.go new file mode 100644 index 0000000..8cd10a1 --- /dev/null +++ b/xputtest/network.go @@ -0,0 +1,78 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +// #include "salticidae/network.h" +// void onTerm(int sig, void *); +// void decidedTx(msg_t *, msgnetwork_conn_t *, void *); +import "C" + +import ( + "fmt" + "unsafe" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/utils/logging" +) + +// network stores the persistent data needed when running the test. 
+type network struct { + ec salticidae.EventContext + build networking.Builder + + net salticidae.MsgNetwork + conn salticidae.MsgNetworkConn + + log logging.Logger + decided chan ids.ID + + networkID uint32 +} + +var net = network{} + +func (n *network) Initialize() error { + n.ec = salticidae.NewEventContext() + evInt := salticidae.NewSigEvent(n.ec, salticidae.SigEventCallback(C.onTerm), nil) + evInt.Add(salticidae.SIGINT) + evTerm := salticidae.NewSigEvent(n.ec, salticidae.SigEventCallback(C.onTerm), nil) + evTerm.Add(salticidae.SIGTERM) + + serr := salticidae.NewError() + netconfig := salticidae.NewMsgNetworkConfig() + n.net = salticidae.NewMsgNetwork(n.ec, netconfig, &serr) + if serr.GetCode() != 0 { + return fmt.Errorf("sync error %s", salticidae.StrError(serr.GetCode())) + } + + n.net.RegHandler(networking.DecidedTx, salticidae.MsgNetworkMsgCallback(C.decidedTx), nil) + return nil +} + +//export onTerm +func onTerm(C.int, unsafe.Pointer) { + net.log.Info("Terminate signal received") + net.ec.Stop() +} + +// decidedTx handles the recept of a decidedTx message +//export decidedTx +func decidedTx(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) { + msg := salticidae.MsgFromC(salticidae.CMsg(_msg)) + + pMsg, err := net.build.Parse(networking.DecidedTx, msg.GetPayloadByMove()) + if err != nil { + net.log.Warn("Failed to parse DecidedTx message") + return + } + + txID, err := ids.ToID(pMsg.Get(networking.TxID).([]byte)) + net.log.AssertNoError(err) // Length is checked in message parsing + + net.log.Debug("Decided %s", txID) + net.decided <- txID +} diff --git a/xputtest/params.go b/xputtest/params.go index cf9b132..2b107a9 100644 --- a/xputtest/params.go +++ b/xputtest/params.go @@ -6,10 +6,13 @@ package main import ( "flag" "fmt" - "net" + "os" + + stdnet "net" "github.com/ava-labs/gecko/genesis" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" 
"github.com/ava-labs/gecko/utils/wrappers" ) @@ -27,45 +30,55 @@ func init() { loggingConfig, err := logging.DefaultConfig() errs.Add(err) + fs := flag.NewFlagSet("xputtest", flag.ContinueOnError) + // NetworkID: - networkName := flag.String("network-id", genesis.LocalName, "Network ID this node will connect to") + networkName := fs.String("network-id", genesis.LocalName, "Network ID this node will connect to") // Ava fees: - flag.Uint64Var(&config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") + fs.Uint64Var(&config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") // Assertions: - flag.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") + fs.BoolVar(&loggingConfig.Assertions, "assertions-enabled", true, "Turn on assertion execution") // Crypto: - flag.BoolVar(&config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") + fs.BoolVar(&config.EnableCrypto, "signature-verification-enabled", true, "Turn on signature verification") // Remote Server: - ip := flag.String("ip", "127.0.0.1", "IP address of the remote server socket") - port := flag.Uint("port", 9652, "Port of the remote server socket") + ip := fs.String("ip", "127.0.0.1", "IP address of the remote server socket") + port := fs.Uint("port", 9652, "Port of the remote server socket") // Logging: - logsDir := flag.String("log-dir", "", "Logging directory for Ava") - logLevel := flag.String("log-level", "info", "The log level. Should be one of {all, debug, info, warn, error, fatal, off}") + logsDir := fs.String("log-dir", "", "Logging directory for Ava") + logLevel := fs.String("log-level", "info", "The log level. 
Should be one of {all, debug, info, warn, error, fatal, off}") // Test Variables: - chain := flag.Bool("chain", false, "Execute chain transactions") - dag := flag.Bool("dag", false, "Execute dag transactions") - flag.IntVar(&config.Key, "key", 0, "Index of the genesis key list to use") - flag.IntVar(&config.MaxOutstandingTxs, "max_outstanding", 1000, "Maximum number of transactions to leave outstanding") + spchain := fs.Bool("sp-chain", false, "Execute simple payment chain transactions") + spdag := fs.Bool("sp-dag", false, "Execute simple payment dag transactions") + avm := fs.Bool("avm", false, "Execute avm transactions") + key := fs.String("key", "", "Funded key in the genesis key to use to issue transactions") + fs.IntVar(&config.NumTxs, "num-txs", 25000, "Total number of transaction to issue") + fs.IntVar(&config.MaxOutstandingTxs, "max-outstanding", 1000, "Maximum number of transactions to leave outstanding") - flag.Parse() + ferr := fs.Parse(os.Args[1:]) + + if ferr == flag.ErrHelp { + // display usage/help text and exit successfully + os.Exit(0) + } + + if ferr != nil { + // other type of error occurred when parsing args + os.Exit(2) + } networkID, err := genesis.NetworkID(*networkName) errs.Add(err) - if networkID != genesis.LocalID { - errs.Add(fmt.Errorf("the only supported networkID is: %s", genesis.LocalName)) - } - config.NetworkID = networkID // Remote: - parsedIP := net.ParseIP(*ip) + parsedIP := stdnet.ParseIP(*ip) if parsedIP == nil { errs.Add(fmt.Errorf("invalid IP Address %s", *ip)) } @@ -74,6 +87,10 @@ func init() { Port: uint16(*port), } + cb58 := formatting.CB58{} + errs.Add(cb58.FromString(*key)) + config.Key = cb58.Bytes + // Logging: if *logsDir != "" { loggingConfig.Directory = *logsDir @@ -86,11 +103,13 @@ func init() { // Test Variables: switch { - case *chain: - config.Chain = ChainChain - case *dag: - config.Chain = DagChain + case *spchain: + config.Chain = spChain + case *spdag: + config.Chain = spDAG + case *avm: + config.Chain = 
avmDAG default: - config.Chain = UnknownChain + config.Chain = unknown } } diff --git a/xputtest/spchain.go b/xputtest/spchain.go new file mode 100644 index 0000000..816f893 --- /dev/null +++ b/xputtest/spchain.go @@ -0,0 +1,89 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "time" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/spchainvm" + "github.com/ava-labs/gecko/xputtest/chainwallet" +) + +// benchmark an instance of the sp chain +func (n *network) benchmarkSPChain(chain *platformvm.CreateChainTx) { + genesisBytes := chain.GenesisData + wallet := chainwallet.NewWallet(n.log, n.networkID, chain.ID()) + + codec := spchainvm.Codec{} + accounts, err := codec.UnmarshalGenesis(genesisBytes) + n.log.AssertNoError(err) + + factory := crypto.FactorySECP256K1R{} + skGen, err := factory.ToPrivateKey(config.Key) + n.log.AssertNoError(err) + sk := skGen.(*crypto.PrivateKeySECP256K1R) + wallet.ImportKey(sk) + + for _, account := range accounts { + wallet.AddAccount(account) + break + } + + n.log.AssertNoError(wallet.GenerateTxs(config.NumTxs)) + + go n.log.RecoverAndPanic(func() { n.IssueSPChain(chain.ID(), wallet) }) +} + +func (n *network) IssueSPChain(chainID ids.ID, wallet *chainwallet.Wallet) { + n.log.Debug("Issuing with %d", wallet.Balance()) + numAccepted := 0 + numPending := 0 + + n.decided <- ids.ID{} + + meter := timer.TimedMeter{Duration: time.Second} + for d := range n.decided { + if numAccepted%1000 == 0 { + n.log.Info("TPS: %d", meter.Ticks()) + } + if !d.IsZero() { + meter.Tick() + n.log.Debug("Finalized %s", d) + numAccepted++ + numPending-- + } + + for numPending < config.MaxOutstandingTxs && wallet.Balance() > 0 && numAccepted+numPending < 
config.NumTxs { + tx := wallet.NextTx() + n.log.AssertTrue(tx != nil, "Tx creation failed") + + it, err := n.build.IssueTx(chainID, tx.Bytes()) + n.log.AssertNoError(err) + ds := it.DataStream() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) + + n.conn.GetNet().SendMsg(newMsg, n.conn) + + ds.Free() + ba.Free() + newMsg.Free() + + numPending++ + n.log.Debug("Sent tx, pending = %d, accepted = %d", numPending, numAccepted) + } + if numAccepted+numPending >= config.NumTxs { + n.log.Info("done with test") + net.ec.Stop() + return + } + } +} diff --git a/xputtest/spdag.go b/xputtest/spdag.go new file mode 100644 index 0000000..a0196c4 --- /dev/null +++ b/xputtest/spdag.go @@ -0,0 +1,96 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package main + +import ( + "time" + + "github.com/ava-labs/salticidae-go" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/networking" + "github.com/ava-labs/gecko/utils/crypto" + "github.com/ava-labs/gecko/utils/timer" + "github.com/ava-labs/gecko/vms/platformvm" + "github.com/ava-labs/gecko/vms/spdagvm" + "github.com/ava-labs/gecko/xputtest/dagwallet" +) + +// benchmark an instance of the sp dag +func (n *network) benchmarkSPDAG(chain *platformvm.CreateChainTx) { + genesisBytes := chain.GenesisData + wallet := dagwallet.NewWallet(n.networkID, chain.ID(), config.AvaTxFee) + + codec := spdagvm.Codec{} + tx, err := codec.UnmarshalTx(genesisBytes) + n.log.AssertNoError(err) + + factory := crypto.FactorySECP256K1R{} + skGen, err := factory.ToPrivateKey(config.Key) + n.log.AssertNoError(err) + sk := skGen.(*crypto.PrivateKeySECP256K1R) + wallet.ImportKey(sk) + + for _, utxo := range tx.UTXOs() { + wallet.AddUTXO(utxo) + } + + go n.log.RecoverAndPanic(func() { n.IssueSPDAG(chain.ID(), wallet) }) +} + +// issue transactions to the instance of the spdag funded by the provided wallet 
+func (n *network) IssueSPDAG(chainID ids.ID, wallet *dagwallet.Wallet) { + n.log.Info("starting avalanche benchmark") + pending := make(map[[32]byte]*spdagvm.Tx) + canAdd := []*spdagvm.Tx{} + numAccepted := 0 + + n.decided <- ids.ID{} + meter := timer.TimedMeter{Duration: time.Second} + for d := range n.decided { + if numAccepted%1000 == 0 { + n.log.Info("TPS: %d", meter.Ticks()) + } + if !d.IsZero() { + meter.Tick() + key := d.Key() + if tx := pending[key]; tx != nil { + canAdd = append(canAdd, tx) + + n.log.Debug("Finalized %s", d) + delete(pending, key) + numAccepted++ + } + } + + for len(pending) < config.MaxOutstandingTxs && (wallet.Balance() > 0 || len(canAdd) > 0) { + if wallet.Balance() == 0 { + tx := canAdd[0] + canAdd = canAdd[1:] + + for _, utxo := range tx.UTXOs() { + wallet.AddUTXO(utxo) + } + } + + tx := wallet.Send(1, 0, wallet.GetAddress()) + n.log.AssertTrue(tx != nil, "Tx creation failed") + + it, err := n.build.IssueTx(chainID, tx.Bytes()) + n.log.AssertNoError(err) + ds := it.DataStream() + ba := salticidae.NewByteArrayMovedFromDataStream(ds, false) + newMsg := salticidae.NewMsgMovedFromByteArray(networking.IssueTx, ba, false) + + n.conn.GetNet().SendMsg(newMsg, n.conn) + + ds.Free() + ba.Free() + newMsg.Free() + + pending[tx.ID().Key()] = tx + n.log.Debug("Sent tx, pending = %d, accepted = %d", len(pending), numAccepted) + } + } +}