mirror of https://github.com/poanetwork/gecko.git
Merge branch 'master' into patch-1
commit 413ff7b0b4
@ -10,16 +10,19 @@ import (
 	"github.com/ava-labs/gecko/api"
 	"github.com/ava-labs/gecko/chains"
 	"github.com/ava-labs/gecko/genesis"
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/network"
 	"github.com/ava-labs/gecko/snow/engine/common"
 	"github.com/ava-labs/gecko/utils/logging"
+	"github.com/ava-labs/gecko/version"

 	cjson "github.com/ava-labs/gecko/utils/json"
 )

 // Admin is the API service for node admin management
 type Admin struct {
+	version   version.Version
 	nodeID    ids.ShortID
 	networkID uint32
 	log       logging.Logger

@ -30,12 +33,13 @@ type Admin struct {
 }

 // NewService returns a new admin API service
-func NewService(nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler {
+func NewService(version version.Version, nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler {
 	newServer := rpc.NewServer()
 	codec := cjson.NewCodec()
 	newServer.RegisterCodec(codec, "application/json")
 	newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
 	newServer.RegisterService(&Admin{
+		version:   version,
 		nodeID:    nodeID,
 		networkID: networkID,
 		log:       log,

@ -46,8 +50,18 @@ func NewService(nodeID ids.ShortID, networkID uint32, log logging.Logger, chainM
 	return &common.HTTPHandler{Handler: newServer}
 }

-// GetNodeIDArgs are the arguments for calling GetNodeID
-type GetNodeIDArgs struct{}
+// GetNodeVersionReply are the results from calling GetNodeVersion
+type GetNodeVersionReply struct {
+	Version string `json:"version"`
+}
+
+// GetNodeVersion returns the version this node is running
+func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
+	service.log.Debug("Admin: GetNodeVersion called")
+
+	reply.Version = service.version.String()
+	return nil
+}

 // GetNodeIDReply are the results from calling GetNodeID
 type GetNodeIDReply struct {

@ -55,29 +69,39 @@ type GetNodeIDReply struct {
 }

 // GetNodeID returns the node ID of this node
-func (service *Admin) GetNodeID(r *http.Request, args *GetNodeIDArgs, reply *GetNodeIDReply) error {
+func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
 	service.log.Debug("Admin: GetNodeID called")

 	reply.NodeID = service.nodeID
 	return nil
 }

-// GetNetworkIDArgs are the arguments for calling GetNetworkID
-type GetNetworkIDArgs struct{}
-
 // GetNetworkIDReply are the results from calling GetNetworkID
 type GetNetworkIDReply struct {
 	NetworkID cjson.Uint32 `json:"networkID"`
 }

 // GetNetworkID returns the network ID this node is running on
-func (service *Admin) GetNetworkID(r *http.Request, args *GetNetworkIDArgs, reply *GetNetworkIDReply) error {
+func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
 	service.log.Debug("Admin: GetNetworkID called")

 	reply.NetworkID = cjson.Uint32(service.networkID)
 	return nil
 }

 // GetNetworkNameReply is the result from calling GetNetworkName
 type GetNetworkNameReply struct {
 	NetworkName string `json:"networkName"`
 }

 // GetNetworkName returns the network name this node is running on
 func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
 	service.log.Debug("Admin: GetNetworkName called")

 	reply.NetworkName = genesis.NetworkName(service.networkID)
 	return nil
 }

 // GetBlockchainIDArgs are the arguments for calling GetBlockchainID
 type GetBlockchainIDArgs struct {
 	Alias string `json:"alias"`

@ -89,7 +113,7 @@ type GetBlockchainIDReply struct {
 }

 // GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
-func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
+func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
 	service.log.Debug("Admin: GetBlockchainID called")

 	bID, err := service.chainManager.Lookup(args.Alias)

@ -97,16 +121,13 @@ func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs
 		return err
 	}

-// PeersArgs are the arguments for calling Peers
-type PeersArgs struct{}
-
 // PeersReply are the results from calling Peers
 type PeersReply struct {
 	Peers []network.PeerID `json:"peers"`
 }

 // Peers returns the list of current validators
-func (service *Admin) Peers(r *http.Request, args *PeersArgs, reply *PeersReply) error {
+func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
 	service.log.Debug("Admin: Peers called")
 	reply.Peers = service.networking.Peers()
 	return nil

@ -123,22 +144,19 @@ type StartCPUProfilerReply struct {
 }

 // StartCPUProfiler starts a cpu profile writing to the specified file
-func (service *Admin) StartCPUProfiler(r *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
+func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
 	service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename)
 	reply.Success = true
 	return service.performance.StartCPUProfiler(args.Filename)
 }

-// StopCPUProfilerArgs are the arguments for calling StopCPUProfiler
-type StopCPUProfilerArgs struct{}
-
 // StopCPUProfilerReply are the results from calling StopCPUProfiler
 type StopCPUProfilerReply struct {
 	Success bool `json:"success"`
 }

 // StopCPUProfiler stops the cpu profile
-func (service *Admin) StopCPUProfiler(r *http.Request, args *StopCPUProfilerArgs, reply *StopCPUProfilerReply) error {
+func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopCPUProfilerReply) error {
 	service.log.Debug("Admin: StopCPUProfiler called")
 	reply.Success = true
 	return service.performance.StopCPUProfiler()

@ -155,7 +173,7 @@ type MemoryProfileReply struct {
 }

 // MemoryProfile runs a memory profile writing to the specified file
-func (service *Admin) MemoryProfile(r *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
+func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
 	service.log.Debug("Admin: MemoryProfile called with %s", args.Filename)
 	reply.Success = true
 	return service.performance.MemoryProfile(args.Filename)

@ -172,7 +190,7 @@ type LockProfileReply struct {
 }

 // LockProfile runs a mutex profile writing to the specified file
-func (service *Admin) LockProfile(r *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
+func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
 	service.log.Debug("Admin: LockProfile called with %s", args.Filename)
 	reply.Success = true
 	return service.performance.LockProfile(args.Filename)

@ -190,7 +208,7 @@ type AliasReply struct {
 }

 // Alias attempts to alias an HTTP endpoint to a new name
-func (service *Admin) Alias(r *http.Request, args *AliasArgs, reply *AliasReply) error {
+func (service *Admin) Alias(_ *http.Request, args *AliasArgs, reply *AliasReply) error {
 	service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
 	reply.Success = true
 	return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias)

@ -233,7 +251,7 @@ type StacktraceReply struct {
 }

 // Stacktrace returns the current global stacktrace
-func (service *Admin) Stacktrace(_ *http.Request, _ *StacktraceArgs, reply *StacktraceReply) error {
+func (service *Admin) Stacktrace(_ *http.Request, _ *struct{}, reply *StacktraceReply) error {
 	reply.Stacktrace = logging.Stacktrace{Global: true}.String()
 	return nil
 }
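The new GetNodeVersion handler is served by the gorilla/rpc JSON-RPC server registered above. As a hedged illustration of calling it (the /ext/admin path, the admin.getNodeVersion method name, and the 9650 port are assumptions based on Gecko's usual conventions, not shown in this diff):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// getNodeVersion queries a node's admin API over JSON-RPC 2.0. The endpoint
// path and method name are assumptions; adjust them to match the node.
func getNodeVersion(host string) (string, error) {
	body, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "admin.getNodeVersion",
		"params":  map[string]interface{}{},
	})
	if err != nil {
		return "", err
	}

	resp, err := http.Post(host+"/ext/admin", "application/json", bytes.NewReader(body))
	if err != nil {
		return "", err
	}
	defer resp.Body.Close()

	// The result shape matches GetNodeVersionReply{Version string `json:"version"`}.
	var reply struct {
		Result struct {
			Version string `json:"version"`
		} `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		return "", err
	}
	return reply.Result.Version, nil
}

func main() {
	v, err := getNodeVersion("http://127.0.0.1:9650")
	if err != nil {
		panic(err)
	}
	fmt.Println("node version:", v)
}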
@ -8,12 +8,14 @@ import (
 	"fmt"
 	"net/http"
 	"sync"
+	"testing"

 	"github.com/gorilla/rpc/v2"

 	"github.com/ava-labs/gecko/chains/atomic"
 	"github.com/ava-labs/gecko/database"
 	"github.com/ava-labs/gecko/database/encdb"
+	"github.com/ava-labs/gecko/database/memdb"
 	"github.com/ava-labs/gecko/database/prefixdb"
 	"github.com/ava-labs/gecko/ids"
 	"github.com/ava-labs/gecko/snow/engine/common"

@ -29,8 +31,17 @@ const (
 	// maxUserPassLen is the maximum length of the username or password allowed
 	maxUserPassLen = 1024

-	// requiredPassScore defines the score a password must achieve to be accepted
-	// as a password with strong characteristics by the zxcvbn package
+	// maxCheckedPassLen limits the length of the password that should be
+	// strength checked.
+	//
+	// As per issue https://github.com/ava-labs/gecko/issues/195 it was found
+	// the longer the length of password the slower zxcvbn.PasswordStrength()
+	// performs. To avoid performance issues and a DoS vector, we only check the
+	// first 50 characters of the password.
+	maxCheckedPassLen = 50
+
+	// requiredPassScore defines the score a password must achieve to be
+	// accepted as a password with strong characteristics by the zxcvbn package
 	//
 	// The scoring mechanism defined is as follows;
 	//

@ -136,44 +147,10 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre
 	defer ks.lock.Unlock()

 	ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username)

-	if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen {
-		return errUserPassMaxLength
-	}
-
-	if args.Username == "" {
-		return errEmptyUsername
-	}
-	if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
-		return fmt.Errorf("user already exists: %s", args.Username)
-	}
-
-	// As per issue https://github.com/ava-labs/gecko/issues/195 it was found the longer the length of password the slower zxcvbn.PasswordStrength() performs.
-	// To avoid performance issues and DOS vector we only check the first 50 characters of the password.
-	checkPass := args.Password
-	if len(args.Password) > 50 {
-		checkPass = args.Password[:50]
-	}
-
-	if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore {
-		return errWeakPassword
-	}
-
-	usr := &User{}
-	if err := usr.Initialize(args.Password); err != nil {
+	if err := ks.AddUser(args.Username, args.Password); err != nil {
 		return err
 	}

-	usrBytes, err := ks.codec.Marshal(usr)
-	if err != nil {
-		return err
-	}
-
-	if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil {
-		return err
-	}
-	ks.users[args.Username] = usr
 	reply.Success = true
 	return nil
 }

@ -274,6 +251,10 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp

 	ks.log.Verbo("ImportUser called for %s", args.Username)

+	if args.Username == "" {
+		return errEmptyUsername
+	}
+
 	if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
 		return fmt.Errorf("user already exists: %s", args.Username)
 	}

@ -407,3 +388,51 @@ func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database

 	return encDB, nil
 }
+
+// AddUser attempts to register this username and password as a new user of the
+// keystore.
+func (ks *Keystore) AddUser(username, password string) error {
+	if len(username) > maxUserPassLen || len(password) > maxUserPassLen {
+		return errUserPassMaxLength
+	}
+
+	if username == "" {
+		return errEmptyUsername
+	}
+	if usr, err := ks.getUser(username); err == nil || usr != nil {
+		return fmt.Errorf("user already exists: %s", username)
+	}
+
+	checkPass := password
+	if len(password) > maxCheckedPassLen {
+		checkPass = password[:maxCheckedPassLen]
+	}
+
+	if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore {
+		return errWeakPassword
+	}
+
+	usr := &User{}
+	if err := usr.Initialize(password); err != nil {
+		return err
+	}
+
+	usrBytes, err := ks.codec.Marshal(usr)
+	if err != nil {
+		return err
+	}
+
+	if err := ks.userDB.Put([]byte(username), usrBytes); err != nil {
+		return err
+	}
+	ks.users[username] = usr
+
+	return nil
+}
+
+// CreateTestKeystore returns a new keystore that can be utilized for testing
+func CreateTestKeystore(t *testing.T) *Keystore {
+	ks := &Keystore{}
+	ks.Initialize(logging.NoLog{}, memdb.New())
+	return ks
+}
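The truncation guard that CreateUser previously inlined now lives in AddUser behind the maxCheckedPassLen constant: zxcvbn's cost grows with input length, so only a bounded prefix is scored. A minimal standalone sketch of the same check, assuming the nbutton23/zxcvbn-go package that the zxcvbn identifier here appears to refer to, with an assumed score threshold:

package main

import (
	"errors"
	"fmt"

	zxcvbn "github.com/nbutton23/zxcvbn-go"
)

const (
	// Only strength-check a bounded prefix; unbounded input is a DoS vector
	// (gecko issue #195).
	maxCheckedPassLen = 50
	requiredPassScore = 2 // assumed threshold; the real value is defined elsewhere
)

var errWeakPassword = errors.New("password is too weak")

func checkPassword(password string) error {
	checkPass := password
	if len(password) > maxCheckedPassLen {
		checkPass = password[:maxCheckedPassLen]
	}
	if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore {
		return errWeakPassword
	}
	return nil
}

func main() {
	fmt.Println(checkPassword("password"))                  // common password: rejected
	fmt.Println(checkPassword("launchpad-hover-quartz-93")) // long passphrase: accepted
}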
@ -10,9 +10,7 @@ import (
 	"reflect"
 	"testing"

-	"github.com/ava-labs/gecko/database/memdb"
 	"github.com/ava-labs/gecko/ids"
-	"github.com/ava-labs/gecko/utils/logging"
 )

@ -22,8 +20,7 @@ var (
 )

 func TestServiceListNoUsers(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	reply := ListUsersReply{}
 	if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil {

@ -35,8 +32,7 @@ func TestServiceListNoUsers(t *testing.T) {
 }

 func TestServiceCreateUser(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	{
 		reply := CreateUserReply{}

@ -75,8 +71,7 @@ func genStr(n int) string {
 // TestServiceCreateUserArgsChecks generates excessively long usernames or
 // passwords to assure the santity checks on string length are not exceeded
 func TestServiceCreateUserArgsCheck(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	{
 		reply := CreateUserReply{}

@ -117,8 +112,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) {
 // TestServiceCreateUserWeakPassword tests creating a new user with a weak
 // password to ensure the password strength check is working
 func TestServiceCreateUserWeakPassword(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	{
 		reply := CreateUserReply{}

@ -138,8 +132,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) {
 }

 func TestServiceCreateDuplicate(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	{
 		reply := CreateUserReply{}

@ -166,8 +159,7 @@ func TestServiceCreateDuplicate(t *testing.T) {
 }

 func TestServiceCreateUserNoName(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	reply := CreateUserReply{}
 	if err := ks.CreateUser(nil, &CreateUserArgs{

@ -178,8 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) {
 }

 func TestServiceUseBlockchainDB(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	{
 		reply := CreateUserReply{}

@ -218,8 +209,7 @@ func TestServiceUseBlockchainDB(t *testing.T) {
 }

 func TestServiceExportImport(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

 	{
 		reply := CreateUserReply{}

@ -252,8 +242,7 @@ func TestServiceExportImport(t *testing.T) {
 		t.Fatal(err)
 	}

-	newKS := Keystore{}
-	newKS.Initialize(logging.NoLog{}, memdb.New())
+	newKS := CreateTestKeystore(t)

 	{
 		reply := ImportUserReply{}

@ -266,6 +255,17 @@ func TestServiceExportImport(t *testing.T) {
 		}
 	}

+	{
+		reply := ImportUserReply{}
+		if err := newKS.ImportUser(nil, &ImportUserArgs{
+			Username: "",
+			Password: "strongPassword",
+			User:     exportReply.User,
+		}, &reply); err == nil {
+			t.Fatal("Should have errored due to empty username")
+		}
+	}
+
 	{
 		reply := ImportUserReply{}
 		if err := newKS.ImportUser(nil, &ImportUserArgs{

@ -347,11 +347,10 @@ func TestServiceDeleteUser(t *testing.T) {

 	for _, tt := range tests {
 		t.Run(tt.desc, func(t *testing.T) {
-			ks := Keystore{}
-			ks.Initialize(logging.NoLog{}, memdb.New())
+			ks := CreateTestKeystore(t)

 			if tt.setup != nil {
-				if err := tt.setup(&ks); err != nil {
+				if err := tt.setup(ks); err != nil {
 					t.Fatalf("failed to create user setup in keystore: %v", err)
 				}
 			}
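With CreateTestKeystore in place, new keystore tests no longer construct and initialize a Keystore by hand. As a hypothetical extra test, not part of this commit, the extracted AddUser could be exercised directly the same way; the password below is only assumed to clear requiredPassScore:

func TestServiceAddUserDirect(t *testing.T) {
	ks := CreateTestKeystore(t)

	// AddUser enforces the same length, empty-username, duplicate, and
	// zxcvbn strength rules that CreateUser previously applied inline.
	if err := ks.AddUser("bob", "launchpad-hover-quartz-93"); err != nil {
		t.Fatal(err)
	}
	if err := ks.AddUser("bob", "launchpad-hover-quartz-93"); err == nil {
		t.Fatal("expected duplicate-user error")
	}
}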
@ -75,8 +75,9 @@ func (s *Server) RegisterChain(ctx *snow.Context, vmIntf interface{}) {
 	}

 	// all subroutes to a chain begin with "bc/<the chain's ID>"
-	defaultEndpoint := "bc/" + ctx.ChainID.String()
-	httpLogger, err := s.factory.MakeChain(ctx.ChainID, "http")
+	chainID := ctx.ChainID.String()
+	defaultEndpoint := "bc/" + chainID
+	httpLogger, err := s.factory.MakeChain(chainID, "http")
 	if err != nil {
 		s.log.Error("Failed to create new http logger: %s", err)
 		return
@ -38,7 +38,7 @@ import (

 const (
 	defaultChannelSize = 1000
-	requestTimeout     = 2 * time.Second
+	requestTimeout     = 4 * time.Second
 	gossipFrequency    = 10 * time.Second
 	shutdownTimeout    = 1 * time.Second
 )

@ -204,6 +204,31 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
 		return
 	}

+	primaryAlias, err := m.PrimaryAlias(chain.ID)
+	if err != nil {
+		primaryAlias = chain.ID.String()
+	}
+
+	// Create the log and context of the chain
+	chainLog, err := m.logFactory.MakeChain(primaryAlias, "")
+	if err != nil {
+		m.log.Error("error while creating chain's log %s", err)
+		return
+	}
+
+	ctx := &snow.Context{
+		NetworkID:           m.networkID,
+		ChainID:             chain.ID,
+		Log:                 chainLog,
+		DecisionDispatcher:  m.decisionEvents,
+		ConsensusDispatcher: m.consensusEvents,
+		NodeID:              m.nodeID,
+		HTTP:                m.server,
+		Keystore:            m.keystore.NewBlockchainKeyStore(chain.ID),
+		SharedMemory:        m.sharedMemory.NewBlockchainSharedMemory(chain.ID),
+		BCLookup:            m,
+	}
+
 	// Get a factory for the vm we want to use on our chain
 	vmFactory, err := m.vmManager.GetVMFactory(vmID)
 	if err != nil {

@ -212,7 +237,7 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
 	}

 	// Create the chain
-	vm, err := vmFactory.New()
+	vm, err := vmFactory.New(ctx)
 	if err != nil {
 		m.log.Error("error while creating vm: %s", err)
 		return

@ -234,7 +259,7 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
 		return
 	}

-	fx, err := fxFactory.New()
+	fx, err := fxFactory.New(ctx)
 	if err != nil {
 		m.log.Error("error while creating fx: %s", err)
 		return

@ -247,31 +272,8 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
 		}
 	}

-	// Create the log and context of the chain
-	chainLog, err := m.logFactory.MakeChain(chain.ID, "")
-	if err != nil {
-		m.log.Error("error while creating chain's log %s", err)
-		return
-	}
-
-	ctx := &snow.Context{
-		NetworkID:           m.networkID,
-		ChainID:             chain.ID,
-		Log:                 chainLog,
-		DecisionDispatcher:  m.decisionEvents,
-		ConsensusDispatcher: m.consensusEvents,
-		NodeID:              m.nodeID,
-		HTTP:                m.server,
-		Keystore:            m.keystore.NewBlockchainKeyStore(chain.ID),
-		SharedMemory:        m.sharedMemory.NewBlockchainSharedMemory(chain.ID),
-		BCLookup:            m,
-	}
 	consensusParams := m.consensusParams
-	if alias, err := m.PrimaryAlias(ctx.ChainID); err == nil {
-		consensusParams.Namespace = fmt.Sprintf("gecko_%s", alias)
-	} else {
-		consensusParams.Namespace = fmt.Sprintf("gecko_%s", ctx.ChainID)
-	}
+	consensusParams.Namespace = fmt.Sprintf("gecko_%s", primaryAlias)

 	// The validators of this blockchain
 	var validators validators.Set // Validators validating this blockchain

@ -360,8 +362,8 @@ func (m *manager) createAvalancheChain(
 	db := prefixdb.New(ctx.ChainID.Bytes(), m.db)
 	vmDB := prefixdb.New([]byte("vm"), db)
 	vertexDB := prefixdb.New([]byte("vertex"), db)
-	vertexBootstrappingDB := prefixdb.New([]byte("vertex_bootstrapping"), db)
-	txBootstrappingDB := prefixdb.New([]byte("tx_bootstrapping"), db)
+	vertexBootstrappingDB := prefixdb.New([]byte("vertex_bs"), db)
+	txBootstrappingDB := prefixdb.New([]byte("tx_bs"), db)

 	vtxBlocker, err := queue.New(vertexBootstrappingDB)
 	if err != nil {

@ -429,7 +431,13 @@ func (m *manager) createAvalancheChain(

 	// Asynchronously passes messages from the network to the consensus engine
 	handler := &router.Handler{}
-	handler.Initialize(&engine, msgChan, defaultChannelSize)
+	handler.Initialize(
+		&engine,
+		msgChan,
+		defaultChannelSize,
+		fmt.Sprintf("%s_handler", consensusParams.Namespace),
+		consensusParams.Metrics,
+	)

 	// Allows messages to be routed to the new chain
 	m.chainRouter.AddChain(handler)

@ -465,7 +473,7 @@ func (m *manager) createSnowmanChain(

 	db := prefixdb.New(ctx.ChainID.Bytes(), m.db)
 	vmDB := prefixdb.New([]byte("vm"), db)
-	bootstrappingDB := prefixdb.New([]byte("bootstrapping"), db)
+	bootstrappingDB := prefixdb.New([]byte("bs"), db)

 	blocked, err := queue.New(bootstrappingDB)
 	if err != nil {

@ -515,7 +523,13 @@ func (m *manager) createSnowmanChain(

 	// Asynchronously passes messages from the network to the consensus engine
 	handler := &router.Handler{}
-	handler.Initialize(&engine, msgChan, defaultChannelSize)
+	handler.Initialize(
+		&engine,
+		msgChan,
+		defaultChannelSize,
+		fmt.Sprintf("%s_handler", consensusParams.Namespace),
+		consensusParams.Metrics,
+	)

 	// Allow incoming messages to be routed to the new chain
 	m.chainRouter.AddChain(handler)
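The hunks above only shorten the bootstrapping key prefixes ("vertex_bootstrapping" to "vertex_bs"); the layering itself is unchanged. A rough sketch of that pattern, assuming gecko's prefixdb and memdb packages behave as their use here implies (each prefixdb wraps a parent database and namespaces every key under its prefix):

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/database/prefixdb"
)

func main() {
	base := memdb.New()

	// Two logical keyspaces over one physical database.
	vmDB := prefixdb.New([]byte("vm"), base)
	bsDB := prefixdb.New([]byte("bs"), base)

	_ = vmDB.Put([]byte("height"), []byte{1})
	_ = bsDB.Put([]byte("height"), []byte{2})

	v, _ := vmDB.Get([]byte("height"))
	b, _ := bsDB.Get([]byte("height"))
	fmt.Println(v, b) // [1] [2]: same key, isolated by prefix
}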
go.mod
@ -6,7 +6,7 @@ require (
 	github.com/AppsFlyer/go-sundheit v0.2.0
 	github.com/allegro/bigcache v1.2.1 // indirect
 	github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f // indirect
-	github.com/ava-labs/coreth v0.2.0 // Added manually; don't delete
+	github.com/ava-labs/coreth v0.2.4 // Added manually; don't delete
 	github.com/ava-labs/go-ethereum v1.9.3 // indirect
 	github.com/deckarep/golang-set v1.7.1 // indirect
 	github.com/decred/dcrd/dcrec/secp256k1 v1.0.3
go.sum
@ -17,8 +17,8 @@ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm
 github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f h1:uM6lu1fpmCwf54zb6Ckkvphioq8MLlyFb/TlTgPpCKc=
 github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f/go.mod h1:QZe5Yh80Hp1b6JxQdpfSEEe8X7hTyTEZSosSrFf/oJE=
 github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc=
-github.com/ava-labs/coreth v0.2.0 h1:HjR4RMTnWvXhXlnEbFNGF5pbcxfemVxZeEzC4BTIrIw=
-github.com/ava-labs/coreth v0.2.0/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
+github.com/ava-labs/coreth v0.2.4 h1:MhnbuRyMcij7WU4+frayp40quc44AMPc4IrxXhmucWw=
+github.com/ava-labs/coreth v0.2.4/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
 github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY=
 github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8=
 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -62,6 +62,9 @@ func (b *UniqueBag) Difference(diff *UniqueBag) {
 // GetSet ...
 func (b *UniqueBag) GetSet(id ID) BitSet { return (*b)[*id.ID] }

+// RemoveSet ...
+func (b *UniqueBag) RemoveSet(id ID) { delete(*b, id.Key()) }
+
 // List ...
 func (b *UniqueBag) List() []ID {
 	idList := []ID(nil)
main/main.go
@ -41,11 +41,14 @@ func main() {
 	defer Config.DB.Close()

 	if Config.StakingIP.IsZero() {
-		log.Warn("NAT traversal has failed. If this node becomes a staker, it may lose its reward due to being unreachable.")
+		log.Warn("NAT traversal has failed. It will be able to connect to less nodes.")
 	}

 	// Track if sybil control is enforced
-	if !Config.EnableStaking {
+	if !Config.EnableStaking && Config.EnableP2PTLS {
 		log.Warn("Staking is disabled. Sybil control is not enforced.")
 	}
+	if !Config.EnableStaking && !Config.EnableP2PTLS {
+		log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.")
+	}

@ -62,7 +65,7 @@ func main() {

 	// Track if assertions should be executed
 	if Config.LoggingConfig.Assertions {
-		log.Warn("assertions are enabled. This may slow down execution")
+		log.Debug("assertions are enabled. This may slow down execution")
 	}

 	mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko")

@ -83,5 +86,5 @@ func main() {

 	log.Debug("dispatching node handlers")
 	err = node.Dispatch()
-	log.Debug("dispatch returned with: %s", err)
+	log.Debug("node dispatching returned with %s", err)
 }
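The two warnings above split on the new Config.EnableP2PTLS knob, and main/params.go below rejects the remaining combination (staking on, TLS off) via errStakingRequiresTLS. Distilled as a self-contained sketch (the real code adds the error to a wrappers.Errs list during flag parsing rather than returning it):

package main

import (
	"errors"
	"fmt"
)

// validateNetConfig distills the staking/TLS combinations:
//   staking on,  TLS on  -> ok
//   staking off, TLS on  -> node warns that sybil control is not enforced
//   staking off, TLS off -> node warns that packet spoofing is possible
//   staking on,  TLS off -> rejected
func validateNetConfig(enableStaking, enableP2PTLS bool) error {
	if enableStaking && !enableP2PTLS {
		return errors.New("if staking is enabled, network TLS must also be enabled")
	}
	return nil
}

func main() {
	fmt.Println(validateNetConfig(true, false)) // error
	fmt.Println(validateNetConfig(true, true))  // <nil>
}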
main/params.go
@ -25,6 +25,7 @@ import (
 	"github.com/ava-labs/gecko/utils/formatting"
 	"github.com/ava-labs/gecko/utils/hashing"
 	"github.com/ava-labs/gecko/utils/logging"
+	"github.com/ava-labs/gecko/utils/random"
 	"github.com/ava-labs/gecko/utils/wrappers"
 )

@ -36,6 +37,7 @@ const (
 var (
 	Config = node.Config{}
 	Err    error
+	defaultNetworkName     = genesis.TestnetName
 	defaultDbDir           = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
 	defaultStakingKeyPath  = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
 	defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))

@ -48,7 +50,8 @@ var (
 )

 var (
-	errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
+	errBootstrapMismatch  = errors.New("more bootstrap IDs provided than bootstrap IPs")
+	errStakingRequiresTLS = errors.New("if staking is enabled, network TLS must also be enabled")
 )

 // GetIPs returns the default IPs for each network

@ -56,11 +59,26 @@ func GetIPs(networkID uint32) []string {
 	switch networkID {
 	case genesis.DenaliID:
 		return []string{
-			"3.20.56.211:21001",
-			"18.224.140.156:21001",
-			"3.133.83.66:21001",
-			"3.133.131.39:21001",
 			"18.188.121.35:21001",
+			"3.133.83.66:21001",
+			"3.15.206.239:21001",
+			"18.224.140.156:21001",
+			"3.133.131.39:21001",
+			"18.191.29.54:21001",
+			"18.224.172.110:21001",
+			"18.223.211.203:21001",
+			"18.216.130.143:21001",
+			"18.223.184.147:21001",
+			"52.15.48.84:21001",
+			"18.189.194.220:21001",
+			"18.223.119.104:21001",
+			"3.133.155.41:21001",
+			"13.58.170.174:21001",
+			"3.21.245.246:21001",
+			"52.15.190.149:21001",
+			"18.188.95.241:21001",
+			"3.12.197.248:21001",
+			"3.17.39.236:21001",
 		}
 	case genesis.CascadeID:
 		return []string{

@ -75,6 +93,68 @@ func GetIPs(networkID uint32) []string {
 	}
 }

+// GetIDs returns the default IDs for each network
+func GetIDs(networkID uint32) []string {
+	switch networkID {
+	case genesis.DenaliID:
+		return []string{
+			"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
+			"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
+			"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
+			"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
+			"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
+			"HGZ8ae74J3odT8ESreAdCtdnvWG1J4X5n",
+			"4KXitMCoE9p2BHA6VzXtaTxLoEjNDo2Pt",
+			"JyE4P8f4cTryNV8DCz2M81bMtGhFFHexG",
+			"EzGaipqomyK9UKx9DBHV6Ky3y68hoknrF",
+			"CYKruAjwH1BmV3m37sXNuprbr7dGQuJwG",
+			"LegbVf6qaMKcsXPnLStkdc1JVktmmiDxy",
+			"FesGqwKq7z5nPFHa5iwZctHE5EZV9Lpdq",
+			"BFa1padLXBj7VHa2JYvYGzcTBPQGjPhUy",
+			"4B4rc5vdD1758JSBYL1xyvE5NHGzz6xzH",
+			"EDESh4DfZFC15i613pMtWniQ9arbBZRnL",
+			"CZmZ9xpCzkWqjAyS7L4htzh5Lg6kf1k18",
+			"CTtkcXvVdhpNp6f97LEUXPwsRD3A2ZHqP",
+			"84KbQHSDnojroCVY7vQ7u9Tx7pUonPaS",
+			"JjvzhxnLHLUQ5HjVRkvG827ivbLXPwA9u",
+			"4CWTbdvgXHY1CLXqQNAp22nJDo5nAmts6",
+		}
+	case genesis.CascadeID:
+		return []string{
+			"NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co",
+			"CMsa8cMw4eib1Hb8GG4xiUKAq5eE1BwUX",
+			"DsMP6jLhi1MkDVc3qx9xx9AAZWx8e87Jd",
+			"N86eodVZja3GEyZJTo3DFUPGpxEEvjGHs",
+			"EkKeGSLUbHrrtuayBtbwgWDRUiAziC3ao",
+		}
+	default:
+		return nil
+	}
+}
+
+// GetDefaultBootstraps returns the default bootstraps this node should connect
+// to
+func GetDefaultBootstraps(networkID uint32, count int) ([]string, []string) {
+	ips := GetIPs(networkID)
+	ids := GetIDs(networkID)
+
+	if numIPs := len(ips); numIPs < count {
+		count = numIPs
+	}
+
+	sampledIPs := make([]string, 0, count)
+	sampledIDs := make([]string, 0, count)
+
+	sampler := random.Uniform{N: len(ips)}
+	for i := 0; i < count; i++ {
+		i := sampler.Sample()
+		sampledIPs = append(sampledIPs, ips[i])
+		sampledIDs = append(sampledIDs, ids[i])
+	}
+
+	return sampledIPs, sampledIDs
+}
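GetDefaultBootstraps draws count entries with one sampler and reuses the same index into both slices, which is what keeps each bootstrap IP paired with its staker ID. The same invariant, sketched with only the standard library (rand.Perm stands in for gecko's random.Uniform, whose exact replacement semantics aren't shown here):

package main

import (
	"fmt"
	"math/rand"
)

func sampleBootstraps(ips, ids []string, count int) ([]string, []string) {
	if count > len(ips) {
		count = len(ips)
	}
	sampledIPs := make([]string, 0, count)
	sampledIDs := make([]string, 0, count)

	// One permutation, one index per pick: ips[i] and ids[i] stay paired.
	for _, i := range rand.Perm(len(ips))[:count] {
		sampledIPs = append(sampledIPs, ips[i])
		sampledIDs = append(sampledIDs, ids[i])
	}
	return sampledIPs, sampledIDs
}

func main() {
	ips := []string{"10.0.0.1:21001", "10.0.0.2:21001", "10.0.0.3:21001"}
	ids := []string{"NodeA", "NodeB", "NodeC"}
	fmt.Println(sampleBootstraps(ips, ids, 2))
}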
 // Parse the CLI arguments
 func init() {
 	errs := &wrappers.Errs{}

@ -87,8 +167,11 @@ func init() {

 	fs := flag.NewFlagSet("gecko", flag.ContinueOnError)

+	// If this is true, print the version and quit.
+	version := fs.Bool("version", false, "If true, print version and quit")
+
 	// NetworkID:
-	networkName := fs.String("network-id", genesis.TestnetName, "Network ID this node will connect to")
+	networkName := fs.String("network-id", defaultNetworkName, "Network ID this node will connect to")

 	// Ava fees:
 	fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva")

@ -119,7 +202,9 @@ func init() {

 	// Staking:
 	consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
-	fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
+	// TODO - keeping same flag for backwards compatibility, should be changed to "staking-enabled"
+	fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Enable staking. If enabled, Network TLS is required.")
+	fs.BoolVar(&Config.EnableP2PTLS, "p2p-tls-enabled", true, "Require TLS to authenticate network communication")
 	fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
 	fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")

@ -152,6 +237,19 @@ func init() {

 	ferr := fs.Parse(os.Args[1:])

+	if *version { // If --version used, print version and exit
+		networkID, err := genesis.NetworkID(defaultNetworkName)
+		if errs.Add(err); err != nil {
+			return
+		}
+		networkGeneration := genesis.NetworkName(networkID)
+		fmt.Printf(
+			"%s [database=%s, network=%s/%s]\n",
+			node.Version, dbVersion, defaultNetworkName, networkGeneration,
+		)
+		os.Exit(0)
+	}
+
 	if ferr == flag.ErrHelp {
 		// display usage/help text and exit successfully
 		os.Exit(0)

@ -206,9 +304,11 @@ func init() {
 		Port: uint16(*consensusPort),
 	}

+	defaultBootstrapIPs, defaultBootstrapIDs := GetDefaultBootstraps(networkID, 5)
+
 	// Bootstrapping:
 	if *bootstrapIPs == "default" {
-		*bootstrapIPs = strings.Join(GetIPs(networkID), ",")
+		*bootstrapIPs = strings.Join(defaultBootstrapIPs, ",")
 	}
 	for _, ip := range strings.Split(*bootstrapIPs, ",") {
 		if ip != "" {

@ -227,10 +327,16 @@ func init() {
 		if *bootstrapIPs == "" {
 			*bootstrapIDs = ""
 		} else {
-			*bootstrapIDs = strings.Join(genesis.GetConfig(networkID).StakerIDs, ",")
+			*bootstrapIDs = strings.Join(defaultBootstrapIDs, ",")
 		}
 	}
-	if Config.EnableStaking {
+
+	if Config.EnableStaking && !Config.EnableP2PTLS {
+		errs.Add(errStakingRequiresTLS)
+		return
+	}
+
+	if Config.EnableP2PTLS {
 		i := 0
 		cb58 := formatting.CB58{}
 		for _, id := range strings.Split(*bootstrapIDs, ",") {
@ -89,6 +89,15 @@ func (m Builder) Get(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg,
 	})
 }

+// GetAncestors message
+func (m Builder) GetAncestors(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg, error) {
+	return m.Pack(GetAncestors, map[Field]interface{}{
+		ChainID:     chainID.Bytes(),
+		RequestID:   requestID,
+		ContainerID: containerID.Bytes(),
+	})
+}
+
 // Put message
 func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) {
 	return m.Pack(Put, map[Field]interface{}{

@ -99,6 +108,15 @@ func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, conta
 	})
 }

+// MultiPut message
+func (m Builder) MultiPut(chainID ids.ID, requestID uint32, containers [][]byte) (Msg, error) {
+	return m.Pack(MultiPut, map[Field]interface{}{
+		ChainID:             chainID.Bytes(),
+		RequestID:           requestID,
+		MultiContainerBytes: containers,
+	})
+}
+
 // PushQuery message
 func (m Builder) PushQuery(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) {
 	return m.Pack(PushQuery, map[Field]interface{}{
@ -79,14 +79,8 @@ func TestBuildGetPeerList(t *testing.T) {

 func TestBuildPeerList(t *testing.T) {
 	ips := []utils.IPDesc{
-		utils.IPDesc{
-			IP:   net.IPv6loopback,
-			Port: 12345,
-		},
-		utils.IPDesc{
-			IP:   net.IPv6loopback,
-			Port: 54321,
-		},
+		{IP: net.IPv6loopback, Port: 12345},
+		{IP: net.IPv6loopback, Port: 54321},
 	}

 	msg, err := TestBuilder.PeerList(ips)
@ -12,17 +12,18 @@ type Field uint32

 // Fields that may be packed. These values are not sent over the wire.
 const (
-	VersionStr     Field = iota // Used in handshake
-	NetworkID                   // Used in handshake
-	NodeID                      // Used in handshake
-	MyTime                      // Used in handshake
-	IP                          // Used in handshake
-	Peers                       // Used in handshake
-	ChainID                     // Used for dispatching
-	RequestID                   // Used for all messages
-	ContainerID                 // Used for querying
-	ContainerBytes              // Used for gossiping
-	ContainerIDs                // Used for querying
+	VersionStr          Field = iota // Used in handshake
+	NetworkID                        // Used in handshake
+	NodeID                           // Used in handshake
+	MyTime                           // Used in handshake
+	IP                               // Used in handshake
+	Peers                            // Used in handshake
+	ChainID                          // Used for dispatching
+	RequestID                        // Used for all messages
+	ContainerID                      // Used for querying
+	ContainerBytes                   // Used for gossiping
+	ContainerIDs                     // Used for querying
+	MultiContainerBytes              // Used in MultiPut
 )

 // Packer returns the packer function that can be used to pack this field.

@ -50,6 +51,8 @@ func (f Field) Packer() func(*wrappers.Packer, interface{}) {
 		return wrappers.TryPackBytes
 	case ContainerIDs:
 		return wrappers.TryPackHashes
+	case MultiContainerBytes:
+		return wrappers.TryPack2DBytes
 	default:
 		return nil
 	}

@ -80,6 +83,8 @@ func (f Field) Unpacker() func(*wrappers.Packer) interface{} {
 		return wrappers.TryUnpackBytes
 	case ContainerIDs:
 		return wrappers.TryUnpackHashes
+	case MultiContainerBytes:
+		return wrappers.TryUnpack2DBytes
 	default:
 		return nil
 	}

@ -107,6 +112,8 @@ func (f Field) String() string {
 		return "Container Bytes"
 	case ContainerIDs:
 		return "Container IDs"
+	case MultiContainerBytes:
+		return "MultiContainerBytes"
 	default:
 		return "Unknown Field"
 	}

@ -135,8 +142,12 @@ func (op Op) String() string {
 		return "accepted"
 	case Get:
 		return "get"
+	case GetAncestors:
+		return "get_ancestors"
 	case Put:
 		return "put"
+	case MultiPut:
+		return "multi_put"
 	case PushQuery:
 		return "push_query"
 	case PullQuery:

@ -166,26 +177,33 @@ const (
 	PushQuery
 	PullQuery
 	Chits
+	// Bootstrapping:
+	// TODO: Move GetAncestors and MultiPut with the rest of the bootstrapping
+	// commands when we do non-backwards compatible upgrade
+	GetAncestors
+	MultiPut
 )

 // Defines the messages that can be sent/received with this network
 var (
 	Messages = map[Op][]Field{
 		// Handshake:
-		GetVersion:  []Field{},
-		Version:     []Field{NetworkID, NodeID, MyTime, IP, VersionStr},
-		GetPeerList: []Field{},
-		PeerList:    []Field{Peers},
+		GetVersion:  {},
+		Version:     {NetworkID, NodeID, MyTime, IP, VersionStr},
+		GetPeerList: {},
+		PeerList:    {Peers},
 		// Bootstrapping:
-		GetAcceptedFrontier: []Field{ChainID, RequestID},
-		AcceptedFrontier:    []Field{ChainID, RequestID, ContainerIDs},
-		GetAccepted:         []Field{ChainID, RequestID, ContainerIDs},
-		Accepted:            []Field{ChainID, RequestID, ContainerIDs},
+		GetAcceptedFrontier: {ChainID, RequestID},
+		AcceptedFrontier:    {ChainID, RequestID, ContainerIDs},
+		GetAccepted:         {ChainID, RequestID, ContainerIDs},
+		Accepted:            {ChainID, RequestID, ContainerIDs},
+		GetAncestors:        {ChainID, RequestID, ContainerID},
+		MultiPut:            {ChainID, RequestID, MultiContainerBytes},
 		// Consensus:
-		Get:       []Field{ChainID, RequestID, ContainerID},
-		Put:       []Field{ChainID, RequestID, ContainerID, ContainerBytes},
-		PushQuery: []Field{ChainID, RequestID, ContainerID, ContainerBytes},
-		PullQuery: []Field{ChainID, RequestID, ContainerID},
-		Chits:     []Field{ChainID, RequestID, ContainerIDs},
+		Get:       {ChainID, RequestID, ContainerID},
+		Put:       {ChainID, RequestID, ContainerID, ContainerBytes},
+		PushQuery: {ChainID, RequestID, ContainerID, ContainerBytes},
+		PullQuery: {ChainID, RequestID, ContainerID},
+		Chits:     {ChainID, RequestID, ContainerIDs},
 	}
 )
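The Messages map above is the wire format's single source of truth: an op's ordered field list drives both Packer and Unpacker, which is why adding MultiContainerBytes only required a new table row plus the TryPack2DBytes/TryUnpack2DBytes cases. A toy, self-contained version of that table-driven pattern (not gecko's actual wrappers.Packer):

package main

import (
	"encoding/binary"
	"fmt"
)

type field int

const (
	requestID field = iota
	payload
)

// An op declares its fields once; pack and unpack both walk the same list,
// so sender and receiver can never disagree on ordering.
var opFields = []field{requestID, payload}

func pack(values map[field]uint32) []byte {
	buf := make([]byte, 4*len(opFields))
	for i, f := range opFields {
		binary.BigEndian.PutUint32(buf[4*i:], values[f])
	}
	return buf
}

func unpack(buf []byte) map[field]uint32 {
	values := make(map[field]uint32, len(opFields))
	for i, f := range opFields {
		values[f] = binary.BigEndian.Uint32(buf[4*i:])
	}
	return values
}

func main() {
	buf := pack(map[field]uint32{requestID: 7, payload: 42})
	fmt.Println(unpack(buf)) // map[0:7 1:42]
}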
@ -56,7 +56,7 @@ type metrics struct {
 	getPeerlist, peerlist,
 	getAcceptedFrontier, acceptedFrontier,
 	getAccepted, accepted,
-	get, put,
+	get, getAncestors, put, multiPut,
 	pushQuery, pullQuery, chits messageMetrics
 }

@ -83,7 +83,9 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error {
 	errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
 	errs.Add(m.accepted.initialize(Accepted, registerer))
 	errs.Add(m.get.initialize(Get, registerer))
+	errs.Add(m.getAncestors.initialize(GetAncestors, registerer))
 	errs.Add(m.put.initialize(Put, registerer))
+	errs.Add(m.multiPut.initialize(MultiPut, registerer))
 	errs.Add(m.pushQuery.initialize(PushQuery, registerer))
 	errs.Add(m.pullQuery.initialize(PullQuery, registerer))
 	errs.Add(m.chits.initialize(Chits, registerer))

@ -111,8 +113,12 @@ func (m *metrics) message(msgType Op) *messageMetrics {
 		return &m.accepted
 	case Get:
 		return &m.get
+	case GetAncestors:
+		return &m.getAncestors
 	case Put:
 		return &m.put
+	case MultiPut:
+		return &m.multiPut
 	case PushQuery:
 		return &m.pushQuery
 	case PullQuery:
@ -21,24 +21,28 @@ import (
|
|||
"github.com/ava-labs/gecko/snow/triggers"
|
||||
"github.com/ava-labs/gecko/snow/validators"
|
||||
"github.com/ava-labs/gecko/utils"
|
||||
"github.com/ava-labs/gecko/utils/formatting"
|
||||
"github.com/ava-labs/gecko/utils/logging"
|
||||
"github.com/ava-labs/gecko/utils/random"
|
||||
"github.com/ava-labs/gecko/utils/timer"
|
||||
"github.com/ava-labs/gecko/version"
|
||||
)
|
||||
|
||||
// reasonable default values
|
||||
const (
|
||||
defaultInitialReconnectDelay = time.Second
|
||||
defaultMaxReconnectDelay = time.Hour
|
||||
defaultMaxMessageSize uint32 = 1 << 21
|
||||
defaultSendQueueSize = 1 << 10
|
||||
defaultMaxClockDifference = time.Minute
|
||||
defaultPeerListGossipSpacing = time.Minute
|
||||
defaultPeerListGossipSize = 100
|
||||
defaultPeerListStakerGossipFraction = 2
|
||||
defaultGetVersionTimeout = 2 * time.Second
|
||||
defaultAllowPrivateIPs = true
|
||||
defaultGossipSize = 50
|
||||
defaultInitialReconnectDelay = time.Second
|
||||
defaultMaxReconnectDelay = time.Hour
|
||||
DefaultMaxMessageSize uint32 = 1 << 21
|
||||
defaultSendQueueSize = 1 << 10
|
||||
defaultMaxNetworkPendingSendBytes = 1 << 29 // 512MB
|
||||
defaultNetworkPendingSendBytesToRateLimit = defaultMaxNetworkPendingSendBytes / 4
|
||||
defaultMaxClockDifference = time.Minute
|
||||
defaultPeerListGossipSpacing = time.Minute
|
||||
defaultPeerListGossipSize = 100
|
||||
defaultPeerListStakerGossipFraction = 2
|
||||
defaultGetVersionTimeout = 2 * time.Second
|
||||
defaultAllowPrivateIPs = true
|
||||
defaultGossipSize = 50
|
||||
)
|
||||
|
||||
// Network defines the functionality of the networking library.
|
||||
|
@ -102,23 +106,26 @@ type network struct {
|
|||
clock timer.Clock
|
||||
lastHeartbeat int64
|
||||
|
||||
initialReconnectDelay time.Duration
|
||||
maxReconnectDelay time.Duration
|
||||
maxMessageSize uint32
|
||||
sendQueueSize int
|
||||
maxClockDifference time.Duration
|
||||
peerListGossipSpacing time.Duration
|
||||
peerListGossipSize int
|
||||
peerListStakerGossipFraction int
|
||||
getVersionTimeout time.Duration
|
||||
allowPrivateIPs bool
|
||||
gossipSize int
|
||||
initialReconnectDelay time.Duration
|
||||
maxReconnectDelay time.Duration
|
||||
maxMessageSize uint32
|
||||
sendQueueSize int
|
||||
maxNetworkPendingSendBytes int
|
||||
networkPendingSendBytesToRateLimit int
|
||||
maxClockDifference time.Duration
|
||||
peerListGossipSpacing time.Duration
|
||||
peerListGossipSize int
|
||||
peerListStakerGossipFraction int
|
||||
getVersionTimeout time.Duration
|
||||
allowPrivateIPs bool
|
||||
gossipSize int
|
||||
|
||||
executor timer.Executor
|
||||
|
||||
b Builder
|
||||
|
||||
stateLock sync.Mutex
|
||||
pendingBytes int
|
||||
closed bool
|
||||
disconnectedIPs map[string]struct{}
|
||||
connectedIPs map[string]struct{}
|
||||
|
@ -162,8 +169,10 @@ func NewDefaultNetwork(
|
|||
router,
|
||||
defaultInitialReconnectDelay,
|
||||
defaultMaxReconnectDelay,
|
||||
defaultMaxMessageSize,
|
||||
DefaultMaxMessageSize,
|
||||
defaultSendQueueSize,
|
||||
defaultMaxNetworkPendingSendBytes,
|
||||
defaultNetworkPendingSendBytesToRateLimit,
|
||||
defaultMaxClockDifference,
|
||||
defaultPeerListGossipSpacing,
|
||||
defaultPeerListGossipSize,
|
||||
|
@ -193,6 +202,8 @@ func NewNetwork(
|
|||
maxReconnectDelay time.Duration,
|
||||
maxMessageSize uint32,
|
||||
sendQueueSize int,
|
||||
maxNetworkPendingSendBytes int,
|
||||
networkPendingSendBytesToRateLimit int,
|
||||
maxClockDifference time.Duration,
|
||||
peerListGossipSpacing time.Duration,
|
||||
peerListGossipSize int,
|
||||
|
@ -202,35 +213,37 @@ func NewNetwork(
|
|||
gossipSize int,
|
||||
) Network {
|
||||
net := &network{
|
||||
log: log,
|
||||
id: id,
|
||||
ip: ip,
|
||||
networkID: networkID,
|
||||
version: version,
|
||||
parser: parser,
|
||||
listener: listener,
|
||||
dialer: dialer,
|
||||
serverUpgrader: serverUpgrader,
|
||||
clientUpgrader: clientUpgrader,
|
||||
vdrs: vdrs,
|
||||
router: router,
|
||||
nodeID: rand.Uint32(),
|
||||
initialReconnectDelay: initialReconnectDelay,
|
||||
maxReconnectDelay: maxReconnectDelay,
|
||||
maxMessageSize: maxMessageSize,
|
||||
sendQueueSize: sendQueueSize,
|
||||
maxClockDifference: maxClockDifference,
|
||||
peerListGossipSpacing: peerListGossipSpacing,
|
||||
peerListGossipSize: peerListGossipSize,
|
||||
peerListStakerGossipFraction: peerListStakerGossipFraction,
|
||||
getVersionTimeout: getVersionTimeout,
|
||||
allowPrivateIPs: allowPrivateIPs,
|
||||
gossipSize: gossipSize,
|
||||
log: log,
|
||||
id: id,
|
||||
ip: ip,
|
||||
networkID: networkID,
|
||||
version: version,
|
||||
parser: parser,
|
||||
listener: listener,
|
||||
dialer: dialer,
|
||||
serverUpgrader: serverUpgrader,
|
||||
clientUpgrader: clientUpgrader,
|
||||
vdrs: vdrs,
|
||||
router: router,
|
||||
nodeID: rand.Uint32(),
|
||||
initialReconnectDelay: initialReconnectDelay,
|
||||
maxReconnectDelay: maxReconnectDelay,
|
||||
maxMessageSize: maxMessageSize,
|
||||
sendQueueSize: sendQueueSize,
|
||||
maxNetworkPendingSendBytes: maxNetworkPendingSendBytes,
|
||||
networkPendingSendBytesToRateLimit: networkPendingSendBytesToRateLimit,
|
||||
maxClockDifference: maxClockDifference,
|
||||
peerListGossipSpacing: peerListGossipSpacing,
|
||||
peerListGossipSize: peerListGossipSize,
|
||||
peerListStakerGossipFraction: peerListStakerGossipFraction,
|
||||
getVersionTimeout: getVersionTimeout,
|
||||
allowPrivateIPs: allowPrivateIPs,
|
||||
gossipSize: gossipSize,
|
||||
|
||||
disconnectedIPs: make(map[string]struct{}),
|
||||
connectedIPs: make(map[string]struct{}),
|
||||
retryDelay: make(map[string]time.Duration),
|
||||
myIPs: map[string]struct{}{ip.String(): struct{}{}},
|
||||
myIPs: map[string]struct{}{ip.String(): {}},
|
||||
peers: make(map[[20]byte]*peer),
|
||||
}
|
||||
net.initialize(registerer)
|
||||
|
@ -266,8 +279,11 @@ func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
|
|||
func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
|
||||
msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs)
|
||||
if err != nil {
|
||||
n.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d",
|
||||
containerIDs.Len())
|
||||
n.log.Error("failed to build AcceptedFrontier(%s, %d, %s): %s",
|
||||
chainID,
|
||||
requestID,
|
||||
containerIDs,
|
||||
err)
|
||||
return // Packing message failed
|
||||
}
|
||||
|
||||
|
@ -279,7 +295,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID)
|
||||
n.log.Debug("failed to send AcceptedFrontier(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerIDs)
|
||||
n.acceptedFrontier.numFailed.Inc()
|
||||
} else {
|
||||
n.acceptedFrontier.numSent.Inc()
|
||||
|
@ -290,6 +310,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
|
|||
func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
|
||||
msg, err := n.b.GetAccepted(chainID, requestID, containerIDs)
|
||||
if err != nil {
|
||||
n.log.Error("failed to build GetAccepted(%s, %d, %s): %s",
|
||||
chainID,
|
||||
requestID,
|
||||
containerIDs,
|
||||
err)
|
||||
for _, validatorID := range validatorIDs.List() {
|
||||
vID := validatorID
|
||||
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
|
||||
|
@ -307,6 +332,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send GetAccepted(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerIDs)
|
||||
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
|
||||
n.getAccepted.numFailed.Inc()
|
||||
} else {
|
||||
|
@ -319,8 +349,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
|
|||
func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
|
||||
msg, err := n.b.Accepted(chainID, requestID, containerIDs)
|
||||
if err != nil {
|
||||
n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d",
|
||||
containerIDs.Len())
|
||||
n.log.Error("failed to build Accepted(%s, %d, %s): %s",
|
||||
chainID,
|
||||
requestID,
|
||||
containerIDs,
|
||||
err)
|
||||
return // Packing message failed
|
||||
}
|
||||
|
||||
|
@ -332,13 +365,72 @@ func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID ui
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send an Accepted message to: %s", validatorID)
|
||||
n.log.Debug("failed to send Accepted(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerIDs)
|
||||
n.accepted.numFailed.Inc()
|
||||
} else {
|
||||
n.accepted.numSent.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// GetAncestors implements the Sender interface.
|
||||
func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
|
||||
msg, err := n.b.GetAncestors(chainID, requestID, containerID)
|
||||
if err != nil {
|
||||
n.log.Error("failed to build GetAncestors message: %w", err)
|
||||
return
|
||||
}
|
||||
|
||||
n.stateLock.Lock()
|
||||
defer n.stateLock.Unlock()
|
||||
|
||||
peer, sent := n.peers[validatorID.Key()]
|
||||
if sent {
|
||||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send GetAncestors(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerID)
|
||||
n.executor.Add(func() { n.router.GetAncestorsFailed(validatorID, chainID, requestID) })
|
||||
n.getAncestors.numFailed.Inc()
|
||||
} else {
|
||||
n.getAncestors.numSent.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// MultiPut implements the Sender interface.
|
||||
func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
|
||||
msg, err := n.b.MultiPut(chainID, requestID, containers)
|
||||
if err != nil {
|
||||
n.log.Error("failed to build MultiPut message because of container of size %d", len(containers))
|
||||
return
|
||||
}
|
||||
|
||||
n.stateLock.Lock()
|
||||
defer n.stateLock.Unlock()
|
||||
|
||||
peer, sent := n.peers[validatorID.Key()]
|
||||
if sent {
|
||||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send MultiPut(%s, %s, %d, %d)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
len(containers))
|
||||
n.multiPut.numFailed.Inc()
|
||||
} else {
|
||||
n.multiPut.numSent.Inc()
|
||||
}
|
||||
}
|
||||
|
||||
// Get implements the Sender interface.
|
||||
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
|
||||
msg, err := n.b.Get(chainID, requestID, containerID)
|
||||
|
@ -352,7 +444,12 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send a Get message to: %s", validatorID)
|
||||
n.log.Debug("failed to send Get(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerID)
|
||||
n.executor.Add(func() { n.router.GetFailed(validatorID, chainID, requestID) })
|
||||
n.get.numFailed.Inc()
|
||||
} else {
|
||||
n.get.numSent.Inc()
|
||||
|
@ -363,7 +460,12 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
|
|||
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
|
||||
msg, err := n.b.Put(chainID, requestID, containerID, container)
|
||||
if err != nil {
|
||||
n.log.Error("failed to build Put message because of container of size %d", len(container))
|
||||
n.log.Error("failed to build Put(%s, %d, %s): %s. len(container) : %d",
|
||||
chainID,
|
||||
requestID,
|
||||
containerID,
|
||||
err,
|
||||
len(container))
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -375,7 +477,12 @@ func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send a Put message to: %s", validatorID)
|
||||
n.log.Debug("failed to send Put(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerID)
|
||||
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
|
||||
n.put.numFailed.Inc()
|
||||
} else {
|
||||
n.put.numSent.Inc()
|
||||
|
@ -386,11 +493,17 @@ func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
|
|||
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
|
||||
msg, err := n.b.PushQuery(chainID, requestID, containerID, container)
|
||||
if err != nil {
|
||||
n.log.Error("failed to build PushQuery(%s, %d, %s): %s. len(container): %d",
|
||||
chainID,
|
||||
requestID,
|
||||
containerID,
|
||||
err,
|
||||
len(container))
|
||||
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
|
||||
for _, validatorID := range validatorIDs.List() {
|
||||
vID := validatorID
|
||||
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
|
||||
}
|
||||
n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
|
||||
return // Packing message failed
|
||||
}
|
||||
|
||||
|
@ -404,7 +517,12 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed sending a PushQuery message to: %s", vID)
|
||||
n.log.Debug("failed to send PushQuery(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerID)
|
||||
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
|
||||
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
|
||||
n.pushQuery.numFailed.Inc()
|
||||
} else {
|
||||
|
@ -428,7 +546,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed sending a PullQuery message to: %s", vID)
|
||||
n.log.Debug("failed to send PullQuery(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
containerID)
|
||||
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
|
||||
n.pullQuery.numFailed.Inc()
|
||||
} else {
|
||||
|
@ -441,7 +563,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
|
|||
func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
|
||||
msg, err := n.b.Chits(chainID, requestID, votes)
|
||||
if err != nil {
|
||||
n.log.Error("failed to build Chits message because of %d votes", votes.Len())
|
||||
n.log.Error("failed to build Chits(%s, %d, %s): %s",
|
||||
chainID,
|
||||
requestID,
|
||||
votes,
|
||||
err)
|
||||
return
|
||||
}
|
||||
|
||||
|
@ -453,7 +579,11 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
|
|||
sent = peer.send(msg)
|
||||
}
|
||||
if !sent {
|
||||
n.log.Debug("failed to send a Chits message to: %s", validatorID)
|
||||
n.log.Debug("failed to send Chits(%s, %s, %d, %s)",
|
||||
validatorID,
|
||||
chainID,
|
||||
requestID,
|
||||
votes)
|
||||
n.chits.numFailed.Inc()
|
||||
} else {
|
||||
n.chits.numSent.Inc()
|
||||
|
@ -463,7 +593,8 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
|
|||
// Gossip attempts to gossip the container to the network
|
||||
func (n *network) Gossip(chainID, containerID ids.ID, container []byte) {
|
||||
if err := n.gossipContainer(chainID, containerID, container); err != nil {
|
||||
n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
|
||||
n.log.Debug("failed to Gossip(%s, %s): %s", chainID, containerID, err)
|
||||
n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -637,7 +768,9 @@ func (n *network) gossip() {
|
|||
}
|
||||
msg, err := n.b.PeerList(ips)
|
||||
if err != nil {
|
||||
n.log.Warn("failed to gossip PeerList message due to %s", err)
|
||||
n.log.Error("failed to build peer list to gossip: %s. len(ips): %d",
|
||||
err,
|
||||
len(ips))
|
||||
continue
|
||||
}
|
||||
|
||||
|
@ -692,11 +825,12 @@ func (n *network) connectTo(ip utils.IPDesc) {
|
|||
|
||||
if delay == 0 {
|
||||
delay = n.initialReconnectDelay
|
||||
} else {
|
||||
delay *= 2
|
||||
}
|
||||
|
||||
delay = time.Duration(float64(delay) * (1 + rand.Float64()))
|
||||
if delay > n.maxReconnectDelay {
|
||||
delay = n.maxReconnectDelay
|
||||
// set the timeout to [.75, 1) * maxReconnectDelay
|
||||
delay = time.Duration(float64(n.maxReconnectDelay) * (3 + rand.Float64()) / 4)
|
||||
}
|
||||
|
||||
n.stateLock.Lock()
|
||||
|
|
|
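The reconnect path above implements jittered exponential backoff: the delay doubles on each failed attempt, is stretched by a random factor in [1, 2), and once it exceeds the cap it is re-drawn from [0.75, 1) * maxReconnectDelay so capped retries still spread out over time. A minimal, self-contained sketch of that schedule (the function and parameter names here are illustrative, not from the codebase):

    package main

    import (
        "fmt"
        "math/rand"
        "time"
    )

    // nextDelay mirrors the schedule above: double the previous delay, stretch
    // it by a random factor in [1, 2), and once past the cap re-draw it from
    // [0.75, 1) * max so capped retries are not all scheduled at the same instant.
    func nextDelay(prev, initial, max time.Duration) time.Duration {
        delay := prev
        if delay == 0 {
            delay = initial
        } else {
            delay *= 2
        }
        delay = time.Duration(float64(delay) * (1 + rand.Float64()))
        if delay > max {
            delay = time.Duration(float64(max) * (3 + rand.Float64()) / 4)
        }
        return delay
    }

    func main() {
        var delay time.Duration
        for i := 0; i < 6; i++ {
            delay = nextDelay(delay, time.Second, 30*time.Second)
            fmt.Println(delay) // grows roughly geometrically, then hovers near the cap
        }
    }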
@@ -31,6 +31,10 @@ type peer struct {
	// state lock held.
	closed bool

	// number of bytes currently in the send queue, is only modified when the
	// network state lock is held.
	pendingBytes int

	// queue of messages this connection is attempting to send the peer. Is
	// closed when the connection is closed.
	sender chan []byte

@@ -155,6 +159,10 @@ func (p *peer) WriteMessages() {
			p.id,
			formatting.DumpBytes{Bytes: msg})

		p.net.stateLock.Lock()
		p.pendingBytes -= len(msg)
		p.net.stateLock.Unlock()

		packer := wrappers.Packer{Bytes: make([]byte, len(msg)+wrappers.IntLen)}
		packer.PackBytes(msg)
		msg = packer.Bytes

@@ -184,8 +192,22 @@ func (p *peer) send(msg Msg) bool {
		p.net.log.Debug("dropping message to %s due to a closed connection", p.id)
		return false
	}

	msgBytes := msg.Bytes()
	newPendingBytes := p.net.pendingBytes + len(msgBytes)
	newConnPendingBytes := p.pendingBytes + len(msgBytes)
	if newPendingBytes > p.net.networkPendingSendBytesToRateLimit && // Check to see if we should be enforcing any rate limiting
		uint32(p.pendingBytes) > p.net.maxMessageSize && // this connection should have a minimum allowed bandwidth
		(newPendingBytes > p.net.maxNetworkPendingSendBytes || // Check to see if this message would put too much memory into the network
			newConnPendingBytes > p.net.maxNetworkPendingSendBytes/20) { // Check to see if this connection is using too much memory
		p.net.log.Debug("dropping message to %s due to a send queue with too many bytes", p.id)
		return false
	}

	select {
	case p.sender <- msg.Bytes():
	case p.sender <- msgBytes:
		p.net.pendingBytes = newPendingBytes
		p.pendingBytes = newConnPendingBytes
		return true
	default:
		p.net.log.Debug("dropping message to %s due to a full send queue", p.id)
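The send path above never blocks the caller: once the network-wide queue passes the rate-limit threshold and this connection already has more than its minimum allowance queued, a message is dropped if it would push the whole network over its cap or this one connection over a 1/20 share of that cap. A standalone sketch of the same admission check, with illustrative names and thresholds:

    package main

    import "fmt"

    // sendQueue applies the admission rule above: under lowWater everything is
    // accepted; past it, a message is dropped if it would push the global queue
    // over maxTotal or this connection over its 1/20 share of maxTotal.
    type sendQueue struct {
        totalPending int // bytes queued across all connections
        connPending  int // bytes queued on this connection
        minPerConn   int // bandwidth floor: never starve a connection below this
        lowWater     int // start rate limiting past this many total bytes
        maxTotal     int // hard cap on total queued bytes
    }

    func (q *sendQueue) tryEnqueue(msgLen int) bool {
        newTotal := q.totalPending + msgLen
        newConn := q.connPending + msgLen
        if newTotal > q.lowWater && // rate limiting is active
            q.connPending > q.minPerConn && // connection already has its floor
            (newTotal > q.maxTotal || newConn > q.maxTotal/20) {
            return false // drop rather than block the caller
        }
        q.totalPending = newTotal
        q.connPending = newConn
        return true
    }

    func main() {
        q := &sendQueue{minPerConn: 1 << 10, lowWater: 1 << 20, maxTotal: 1 << 22}
        fmt.Println(q.tryEnqueue(512)) // true: well under the low-water mark
        q.connPending = 2 << 10        // past the per-connection floor
        q.totalPending = 1<<20 + 1     // past the low-water mark
        fmt.Println(q.tryEnqueue(1 << 21)) // false: exceeds this connection's 1/20 share
    }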
@@ -201,7 +223,7 @@ func (p *peer) handle(msg Msg) {
	op := msg.Op()
	msgMetrics := p.net.message(op)
	if msgMetrics == nil {
		p.net.log.Debug("dropping an unknown message from %s with op %d", p.id, op)
		p.net.log.Debug("dropping an unknown message from %s with op %s", p.id, op.String())
		return
	}
	msgMetrics.numReceived.Inc()

@@ -236,14 +258,20 @@ func (p *peer) handle(msg Msg) {
		p.accepted(msg)
	case Get:
		p.get(msg)
	case GetAncestors:
		p.getAncestors(msg)
	case Put:
		p.put(msg)
	case MultiPut:
		p.multiPut(msg)
	case PushQuery:
		p.pushQuery(msg)
	case PullQuery:
		p.pullQuery(msg)
	case Chits:
		p.chits(msg)
	default:
		p.net.log.Debug("dropping an unknown message from %s with op %s", p.id, op.String())
	}
}

@@ -537,6 +565,16 @@ func (p *peer) get(msg Msg) {
	p.net.router.Get(p.id, chainID, requestID, containerID)
}

func (p *peer) getAncestors(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(RequestID).(uint32)
	containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	p.net.router.GetAncestors(p.id, chainID, requestID, containerID)
}

// assumes the stateLock is not held
func (p *peer) put(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))

@@ -549,6 +587,16 @@ func (p *peer) put(msg Msg) {
	p.net.router.Put(p.id, chainID, requestID, containerID, container)
}

// assumes the stateLock is not held
func (p *peer) multiPut(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(RequestID).(uint32)
	containers := msg.Get(MultiContainerBytes).([][]byte)

	p.net.router.MultiPut(p.id, chainID, requestID, containers)
}

// assumes the stateLock is not held
func (p *peer) pushQuery(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
@@ -34,6 +34,7 @@ type Config struct {

	// Staking configuration
	StakingIP utils.IPDesc
	EnableP2PTLS bool
	EnableStaking bool
	StakingKeyFile string
	StakingCertFile string

node/node.go
@@ -55,7 +55,8 @@ const (
var (
	genesisHashKey = []byte("genesisID")

	nodeVersion = version.NewDefaultVersion("avalanche", 0, 5, 1)
	// Version is the version of this code
	Version = version.NewDefaultVersion("avalanche", 0, 5, 5)
	versionParser = version.NewDefaultParser()
)

@@ -118,7 +119,7 @@ func (n *Node) initNetworking() error {
	dialer := network.NewDialer(TCP)

	var serverUpgrader, clientUpgrader network.Upgrader
	if n.Config.EnableStaking {
	if n.Config.EnableP2PTLS {
		cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile)
		if err != nil {
			return err
		}

@@ -156,7 +157,7 @@ func (n *Node) initNetworking() error {
		n.ID,
		n.Config.StakingIP,
		n.Config.NetworkID,
		nodeVersion,
		Version,
		versionParser,
		listener,
		dialer,

@@ -252,7 +253,7 @@ func (n *Node) initDatabase() error {
// Otherwise, it is a hash of the TLS certificate that this node
// uses for P2P communication
func (n *Node) initNodeID() error {
	if !n.Config.EnableStaking {
	if !n.Config.EnableP2PTLS {
		n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String())))
		n.Log.Info("Set the node's ID to %s", n.ID)
		return nil

@@ -460,7 +461,7 @@ func (n *Node) initMetricsAPI() {
func (n *Node) initAdminAPI() {
	if n.Config.AdminAPIEnabled {
		n.Log.Info("initializing Admin API")
		service := admin.NewService(n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
		service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
		n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
	}
}

@@ -525,6 +526,7 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
	n.Log = logger
	n.LogFactory = logFactory
	n.Config = Config
	n.Log.Info("Gecko version is: %s", Version)

	httpLog, err := logFactory.MakeSubdir("http")
	if err != nil {
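With the version now threaded into the admin service, a node's build can be checked over its JSON-RPC admin endpoint. A hedged client sketch in Go; the method name admin.getNodeVersion and the /ext/admin route on port 9650 follow Gecko's usual admin API conventions and should be verified against your deployment:

    package main

    import (
        "bytes"
        "fmt"
        "io"
        "net/http"
    )

    func main() {
        // Assumed: the admin route is mounted at /ext/admin on the default API
        // port, and the method follows the admin.* naming used by the service.
        payload := []byte(`{"jsonrpc":"2.0","id":1,"method":"admin.getNodeVersion","params":{}}`)
        resp, err := http.Post("http://127.0.0.1:9650/ext/admin", "application/json", bytes.NewReader(payload))
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        body, _ := io.ReadAll(resp.Body)
        fmt.Println(string(body)) // e.g. {"jsonrpc":"2.0","result":{"version":"avalanche/0.5.5"},"id":1}
    }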
@@ -1,3 +1,7 @@
- name: Kill Node
  command: killall -SIGINT ava
  command: killall -SIGTERM ava
  ignore_errors: true

- name: Kill EVM
  command: killall -SIGTERM evm
  ignore_errors: true
@@ -15,7 +15,7 @@ GECKO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Directory
BUILD_DIR=$GECKO_PATH/build # Where binaries go
PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go

CORETH_VER="0.2.0" # Should match coreth version in go.mod
CORETH_VER="0.2.4" # Should match coreth version in go.mod
CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER"

# Build Gecko
@@ -77,6 +77,10 @@ type Vertex interface {
	// Returns the vertices this vertex depends on
	Parents() []Vertex

	// Returns the height of this vertex. A vertex's height is defined as one
	// greater than the maximum height of its parents.
	Height() uint64

	// Returns a series of state transitions to be performed on acceptance
	Txs() []snowstorm.Tx
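Reading that definition concretely: a vertex with no parents has height 0, and otherwise its height is one greater than the maximum parent height. A small illustration (the vertex type below is a toy stand-in, not the engine's interface):

    package main

    import "fmt"

    // vertex is a toy stand-in for the engine's Vertex interface.
    type vertex struct {
        parents []*vertex
        height  uint64
    }

    // newVertex assigns height = 1 + max(parent heights), or 0 with no parents.
    func newVertex(parents ...*vertex) *vertex {
        v := &vertex{parents: parents}
        for _, p := range parents {
            if p.height+1 > v.height {
                v.height = p.height + 1
            }
        }
        return v
    }

    func main() {
        genesis := newVertex()
        a := newVertex(genesis)
        b := newVertex(genesis)
        c := newVertex(a, b)
        fmt.Println(genesis.height, a.height, b.height, c.height) // 0 1 1 2
    }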
@@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
			Namespace: namespace,
			Name:      "vtx_accepted",
			Help:      "Latency of accepting from the time the vertex was issued in milliseconds",
			Buckets:   timer.Buckets,
			Buckets:   timer.MillisecondsBuckets,
		})
	m.latRejected = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name:      "vtx_rejected",
			Help:      "Latency of rejecting from the time the vertex was issued in milliseconds",
			Buckets:   timer.Buckets,
			Buckets:   timer.MillisecondsBuckets,
		})

	if err := registerer.Register(m.numProcessing); err != nil {
@@ -16,7 +16,7 @@ type Vtx struct {
	id  ids.ID
	txs []snowstorm.Tx

	height int
	height uint64
	status choices.Status

	bytes []byte

@@ -25,6 +25,7 @@ type Vtx struct {
func (v *Vtx) ID() ids.ID             { return v.id }
func (v *Vtx) ParentIDs() []ids.ID    { return nil }
func (v *Vtx) Parents() []Vertex      { return v.dependencies }
func (v *Vtx) Height() uint64         { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx    { return v.txs }
func (v *Vtx) Status() choices.Status { return v.status }
func (v *Vtx) Live()                  {}
@@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
			Namespace: namespace,
			Name:      "accepted",
			Help:      "Latency of accepting from the time the block was issued in milliseconds",
			Buckets:   timer.Buckets,
			Buckets:   timer.MillisecondsBuckets,
		})
	m.latRejected = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name:      "rejected",
			Help:      "Latency of rejecting from the time the block was issued in milliseconds",
			Buckets:   timer.Buckets,
			Buckets:   timer.MillisecondsBuckets,
		})

	if err := registerer.Register(m.numProcessing); err != nil {
@@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
			Namespace: namespace,
			Name:      "tx_accepted",
			Help:      "Latency of accepting from the time the transaction was issued in milliseconds",
			Buckets:   timer.Buckets,
			Buckets:   timer.MillisecondsBuckets,
		})
	m.latRejected = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name:      "tx_rejected",
			Help:      "Latency of rejecting from the time the transaction was issued in milliseconds",
			Buckets:   timer.Buckets,
			Buckets:   timer.MillisecondsBuckets,
		})

	if err := registerer.Register(m.numProcessing); err != nil {
@@ -6,6 +6,7 @@ package avalanche
import (
	"fmt"

	"github.com/ava-labs/gecko/cache"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"

@@ -15,6 +16,10 @@ import (
	"github.com/prometheus/client_golang/prometheus"
)

const (
	cacheSize = 100000
)

// BootstrapConfig ...
type BootstrapConfig struct {
	common.Config
@@ -32,24 +37,29 @@ type bootstrapper struct {
	metrics
	common.Bootstrapper

	// IDs of vertices that we're already in the process of getting
	// TODO: Find a better way to track; this keeps every single vertex's ID in memory when bootstrapping from nothing
	seen ids.Set
	// true if all of the vertices in the original accepted frontier have been processed
	processedStartingAcceptedFrontier bool

	numFetched uint64 // number of vertices that have been fetched from validators
	// number of vertices fetched so far
	numFetched uint32

	// vtxReqs prevents asking validators for the same vertex
	vtxReqs common.Requests
	// tracks which validators were asked for which containers in which requests
	outstandingRequests common.Requests

	// IDs of vertices that we have requested from other validators but haven't received
	pending ids.Set
	finished bool
	// Contains IDs of vertices that have recently been processed
	processedCache *cache.LRU

	// true if bootstrapping is done
	finished bool

	// Called when bootstrapping is done
	onFinished func() error
}

// Initialize this engine.
func (b *bootstrapper) Initialize(config BootstrapConfig) error {
	b.BootstrapConfig = config
	b.processedCache = &cache.LRU{Size: cacheSize}

	b.VtxBlocked.SetParser(&vtxParser{
		log: config.Context.Log,
@@ -88,118 +98,49 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
	return acceptedVtxIDs
}

// ForceAccepted ...
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
	for _, vtxID := range acceptedContainerIDs.List() {
		if err := b.fetch(vtxID); err != nil {
			return err
		}
	}

	if numPending := b.pending.Len(); numPending == 0 {
		// TODO: This typically indicates bootstrapping has failed, so this
		// should be handled appropriately
		return b.finish()
	}
	return nil
}

// Put ...
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
	vtx, err := b.State.ParseVertex(vtxBytes)
	if err != nil {
		b.BootstrapConfig.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
			err,
			formatting.DumpBytes{Bytes: vtxBytes})

		return b.GetFailed(vdr, requestID)
	}

	if !b.pending.Contains(vtx.ID()) {
		b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested vertex:\n%s",
			vdr,
			formatting.DumpBytes{Bytes: vtxBytes})

		return b.GetFailed(vdr, requestID)
	}

	return b.addVertex(vtx)
}

// GetFailed ...
func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) error {
	vtxID, ok := b.vtxReqs.Remove(vdr, requestID)
	if !ok {
		b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
			vdr)
		return nil
	}

	b.sendRequest(vtxID)
	return nil
}

// Get vertex [vtxID] and its ancestors
func (b *bootstrapper) fetch(vtxID ids.ID) error {
	if b.pending.Contains(vtxID) {
	// Make sure we haven't already requested this block
	if b.outstandingRequests.Contains(vtxID) {
		return nil
	}

	vtx, err := b.State.GetVertex(vtxID)
	if err != nil {
		b.sendRequest(vtxID)
	// Make sure we don't already have this vertex
	if _, err := b.State.GetVertex(vtxID); err == nil {
		return nil
	}
	return b.storeVertex(vtx)
}

func (b *bootstrapper) sendRequest(vtxID ids.ID) {
	validators := b.BootstrapConfig.Validators.Sample(1)
	validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
	if len(validators) == 0 {
		b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", vtxID)
		return
		return fmt.Errorf("Dropping request for %s as there are no validators", vtxID)
	}
	validatorID := validators[0].ID()
	b.RequestID++

	b.vtxReqs.RemoveAny(vtxID)
	b.vtxReqs.Add(validatorID, b.RequestID, vtxID)

	b.pending.Add(vtxID)
	b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, vtxID)

	b.numBSPendingRequests.Set(float64(b.pending.Len()))
}

func (b *bootstrapper) addVertex(vtx avalanche.Vertex) error {
	if err := b.storeVertex(vtx); err != nil {
		return err
	}

	if numPending := b.pending.Len(); numPending == 0 {
		return b.finish()
	}
	b.outstandingRequests.Add(validatorID, b.RequestID, vtxID)
	b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors
	return nil
}
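The new fetch is idempotent by construction: a vertex is requested at most once at a time, and not at all if it is already available locally. A standalone sketch of that guard pattern, with maps standing in for the request tracker and the vertex store:

    package main

    import "fmt"

    // fetcher deduplicates downloads: skip if a request is in flight or the
    // item is already in the local store, otherwise record the request and
    // ask one peer for the item plus its ancestors.
    type fetcher struct {
        outstanding map[string]bool   // IDs with an in-flight request
        store       map[string][]byte // locally available items
        requests    []string          // stand-in for Sender.GetAncestors calls
    }

    func (f *fetcher) fetch(id string) {
        if f.outstanding[id] {
            return // already asked a peer for this
        }
        if _, ok := f.store[id]; ok {
            return // already have it locally
        }
        f.outstanding[id] = true
        f.requests = append(f.requests, id)
    }

    func main() {
        f := &fetcher{
            outstanding: map[string]bool{},
            store:       map[string][]byte{"have": {1}},
        }
        f.fetch("have")
        f.fetch("need")
        f.fetch("need") // deduplicated
        fmt.Println(f.requests) // [need]
    }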
func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) error {
	vts := []avalanche.Vertex{vtx}
	b.numFetched++
	if b.numFetched%2500 == 0 { // periodically inform user of progress
		b.BootstrapConfig.Context.Log.Info("bootstrapping has fetched %d vertices", b.numFetched)
	}
// Process vertices
func (b *bootstrapper) process(vtx avalanche.Vertex) error {
	toProcess := []avalanche.Vertex{vtx}
	for len(toProcess) > 0 {
		newLen := len(toProcess) - 1
		vtx := toProcess[newLen]
		toProcess = toProcess[:newLen]
		if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this
			continue
		}

	for len(vts) > 0 {
		newLen := len(vts) - 1
		vtx := vts[newLen]
		vts = vts[:newLen]

		vtxID := vtx.ID()
		switch status := vtx.Status(); status {
		switch vtx.Status() {
		case choices.Unknown:
			b.sendRequest(vtxID)
			if err := b.fetch(vtx.ID()); err != nil {
				return err
			}
		case choices.Rejected:
			return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID())
		case choices.Processing:
			b.pending.Remove(vtxID)

			if err := b.VtxBlocked.Push(&vertexJob{
				log:         b.BootstrapConfig.Context.Log,
				numAccepted: b.numBSVtx,

@@ -207,11 +148,12 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) error {
				vtx: vtx,
			}); err == nil {
				b.numBSBlockedVtx.Inc()
				b.numFetched++ // Progress tracker
				if b.numFetched%common.StatusUpdateFrequency == 0 {
					b.BootstrapConfig.Context.Log.Info("fetched %d vertices", b.numFetched)
				}
			} else {
				b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked")
			}
			if err := b.VtxBlocked.Commit(); err != nil {
				return err
				b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked: %s", err)
			}
			for _, tx := range vtx.Txs() {
				if err := b.TxBlocked.Push(&txJob{

@@ -222,53 +164,134 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) error {
				}); err == nil {
					b.numBSBlockedTx.Inc()
				} else {
					b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked")
					b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked: %s", err)
				}
			}
			if err := b.TxBlocked.Commit(); err != nil {
				return err
			}
			for _, parent := range vtx.Parents() {
				if parentID := parent.ID(); !b.seen.Contains(parentID) {
					b.seen.Add(parentID)
					vts = append(vts, parent)
				}
				toProcess = append(toProcess, parent)
			}
		case choices.Accepted:
			b.BootstrapConfig.Context.Log.Verbo("bootstrapping confirmed %s", vtxID)
		case choices.Rejected:
			return fmt.Errorf("bootstrapping wants to accept %s, however it was previously rejected", vtxID)
			b.processedCache.Put(vtx.ID(), nil)
		}
	}

	numPending := b.pending.Len()
	b.numBSPendingRequests.Set(float64(numPending))
	if err := b.VtxBlocked.Commit(); err != nil {
		return err
	}
	if err := b.TxBlocked.Commit(); err != nil {
		return err
	}

	if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
		return b.finish()
	}
	return nil
}
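The new process avoids recursion and repeated work by pairing an explicit LIFO stack with a cache of recently processed vertex IDs. The same traversal shape in isolation, with a plain visited set standing in for the LRU cache:

    package main

    import "fmt"

    type vtx struct {
        id      string
        parents []*vtx
    }

    // walk visits every ancestor of start exactly once, using an explicit
    // stack (LIFO, like toProcess above) and a visited set standing in for
    // the bootstrapper's LRU cache.
    func walk(start *vtx, visit func(*vtx)) {
        toProcess := []*vtx{start}
        seen := map[string]bool{}
        for len(toProcess) > 0 {
            n := len(toProcess) - 1
            v := toProcess[n]
            toProcess = toProcess[:n]
            if seen[v.id] {
                continue
            }
            seen[v.id] = true
            visit(v)
            toProcess = append(toProcess, v.parents...)
        }
    }

    func main() {
        g := &vtx{id: "genesis"}
        a := &vtx{id: "a", parents: []*vtx{g}}
        b := &vtx{id: "b", parents: []*vtx{g}}
        c := &vtx{id: "c", parents: []*vtx{a, b}}
        walk(c, func(v *vtx) { fmt.Println(v.id) }) // prints c, b, genesis, a: each exactly once
    }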
// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
// with request ID [requestID]. Expects vtxs[0] to be the vertex requested in the corresponding GetAncestors.
func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte) error {
	if lenVtxs := len(vtxs); lenVtxs > common.MaxContainersPerMultiPut {
		b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains more than maximum number of vertices", vdr, requestID)
		return b.GetAncestorsFailed(vdr, requestID)
	} else if lenVtxs == 0 {
		b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains no vertices", vdr, requestID)
		return b.GetAncestorsFailed(vdr, requestID)
	}

	// Make sure this is in response to a request we made
	neededVtxID, needed := b.outstandingRequests.Remove(vdr, requestID)
	if !needed { // this message isn't in response to a request we made
		b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
		return nil
	}

	neededVtx, err := b.State.ParseVertex(vtxs[0]) // the vertex we requested
	if err != nil {
		b.BootstrapConfig.Context.Log.Debug("Failed to parse requested vertex %s: %s", neededVtxID, err)
		b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxs[0]})
		return b.fetch(neededVtxID)
	} else if actualID := neededVtx.ID(); !actualID.Equals(neededVtxID) {
		b.BootstrapConfig.Context.Log.Debug("expected the first block to be the requested block, %s, but is %s", neededVtxID, actualID)
		return b.fetch(neededVtxID)
	}

	for _, vtxBytes := range vtxs { // Parse/persist all the vertices
		if _, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx
			b.BootstrapConfig.Context.Log.Debug("Failed to parse vertex: %s", err)
			b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes})
		}
	}

	return b.process(neededVtx)
}
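Before any vertex is processed, the handler above validates the whole response: it must match an outstanding request, be non-empty, stay under the container cap, and lead with the exact vertex that was asked for; anything else falls back to re-fetching. A condensed sketch of that validation ladder (parsing and IDs are mocked):

    package main

    import (
        "errors"
        "fmt"
    )

    const maxContainers = 2000

    // validateMultiPut returns the requested ID to retry when the response is
    // unusable, mirroring the ladder of checks in the handler above.
    func validateMultiPut(requested string, containers []string,
        parse func(string) (string, error)) (retry string, ok bool) {
        if len(containers) == 0 || len(containers) > maxContainers {
            return requested, false // empty or oversized: treat as a failed request
        }
        id, err := parse(containers[0])
        if err != nil || id != requested {
            return requested, false // first element must be the vertex we asked for
        }
        return "", true
    }

    func main() {
        parse := func(s string) (string, error) {
            if s == "bogus" {
                return "", errors.New("unparseable")
            }
            return s, nil // a container's "bytes" double as its ID in this mock
        }
        fmt.Println(validateMultiPut("vtx1", []string{"vtx1", "vtx0"}, parse)) // "" true
        fmt.Println(validateMultiPut("vtx1", []string{"bogus"}, parse))       // vtx1 false
    }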
// GetAncestorsFailed is called when a GetAncestors message we sent fails
func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) error {
	vtxID, ok := b.outstandingRequests.Remove(vdr, requestID)
	if !ok {
		b.BootstrapConfig.Context.Log.Debug("GetAncestorsFailed(%s, %d) called but there was no outstanding request to this validator with this ID", vdr, requestID)
		return nil
	}
	// Send another request for the vertex
	return b.fetch(vtxID)
}

// ForceAccepted ...
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
	if err := b.VM.Bootstrapping(); err != nil {
		return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
			err)
	}

	for _, vtxID := range acceptedContainerIDs.List() {
		if vtx, err := b.State.GetVertex(vtxID); err == nil {
			if err := b.process(vtx); err != nil {
				return err
			}
		} else if err := b.fetch(vtxID); err != nil {
			return err
		}
	}
	b.processedStartingAcceptedFrontier = true

	if numPending := b.outstandingRequests.Len(); numPending == 0 {
		return b.finish()
	}
	return nil
}

// Finish bootstrapping
func (b *bootstrapper) finish() error {
	if b.finished {
		return nil
	}
	b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching vertices. executing state transitions...")
	b.BootstrapConfig.Context.Log.Info("finished fetching vertices. executing transaction state transitions...")

	if err := b.executeAll(b.TxBlocked, b.numBSBlockedTx); err != nil {
		return err
	}

	b.BootstrapConfig.Context.Log.Info("executing vertex state transitions...")

	if err := b.executeAll(b.VtxBlocked, b.numBSBlockedVtx); err != nil {
		return err
	}

	if err := b.VM.Bootstrapped(); err != nil {
		return fmt.Errorf("failed to notify VM that bootstrapping has finished: %w",
			err)
	}

	// Start consensus
	if err := b.onFinished(); err != nil {
		return err
	}
	b.seen = ids.Set{}
	b.finished = true
	return nil
}

func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) error {
	numExecuted := 0
	for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() {
		numBlocked.Dec()
		b.BootstrapConfig.Context.Log.Debug("Executing: %s", job.ID())

@@ -279,6 +302,10 @@ func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge)
		if err := jobs.Commit(); err != nil {
			return err
		}
		numExecuted++
		if numExecuted%common.StatusUpdateFrequency == 0 { // Periodically print progress
			b.BootstrapConfig.Context.Log.Info("executed %d operations", numExecuted)
		}
	}
	return nil
}
[File diff suppressed because it is too large]
@@ -27,7 +27,7 @@ type Vtx struct {
	id  ids.ID
	txs []snowstorm.Tx

	height int
	height uint64
	status choices.Status

	bytes []byte

@@ -36,6 +36,7 @@ type Vtx struct {
func (v *Vtx) ID() ids.ID                  { return v.id }
func (v *Vtx) DependencyIDs() []ids.ID     { return nil }
func (v *Vtx) Parents() []avalanche.Vertex { return v.parents }
func (v *Vtx) Height() uint64              { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx         { return v.txs }
func (v *Vtx) Status() choices.Status      { return v.status }
func (v *Vtx) Accept() error               { v.status = choices.Accepted; return nil }
@@ -121,6 +121,12 @@ func (vtx *uniqueVertex) Parents() []avalanche.Vertex {
	return vtx.v.parents
}

func (vtx *uniqueVertex) Height() uint64 {
	vtx.refresh()

	return vtx.v.vtx.height
}

func (vtx *uniqueVertex) Txs() []snowstorm.Tx {
	vtx.refresh()
@@ -4,8 +4,12 @@
package avalanche

import (
	"time"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/network"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
	"github.com/ava-labs/gecko/snow/consensus/snowstorm"
	"github.com/ava-labs/gecko/snow/engine/common"

@@ -15,6 +19,12 @@ import (
	"github.com/ava-labs/gecko/utils/wrappers"
)

const (
	// TODO define this constant in one place rather than here and in snowman
	// Maximum total size, in bytes, of the containers in a MultiPut message
	maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
)

// Transitive implements the Engine interface by attempting to fetch all
// transitive dependencies.
type Transitive struct {

@@ -40,7 +50,7 @@ type Transitive struct {

// Initialize implements the Engine interface
func (t *Transitive) Initialize(config Config) error {
	config.Context.Log.Info("Initializing Avalanche consensus")
	config.Context.Log.Info("Initializing consensus engine")

	t.Config = config
	t.metrics.Initialize(config.Context.Log, config.Params.Namespace, config.Params.Metrics)

@@ -61,13 +71,13 @@ func (t *Transitive) finishBootstrapping() error {
		if vtx, err := t.Config.State.GetVertex(vtxID); err == nil {
			frontier = append(frontier, vtx)
		} else {
			t.Config.Context.Log.Error("Vertex %s failed to be loaded from the frontier with %s", vtxID, err)
			t.Config.Context.Log.Error("vertex %s failed to be loaded from the frontier with %s", vtxID, err)
		}
	}
	t.Consensus.Initialize(t.Config.Context, t.Params, frontier)
	t.bootstrapped = true

	t.Config.Context.Log.Info("Bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
	t.Config.Context.Log.Info("bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
	return nil
}
@@ -75,7 +85,7 @@ func (t *Transitive) finishBootstrapping() error {
func (t *Transitive) Gossip() error {
	edge := t.Config.State.Edge()
	if len(edge) == 0 {
		t.Config.Context.Log.Debug("Dropping gossip request as no vertices have been accepted")
		t.Config.Context.Log.Verbo("dropping gossip request as no vertices have been accepted")
		return nil
	}

@@ -83,18 +93,18 @@ func (t *Transitive) Gossip() error {
	vtxID := edge[sampler.Sample()]
	vtx, err := t.Config.State.GetVertex(vtxID)
	if err != nil {
		t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", vtxID, err)
		t.Config.Context.Log.Warn("dropping gossip request as %s couldn't be loaded due to: %s", vtxID, err)
		return nil
	}

	t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", vtxID)
	t.Config.Context.Log.Verbo("gossiping %s as accepted to the network", vtxID)
	t.Config.Sender.Gossip(vtxID, vtx.Bytes())
	return nil
}

// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() error {
	t.Config.Context.Log.Info("Shutting down Avalanche consensus")
	t.Config.Context.Log.Info("shutting down consensus engine")
	return t.Config.VM.Shutdown()
}
@@ -110,19 +120,63 @@ func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error
	return nil
}

// GetAncestors implements the Engine interface
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
	startTime := time.Now()
	t.Config.Context.Log.Verbo("GetAncestors(%s, %d, %s) called", vdr, requestID, vtxID)
	vertex, err := t.Config.State.GetVertex(vtxID)
	if err != nil || vertex.Status() == choices.Unknown {
		t.Config.Context.Log.Verbo("dropping getAncestors")
		return nil // Don't have the requested vertex. Drop message.
	}

	queue := make([]avalanche.Vertex, 1, common.MaxContainersPerMultiPut) // for BFS
	queue[0] = vertex
	ancestorsBytesLen := 0                                               // length, in bytes, of vertex and its ancestors
	ancestorsBytes := make([][]byte, 0, common.MaxContainersPerMultiPut) // vertex and its ancestors in BFS order
	visited := ids.Set{}                                                 // IDs of vertices that have been in queue before
	visited.Add(vertex.ID())

	for len(ancestorsBytes) < common.MaxContainersPerMultiPut && len(queue) > 0 && time.Since(startTime) < common.MaxTimeFetchingAncestors {
		var vtx avalanche.Vertex
		vtx, queue = queue[0], queue[1:] // pop
		vtxBytes := vtx.Bytes()
		// Ensure response size isn't too large. Include wrappers.IntLen because the size of the message
		// is included with each container, and the size is represented by an int.
		if newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes); newLen < maxContainersLen {
			ancestorsBytes = append(ancestorsBytes, vtxBytes)
			ancestorsBytesLen = newLen
		} else { // reached maximum response size
			break
		}
		for _, parent := range vtx.Parents() {
			if parent.Status() == choices.Unknown { // Don't have this vertex; ignore
				continue
			}
			if parentID := parent.ID(); !visited.Contains(parentID) { // If already visited, ignore
				queue = append(queue, parent)
				visited.Add(parentID)
			}
		}
	}

	t.Config.Sender.MultiPut(vdr, requestID, ancestorsBytes)
	return nil
}
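GetAncestors bounds its reply three ways at once: a container-count cap, a wall-clock budget, and a byte budget that charges each container an extra length prefix. The batching shape on its own, with illustrative limits (the visited set and time budget of the real code are omitted for brevity):

    package main

    import "fmt"

    const (
        intLen   = 4    // length prefix charged per container
        maxBytes = 1000 // response byte budget
        maxCount = 8    // response count cap
    )

    // batchAncestors collects items breadth-first until a cap is hit.
    // next returns the children of an item to continue the BFS.
    func batchAncestors(root []byte, next func([]byte) [][]byte) [][]byte {
        queue := [][]byte{root}
        out := [][]byte{}
        total := 0
        for len(out) < maxCount && len(queue) > 0 {
            item := queue[0]
            queue = queue[1:]
            if newTotal := total + intLen + len(item); newTotal <= maxBytes {
                out = append(out, item)
                total = newTotal
            } else {
                break // byte budget exhausted
            }
            queue = append(queue, next(item)...)
        }
        return out
    }

    func main() {
        // A toy chain: each item has one "parent" that is 100 bytes smaller.
        next := func(b [][]byte, _ = b; return nil) [][]byte { return nil } // placeholder, replaced below
        _ = next
        chain := func(b []byte) [][]byte {
            if len(b) <= 100 {
                return nil
            }
            return [][]byte{make([]byte, len(b)-100)}
        }
        batch := batchAncestors(make([]byte, 400), chain)
        fmt.Println(len(batch)) // 3: the fourth container would blow the 1000-byte budget
    }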
// Put implements the Engine interface
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
	t.Config.Context.Log.Verbo("Put called for vertexID %s", vtxID)
	t.Config.Context.Log.Verbo("Put(%s, %d, %s) called", vdr, requestID, vtxID)

	if !t.bootstrapped {
		return t.bootstrapper.Put(vdr, requestID, vtxID, vtxBytes)
	if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
		t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
		return nil
	}

	vtx, err := t.Config.State.ParseVertex(vtxBytes)
	if err != nil {
		t.Config.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
			err,
			formatting.DumpBytes{Bytes: vtxBytes})
		t.Config.Context.Log.Debug("failed to parse vertex %s due to: %s", vtxID, err)
		t.Config.Context.Log.Verbo("vertex:\n%s", formatting.DumpBytes{Bytes: vtxBytes})
		return t.GetFailed(vdr, requestID)
	}
	_, err = t.insertFrom(vdr, vtx)

@@ -131,14 +185,14 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxByt

// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
	if !t.bootstrapped {
		return t.bootstrapper.GetFailed(vdr, requestID)
	if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
		t.Config.Context.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}

	vtxID, ok := t.vtxReqs.Remove(vdr, requestID)
	if !ok {
		t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
			vdr)
		t.Config.Context.Log.Debug("GetFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
		return nil
	}

@@ -160,7 +214,7 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
// PullQuery implements the Engine interface
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
	if !t.bootstrapped {
		t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping", vtxID)
		t.Config.Context.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
		return nil
	}

@@ -188,15 +242,14 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID)
// PushQuery implements the Engine interface
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
	if !t.bootstrapped {
		t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", vtxID)
		t.Config.Context.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
		return nil
	}

	vtx, err := t.Config.State.ParseVertex(vtxBytes)
	if err != nil {
		t.Config.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s",
			err,
			formatting.DumpBytes{Bytes: vtxBytes})
		t.Config.Context.Log.Debug("failed to parse vertex %s due to: %s", vtxID, err)
		t.Config.Context.Log.Verbo("vertex:\n%s", formatting.DumpBytes{Bytes: vtxBytes})
		return nil
	}

@@ -210,7 +263,7 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID,
// Chits implements the Engine interface
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) error {
	if !t.bootstrapped {
		t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
		t.Config.Context.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
		return nil
	}

@@ -241,7 +294,7 @@ func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
// Notify implements the Engine interface
func (t *Transitive) Notify(msg common.Message) error {
	if !t.bootstrapped {
		t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
		t.Config.Context.Log.Debug("dropping Notify due to bootstrapping")
		return nil
	}

@@ -345,7 +398,7 @@ func (t *Transitive) insert(vtx avalanche.Vertex) error {
		}
	}

	t.Config.Context.Log.Verbo("Vertex: %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len())
	t.Config.Context.Log.Verbo("vertex %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len())

	t.vtxBlocked.Register(&vtxIssuer{i: i})
	t.txBlocked.Register(&txIssuer{i: i})

@@ -403,7 +456,7 @@ func (t *Transitive) issueRepoll() {
	preferredIDs := t.Consensus.Preferences().List()
	numPreferredIDs := len(preferredIDs)
	if numPreferredIDs == 0 {
		t.Config.Context.Log.Error("Re-query attempt was dropped due to no pending vertices")
		t.Config.Context.Log.Error("re-query attempt was dropped due to no pending vertices")
		return
	}

@@ -422,12 +475,12 @@ func (t *Transitive) issueRepoll() {
	if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
		t.Config.Sender.PullQuery(vdrSet, t.RequestID, vtxID)
	} else if numVdrs < p.K {
		t.Config.Context.Log.Error("Re-query for %s was dropped due to an insufficient number of validators", vtxID)
		t.Config.Context.Log.Error("re-query for %s was dropped due to an insufficient number of validators", vtxID)
	}
}

func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {
	t.Config.Context.Log.Verbo("Batching %d transactions into a new vertex", len(txs))
	t.Config.Context.Log.Verbo("batching %d transactions into a new vertex", len(txs))

	virtuousIDs := t.Consensus.Virtuous().List()
	sampler := random.Uniform{N: len(virtuousIDs)}

@@ -438,7 +491,7 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {

	vtx, err := t.Config.State.BuildVertex(parentIDs, txs)
	if err != nil {
		t.Config.Context.Log.Warn("Error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs))
		t.Config.Context.Log.Warn("error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs))
		return nil
	}
	return t.insert(vtx)

@@ -446,7 +499,7 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {

func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) {
	if t.vtxReqs.Contains(vtxID) {
		t.Config.Context.Log.Debug("Not requesting a vertex because we have recently sent a request")
		t.Config.Context.Log.Debug("not requesting a vertex because we have recently sent a request")
		return
	}
@@ -2167,6 +2167,9 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
	vm.Default(true)

	vm.CantBootstrapping = false
	vm.CantBootstrapped = false

	utxos := []ids.ID{GenerateID(), GenerateID()}

	txID0 := GenerateID()

@@ -2272,7 +2275,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
		panic("Unknown vertex requested")
	}

	sender.GetF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) {
	sender.GetAncestorsF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) {
		if !vdrID.Equals(inVdr) {
			t.Fatalf("Asking wrong validator for vertex")
		}

@@ -2315,7 +2318,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
		panic("Unknown bytes provided")
	}

	te.Put(vdrID, *requestID, vtxID0, vtxBytes0)
	te.MultiPut(vdrID, *requestID, [][]byte{vtxBytes0})

	vm.ParseTxF = nil
	st.parseVertex = nil
@@ -0,0 +1,117 @@
package avalanche

import (
	"container/heap"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
)

// A vertexItem is a Vertex managed by the priority queue.
type vertexItem struct {
	vertex avalanche.Vertex
	index  int // The index of the item in the heap.
}

// A priorityQueue implements heap.Interface and holds vertexItems.
type priorityQueue []*vertexItem

func (pq priorityQueue) Len() int { return len(pq) }

// Returns true if the vertex at index i has greater height than the vertex at
// index j.
func (pq priorityQueue) Less(i, j int) bool {
	statusI := pq[i].vertex.Status()
	statusJ := pq[j].vertex.Status()

	// Put unknown vertices at the front of the heap to ensure once we have made
	// it below a certain height in DAG traversal we do not need to reset
	if !statusI.Fetched() {
		return true
	}
	if !statusJ.Fetched() {
		return false
	}
	return pq[i].vertex.Height() > pq[j].vertex.Height()
}

func (pq priorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

// Push adds an item to this priority queue. x must have type *vertexItem
func (pq *priorityQueue) Push(x interface{}) {
	n := len(*pq)
	item := x.(*vertexItem)
	item.index = n
	*pq = append(*pq, item)
}

// Pop returns the last item in this priorityQueue
func (pq *priorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	old[n-1] = nil
	item.index = -1
	*pq = old[0 : n-1]
	return item
}

// vertexHeap defines the functionality of a heap of vertices
// with unique VertexIDs ordered by height
type vertexHeap interface {
	Clear()
	Push(avalanche.Vertex)
	Pop() avalanche.Vertex // Requires that there be at least one element
	Contains(ids.ID) bool
	Len() int
}

type maxHeightVertexHeap struct {
	heap       *priorityQueue
	elementIDs ids.Set
}

func newMaxVertexHeap() *maxHeightVertexHeap {
	return &maxHeightVertexHeap{
		heap:       &priorityQueue{},
		elementIDs: ids.Set{},
	}
}

func (vh *maxHeightVertexHeap) Clear() {
	vh.heap = &priorityQueue{}
	vh.elementIDs.Clear()
}

// Push adds an element to this heap. Returns true if the element was added.
// Returns false if it was already in the heap.
func (vh *maxHeightVertexHeap) Push(vtx avalanche.Vertex) bool {
	vtxID := vtx.ID()
	if vh.elementIDs.Contains(vtxID) {
		return false
	}

	vh.elementIDs.Add(vtxID)
	item := &vertexItem{
		vertex: vtx,
	}
	heap.Push(vh.heap, item)
	return true
}

// If there are any vertices in this heap with status Unknown, removes one such
// vertex and returns it. Otherwise, removes and returns the vertex in this heap
// with the greatest height.
func (vh *maxHeightVertexHeap) Pop() avalanche.Vertex {
	vtx := heap.Pop(vh.heap).(*vertexItem).vertex
	vh.elementIDs.Remove(vtx.ID())
	return vtx
}

func (vh *maxHeightVertexHeap) Len() int { return vh.heap.Len() }

func (vh *maxHeightVertexHeap) Contains(vtxID ids.ID) bool { return vh.elementIDs.Contains(vtxID) }
@@ -0,0 +1,130 @@
package avalanche

import (
	"testing"

	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
)

// This test inserts several vertices into a max-height vertex heap, then
// removes them, checking that they come out in priority order.
func TestUniqueVertexHeapReturnsOrdered(t *testing.T) {
	h := newMaxVertexHeap()

	vtx0 := &Vtx{
		id:     GenerateID(),
		height: 0,
		status: choices.Processing,
	}

	vtx1 := &Vtx{
		id:     GenerateID(),
		height: 1,
		status: choices.Processing,
	}

	vtx2 := &Vtx{
		id:     GenerateID(),
		height: 1,
		status: choices.Processing,
	}

	vtx3 := &Vtx{
		id:     GenerateID(),
		height: 3,
		status: choices.Processing,
	}

	vtx4 := &Vtx{
		id:     GenerateID(),
		status: choices.Unknown,
	}

	vts := []avalanche.Vertex{vtx0, vtx1, vtx2, vtx3, vtx4}

	for _, vtx := range vts {
		h.Push(vtx)
	}

	vtxZ := h.Pop()
	if !vtxZ.ID().Equals(vtx4.ID()) {
		t.Fatalf("Heap did not pop unknown element first")
	}

	vtxA := h.Pop()
	if vtxA.Height() != 3 {
		t.Fatalf("First height from heap was incorrect")
	} else if !vtxA.ID().Equals(vtx3.ID()) {
		t.Fatalf("Incorrect ID on vertex popped from heap")
	}

	vtxB := h.Pop()
	if vtxB.Height() != 1 {
		t.Fatalf("First height from heap was incorrect")
	} else if !vtxB.ID().Equals(vtx1.ID()) && !vtxB.ID().Equals(vtx2.ID()) {
		t.Fatalf("Incorrect ID on vertex popped from heap")
	}

	vtxC := h.Pop()
	if vtxC.Height() != 1 {
		t.Fatalf("First height from heap was incorrect")
	} else if !vtxC.ID().Equals(vtx1.ID()) && !vtxC.ID().Equals(vtx2.ID()) {
		t.Fatalf("Incorrect ID on vertex popped from heap")
	}

	if vtxB.ID().Equals(vtxC.ID()) {
		t.Fatalf("Heap returned same element more than once")
	}

	vtxD := h.Pop()
	if vtxD.Height() != 0 {
		t.Fatalf("Last height returned was incorrect")
	} else if !vtxD.ID().Equals(vtx0.ID()) {
		t.Fatalf("Last item from heap had incorrect ID")
	}

	if h.Len() != 0 {
		t.Fatalf("Heap was not empty after popping all of its elements")
	}
}

func TestUniqueVertexHeapRemainsUnique(t *testing.T) {
	h := newMaxVertexHeap()

	vtx0 := &Vtx{
		height: 0,
		id:     GenerateID(),
		status: choices.Processing,
	}
	vtx1 := &Vtx{
		height: 1,
		id:     GenerateID(),
		status: choices.Processing,
	}

	sharedID := GenerateID()
	vtx2 := &Vtx{
		height: 1,
		id:     sharedID,
		status: choices.Processing,
	}

	vtx3 := &Vtx{
		height: 2,
		id:     sharedID,
		status: choices.Processing,
	}

	pushed1 := h.Push(vtx0)
	pushed2 := h.Push(vtx1)
	pushed3 := h.Push(vtx2)
	pushed4 := h.Push(vtx3)
	if h.Len() != 3 {
		t.Fatalf("Unique Vertex Heap has incorrect length: %d", h.Len())
	} else if !(pushed1 && pushed2 && pushed3) {
		t.Fatalf("Failed to push a new unique element")
	} else if pushed4 {
		t.Fatalf("Pushed non-unique element to the unique vertex heap")
	}
}
@@ -5,7 +5,6 @@ package avalanche

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
	"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)

@@ -60,47 +59,56 @@ func (v *voter) Update() {
	}

	if v.t.Consensus.Quiesce() {
		v.t.Config.Context.Log.Verbo("Avalanche engine can quiesce")
		v.t.Config.Context.Log.Debug("Avalanche engine can quiesce")
		return
	}

	v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce")
	v.t.Config.Context.Log.Debug("Avalanche engine can't quiesce")
	v.t.errs.Add(v.t.repoll())
}

func (v *voter) bubbleVotes(votes ids.UniqueBag) ids.UniqueBag {
	bubbledVotes := ids.UniqueBag{}
	vertexHeap := newMaxVertexHeap()
	for _, vote := range votes.List() {
		set := votes.GetSet(vote)
		vtx, err := v.t.Config.State.GetVertex(vote)
		if err != nil {
			continue
		}

		vts := []avalanche.Vertex{vtx}
		for len(vts) > 0 {
			vtx := vts[0]
			vts = vts[1:]
		vertexHeap.Push(vtx)
	}

			status := vtx.Status()
			if !status.Fetched() {
				v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is unknown", set.Len(), vtx.ID())
				continue
			}
	for vertexHeap.Len() > 0 {
		vtx := vertexHeap.Pop()
		vtxID := vtx.ID()
		set := votes.GetSet(vtxID)
		status := vtx.Status()

			if status.Decided() {
				v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is decided", set.Len(), vtx.ID())
				continue
			}
		if !status.Fetched() {
			v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is unknown", set.Len(), vtxID)
			bubbledVotes.RemoveSet(vtx.ID())
			continue
		}

			if v.t.Consensus.VertexIssued(vtx) {
				v.t.Config.Context.Log.Verbo("Applying %d vote(s) for %s", set.Len(), vtx.ID())
				bubbledVotes.UnionSet(vtx.ID(), set)
			} else {
				v.t.Config.Context.Log.Verbo("Bubbling %d vote(s) for %s because the vertex isn't issued", set.Len(), vtx.ID())
				vts = append(vts, vtx.Parents()...)
		if status.Decided() {
			v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is decided", set.Len(), vtxID)
			bubbledVotes.RemoveSet(vtx.ID())
			continue
		}

		if v.t.Consensus.VertexIssued(vtx) {
			v.t.Config.Context.Log.Verbo("Applying %d vote(s) for %s", set.Len(), vtx.ID())
			bubbledVotes.UnionSet(vtx.ID(), set)
		} else {
			v.t.Config.Context.Log.Verbo("Bubbling %d vote(s) for %s because the vertex isn't issued", set.Len(), vtx.ID())
			bubbledVotes.RemoveSet(vtx.ID()) // Remove votes for this vertex because it hasn't been issued
			for _, parentVtx := range vtx.Parents() {
				bubbledVotes.UnionSet(parentVtx.ID(), set)
				vertexHeap.Push(parentVtx)
			}
		}
	}

	return bubbledVotes
}
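The rewritten bubbleVotes pops vertices in strictly decreasing height order, so every descendant is handled before the ancestors it might bubble votes into; votes on an unissued vertex are removed and re-credited to all of its parents, which are then pushed back onto the heap. A simplified sketch of that pattern using integer vote counts instead of vote sets (all types here are illustrative):

    package main

    import (
        "container/heap"
        "fmt"
    )

    type node struct {
        id      string
        height  int
        parents []*node
        issued  bool
    }

    // maxHeap orders nodes by decreasing height so every child is handled
    // before any of its ancestors.
    type maxHeap []*node

    func (h maxHeap) Len() int            { return len(h) }
    func (h maxHeap) Less(i, j int) bool  { return h[i].height > h[j].height }
    func (h maxHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
    func (h *maxHeap) Push(x interface{}) { *h = append(*h, x.(*node)) }
    func (h *maxHeap) Pop() interface{} {
        old := *h
        n := old[len(old)-1]
        *h = old[:len(old)-1]
        return n
    }

    // bubble credits votes on unissued nodes to their parents instead.
    func bubble(votes map[string]int, start []*node) map[string]int {
        h := &maxHeap{}
        for _, n := range start {
            heap.Push(h, n)
        }
        out := map[string]int{}
        seen := map[string]bool{}
        for h.Len() > 0 {
            n := heap.Pop(h).(*node)
            if seen[n.id] {
                continue
            }
            seen[n.id] = true
            if n.issued {
                out[n.id] += votes[n.id]
                continue
            }
            for _, p := range n.parents { // re-credit the votes one level up
                votes[p.id] += votes[n.id]
                heap.Push(h, p)
            }
        }
        return out
    }

    func main() {
        root := &node{id: "root", height: 0, issued: true}
        leaf := &node{id: "leaf", height: 1, parents: []*node{root}}
        fmt.Println(bubble(map[string]int{"leaf": 3}, []*node{leaf})) // map[root:3]
    }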
@ -5,15 +5,31 @@ package common
|
|||
|
import (
    stdmath "math"
    "time"

    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/utils/math"
)

const (
    // MaxContainersPerMultiPut is the maximum number of containers that can be sent in a MultiPut
    MaxContainersPerMultiPut = 2000

    // StatusUpdateFrequency ... the bootstrapper logs "processed X blocks/vertices" every [StatusUpdateFrequency] blocks/vertices
    StatusUpdateFrequency = 2500
)

var (
    // MaxTimeFetchingAncestors is the maximum amount of time to spend fetching vertices during a call to GetAncestors
    MaxTimeFetchingAncestors = 100 * time.Millisecond
)

// Bootstrapper implements the Engine interface.
type Bootstrapper struct {
    Config

    // IDs of validators we have requested the accepted frontier from but haven't
    // received a reply from
    pendingAcceptedFrontier ids.ShortSet

    acceptedFrontier ids.Set

@@ -43,6 +59,7 @@ func (b *Bootstrapper) Startup() error {
        return b.Bootstrapable.ForceAccepted(ids.Set{})
    }

    // Ask each of the bootstrap validators to send their accepted frontier
    vdrs := ids.ShortSet{}
    vdrs.Union(b.pendingAcceptedFrontier)

@@ -59,6 +76,7 @@ func (b *Bootstrapper) GetAcceptedFrontier(validatorID ids.ShortID, requestID ui

// GetAcceptedFrontierFailed implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) error {
    // If we can't get a response from [validatorID], act as though they said their accepted frontier is empty
    b.AcceptedFrontier(validatorID, requestID, ids.Set{})
    return nil
}

@@ -69,10 +87,16 @@ func (b *Bootstrapper) AcceptedFrontier(validatorID ids.ShortID, requestID uint3
        b.Context.Log.Debug("Received an AcceptedFrontier message from %s unexpectedly", validatorID)
        return nil
    }
    // Mark that we received a response from [validatorID]
    b.pendingAcceptedFrontier.Remove(validatorID)

    // Union the reported accepted frontier from [validatorID] with the accepted frontier we got from others
    b.acceptedFrontier.Union(containerIDs)

    // We've received the accepted frontier from every bootstrap validator.
    // Ask each bootstrap validator to filter the list of containers that we were
    // told are on the accepted frontier such that the list only contains containers
    // they think are accepted
    if b.pendingAcceptedFrontier.Len() == 0 {
        vdrs := ids.ShortSet{}
        vdrs.Union(b.pendingAccepted)

@@ -91,6 +115,8 @@ func (b *Bootstrapper) GetAccepted(validatorID ids.ShortID, requestID uint32, co

// GetAcceptedFailed implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) error {
    // If we can't get a response from [validatorID], act as though they said
    // that they think none of the containers we sent them in GetAccepted are accepted
    return b.Accepted(validatorID, requestID, ids.Set{})
}

@@ -100,10 +126,11 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta
        b.Context.Log.Debug("Received an Accepted message from %s unexpectedly", validatorID)
        return nil
    }
    // Mark that we received a response from [validatorID]
    b.pendingAccepted.Remove(validatorID)

    weight := uint64(0)
    if vdr, ok := b.Validators.Get(validatorID); ok {
    if vdr, ok := b.Beacons.Get(validatorID); ok {
        weight = vdr.Weight()
    }

@@ -121,6 +148,8 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta
        return nil
    }

    // We've received the filtered accepted frontier from every bootstrap validator.
    // Accept all containers that have a sufficient weight behind them
    accepted := ids.Set{}
    for key, weight := range b.acceptedVotes {
        if weight >= b.Config.Alpha {

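// Illustrative sketch (not part of this commit): how the weight tally above
// crosses the Alpha threshold. The beacon weights and Alpha value below are
// hypothetical; Add64 is the overflow-checked addition from the utils/math
// package imported at the top of this file.
//
//    votes := []uint64{30, 30, 45} // three beacons reported container X as accepted
//    alpha := uint64(75)
//    weight := uint64(0)
//    for _, w := range votes {
//        weight, _ = math.Add64(weight, w)
//    }
//    // weight == 105 >= alpha, so X would be marked accepted
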
@@ -135,6 +135,21 @@ type FetchHandler interface {
    // dropped.
    Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error

    // Notify this engine of a request for a container and its ancestors.
    // The request is from validator [validatorID]. The requested container is [containerID].
    //
    // This function can be called by any validator. It is not safe to assume
    // this message is utilizing a unique requestID. It is also not safe to
    // assume the requested containerID exists. However, the validatorID is
    // assumed to be authenticated.
    //
    // This engine should respond with a MultiPut message with the same requestID,
    // which contains [containerID] as well as its ancestors. See MultiPut's documentation.
    //
    // If this engine doesn't have some ancestors, it should reply with its best effort attempt at getting them.
    // If this engine doesn't have [containerID] it can ignore this message.
    GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error

    // Notify this engine of a container.
    //
    // This function can be called by any validator. It is not safe to assume

@@ -152,6 +167,24 @@ type FetchHandler interface {
        container []byte,
    ) error

    // Notify this engine of multiple containers.
    // Each element of [containers] is the byte representation of a container.
    //
    // This should only be called during bootstrapping, and in response to a GetAncestors message to
    // [validatorID] with request ID [requestID]. This call should contain the container requested in
    // that message, along with ancestors.
    // The containers should be in BFS order (i.e. the first container must be the container
    // requested in the GetAncestors message, and ancestors further back appear later in [containers]).
    //
    // It is not safe to assume this message is in response to a GetAncestors message, that this
    // message has a unique requestID, or that any of the containers in [containers] are valid.
    // However, the validatorID is assumed to be authenticated.
    MultiPut(
        validatorID ids.ShortID,
        requestID uint32,
        containers [][]byte,
    ) error

    // Notify this engine that a get request it issued has failed.
    //
    // This function will be called if the engine sent a Get message that is not

@@ -161,6 +194,16 @@ type FetchHandler interface {
    // The validatorID and requestID are assumed to be the same as those sent in
    // the Get message.
    GetFailed(validatorID ids.ShortID, requestID uint32) error

    // Notify this engine that a GetAncestors request it issued has failed.
    //
    // This function will be called if the engine sent a GetAncestors message that is not
    // anticipated to be responded to. This could be because the recipient of
    // the message is unknown or if the message request has timed out.
    //
    // The validatorID and requestID are assumed to be the same as those sent in
    // the GetAncestors message.
    GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) error
}
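
// Illustrative sketch (not part of this commit): the minimal validation a
// MultiPut handler performs under the contract documented above. The parse
// callback stands in for a VM-specific deserializer and is hypothetical.
//
//    func validateMultiPut(
//        requestedID ids.ID,
//        containers [][]byte,
//        parse func([]byte) (ids.ID, error),
//    ) error {
//        if n := len(containers); n == 0 || n > MaxContainersPerMultiPut {
//            return fmt.Errorf("expected between 1 and %d containers, got %d", MaxContainersPerMultiPut, n)
//        }
//        // BFS order: the first container must be the requested one;
//        // its ancestors follow, each older than the one before it.
//        firstID, err := parse(containers[0])
//        if err != nil {
//            return err
//        }
//        if !firstID.Equals(requestedID) {
//            return fmt.Errorf("first container %s isn't the requested container %s", firstID, requestedID)
//        }
//        return nil
//    }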

// QueryHandler defines how a consensus engine reacts to query messages from

@@ -85,10 +85,13 @@ func (j *Jobs) Execute(job Job) error {

    jobID := job.ID()

    blocking, _ := j.state.Blocking(j.db, jobID)
    j.state.DeleteBlocking(j.db, jobID)
    blocking, err := j.state.Blocking(j.db, jobID)
    if err != nil {
        return err
    }
    j.state.DeleteBlocking(j.db, jobID, blocking)

    for _, blockedID := range blocking.List() {
    for _, blockedID := range blocking {
        job, err := j.state.Job(j.db, blockedID)
        if err != nil {
            return err

@@ -142,9 +145,7 @@ func (j *Jobs) block(job Job, deps ids.Set) error {

    jobID := job.ID()
    for _, depID := range deps.List() {
        blocking, _ := j.state.Blocking(j.db, depID)
        blocking.Add(jobID)
        if err := j.state.SetBlocking(j.db, depID, blocking); err != nil {
        if err := j.state.AddBlocking(j.db, depID, jobID); err != nil {
            return err
        }
    }
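
// Illustrative sketch (not part of this commit): the bookkeeping the two
// functions above maintain, using hypothetical job IDs.
//
//    // block(): job J depends on A and B, so one edge is written per dependency:
//    //    AddBlocking(A, J)
//    //    AddBlocking(B, J)
//    // Execute(A): read every job blocked on A, then clear those edges:
//    //    blocking, _ := state.Blocking(db, A) // -> [J]
//    //    state.DeleteBlocking(db, A, blocking)
//    // J runs once its last dependency (here, B) has also executed.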

@@ -95,25 +95,31 @@ func (ps *prefixedState) Job(db database.Database, id ids.ID) (Job, error) {
    return ps.state.Job(db, p.Bytes)
}

func (ps *prefixedState) SetBlocking(db database.Database, id ids.ID, blocking ids.Set) error {
func (ps *prefixedState) AddBlocking(db database.Database, id ids.ID, blocking ids.ID) error {
    p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)}

    p.PackByte(blockingID)
    p.PackFixedBytes(id.Bytes())

    return ps.state.SetIDs(db, p.Bytes, blocking)
    return ps.state.AddID(db, p.Bytes, blocking)
}

func (ps *prefixedState) DeleteBlocking(db database.Database, id ids.ID) error {
func (ps *prefixedState) DeleteBlocking(db database.Database, id ids.ID, blocking []ids.ID) error {
    p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)}

    p.PackByte(blockingID)
    p.PackFixedBytes(id.Bytes())

    return db.Delete(p.Bytes)
    for _, blocked := range blocking {
        if err := ps.state.RemoveID(db, p.Bytes, blocked); err != nil {
            return err
        }
    }

    return nil
}
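
// Illustrative sketch (not part of this commit) of the key layout above. Each
// dependency edge becomes its own database entry, so adding or removing a
// single blocked ID no longer rewrites one serialized set:
//
//    prefix = [blockingID type byte] ++ [hashing.HashLen bytes of the blocking ID]
//    entry  = nested(prefix) -> key: blocked ID bytes, value: empty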

func (ps *prefixedState) Blocking(db database.Database, id ids.ID) (ids.Set, error) {
func (ps *prefixedState) Blocking(db database.Database, id ids.ID) ([]ids.ID, error) {
    p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)}

    p.PackByte(blockingID)

@@ -4,12 +4,18 @@
package queue

import (
    "errors"

    "github.com/ava-labs/gecko/database"
    "github.com/ava-labs/gecko/database/prefixdb"
    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/utils/hashing"
    "github.com/ava-labs/gecko/utils/wrappers"
)

var (
    errZeroID = errors.New("zero id")
)

type state struct{ jobs *Jobs }

func (s *state) SetInt(db database.Database, key []byte, size uint32) error {

@@ -42,30 +48,37 @@ func (s *state) Job(db database.Database, key []byte) (Job, error) {
    return s.jobs.parser.Parse(value)
}

func (s *state) SetIDs(db database.Database, key []byte, blocking ids.Set) error {
    p := wrappers.Packer{Bytes: make([]byte, wrappers.IntLen+hashing.HashLen*blocking.Len())}
// IDs returns a slice of IDs from storage
func (s *state) IDs(db database.Database, prefix []byte) ([]ids.ID, error) {
    idSlice := []ids.ID(nil)
    iter := prefixdb.NewNested(prefix, db).NewIterator()
    defer iter.Release()

    p.PackInt(uint32(blocking.Len()))
    for _, id := range blocking.List() {
        p.PackFixedBytes(id.Bytes())
    for iter.Next() {
        keyID, err := ids.ToID(iter.Key())
        if err != nil {
            return nil, err
        }

        idSlice = append(idSlice, keyID)
    }

    return db.Put(key, p.Bytes)
    return idSlice, nil
}

func (s *state) IDs(db database.Database, key []byte) (ids.Set, error) {
    bytes, err := db.Get(key)
    if err != nil {
        return nil, err
// AddID saves an ID to the prefixed database
func (s *state) AddID(db database.Database, prefix []byte, key ids.ID) error {
    if key.IsZero() {
        return errZeroID
    }

    p := wrappers.Packer{Bytes: bytes}

    blocking := ids.Set{}
    for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- {
        id, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen))
        blocking.Add(id)
    }

    return blocking, p.Err
    pdb := prefixdb.NewNested(prefix, db)
    return pdb.Put(key.Bytes(), nil)
}

// RemoveID removes an ID from the prefixed database
func (s *state) RemoveID(db database.Database, prefix []byte, key ids.ID) error {
    if key.IsZero() {
        return errZeroID
    }
    pdb := prefixdb.NewNested(prefix, db)
    return pdb.Delete(key.Bytes())
}
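
// Illustrative sketch (not part of this commit): a round trip through the
// helpers above. The in-memory database import is an assumption based on the
// package layout (github.com/ava-labs/gecko/database/memdb); idA and idB are
// hypothetical IDs.
//
//    db := memdb.New()
//    s := &state{}
//    prefix := []byte{0}
//    _ = s.AddID(db, prefix, idA)
//    _ = s.AddID(db, prefix, idB)
//    got, _ := s.IDs(db, prefix) // -> [idA, idB] in iterator (byte) order
//    _ = s.RemoveID(db, prefix, idA)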

@@ -50,9 +50,17 @@ type FetchSender interface {
    // to this validator
    Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID)

    // GetAncestors requests that the validator with ID [validatorID] send container [containerID] and its
    // ancestors. The maximum number of ancestors to send in response is defined in snow/engine/common/bootstrapper.go
    GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID)

    // Tell the specified validator that the container whose ID is <containerID>
    // has body <container>
    Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)

    // Give the specified validator several containers at once
    // Should be in response to a GetAncestors message with request ID [requestID] from the validator
    MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte)
}

// QuerySender defines how a consensus engine sends query messages to other

@@ -32,21 +32,26 @@ type EngineTest struct {
    CantAccepted,

    CantGet,
    CantGetAncestors,
    CantGetFailed,
    CantGetAncestorsFailed,
    CantPut,
    CantMultiPut,

    CantPushQuery,
    CantPullQuery,
    CantQueryFailed,
    CantChits bool

    ContextF func() *snow.Context
    StartupF, GossipF, ShutdownF func() error
    NotifyF func(Message) error
    GetF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
    PutF, PushQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error
    AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error
    GetAcceptedFrontierF, GetFailedF, QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(validatorID ids.ShortID, requestID uint32) error
    ContextF func() *snow.Context
    StartupF, GossipF, ShutdownF func() error
    NotifyF func(Message) error
    GetF, GetAncestorsF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
    PutF, PushQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error
    MultiPutF func(validatorID ids.ShortID, requestID uint32, containers [][]byte) error
    AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error
    GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF,
    QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(validatorID ids.ShortID, requestID uint32) error
}

var _ Engine = &EngineTest{}

@@ -70,8 +75,11 @@ func (e *EngineTest) Default(cant bool) {
    e.CantAccepted = cant

    e.CantGet = cant
    e.CantGetAncestors = cant
    e.CantGetAncestorsFailed = cant
    e.CantGetFailed = cant
    e.CantPut = cant
    e.CantMultiPut = cant

    e.CantPushQuery = cant
    e.CantPullQuery = cant

@@ -233,6 +241,16 @@ func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID
    return nil
}

// GetAncestors ...
func (e *EngineTest) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error {
    if e.GetAncestorsF != nil {
        e.GetAncestorsF(validatorID, requestID, containerID)
    } else if e.CantGetAncestors && e.T != nil {
        e.T.Fatalf("Unexpectedly called GetAncestors")
    }
    return nil
}

// GetFailed ...
func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error {
    if e.GetFailedF != nil {

@@ -246,6 +264,19 @@ func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error
    return nil
}

// GetAncestorsFailed ...
func (e *EngineTest) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) error {
    if e.GetAncestorsFailedF != nil {
        return e.GetAncestorsFailedF(validatorID, requestID)
    } else if e.CantGetAncestorsFailed {
        if e.T != nil {
            e.T.Fatalf("Unexpectedly called GetAncestorsFailed")
        }
        return errors.New("Unexpectedly called GetAncestorsFailed")
    }
    return nil
}

// Put ...
func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error {
    if e.PutF != nil {

@@ -259,6 +290,19 @@ func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID
    return nil
}

// MultiPut ...
func (e *EngineTest) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) error {
    if e.MultiPutF != nil {
        return e.MultiPutF(validatorID, requestID, containers)
    } else if e.CantMultiPut {
        if e.T != nil {
            e.T.Fatalf("Unexpectedly called MultiPut")
        }
        return errors.New("Unexpectedly called MultiPut")
    }
    return nil
}

// PushQuery ...
func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error {
    if e.PushQueryF != nil {

@@ -15,7 +15,7 @@ type SenderTest struct {

    CantGetAcceptedFrontier, CantAcceptedFrontier,
    CantGetAccepted, CantAccepted,
    CantGet, CantPut,
    CantGet, CantGetAncestors, CantPut, CantMultiPut,
    CantPullQuery, CantPushQuery, CantChits,
    CantGossip bool

@@ -24,7 +24,9 @@ type SenderTest struct {
    GetAcceptedF func(ids.ShortSet, uint32, ids.Set)
    AcceptedF func(ids.ShortID, uint32, ids.Set)
    GetF func(ids.ShortID, uint32, ids.ID)
    GetAncestorsF func(ids.ShortID, uint32, ids.ID)
    PutF func(ids.ShortID, uint32, ids.ID, []byte)
    MultiPutF func(ids.ShortID, uint32, [][]byte)
    PushQueryF func(ids.ShortSet, uint32, ids.ID, []byte)
    PullQueryF func(ids.ShortSet, uint32, ids.ID)
    ChitsF func(ids.ShortID, uint32, ids.Set)

@@ -38,7 +40,9 @@ func (s *SenderTest) Default(cant bool) {
    s.CantGetAccepted = cant
    s.CantAccepted = cant
    s.CantGet = cant
    s.CantGetAncestors = cant
    s.CantPut = cant
    s.CantMultiPut = cant
    s.CantPullQuery = cant
    s.CantPushQuery = cant
    s.CantChits = cant

@@ -100,6 +104,17 @@ func (s *SenderTest) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
    }
}

// GetAncestors calls GetAncestorsF if it was initialized. If it
// wasn't initialized and this function shouldn't be called and testing was
// initialized, then testing will fail.
func (s *SenderTest) GetAncestors(validatorID ids.ShortID, requestID uint32, vtxID ids.ID) {
    if s.GetAncestorsF != nil {
        s.GetAncestorsF(validatorID, requestID, vtxID)
    } else if s.CantGetAncestors && s.T != nil {
        s.T.Fatalf("Unexpectedly called GetAncestors")
    }
}

// Put calls PutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.

@@ -111,6 +126,17 @@ func (s *SenderTest) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtx []
    }
}

// MultiPut calls MultiPutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *SenderTest) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte) {
    if s.MultiPutF != nil {
        s.MultiPutF(vdr, requestID, vtxs)
    } else if s.CantMultiPut && s.T != nil {
        s.T.Fatalf("Unexpectedly called MultiPut")
    }
}

// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
// and this function shouldn't be called and testing was initialized, then
// testing will fail.

@@ -19,19 +19,22 @@ var (
type VMTest struct {
    T *testing.T

    CantInitialize, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool
    CantInitialize, CantBootstrapping, CantBootstrapped, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool

    InitializeF func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error
    ShutdownF func() error
    CreateHandlersF func() map[string]*HTTPHandler
    CreateStaticHandlersF func() map[string]*HTTPHandler
    InitializeF func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error
    BootstrappingF, BootstrappedF, ShutdownF func() error
    CreateHandlersF func() map[string]*HTTPHandler
    CreateStaticHandlersF func() map[string]*HTTPHandler
}

// Default ...
func (vm *VMTest) Default(cant bool) {
    vm.CantInitialize = cant
    vm.CantBootstrapping = cant
    vm.CantBootstrapped = cant
    vm.CantShutdown = cant
    vm.CantCreateHandlers = cant
    vm.CantCreateStaticHandlers = cant
}

// Initialize ...

@@ -45,6 +48,32 @@ func (vm *VMTest) Initialize(ctx *snow.Context, db database.Database, initState
    return errInitialize
}

// Bootstrapping ...
func (vm *VMTest) Bootstrapping() error {
    if vm.BootstrappingF != nil {
        return vm.BootstrappingF()
    } else if vm.CantBootstrapping {
        if vm.T != nil {
            vm.T.Fatalf("Unexpectedly called Bootstrapping")
        }
        return errors.New("Unexpectedly called Bootstrapping")
    }
    return nil
}

// Bootstrapped ...
func (vm *VMTest) Bootstrapped() error {
    if vm.BootstrappedF != nil {
        return vm.BootstrappedF()
    } else if vm.CantBootstrapped {
        if vm.T != nil {
            vm.T.Fatalf("Unexpectedly called Bootstrapped")
        }
        return errors.New("Unexpectedly called Bootstrapped")
    }
    return nil
}

// Shutdown ...
func (vm *VMTest) Shutdown() error {
    if vm.ShutdownF != nil {

@@ -12,8 +12,7 @@ import (
type VM interface {
    // Initialize this VM.
    // [ctx]: Metadata about this VM.
    // [ctx.networkID]: The ID of the network this VM's chain is running
    // on.
    // [ctx.networkID]: The ID of the network this VM's chain is running on.
    // [ctx.chainID]: The unique ID of the chain this VM is running on.
    // [ctx.Log]: Used to log messages
    // [ctx.NodeID]: The unique staker ID of this node.

@@ -37,6 +36,12 @@ type VM interface {
        fxs []*Fx,
    ) error

    // Bootstrapping is called when the node is starting to bootstrap this chain.
    Bootstrapping() error

    // Bootstrapped is called when the node is done bootstrapping this chain.
    Bootstrapped() error

    // Shutdown is called when the node is shutting down.
    Shutdown() error
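
// Illustrative sketch (not part of this commit): a VM that does nothing but
// track the bootstrapping lifecycle introduced above; every other VM method is
// elided. The type and field names are hypothetical.
//
//    type lifecycleVM struct {
//        bootstrapping bool
//    }
//
//    func (vm *lifecycleVM) Bootstrapping() error { vm.bootstrapping = true; return nil }  // fetching starts
//    func (vm *lifecycleVM) Bootstrapped() error  { vm.bootstrapping = false; return nil } // consensus may start
//    func (vm *lifecycleVM) Shutdown() error      { return nil }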

@@ -22,9 +22,6 @@ type BootstrapConfig struct {
    // Blocked tracks operations that are blocked on blocks
    Blocked *queue.Jobs

    // blocks that have outstanding get requests
    blkReqs common.Requests

    VM ChainVM

    Bootstrapped func()

@@ -35,8 +32,19 @@ type bootstrapper struct {
    metrics
    common.Bootstrapper

    pending ids.Set
    finished bool
    // true if all of the vertices in the original accepted frontier have been processed
    processedStartingAcceptedFrontier bool

    // Number of blocks fetched
    numFetched uint32

    // tracks which validators were asked for which containers in which requests
    outstandingRequests common.Requests

    // true if bootstrapping is done
    finished bool

    // Called when bootstrapping is done
    onFinished func() error
}

@@ -56,14 +64,14 @@ func (b *bootstrapper) Initialize(config BootstrapConfig) error {
    return nil
}

// CurrentAcceptedFrontier ...
// CurrentAcceptedFrontier returns the last accepted block
func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set {
    acceptedFrontier := ids.Set{}
    acceptedFrontier.Add(b.VM.LastAccepted())
    return acceptedFrontier
}

// FilterAccepted ...
// FilterAccepted returns the blocks in [containerIDs] that we have accepted
func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
    acceptedIDs := ids.Set{}
    for _, blkID := range containerIDs.List() {

@@ -76,118 +84,122 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {

// ForceAccepted ...
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
    if err := b.VM.Bootstrapping(); err != nil {
        return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
            err)
    }

    for _, blkID := range acceptedContainerIDs.List() {
        if err := b.fetch(blkID); err != nil {
        if blk, err := b.VM.GetBlock(blkID); err == nil {
            if err := b.process(blk); err != nil {
                return err
            }
        } else if err := b.fetch(blkID); err != nil {
            return err
        }
    }

    if numPending := b.pending.Len(); numPending == 0 {
        // TODO: This typically indicates bootstrapping has failed, so this
        // should be handled appropriately
    b.processedStartingAcceptedFrontier = true
    if numPending := b.outstandingRequests.Len(); numPending == 0 {
        return b.finish()
    }
    return nil
}

// Put ...
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
    b.BootstrapConfig.Context.Log.Verbo("Put called for blkID %s", blkID)

    blk, err := b.VM.ParseBlock(blkBytes)
    if err != nil {
        b.BootstrapConfig.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
            err,
            formatting.DumpBytes{Bytes: blkBytes})

        b.GetFailed(vdr, requestID)
        return nil
    }

    if !b.pending.Contains(blk.ID()) {
        b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested block:\n%s",
            vdr,
            formatting.DumpBytes{Bytes: blkBytes})

        b.GetFailed(vdr, requestID)
        return nil
    }

    return b.addBlock(blk)
}

// GetFailed ...
func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) error {
    blkID, ok := b.blkReqs.Remove(vdr, requestID)
    if !ok {
        b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
            vdr)
        return nil
    }
    b.sendRequest(blkID)
    return nil
}

// Get block [blkID] and its ancestors from a validator
func (b *bootstrapper) fetch(blkID ids.ID) error {
    if b.pending.Contains(blkID) {
    // Make sure we haven't already requested this block
    if b.outstandingRequests.Contains(blkID) {
        return nil
    }

    blk, err := b.VM.GetBlock(blkID)
    if err != nil {
        b.sendRequest(blkID)
    // Make sure we don't already have this block
    if _, err := b.VM.GetBlock(blkID); err == nil {
        return nil
    }
    return b.storeBlock(blk)
}

func (b *bootstrapper) sendRequest(blkID ids.ID) {
    validators := b.BootstrapConfig.Validators.Sample(1)
    validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
    if len(validators) == 0 {
        b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", blkID)
        return
        return fmt.Errorf("Dropping request for %s as there are no validators", blkID)
    }
    validatorID := validators[0].ID()
    b.RequestID++

    b.blkReqs.RemoveAny(blkID)
    b.blkReqs.Add(validatorID, b.RequestID, blkID)

    b.pending.Add(blkID)
    b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, blkID)

    b.numPendingRequests.Set(float64(b.pending.Len()))
}

func (b *bootstrapper) addBlock(blk snowman.Block) error {
    if err := b.storeBlock(blk); err != nil {
        return err
    }

    if numPending := b.pending.Len(); numPending == 0 {
        return b.finish()
    }
    b.outstandingRequests.Add(validatorID, b.RequestID, blkID)
    b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, blkID) // request block and ancestors
    return nil
}

func (b *bootstrapper) storeBlock(blk snowman.Block) error {
// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
// with request ID [requestID]
func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, blks [][]byte) error {
    if lenBlks := len(blks); lenBlks > common.MaxContainersPerMultiPut {
        b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains more than maximum number of blocks", vdr, requestID)
        return b.GetAncestorsFailed(vdr, requestID)
    } else if lenBlks == 0 {
        b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains no blocks", vdr, requestID)
        return b.GetAncestorsFailed(vdr, requestID)
    }

    // Make sure this is in response to a request we made
    wantedBlkID, ok := b.outstandingRequests.Remove(vdr, requestID)
    if !ok { // this message isn't in response to a request we made
        b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
        return nil
    }

    wantedBlk, err := b.VM.ParseBlock(blks[0]) // the block we requested
    if err != nil {
        b.BootstrapConfig.Context.Log.Debug("Failed to parse requested block %s: %s", wantedBlkID, err)
        return b.fetch(wantedBlkID)
    } else if actualID := wantedBlk.ID(); !actualID.Equals(wantedBlkID) {
        b.BootstrapConfig.Context.Log.Debug("expected the first block to be the requested block, %s, but is %s", wantedBlk, actualID)
        return b.fetch(wantedBlkID)
    }

    for _, blkBytes := range blks {
        if _, err := b.VM.ParseBlock(blkBytes); err != nil { // persists the block
            b.BootstrapConfig.Context.Log.Debug("Failed to parse block: %s", err)
            b.BootstrapConfig.Context.Log.Verbo("block: %s", formatting.DumpBytes{Bytes: blkBytes})
        }
    }

    return b.process(wantedBlk)
}

// GetAncestorsFailed is called when a GetAncestors message we sent fails
func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) error {
    blkID, ok := b.outstandingRequests.Remove(vdr, requestID)
    if !ok {
        b.BootstrapConfig.Context.Log.Debug("GetAncestorsFailed(%s, %d) called but there was no outstanding request to this validator with this ID", vdr, requestID)
        return nil
    }
    // Send another request for this block
    return b.fetch(blkID)
}

// process a block
func (b *bootstrapper) process(blk snowman.Block) error {
    status := blk.Status()
    blkID := blk.ID()
    for status == choices.Processing {
        b.pending.Remove(blkID)

        if err := b.Blocked.Push(&blockJob{
            numAccepted: b.numBootstrapped,
            numDropped:  b.numDropped,
            blk:         blk,
        }); err == nil {
            b.numBlocked.Inc()
            b.numFetched++ // Progress tracker
            if b.numFetched%common.StatusUpdateFrequency == 0 { // Periodically print progress
                b.BootstrapConfig.Context.Log.Info("fetched %d blocks", b.numFetched)
            }
        }

        if err := b.Blocked.Commit(); err != nil {
            return err
        }

        // Process this block's parent
        blk = blk.Parent()
        status = blk.Status()
        blkID = blk.ID()

@@ -195,15 +207,16 @@ func (b *bootstrapper) storeBlock(blk snowman.Block) error {

    switch status := blk.Status(); status {
    case choices.Unknown:
        b.sendRequest(blkID)
    case choices.Accepted:
        b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", blkID)
    case choices.Rejected:
        if err := b.fetch(blkID); err != nil {
            return err
        }
    case choices.Rejected: // Should never happen
        return fmt.Errorf("bootstrapping wants to accept %s, however it was previously rejected", blkID)
    }

    numPending := b.pending.Len()
    b.numPendingRequests.Set(float64(numPending))
    if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
        return b.finish()
    }
    return nil
}

@@ -211,11 +224,17 @@ func (b *bootstrapper) finish() error {
    if b.finished {
        return nil
    }
    b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching blocks. executing state transitions...")

    if err := b.executeAll(b.Blocked, b.numBlocked); err != nil {
        return err
    }

    if err := b.VM.Bootstrapped(); err != nil {
        return fmt.Errorf("failed to notify VM that bootstrapping has finished: %w",
            err)
    }

    // Start consensus
    if err := b.onFinished(); err != nil {
        return err

@@ -229,6 +248,7 @@ func (b *bootstrapper) finish() error {
}

func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) error {
    numExecuted := 0
    for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() {
        numBlocked.Dec()
        if err := jobs.Execute(job); err != nil {

@@ -237,6 +257,10 @@ func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge)
        if err := jobs.Commit(); err != nil {
            return err
        }
        numExecuted++
        if numExecuted%common.StatusUpdateFrequency == 0 { // Periodically print progress
            b.BootstrapConfig.Context.Log.Info("executed %d blocks", numExecuted)
        }
    }
    return nil
}
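
// Worked example (not part of this commit): with StatusUpdateFrequency == 2500,
// executing 10,000 queued blocks logs progress when numExecuted reaches 2500,
// 5000, 7500, and 10000 (numExecuted%2500 == 0), i.e. four status lines total.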

@@ -52,7 +52,13 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
    peerID := peer.ID()
    peers.Add(peer)

    handler.Initialize(engine, make(chan common.Message), 1)
    handler.Initialize(
        engine,
        make(chan common.Message),
        1,
        "",
        prometheus.NewRegistry(),
    )
    timeouts.Initialize(0)
    router.Initialize(ctx.Log, timeouts, time.Hour, time.Second)

@@ -72,8 +78,9 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
    }, peerID, sender, vm
}

// Single node in the accepted frontier; no need to fetch parent
func TestBootstrapperSingleFrontier(t *testing.T) {
    config, peerID, sender, vm := newConfig(t)
    config, _, _, vm := newConfig(t)

    blkID0 := ids.Empty.Prefix(0)
    blkID1 := ids.Empty.Prefix(1)

@@ -98,6 +105,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) {
    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)
    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }

    acceptedIDs := ids.Set{}
    acceptedIDs.Add(blkID1)

@@ -105,57 +114,41 @@ func TestBootstrapperSingleFrontier(t *testing.T) {
    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID1):
            return nil, errUnknownBlock
            return blk1, nil
        case blkID.Equals(blkID0):
            return blk0, nil
        default:
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }

    reqID := new(uint32)
    sender.GetF = func(vdr ids.ShortID, innerReqID uint32, blkID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case blkID.Equals(blkID1):
        default:
            t.Fatalf("Requested unknown vertex")
        }

        *reqID = innerReqID
    }

    bs.ForceAccepted(acceptedIDs)

    vm.GetBlockF = nil
    sender.GetF = nil

    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes1):
            return blk1, nil
        case bytes.Equal(blkBytes, blkBytes0):
            return blk0, nil
        }
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }

    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }
    vm.CantBootstrapping = false
    vm.CantBootstrapped = false

    bs.Put(peerID, *reqID, blkID1, blkBytes1)

    vm.ParseBlockF = nil
    bs.onFinished = nil

    if !*finished {
    if err := bs.ForceAccepted(acceptedIDs); err != nil { // should finish
        t.Fatal(err)
    } else if !*finished {
        t.Fatalf("Bootstrapping should have finished")
    }
    if blk1.Status() != choices.Accepted {
    } else if blk1.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    }
}

// Requests the unknown block and gets back a MultiPut with unexpected request ID.
// Requests again and gets response from unexpected peer.
// Requests again and gets an unexpected block.
// Requests again and gets the expected block.
func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
    config, peerID, sender, vm := newConfig(t)

@@ -167,103 +160,6 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
    blkBytes1 := []byte{1}
    blkBytes2 := []byte{2}

    blk0 := &Blk{
        id:     blkID0,
        height: 0,
        status: choices.Accepted,
        bytes:  blkBytes0,
    }
    blk1 := &Blk{
        parent: blk0,
        id:     blkID1,
        height: 1,
        status: choices.Processing,
        bytes:  blkBytes1,
    }
    blk2 := &Blk{
        parent: blk1,
        id:     blkID2,
        height: 2,
        status: choices.Processing,
        bytes:  blkBytes2,
    }

    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)

    acceptedIDs := ids.Set{}
    acceptedIDs.Add(blkID1)

    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID1):
            return nil, errUnknownBlock
        default:
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }

    requestID := new(uint32)
    sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case vtxID.Equals(blkID1):
        default:
            t.Fatalf("Requested unknown block")
        }

        *requestID = reqID
    }

    bs.ForceAccepted(acceptedIDs)

    vm.GetBlockF = nil

    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes1):
            return blk1, nil
        case bytes.Equal(blkBytes, blkBytes2):
            return blk2, nil
        }
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }

    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }

    bs.Put(peerID, *requestID, blkID2, blkBytes2)
    bs.Put(peerID, *requestID, blkID1, blkBytes1)

    vm.ParseBlockF = nil

    if !*finished {
        t.Fatalf("Bootstrapping should have finished")
    }
    if blk1.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    }
    if blk2.Status() != choices.Processing {
        t.Fatalf("Block should be processing")
    }
}

func TestBootstrapperDependency(t *testing.T) {
    config, peerID, sender, vm := newConfig(t)

    blkID0 := ids.Empty.Prefix(0)
    blkID1 := ids.Empty.Prefix(1)
    blkID2 := ids.Empty.Prefix(2)

    blkBytes0 := []byte{0}
    blkBytes1 := []byte{1}
    blkBytes2 := []byte{2}

    blk0 := &Blk{
        id:     blkID0,
        height: 0,

@@ -288,42 +184,36 @@ func TestBootstrapperDependency(t *testing.T) {
    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)
    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }

    acceptedIDs := ids.Set{}
    acceptedIDs.Add(blkID2)

    parsedBlk1 := false
    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID0):
            return blk0, nil
        case blkID.Equals(blkID1):
            if parsedBlk1 {
                return blk1, nil
            }
            return nil, errUnknownBlock
        case blkID.Equals(blkID2):
            return blk2, nil
        default:
            t.Fatalf("Requested unknown block")
            panic("Requested unknown block")
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }

    requestID := new(uint32)
    sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case vtxID.Equals(blkID1):
        default:
            t.Fatalf("Requested unknown block")
        }

        *requestID = reqID
    }

    bs.ForceAccepted(acceptedIDs)

    vm.GetBlockF = nil
    sender.GetF = nil

    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes0):
            return blk0, nil
        case bytes.Equal(blkBytes, blkBytes1):
            blk1.status = choices.Processing
            parsedBlk1 = true
            return blk1, nil
        case bytes.Equal(blkBytes, blkBytes2):
            return blk2, nil

@@ -332,20 +222,325 @@ func TestBootstrapperDependency(t *testing.T) {
        return nil, errUnknownBlock
    }

    blk1.status = choices.Processing
    requestID := new(uint32)
    sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case vtxID.Equals(blkID1):
        default:
            t.Fatalf("should have requested blk1")
        }
        *requestID = reqID
    }
    vm.CantBootstrapping = false

    if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk1
        t.Fatal(err)
    }

    oldReqID := *requestID
    if err := bs.MultiPut(peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID
        t.Fatal(err)
    } else if oldReqID != *requestID {
        t.Fatal("should not have sent new request")
    }

    if err := bs.MultiPut(ids.NewShortID([20]byte{1, 2, 3}), *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer
        t.Fatal(err)
    } else if oldReqID != *requestID {
        t.Fatal("should not have sent new request")
    }

    if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block
        t.Fatal(err)
    } else if oldReqID == *requestID {
        t.Fatal("should have sent new request")
    }

    vm.CantBootstrapped = false

    if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with right block
        t.Fatal(err)
    } else if !*finished {
        t.Fatalf("Bootstrapping should have finished")
    } else if blk0.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    } else if blk1.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    } else if blk2.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    }
}

// There are multiple needed blocks and MultiPut returns one at a time
func TestBootstrapperPartialFetch(t *testing.T) {
    config, peerID, sender, vm := newConfig(t)

    blkID0 := ids.Empty.Prefix(0)
    blkID1 := ids.Empty.Prefix(1)
    blkID2 := ids.Empty.Prefix(2)
    blkID3 := ids.Empty.Prefix(3)

    blkBytes0 := []byte{0}
    blkBytes1 := []byte{1}
    blkBytes2 := []byte{2}
    blkBytes3 := []byte{3}

    blk0 := &Blk{
        id:     blkID0,
        height: 0,
        status: choices.Accepted,
        bytes:  blkBytes0,
    }
    blk1 := &Blk{
        parent: blk0,
        id:     blkID1,
        height: 1,
        status: choices.Unknown,
        bytes:  blkBytes1,
    }
    blk2 := &Blk{
        parent: blk1,
        id:     blkID2,
        height: 2,
        status: choices.Unknown,
        bytes:  blkBytes2,
    }
    blk3 := &Blk{
        parent: blk2,
        id:     blkID3,
        height: 3,
        status: choices.Processing,
        bytes:  blkBytes3,
    }

    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)
    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }

    bs.Put(peerID, *requestID, blkID1, blkBytes1)
    acceptedIDs := ids.Set{}
    acceptedIDs.Add(blkID3)

    parsedBlk1 := false
    parsedBlk2 := false
    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID0):
            return blk0, nil
        case blkID.Equals(blkID1):
            if parsedBlk1 {
                return blk1, nil
            }
            return nil, errUnknownBlock
        case blkID.Equals(blkID2):
            if parsedBlk2 {
                return blk2, nil
            }
            return nil, errUnknownBlock
        case blkID.Equals(blkID3):
            return blk3, nil
        default:
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }
    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes0):
            return blk0, nil
        case bytes.Equal(blkBytes, blkBytes1):
            blk1.status = choices.Processing
            parsedBlk1 = true
            return blk1, nil
        case bytes.Equal(blkBytes, blkBytes2):
            blk2.status = choices.Processing
            parsedBlk2 = true
            return blk2, nil
        case bytes.Equal(blkBytes, blkBytes3):
            return blk3, nil
        }
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }

    requestID := new(uint32)
    requested := ids.Empty
    sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case vtxID.Equals(blkID1), vtxID.Equals(blkID2):
        default:
            t.Fatalf("should have requested blk1 or blk2")
        }
        *requestID = reqID
        requested = vtxID
    }

    vm.CantBootstrapping = false

    if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2
        t.Fatal(err)
    }

    if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2
        t.Fatal(err)
    } else if !requested.Equals(blkID1) {
        t.Fatal("should have requested blk1")
    }

    vm.CantBootstrapped = false

    if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1
        t.Fatal(err)
    } else if !requested.Equals(blkID1) {
        t.Fatal("should not have requested another block")
    }

    if !*finished {
        t.Fatalf("Bootstrapping should have finished")
    }
    if blk1.Status() != choices.Accepted {
    } else if blk0.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    } else if blk1.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    } else if blk2.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    }
    if blk2.Status() != choices.Accepted {
    }

// There are multiple needed blocks and MultiPut returns all at once
func TestBootstrapperMultiPut(t *testing.T) {
    config, peerID, sender, vm := newConfig(t)

    blkID0 := ids.Empty.Prefix(0)
    blkID1 := ids.Empty.Prefix(1)
    blkID2 := ids.Empty.Prefix(2)
    blkID3 := ids.Empty.Prefix(3)

    blkBytes0 := []byte{0}
    blkBytes1 := []byte{1}
    blkBytes2 := []byte{2}
    blkBytes3 := []byte{3}

    blk0 := &Blk{
        id:     blkID0,
        height: 0,
        status: choices.Accepted,
        bytes:  blkBytes0,
    }
    blk1 := &Blk{
        parent: blk0,
        id:     blkID1,
        height: 1,
        status: choices.Unknown,
        bytes:  blkBytes1,
    }
    blk2 := &Blk{
        parent: blk1,
        id:     blkID2,
        height: 2,
        status: choices.Unknown,
        bytes:  blkBytes2,
    }
    blk3 := &Blk{
        parent: blk2,
        id:     blkID3,
        height: 3,
        status: choices.Processing,
        bytes:  blkBytes3,
    }
    vm.CantBootstrapping = false

    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)
    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }

    acceptedIDs := ids.Set{}
    acceptedIDs.Add(blkID3)

    parsedBlk1 := false
    parsedBlk2 := false
    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID0):
            return blk0, nil
        case blkID.Equals(blkID1):
            if parsedBlk1 {
                return blk1, nil
            }
            return nil, errUnknownBlock
        case blkID.Equals(blkID2):
            if parsedBlk2 {
                return blk2, nil
            }
            return nil, errUnknownBlock
        case blkID.Equals(blkID3):
            return blk3, nil
        default:
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }
    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes0):
            return blk0, nil
        case bytes.Equal(blkBytes, blkBytes1):
            blk1.status = choices.Processing
            parsedBlk1 = true
            return blk1, nil
        case bytes.Equal(blkBytes, blkBytes2):
            blk2.status = choices.Processing
            parsedBlk2 = true
            return blk2, nil
        case bytes.Equal(blkBytes, blkBytes3):
            return blk3, nil
        }
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }

    requestID := new(uint32)
    requested := ids.Empty
    sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case vtxID.Equals(blkID1), vtxID.Equals(blkID2):
        default:
            t.Fatalf("should have requested blk1 or blk2")
        }
        *requestID = reqID
        requested = vtxID
    }

    if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2
        t.Fatal(err)
    }

    vm.CantBootstrapped = false

    if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1
        t.Fatal(err)
    } else if !requested.Equals(blkID2) {
        t.Fatal("should not have requested another block")
    }

    if !*finished {
        t.Fatalf("Bootstrapping should have finished")
    } else if blk0.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    } else if blk1.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    } else if blk2.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    }
}

@@ -410,6 +605,7 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }
    vm.CantBootstrapping = false

    accepted := bs.FilterAccepted(blkIDs)

@@ -426,164 +622,3 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
        t.Fatalf("Blk shouldn't be accepted")
    }
}

func TestBootstrapperPartialFetch(t *testing.T) {
    config, _, sender, vm := newConfig(t)

    blkID0 := ids.Empty.Prefix(0)
    blkID1 := ids.Empty.Prefix(1)

    blkBytes0 := []byte{0}

    blk0 := &Blk{
        id:     blkID0,
        height: 0,
        status: choices.Accepted,
        bytes:  blkBytes0,
    }

    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)

    acceptedIDs := ids.Set{}
    acceptedIDs.Add(
        blkID0,
        blkID1,
    )

    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID0):
            return blk0, nil
        case blkID.Equals(blkID1):
            return nil, errUnknownBlock
        default:
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }

    sender.CantGet = false
    bs.onFinished = func() error { return nil }

    bs.ForceAccepted(acceptedIDs)

    if bs.finished {
        t.Fatalf("should have requested a block")
    }

    if bs.pending.Len() != 1 {
        t.Fatalf("wrong number pending")
    }
}

func TestBootstrapperWrongIDByzantineResponse(t *testing.T) {
    config, peerID, sender, vm := newConfig(t)

    blkID0 := ids.Empty.Prefix(0)
    blkID1 := ids.Empty.Prefix(1)
    blkID2 := ids.Empty.Prefix(2)

    blkBytes0 := []byte{0}
    blkBytes1 := []byte{1}
    blkBytes2 := []byte{2}

    blk0 := &Blk{
        id:     blkID0,
        height: 0,
        status: choices.Accepted,
        bytes:  blkBytes0,
    }
    blk1 := &Blk{
        parent: blk0,
        id:     blkID1,
        height: 1,
        status: choices.Processing,
        bytes:  blkBytes1,
    }
    blk2 := &Blk{
        parent: blk1,
        id:     blkID2,
        height: 2,
        status: choices.Processing,
        bytes:  blkBytes2,
    }

    bs := bootstrapper{}
    bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
    bs.Initialize(config)

    acceptedIDs := ids.Set{}
    acceptedIDs.Add(blkID1)

    vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
        switch {
        case blkID.Equals(blkID1):
            return nil, errUnknownBlock
        default:
            t.Fatal(errUnknownBlock)
            panic(errUnknownBlock)
        }
    }

    requestID := new(uint32)
    sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdr.Equals(peerID) {
            t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
        }
        switch {
        case vtxID.Equals(blkID1):
        default:
            t.Fatalf("Requested unknown block")
        }

        *requestID = reqID
    }

    bs.ForceAccepted(acceptedIDs)

    vm.GetBlockF = nil
    sender.GetF = nil

    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes2):
            return blk2, nil
        }
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }

    sender.CantGet = false

    bs.Put(peerID, *requestID, blkID1, blkBytes2)

    sender.CantGet = true

    vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
        switch {
        case bytes.Equal(blkBytes, blkBytes1):
            return blk1, nil
        }
        t.Fatal(errUnknownBlock)
        return nil, errUnknownBlock
    }

    finished := new(bool)
    bs.onFinished = func() error { *finished = true; return nil }

    bs.Put(peerID, *requestID, blkID1, blkBytes1)

    vm.ParseBlockF = nil

    if !*finished {
        t.Fatalf("Bootstrapping should have finished")
    }
    if blk1.Status() != choices.Accepted {
        t.Fatalf("Block should be accepted")
    }
    if blk2.Status() != choices.Processing {
        t.Fatalf("Block should be processing")
    }
}

@@ -4,7 +4,10 @@
package snowman

import (
    "time"

    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/network"
    "github.com/ava-labs/gecko/snow"
    "github.com/ava-labs/gecko/snow/choices"
    "github.com/ava-labs/gecko/snow/consensus/snowman"

@@ -14,6 +17,12 @@ import (
    "github.com/ava-labs/gecko/utils/wrappers"
)

const (
    // TODO define this constant in one place rather than here and in snowman
    // Max containers size in a MultiPut message
    maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
)
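
// Worked example (not part of this commit): if network.DefaultMaxMessageSize
// were 1 << 21 bytes (2 MiB; the real value may differ), then
//
//    maxContainersLen = 4 * (1 << 21) / 5 = 1677721 bytes
//
// i.e. containers may fill at most 80% of a message, leaving ~20% headroom
// for message framing and the per-container length prefixes.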

// Transitive implements the Engine interface by attempting to fetch all
// transitive dependencies.
type Transitive struct {

@@ -44,7 +53,7 @@ type Transitive struct {

// Initialize implements the Engine interface
func (t *Transitive) Initialize(config Config) error {
    config.Context.Log.Info("Initializing Snowman consensus")
    config.Context.Log.Info("initializing consensus engine")

    t.Config = config
    t.metrics.Initialize(

@@ -78,7 +87,7 @@ func (t *Transitive) finishBootstrapping() error {
    // oracle block
    tail, err := t.Config.VM.GetBlock(tailID)
    if err != nil {
        t.Config.Context.Log.Error("Failed to get last accepted block due to: %s", err)
        t.Config.Context.Log.Error("failed to get last accepted block due to: %s", err)
        return err
    }

@@ -96,7 +105,7 @@ func (t *Transitive) finishBootstrapping() error {
        t.Config.VM.SetPreference(tailID)
    }

    t.Config.Context.Log.Info("Bootstrapping finished with %s as the last accepted block", tailID)
    t.Config.Context.Log.Info("bootstrapping finished with %s as the last accepted block", tailID)
    return nil
}

@@ -105,18 +114,18 @@ func (t *Transitive) Gossip() error {
    blkID := t.Config.VM.LastAccepted()
    blk, err := t.Config.VM.GetBlock(blkID)
    if err != nil {
        t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
        t.Config.Context.Log.Warn("dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
        return nil
    }

    t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", blkID)
    t.Config.Context.Log.Verbo("gossiping %s as accepted to the network", blkID)
    t.Config.Sender.Gossip(blkID, blk.Bytes())
    return nil
}

// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() error {
    t.Config.Context.Log.Info("Shutting down Snowman consensus")
    t.Config.Context.Log.Info("shutting down consensus engine")
    return t.Config.VM.Shutdown()
}

@@ -130,9 +139,7 @@ func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error
        // If we failed to get the block, that means either an unexpected error
        // has occurred, the validator is not following the protocol, or the
        // block has been pruned.
        t.Config.Context.Log.Warn("Get called for blockID %s errored with %s",
            blkID,
            err)
        t.Config.Context.Log.Debug("Get(%s, %d, %s) failed with: %s", vdr, requestID, blkID, err)
        return nil
    }

@@ -141,22 +148,51 @@ func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error
    return nil
}

// GetAncestors implements the Engine interface
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
    startTime := time.Now()
    blk, err := t.Config.VM.GetBlock(blkID)
    if err != nil { // Don't have the block. Drop this request.
        t.Config.Context.Log.Verbo("couldn't get block %s. dropping GetAncestors(%s, %d, %s)", blkID, vdr, requestID, blkID)
        return nil
    }

    ancestorsBytes := make([][]byte, 1, common.MaxContainersPerMultiPut) // First elt is byte repr. of blk, then its parents, then grandparent, etc.
    ancestorsBytes[0] = blk.Bytes()
    ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors

    for numFetched := 1; numFetched < common.MaxContainersPerMultiPut && time.Since(startTime) < common.MaxTimeFetchingAncestors; numFetched++ {
        blk = blk.Parent()
        if blk.Status() == choices.Unknown {
            break
        }
        blkBytes := blk.Bytes()
        // Ensure response size isn't too large. Include wrappers.IntLen because the size of the message
        // is included with each container, and the size is repr. by an int.
        if newLen := wrappers.IntLen + ancestorsBytesLen + len(blkBytes); newLen < maxContainersLen {
            ancestorsBytes = append(ancestorsBytes, blkBytes)
            ancestorsBytesLen = newLen
        } else { // reached maximum response size
            break
        }
    }

    t.Config.Sender.MultiPut(vdr, requestID, ancestorsBytes)
    return nil
}
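
// GetAncestors above caps the reply by both container count and serialized
// size: every appended ancestor costs its own bytes plus an int-sized length
// prefix. A self-contained sketch of the same accounting, with assumed
// constants (intLen, the budgets, and the 512-byte blocks are illustrative,
// not values from this commit):

package main

import "fmt"

const (
    intLen           = 4       // assumed size of the per-container length prefix
    maxContainersLen = 1 << 20 // assumed byte budget for the whole response
    maxContainers    = 2000    // assumed cap on container count
)

func main() {
    blkBytes := make([]byte, 512) // pretend every ancestor serializes to 512 bytes

    ancestors := [][]byte{blkBytes}
    totalLen := len(blkBytes) + intLen
    for len(ancestors) < maxContainers {
        if newLen := intLen + totalLen + len(blkBytes); newLen < maxContainersLen {
            ancestors = append(ancestors, blkBytes)
            totalLen = newLen
        } else { // byte budget exhausted first
            break
        }
    }
    fmt.Printf("packed %d containers into %d bytes\n", len(ancestors), totalLen)
}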

// Put implements the Engine interface
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
    t.Config.Context.Log.Verbo("Put called for blockID %s", blkID)

    // if the engine hasn't been bootstrapped, forward the request to the
    // bootstrapper
    // bootstrapping isn't done --> we didn't send any gets --> this put is invalid
    if !t.bootstrapped {
        return t.bootstrapper.Put(vdr, requestID, blkID, blkBytes)
        t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
        return nil
    }

    blk, err := t.Config.VM.ParseBlock(blkBytes)
    if err != nil {
        t.Config.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
            err,
            formatting.DumpBytes{Bytes: blkBytes})

        t.Config.Context.Log.Debug("failed to parse block %s: %s", blkID, err)
        t.Config.Context.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
        // because GetFailed doesn't utilize the assumption that we actually
        // sent a Get message, we can safely call GetFailed here to potentially
        // abandon the request.
@@ -174,10 +210,10 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkByt

// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
    // if the engine hasn't been bootstrapped, forward the request to the
    // bootstrapper
    // not done bootstrapping --> didn't send a get --> this message is invalid
    if !t.bootstrapped {
        return t.bootstrapper.GetFailed(vdr, requestID)
        t.Config.Context.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
        return nil
    }

    // we don't use the assumption that this function is called after a failed
@@ -185,8 +221,7 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
    // and also get what the request was for if it exists
    blkID, ok := t.blkReqs.Remove(vdr, requestID)
    if !ok {
        t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
            vdr)
        t.Config.Context.Log.Debug("getFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
        return nil
    }

@@ -201,8 +236,7 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID)
    // if the engine hasn't been bootstrapped, we aren't ready to respond to
    // queries
    if !t.bootstrapped {
        t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping",
            blkID)
        t.Config.Context.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
        return nil
    }

@@ -234,16 +268,15 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID,
    // if the engine hasn't been bootstrapped, we aren't ready to respond to
    // queries
    if !t.bootstrapped {
        t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", blkID)
        t.Config.Context.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
        return nil
    }

    blk, err := t.Config.VM.ParseBlock(blkBytes)
    // If the parsing fails, we just drop the request, as we didn't ask for it
    if err != nil {
        t.Config.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
            err,
            formatting.DumpBytes{Bytes: blkBytes})
        t.Config.Context.Log.Debug("failed to parse block %s: %s", blkID, err)
        t.Config.Context.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
        return nil
    }

@@ -264,17 +297,13 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID,
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) error {
    // if the engine hasn't been bootstrapped, we shouldn't be receiving chits
    if !t.bootstrapped {
        t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
        t.Config.Context.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
        return nil
    }

    // Since this is snowman, there should only be one ID in the vote set
    if votes.Len() != 1 {
        t.Config.Context.Log.Debug("Chits was called with the wrong number of votes %d. ValidatorID: %s, RequestID: %d",
            votes.Len(),
            vdr,
            requestID)

        t.Config.Context.Log.Debug("Chits(%s, %d) was called with %d votes (expected 1)", vdr, requestID, votes.Len())
        // because QueryFailed doesn't utilize the assumption that we actually
        // sent a Query message, we can safely call QueryFailed here to
        // potentially abandon the request.

@@ -282,7 +311,7 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) err
    }
    vote := votes.List()[0]

    t.Config.Context.Log.Verbo("Chit was called. RequestID: %v. Vote: %s", requestID, vote)
    t.Config.Context.Log.Verbo("Chits(%s, %d) contains vote for %s", vdr, requestID, vote)

    v := &voter{
        t: t,

@@ -310,7 +339,7 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) err
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
    // if the engine hasn't been bootstrapped, we won't have sent a query
    if !t.bootstrapped {
        t.Config.Context.Log.Warn("Dropping QueryFailed due to bootstrapping")
        t.Config.Context.Log.Warn("dropping QueryFailed(%s, %d) due to bootstrapping", vdr, requestID)
        return nil
    }

@@ -326,24 +355,24 @@ func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
func (t *Transitive) Notify(msg common.Message) error {
    // if the engine hasn't been bootstrapped, we shouldn't be issuing blocks
    if !t.bootstrapped {
        t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
        t.Config.Context.Log.Debug("dropping Notify due to bootstrapping")
        return nil
    }

    t.Config.Context.Log.Verbo("Snowman engine notified of %s from the vm", msg)
    t.Config.Context.Log.Verbo("snowman engine notified of %s from the vm", msg)
    switch msg {
    case common.PendingTxs:
        // the pending txs message means we should attempt to build a block.
        blk, err := t.Config.VM.BuildBlock()
        if err != nil {
            t.Config.Context.Log.Verbo("VM.BuildBlock errored with %s", err)
            t.Config.Context.Log.Debug("VM.BuildBlock errored with: %s", err)
            return nil
        }

        // a newly created block is expected to be processing. If this check
        // fails, there is potentially an error in the VM this engine is running
        if status := blk.Status(); status != choices.Processing {
            t.Config.Context.Log.Warn("Attempting to issue a block with status: %s, expected Processing", status)
            t.Config.Context.Log.Warn("attempting to issue a block with status: %s, expected Processing", status)
        }

        // the newly created block should be built on top of the preferred
@@ -351,7 +380,7 @@ func (t *Transitive) Notify(msg common.Message) error {
        // confirmed.
        parentID := blk.Parent().ID()
        if pref := t.Consensus.Preference(); !parentID.Equals(pref) {
            t.Config.Context.Log.Warn("Built block with parent: %s, expected %s", parentID, pref)
            t.Config.Context.Log.Warn("built block with parent: %s, expected %s", parentID, pref)
        }

        added, err := t.insertAll(blk)

@@ -361,12 +390,12 @@ func (t *Transitive) Notify(msg common.Message) error {

        // inserting the block shouldn't have any missing dependencies
        if added {
            t.Config.Context.Log.Verbo("Successfully issued new block from the VM")
            t.Config.Context.Log.Verbo("successfully issued new block from the VM")
        } else {
            t.Config.Context.Log.Warn("VM.BuildBlock returned a block that is pending for ancestors")
        }
    default:
        t.Config.Context.Log.Warn("Unexpected message from the VM: %s", msg)
        t.Config.Context.Log.Warn("unexpected message from the VM: %s", msg)
    }
    return nil
}

@@ -476,7 +505,7 @@ func (t *Transitive) insert(blk snowman.Block) error {
    // block on the parent if needed
    if parent := blk.Parent(); !t.Consensus.Issued(parent) {
        parentID := parent.ID()
        t.Config.Context.Log.Verbo("Block waiting for parent %s", parentID)
        t.Config.Context.Log.Verbo("block %s waiting for parent %s", blkID, parentID)
        i.deps.Add(parentID)
    }

@@ -494,10 +523,9 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
        return
    }

    t.Config.Context.Log.Verbo("Sending Get message for %s", blkID)

    t.RequestID++
    t.blkReqs.Add(vdr, t.RequestID, blkID)
    t.Config.Context.Log.Verbo("sending Get(%s, %d, %s)", vdr, t.RequestID, blkID)
    t.Config.Sender.Get(vdr, t.RequestID, blkID)

    // Tracks performance statistics

@@ -506,7 +534,7 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {

// send a pull request for this block ID
func (t *Transitive) pullSample(blkID ids.ID) {
    t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
    t.Config.Context.Log.Verbo("about to sample from: %s", t.Config.Validators)
    p := t.Consensus.Parameters()
    vdrs := t.Config.Validators.Sample(p.K)
    vdrSet := ids.ShortSet{}

@@ -515,13 +543,13 @@ func (t *Transitive) pullSample(blkID ids.ID) {
    }

    if numVdrs := len(vdrs); numVdrs != p.K {
        t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
        t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
        return
    }

    t.RequestID++
    if !t.polls.Add(t.RequestID, vdrSet.Len()) {
        t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
        t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
        return
    }

@@ -530,7 +558,7 @@ func (t *Transitive) pullSample(blkID ids.ID) {

// send a push request for this block
func (t *Transitive) pushSample(blk snowman.Block) {
    t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
    t.Config.Context.Log.Verbo("about to sample from: %s", t.Config.Validators)
    p := t.Consensus.Parameters()
    vdrs := t.Config.Validators.Sample(p.K)
    vdrSet := ids.ShortSet{}

@@ -540,13 +568,13 @@ func (t *Transitive) pushSample(blk snowman.Block) {

    blkID := blk.ID()
    if numVdrs := len(vdrs); numVdrs != p.K {
        t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
        t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
        return
    }

    t.RequestID++
    if !t.polls.Add(t.RequestID, vdrSet.Len()) {
        t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
        t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
        return
    }

@@ -564,7 +592,7 @@ func (t *Transitive) deliver(blk snowman.Block) error {
    t.pending.Remove(blkID)

    if err := blk.Verify(); err != nil {
        t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
        t.Config.Context.Log.Debug("block failed verification due to %s, dropping block", err)

        // if verify fails, then all descendants are also invalid
        t.blocked.Abandon(blkID)
@@ -572,7 +600,7 @@ func (t *Transitive) deliver(blk snowman.Block) error {
        return t.errs.Err
    }

    t.Config.Context.Log.Verbo("Adding block to consensus: %s", blkID)
    t.Config.Context.Log.Verbo("adding block to consensus: %s", blkID)
    t.Consensus.Add(blk)

    // Add all the oracle blocks if they exist. We call verify on all the blocks

@@ -584,7 +612,7 @@ func (t *Transitive) deliver(blk snowman.Block) error {
    case OracleBlock:
        for _, blk := range blk.Options() {
            if err := blk.Verify(); err != nil {
                t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
                t.Config.Context.Log.Debug("block failed verification due to %s, dropping block", err)
                dropped = append(dropped, blk)
            } else {
                t.Consensus.Add(blk)

@@ -45,7 +45,7 @@ func (v *voter) Update() {
    // must be bubbled to the nearest valid block
    results = v.bubbleVotes(results)

    v.t.Config.Context.Log.Verbo("Finishing poll [%d] with:\n%s", v.requestID, &results)
    v.t.Config.Context.Log.Debug("Finishing poll [%d] with:\n%s", v.requestID, &results)
    if err := v.t.Consensus.RecordPoll(results); err != nil {
        v.t.errs.Add(err)
        return

@@ -54,11 +54,11 @@ func (v *voter) Update() {
    v.t.Config.VM.SetPreference(v.t.Consensus.Preference())

    if v.t.Consensus.Finalized() {
        v.t.Config.Context.Log.Verbo("Snowman engine can quiesce")
        v.t.Config.Context.Log.Debug("Snowman engine can quiesce")
        return
    }

    v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce")
    v.t.Config.Context.Log.Debug("Snowman engine can't quiesce")
    v.t.repoll()
}

@@ -7,6 +7,8 @@ import (
    "sync"
    "time"

    "github.com/ava-labs/gecko/utils/formatting"

    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/snow/networking/timeout"
    "github.com/ava-labs/gecko/utils/logging"

@@ -64,25 +66,25 @@ func (sr *ChainRouter) AddChain(chain *Handler) {
// RemoveChain removes the specified chain so that incoming
// messages can't be routed to it
func (sr *ChainRouter) RemoveChain(chainID ids.ID) {
    sr.lock.Lock()
    defer sr.lock.Unlock()

    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.Shutdown()
        close(chain.msgs)

        ticker := time.NewTicker(sr.closeTimeout)
        select {
        case _, _ = <-chain.closed:
        case <-ticker.C:
            chain.Context().Log.Warn("timed out while shutting down")
        }
        ticker.Stop()

        delete(sr.chains, chainID.Key())
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
    sr.lock.RLock()
    chain, exists := sr.chains[chainID.Key()]
    if !exists {
        sr.log.Debug("can't remove unknown chain %s", chainID)
        sr.lock.RUnlock()
        return
    }
    chain.Shutdown()
    close(chain.msgs)
    delete(sr.chains, chainID.Key())
    sr.lock.RUnlock()

    ticker := time.NewTicker(sr.closeTimeout)
    select {
    case _, _ = <-chain.closed:
    case <-ticker.C:
        chain.Context().Log.Warn("timed out while shutting down")
    }
    ticker.Stop()
}
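
// The rewritten RemoveChain drops the map entry and releases the lock before
// blocking on chain.closed, so a chain that is slow to shut down no longer
// stalls every other router operation. A minimal sketch of the same
// wait-with-timeout idiom (names are illustrative; a one-shot time.Timer does
// the job of the Ticker used above):

package main

import (
    "fmt"
    "time"
)

// waitClosed blocks until done is closed or the timeout elapses, reporting
// whether the shutdown finished in time.
func waitClosed(done <-chan struct{}, timeout time.Duration) bool {
    timer := time.NewTimer(timeout)
    defer timer.Stop()
    select {
    case <-done:
        return true
    case <-timer.C:
        return false
    }
}

func main() {
    done := make(chan struct{})
    go func() {
        time.Sleep(10 * time.Millisecond) // simulated shutdown work
        close(done)
    }()
    fmt.Println("clean shutdown:", waitClosed(done, time.Second))
}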

// GetAcceptedFrontier routes an incoming GetAcceptedFrontier request from the

@@ -95,7 +97,7 @@ func (sr *ChainRouter) GetAcceptedFrontier(validatorID ids.ShortID, chainID ids.
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.GetAcceptedFrontier(validatorID, requestID)
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("GetAcceptedFrontier(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
}

@@ -106,11 +108,12 @@ func (sr *ChainRouter) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID,
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.AcceptedFrontier(validatorID, requestID, containerIDs)
        if chain.AcceptedFrontier(validatorID, requestID, containerIDs) {
            sr.timeouts.Cancel(validatorID, chainID, requestID)
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("AcceptedFrontier(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
    }
}

@@ -121,12 +124,19 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.GetAcceptedFrontierFailed(validatorID, requestID)
        if !chain.GetAcceptedFrontierFailed(validatorID, requestID) {
            sr.log.Debug("deferring GetAcceptedFrontier timeout due to a full queue on %s", chainID)
            // Defer this call to later
            sr.timeouts.Register(validatorID, chainID, requestID, func() {
                sr.GetAcceptedFrontierFailed(validatorID, chainID, requestID)
            })
            return
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Error("GetAcceptedFrontierFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
    sr.timeouts.Cancel(validatorID, chainID, requestID)
}
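
// When the chain's queue is full, the failure notification above is not
// dropped; it is re-armed through the timeout manager so it fires again
// later. A stripped-down sketch of that defer-and-retry idea (the bounded
// queue and retryLater are illustrative stand-ins, not the package's API):

package main

import (
    "fmt"
    "time"
)

// deliver reports whether msg fit in the bounded queue; it never blocks.
func deliver(queue chan string, msg string) bool {
    select {
    case queue <- msg:
        return true
    default:
        return false
    }
}

// retryLater re-arms the delivery attempt instead of dropping the message.
func retryLater(queue chan string, msg string, delay time.Duration) {
    time.AfterFunc(delay, func() {
        if !deliver(queue, msg) {
            retryLater(queue, msg, delay)
        }
    })
}

func main() {
    queue := make(chan string, 1)
    deliver(queue, "first")           // fills the queue
    if !deliver(queue, "getFailed") { // queue full: defer instead of drop
        retryLater(queue, "getFailed", 5*time.Millisecond)
    }
    fmt.Println(<-queue) // drain "first"
    time.Sleep(20 * time.Millisecond)
    fmt.Println(<-queue) // "getFailed" arrived on the retry
}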

// GetAccepted routes an incoming GetAccepted request from the

@@ -139,7 +149,7 @@ func (sr *ChainRouter) GetAccepted(validatorID ids.ShortID, chainID ids.ID, requ
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.GetAccepted(validatorID, requestID, containerIDs)
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("GetAccepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
    }
}

@@ -150,11 +160,12 @@ func (sr *ChainRouter) Accepted(validatorID ids.ShortID, chainID ids.ID, request
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.Accepted(validatorID, requestID, containerIDs)
        if chain.Accepted(validatorID, requestID, containerIDs) {
            sr.timeouts.Cancel(validatorID, chainID, requestID)
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("Accepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
    }
}

@@ -165,12 +176,69 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.GetAcceptedFailed(validatorID, requestID)
        if !chain.GetAcceptedFailed(validatorID, requestID) {
            sr.timeouts.Register(validatorID, chainID, requestID, func() {
                sr.log.Debug("deferring GetAccepted timeout due to a full queue on %s", chainID)
                sr.GetAcceptedFailed(validatorID, chainID, requestID)
            })
            return
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
    sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// GetAncestors routes an incoming GetAncestors message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
// The maximum number of ancestors to respond with is defined in snow/engine/common/bootstrapper.go
func (sr *ChainRouter) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.GetAncestors(validatorID, requestID, containerID)
    } else {
        sr.log.Debug("GetAncestors(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
}

// MultiPut routes an incoming MultiPut message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    // This message came in response to a GetAncestors message from this node, and when we sent that
    // message we set a timeout. Since we got a response, cancel the timeout.
    if chain, exists := sr.chains[chainID.Key()]; exists {
        if chain.MultiPut(validatorID, requestID, containers) {
            sr.timeouts.Cancel(validatorID, chainID, requestID)
        }
    } else {
        sr.log.Debug("MultiPut(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID, len(containers))
    }
}

// GetAncestorsFailed routes an incoming GetAncestorsFailed message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) {
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    if chain, exists := sr.chains[chainID.Key()]; exists {
        if !chain.GetAncestorsFailed(validatorID, requestID) {
            sr.timeouts.Register(validatorID, chainID, requestID, func() {
                sr.log.Debug("deferring GetAncestors timeout due to a full queue on %s", chainID)
                sr.GetAncestorsFailed(validatorID, chainID, requestID)
            })
            return
        }
    } else {
        sr.log.Error("GetAncestorsFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
    sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// Get routes an incoming Get request from the validator with ID [validatorID]

@@ -182,7 +250,7 @@ func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID ui
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.Get(validatorID, requestID, containerID)
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("Get(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
    }
}

@@ -194,11 +262,13 @@ func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID ui

    // This message came in response to a Get message from this node, and when we sent that Get
    // message we set a timeout. Since we got a response, cancel the timeout.
    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.Put(validatorID, requestID, containerID, container)
        if chain.Put(validatorID, requestID, containerID, container) {
            sr.timeouts.Cancel(validatorID, chainID, requestID)
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("Put(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
        sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
    }
}

@@ -208,12 +278,18 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.GetFailed(validatorID, requestID)
        if !chain.GetFailed(validatorID, requestID) {
            sr.timeouts.Register(validatorID, chainID, requestID, func() {
                sr.log.Debug("deferring Get timeout due to a full queue on %s", chainID)
                sr.GetFailed(validatorID, chainID, requestID)
            })
            return
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Error("GetFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
    sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// PushQuery routes an incoming PushQuery request from the validator with ID [validatorID]

@@ -225,7 +301,8 @@ func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, reques
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.PushQuery(validatorID, requestID, containerID, container)
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("PushQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
        sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
    }
}

@@ -238,7 +315,7 @@ func (sr *ChainRouter) PullQuery(validatorID ids.ShortID, chainID ids.ID, reques
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.PullQuery(validatorID, requestID, containerID)
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("PullQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
    }
}

@@ -249,11 +326,12 @@ func (sr *ChainRouter) Chits(validatorID ids.ShortID, chainID ids.ID, requestID
    defer sr.lock.RUnlock()

    // Cancel the timeout we set when we sent the message asking for these Chits
    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.Chits(validatorID, requestID, votes)
        if chain.Chits(validatorID, requestID, votes) {
            sr.timeouts.Cancel(validatorID, chainID, requestID)
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Debug("Chits(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, votes)
    }
}

@@ -263,25 +341,32 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ
    sr.lock.RLock()
    defer sr.lock.RUnlock()

    sr.timeouts.Cancel(validatorID, chainID, requestID)
    if chain, exists := sr.chains[chainID.Key()]; exists {
        chain.QueryFailed(validatorID, requestID)
        if !chain.QueryFailed(validatorID, requestID) {
            sr.timeouts.Register(validatorID, chainID, requestID, func() {
                sr.log.Debug("deferring Query timeout due to a full queue on %s", chainID)
                sr.QueryFailed(validatorID, chainID, requestID)
            })
            return
        }
    } else {
        sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
        sr.log.Error("QueryFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
    }
    sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// Shutdown shuts down this router
func (sr *ChainRouter) Shutdown() {
    sr.lock.Lock()
    for _, chain := range sr.chains {
        chain.Shutdown()
        close(chain.msgs)
    }
    prevChains := sr.chains
    sr.chains = map[[32]byte]*Handler{}
    sr.lock.Unlock()

    for _, chain := range prevChains {
        chain.Shutdown()
        close(chain.msgs)
    }

    ticker := time.NewTicker(sr.closeTimeout)
    timedout := false
    for _, chain := range prevChains {

@@ -300,8 +385,8 @@ func (sr *ChainRouter) Shutdown() {

// Gossip accepted containers
func (sr *ChainRouter) Gossip() {
    sr.lock.RLock()
    defer sr.lock.RUnlock()
    sr.lock.Lock()
    defer sr.lock.Unlock()

    for _, chain := range sr.chains {
        chain.Gossip()
@@ -4,14 +4,19 @@
package router

import (
    "time"

    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/snow"
    "github.com/ava-labs/gecko/snow/engine/common"
    "github.com/prometheus/client_golang/prometheus"
)

// Handler passes incoming messages from the network to the consensus engine
// (Actually, it receives the incoming messages from a ChainRouter, but same difference)
type Handler struct {
    metrics

    msgs   chan message
    closed chan struct{}
    engine common.Engine

@@ -21,7 +26,14 @@ type Handler struct {
}

// Initialize this consensus handler
func (h *Handler) Initialize(engine common.Engine, msgChan <-chan common.Message, bufferSize int) {
func (h *Handler) Initialize(
    engine common.Engine,
    msgChan <-chan common.Message,
    bufferSize int,
    namespace string,
    metrics prometheus.Registerer,
) {
    h.metrics.Initialize(namespace, metrics)
    h.msgs = make(chan message, bufferSize)
    h.closed = make(chan struct{})
    h.engine = engine

@@ -47,6 +59,7 @@ func (h *Handler) Dispatch() {
            if !ok {
                return
            }
            h.metrics.pending.Dec()
            if closing {
                log.Debug("dropping message due to closing:\n%s", msg)
                continue

@@ -73,6 +86,7 @@ func (h *Handler) Dispatch() {
// Returns true iff this consensus handler (and its associated engine) should shut down
// (due to receipt of a shutdown message)
func (h *Handler) dispatchMsg(msg message) bool {
    startTime := time.Now()
    ctx := h.engine.Context()

    ctx.Lock.Lock()

@@ -86,36 +100,61 @@ func (h *Handler) dispatchMsg(msg message) bool {
    switch msg.messageType {
    case getAcceptedFrontierMsg:
        err = h.engine.GetAcceptedFrontier(msg.validatorID, msg.requestID)
        h.getAcceptedFrontier.Observe(float64(time.Now().Sub(startTime)))
    case acceptedFrontierMsg:
        err = h.engine.AcceptedFrontier(msg.validatorID, msg.requestID, msg.containerIDs)
        h.acceptedFrontier.Observe(float64(time.Now().Sub(startTime)))
    case getAcceptedFrontierFailedMsg:
        err = h.engine.GetAcceptedFrontierFailed(msg.validatorID, msg.requestID)
        h.getAcceptedFrontierFailed.Observe(float64(time.Now().Sub(startTime)))
    case getAcceptedMsg:
        err = h.engine.GetAccepted(msg.validatorID, msg.requestID, msg.containerIDs)
        h.getAccepted.Observe(float64(time.Now().Sub(startTime)))
    case acceptedMsg:
        err = h.engine.Accepted(msg.validatorID, msg.requestID, msg.containerIDs)
        h.accepted.Observe(float64(time.Now().Sub(startTime)))
    case getAcceptedFailedMsg:
        err = h.engine.GetAcceptedFailed(msg.validatorID, msg.requestID)
        h.getAcceptedFailed.Observe(float64(time.Now().Sub(startTime)))
    case getAncestorsMsg:
        err = h.engine.GetAncestors(msg.validatorID, msg.requestID, msg.containerID)
        h.getAncestors.Observe(float64(time.Now().Sub(startTime)))
    case getAncestorsFailedMsg:
        err = h.engine.GetAncestorsFailed(msg.validatorID, msg.requestID)
        h.getAncestorsFailed.Observe(float64(time.Now().Sub(startTime)))
    case multiPutMsg:
        err = h.engine.MultiPut(msg.validatorID, msg.requestID, msg.containers)
        h.multiPut.Observe(float64(time.Now().Sub(startTime)))
    case getMsg:
        err = h.engine.Get(msg.validatorID, msg.requestID, msg.containerID)
        h.get.Observe(float64(time.Now().Sub(startTime)))
    case getFailedMsg:
        err = h.engine.GetFailed(msg.validatorID, msg.requestID)
        h.getFailed.Observe(float64(time.Now().Sub(startTime)))
    case putMsg:
        err = h.engine.Put(msg.validatorID, msg.requestID, msg.containerID, msg.container)
        h.put.Observe(float64(time.Now().Sub(startTime)))
    case pushQueryMsg:
        err = h.engine.PushQuery(msg.validatorID, msg.requestID, msg.containerID, msg.container)
        h.pushQuery.Observe(float64(time.Now().Sub(startTime)))
    case pullQueryMsg:
        err = h.engine.PullQuery(msg.validatorID, msg.requestID, msg.containerID)
        h.pullQuery.Observe(float64(time.Now().Sub(startTime)))
    case queryFailedMsg:
        err = h.engine.QueryFailed(msg.validatorID, msg.requestID)
        h.queryFailed.Observe(float64(time.Now().Sub(startTime)))
    case chitsMsg:
        err = h.engine.Chits(msg.validatorID, msg.requestID, msg.containerIDs)
        h.chits.Observe(float64(time.Now().Sub(startTime)))
    case notifyMsg:
        err = h.engine.Notify(msg.notification)
        h.notify.Observe(float64(time.Now().Sub(startTime)))
    case gossipMsg:
        err = h.engine.Gossip()
        h.gossip.Observe(float64(time.Now().Sub(startTime)))
    case shutdownMsg:
        err = h.engine.Shutdown()
        h.shutdown.Observe(float64(time.Now().Sub(startTime)))
        done = true
    }

@@ -127,147 +166,192 @@ func (h *Handler) dispatchMsg(msg message) bool {

// GetAcceptedFrontier passes a GetAcceptedFrontier message received from the
// network to the consensus engine.
func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) {
    h.msgs <- message{
func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) bool {
    return h.sendMsg(message{
        messageType: getAcceptedFrontierMsg,
        validatorID: validatorID,
        requestID:   requestID,
    }
    })
}

// AcceptedFrontier passes an AcceptedFrontier message received from the network
// to the consensus engine.
func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
    h.msgs <- message{
func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) bool {
    return h.sendMsg(message{
        messageType:  acceptedFrontierMsg,
        validatorID:  validatorID,
        requestID:    requestID,
        containerIDs: containerIDs,
    }
    })
}

// GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received
// from the network to the consensus engine.
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) {
    h.msgs <- message{
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) bool {
    return h.sendMsg(message{
        messageType: getAcceptedFrontierFailedMsg,
        validatorID: validatorID,
        requestID:   requestID,
    }
    })
}

// GetAccepted passes a GetAccepted message received from the
// network to the consensus engine.
func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
    h.msgs <- message{
func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) bool {
    return h.sendMsg(message{
        messageType:  getAcceptedMsg,
        validatorID:  validatorID,
        requestID:    requestID,
        containerIDs: containerIDs,
    }
    })
}

// Accepted passes an Accepted message received from the network to the consensus
// engine.
func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
    h.msgs <- message{
func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) bool {
    return h.sendMsg(message{
        messageType:  acceptedMsg,
        validatorID:  validatorID,
        requestID:    requestID,
        containerIDs: containerIDs,
    }
    })
}

// GetAcceptedFailed passes a GetAcceptedFailed message received from the
// network to the consensus engine.
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
    h.msgs <- message{
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) bool {
    return h.sendMsg(message{
        messageType: getAcceptedFailedMsg,
        validatorID: validatorID,
        requestID:   requestID,
    }
    })
}

// Get passes a Get message received from the network to the consensus engine.
func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
    h.msgs <- message{
func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
    return h.sendMsg(message{
        messageType: getMsg,
        validatorID: validatorID,
        requestID:   requestID,
        containerID: containerID,
    }
    })
}

// GetAncestors passes a GetAncestors message received from the network to the consensus engine.
func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
    return h.sendMsg(message{
        messageType: getAncestorsMsg,
        validatorID: validatorID,
        requestID:   requestID,
        containerID: containerID,
    })
}

// Put passes a Put message received from the network to the consensus engine.
func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) {
    h.msgs <- message{
func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) bool {
    return h.sendMsg(message{
        messageType: putMsg,
        validatorID: validatorID,
        requestID:   requestID,
        containerID: containerID,
        container:   container,
    }
    })
}

// MultiPut passes a MultiPut message received from the network to the consensus engine.
func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) bool {
    return h.sendMsg(message{
        messageType: multiPutMsg,
        validatorID: validatorID,
        requestID:   requestID,
        containers:  containers,
    })
}

// GetFailed passes a GetFailed message to the consensus engine.
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) {
    h.msgs <- message{
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) bool {
    return h.sendMsg(message{
        messageType: getFailedMsg,
        validatorID: validatorID,
        requestID:   requestID,
    }
    })
}

// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine.
func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) bool {
    return h.sendMsg(message{
        messageType: getAncestorsFailedMsg,
        validatorID: validatorID,
        requestID:   requestID,
    })
}

// PushQuery passes a PushQuery message received from the network to the consensus engine.
func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) {
    h.msgs <- message{
func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) bool {
    return h.sendMsg(message{
        messageType: pushQueryMsg,
        validatorID: validatorID,
        requestID:   requestID,
        containerID: blockID,
        container:   block,
    }
    })
}

// PullQuery passes a PullQuery message received from the network to the consensus engine.
func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID) {
    h.msgs <- message{
func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID) bool {
    return h.sendMsg(message{
        messageType: pullQueryMsg,
        validatorID: validatorID,
        requestID:   requestID,
        containerID: blockID,
    }
    })
}

// Chits passes a Chits message received from the network to the consensus engine.
func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) {
    h.msgs <- message{
func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) bool {
    return h.sendMsg(message{
        messageType:  chitsMsg,
        validatorID:  validatorID,
        requestID:    requestID,
        containerIDs: votes,
    }
    })
}

// QueryFailed passes a QueryFailed message received from the network to the consensus engine.
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) {
    h.msgs <- message{
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) bool {
    return h.sendMsg(message{
        messageType: queryFailedMsg,
        validatorID: validatorID,
        requestID:   requestID,
    }
    })
}

// Gossip passes a gossip request to the consensus engine
func (h *Handler) Gossip() { h.msgs <- message{messageType: gossipMsg} }

// Shutdown shuts down the dispatcher
func (h *Handler) Shutdown() { h.msgs <- message{messageType: shutdownMsg} }
func (h *Handler) Gossip() bool {
    return h.sendMsg(message{messageType: gossipMsg})
}

// Notify ...
func (h *Handler) Notify(msg common.Message) {
    h.msgs <- message{
func (h *Handler) Notify(msg common.Message) bool {
    return h.sendMsg(message{
        messageType:  notifyMsg,
        notification: msg,
    })
}

// Shutdown shuts down the dispatcher
func (h *Handler) Shutdown() {
    h.metrics.pending.Inc()
    h.msgs <- message{messageType: shutdownMsg}
}

func (h *Handler) sendMsg(msg message) bool {
    select {
    case h.msgs <- msg:
        h.metrics.pending.Inc()
        return true
    default:
        h.metrics.dropped.Inc()
        return false
    }
}
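
// sendMsg is what makes every network-facing entry point non-blocking: a full
// buffer counts as a drop, and the caller learns about it from the boolean,
// which is exactly what lets the ChainRouter defer failure notifications
// instead of deadlocking. Shutdown deliberately bypasses sendMsg, since a
// shutdown message must never be dropped. A self-contained sketch of the
// select/default idiom (the queue type is illustrative):

package main

import "fmt"

type queue struct {
    msgs    chan int
    dropped int
}

// send reports whether msg was enqueued; it never blocks.
func (q *queue) send(msg int) bool {
    select {
    case q.msgs <- msg:
        return true
    default:
        q.dropped++ // buffer full: record the drop instead of blocking
        return false
    }
}

func main() {
    q := &queue{msgs: make(chan int, 2)}
    for i := 0; i < 4; i++ {
        fmt.Println(i, q.send(i)) // the first two fit, the rest are dropped
    }
    fmt.Println("dropped:", q.dropped) // dropped: 2
}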

@@ -31,6 +31,9 @@ const (
    notifyMsg
    gossipMsg
    shutdownMsg
    getAncestorsMsg
    multiPutMsg
    getAncestorsFailedMsg
)

type message struct {

@@ -39,6 +42,7 @@ type message struct {
    requestID    uint32
    containerID  ids.ID
    container    []byte
    containers   [][]byte
    containerIDs ids.Set
    notification common.Message
}

@@ -74,8 +78,12 @@ func (t msgType) String() string {
        return "Get Accepted Failed Message"
    case getMsg:
        return "Get Message"
    case getAncestorsMsg:
        return "Get Ancestors Message"
    case putMsg:
        return "Put Message"
    case multiPutMsg:
        return "MultiPut Message"
    case getFailedMsg:
        return "Get Failed Message"
    case pushQueryMsg:

@@ -0,0 +1,90 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package router

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"

    "github.com/ava-labs/gecko/utils/timer"
    "github.com/ava-labs/gecko/utils/wrappers"
)

func initHistogram(namespace, name string, registerer prometheus.Registerer, errs *wrappers.Errs) prometheus.Histogram {
    histogram := prometheus.NewHistogram(
        prometheus.HistogramOpts{
            Namespace: namespace,
            Name:      name,
            Help:      "Time spent processing this request in nanoseconds",
            Buckets:   timer.NanosecondsBuckets,
        })

    if err := registerer.Register(histogram); err != nil {
        errs.Add(fmt.Errorf("failed to register %s statistics due to %s", name, err))
    }
    return histogram
}
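
// initHistogram funnels every registration failure into one error
// accumulator, so Initialize below can register nineteen collectors and
// surface the failures once. A small usage sketch against a throwaway
// registry (the metric name and bucket choice here are illustrative
// stand-ins for timer.NanosecondsBuckets):

package main

import (
    "fmt"
    "time"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    registry := prometheus.NewRegistry()
    h := prometheus.NewHistogram(prometheus.HistogramOpts{
        Namespace: "handler",
        Name:      "get", // hypothetical metric name
        Help:      "Time spent processing this request in nanoseconds",
        Buckets:   prometheus.ExponentialBuckets(1000, 10, 8), // assumed buckets
    })
    if err := registry.Register(h); err != nil {
        fmt.Println("register failed:", err)
        return
    }

    start := time.Now()
    // ... handle one message ...
    h.Observe(float64(time.Since(start))) // duration in nanoseconds
}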

type metrics struct {
    pending prometheus.Gauge
    dropped prometheus.Counter
    getAcceptedFrontier, acceptedFrontier, getAcceptedFrontierFailed,
    getAccepted, accepted, getAcceptedFailed,
    getAncestors, multiPut, getAncestorsFailed,
    get, put, getFailed,
    pushQuery, pullQuery, chits, queryFailed,
    notify,
    gossip,
    shutdown prometheus.Histogram
}

// Initialize implements the Engine interface
func (m *metrics) Initialize(namespace string, registerer prometheus.Registerer) error {
    errs := wrappers.Errs{}

    m.pending = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: namespace,
            Name:      "pending",
            Help:      "Number of pending events",
        })

    if err := registerer.Register(m.pending); err != nil {
        errs.Add(fmt.Errorf("failed to register pending statistics due to %s", err))
    }

    m.dropped = prometheus.NewCounter(
        prometheus.CounterOpts{
            Namespace: namespace,
            Name:      "dropped",
            Help:      "Number of dropped events",
        })

    if err := registerer.Register(m.dropped); err != nil {
        errs.Add(fmt.Errorf("failed to register dropped statistics due to %s", err))
    }

    m.getAcceptedFrontier = initHistogram(namespace, "get_accepted_frontier", registerer, &errs)
    m.acceptedFrontier = initHistogram(namespace, "accepted_frontier", registerer, &errs)
    m.getAcceptedFrontierFailed = initHistogram(namespace, "get_accepted_frontier_failed", registerer, &errs)
    m.getAccepted = initHistogram(namespace, "get_accepted", registerer, &errs)
    m.accepted = initHistogram(namespace, "accepted", registerer, &errs)
    m.getAcceptedFailed = initHistogram(namespace, "get_accepted_failed", registerer, &errs)
    m.getAncestors = initHistogram(namespace, "get_ancestors", registerer, &errs)
    m.multiPut = initHistogram(namespace, "multi_put", registerer, &errs)
    m.getAncestorsFailed = initHistogram(namespace, "get_ancestors_failed", registerer, &errs)
    m.get = initHistogram(namespace, "get", registerer, &errs)
    m.put = initHistogram(namespace, "put", registerer, &errs)
    m.getFailed = initHistogram(namespace, "get_failed", registerer, &errs)
    m.pushQuery = initHistogram(namespace, "push_query", registerer, &errs)
    m.pullQuery = initHistogram(namespace, "pull_query", registerer, &errs)
    m.chits = initHistogram(namespace, "chits", registerer, &errs)
    m.queryFailed = initHistogram(namespace, "query_failed", registerer, &errs)
    m.notify = initHistogram(namespace, "notify", registerer, &errs)
    m.gossip = initHistogram(namespace, "gossip", registerer, &errs)
    m.shutdown = initHistogram(namespace, "shutdown", registerer, &errs)

    return errs.Err
}

@@ -36,7 +36,9 @@ type ExternalRouter interface {
    GetAccepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
    Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
    Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
    GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
    Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
    MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
    PushQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
    PullQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
    Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)

@@ -47,5 +49,6 @@ type InternalRouter interface {
    GetAcceptedFrontierFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
    GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
    GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
    GetAncestorsFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
    QueryFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
}

@@ -15,7 +15,10 @@ type ExternalSender interface {
    Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)

    Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
    GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)

    Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
    MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)

    PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
    PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)

@@ -93,6 +93,20 @@ func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.
    s.sender.Get(validatorID, s.ctx.ChainID, requestID, containerID)
}

// GetAncestors sends a GetAncestors message
func (s *Sender) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
    s.ctx.Log.Verbo("Sending GetAncestors to validator %s. RequestID: %d. ContainerID: %s", validatorID, requestID, containerID)
    // Sending a GetAncestors to myself will always fail
    if validatorID.Equals(s.ctx.NodeID) {
        go s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
        return
    }
    s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
        s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
    })
    s.sender.GetAncestors(validatorID, s.ctx.ChainID, requestID, containerID)
}
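
// GetAncestors above arms the failure timeout before the request leaves the
// node, so a reply that never arrives still triggers GetAncestorsFailed, and
// the send-to-self case short-circuits straight to the failure path. A
// compact sketch of that arm-then-send ordering (the timeouts type is an
// illustrative stand-in for the real timeout manager):

package main

import (
    "fmt"
    "sync"
    "time"
)

type timeouts struct {
    mu      sync.Mutex
    pending map[uint32]*time.Timer
}

// register arms onTimeout to fire unless cancel is called first.
func (t *timeouts) register(requestID uint32, d time.Duration, onTimeout func()) {
    t.mu.Lock()
    defer t.mu.Unlock()
    t.pending[requestID] = time.AfterFunc(d, onTimeout)
}

// cancel disarms the timer; call it when the response arrives in time.
func (t *timeouts) cancel(requestID uint32) {
    t.mu.Lock()
    defer t.mu.Unlock()
    if timer, ok := t.pending[requestID]; ok {
        timer.Stop()
        delete(t.pending, requestID)
    }
}

func main() {
    to := &timeouts{pending: map[uint32]*time.Timer{}}
    done := make(chan struct{})

    // Arm first, then "send" a request that never gets a response.
    to.register(1, 10*time.Millisecond, func() {
        fmt.Println("GetAncestorsFailed(1)") // fires because nothing cancelled it
        close(done)
    })
    <-done
}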
|
||||
|
||||
// Put sends a Put message to the consensus engine running on the specified chain
|
||||
// on the specified validator.
|
||||
// The Put message signifies that this consensus engine is giving to the recipient
|
||||
|
@ -102,6 +116,14 @@ func (s *Sender) Put(validatorID ids.ShortID, requestID uint32, containerID ids.
|
|||
s.sender.Put(validatorID, s.ctx.ChainID, requestID, containerID, container)
|
||||
}
|
||||
|
||||
// MultiPut sends a MultiPut message to the consensus engine running on the specified chain
|
||||
// on the specified validator.
|
||||
// The MultiPut message gives the recipient the contents of several containers.
|
||||
func (s *Sender) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) {
|
||||
s.ctx.Log.Verbo("Sending MultiPut to validator %s. RequestID: %d. NumContainers: %d", validatorID, requestID, len(containers))
|
||||
s.sender.MultiPut(validatorID, s.ctx.ChainID, requestID, containers)
|
||||
}
|
||||
|
||||
// PushQuery sends a PushQuery message to the consensus engines running on the specified chains
|
||||
// on the specified validators.
|
||||
// The PushQuery message signifies that this consensus engine would like each validator to send
|
||||
|
|
|
@ -15,6 +15,7 @@ import (
|
|||
"github.com/ava-labs/gecko/snow/networking/router"
|
||||
"github.com/ava-labs/gecko/snow/networking/timeout"
|
||||
"github.com/ava-labs/gecko/utils/logging"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func TestSenderContext(t *testing.T) {
|
||||
|
@ -58,7 +59,13 @@ func TestTimeout(t *testing.T) {
|
|||
}
|
||||
|
||||
handler := router.Handler{}
|
||||
handler.Initialize(&engine, nil, 1)
|
||||
handler.Initialize(
|
||||
&engine,
|
||||
nil,
|
||||
1,
|
||||
"",
|
||||
prometheus.NewRegistry(),
|
||||
)
|
||||
go handler.Dispatch()
|
||||
|
||||
chainRouter.AddChain(&handler)
|
||||
|
|
|
@ -16,7 +16,7 @@ type ExternalSenderTest struct {
	CantGetAcceptedFrontier, CantAcceptedFrontier,
	CantGetAccepted, CantAccepted,
	CantGet, CantPut,
	CantGet, CantGetAncestors, CantPut, CantMultiPut,
	CantPullQuery, CantPushQuery, CantChits,
	CantGossip bool

@ -24,8 +24,9 @@ type ExternalSenderTest struct {
	AcceptedFrontierF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	GetAcceptedF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	AcceptedF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	GetF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	GetF, GetAncestorsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	PutF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	MultiPutF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
	PushQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	PullQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)
	ChitsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)

@ -39,7 +40,9 @@ func (s *ExternalSenderTest) Default(cant bool) {
	s.CantGetAccepted = cant
	s.CantAccepted = cant
	s.CantGet = cant
	s.CantGetAncestors = cant
	s.CantPut = cant
	s.CantMultiPut = cant
	s.CantPullQuery = cant
	s.CantPushQuery = cant
	s.CantChits = cant

@ -111,6 +114,19 @@ func (s *ExternalSenderTest) Get(vdr ids.ShortID, chainID ids.ID, requestID uint
	}
}

// GetAncestors calls GetAncestorsF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *ExternalSenderTest) GetAncestors(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxID ids.ID) {
	if s.GetAncestorsF != nil {
		s.GetAncestorsF(vdr, chainID, requestID, vtxID)
	} else if s.CantGetAncestors && s.T != nil {
		s.T.Fatalf("Unexpectedly called GetAncestors")
	} else if s.CantGetAncestors && s.B != nil {
		s.B.Fatalf("Unexpectedly called GetAncestors")
	}
}

// Put calls PutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.

@ -124,6 +140,19 @@ func (s *ExternalSenderTest) Put(vdr ids.ShortID, chainID ids.ID, requestID uint
	}
}

// MultiPut calls MultiPutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *ExternalSenderTest) MultiPut(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxs [][]byte) {
	if s.MultiPutF != nil {
		s.MultiPutF(vdr, chainID, requestID, vtxs)
	} else if s.CantMultiPut && s.T != nil {
		s.T.Fatalf("Unexpectedly called MultiPut")
	} else if s.CantMultiPut && s.B != nil {
		s.B.Fatalf("Unexpectedly called MultiPut")
	}
}

// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
// and this function shouldn't be called and testing was initialized, then
// testing will fail.

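In tests, the usual pattern is to construct this mock with Default(true) so any unregistered message fails the test, then stub only the hooks the test expects. A hedged sketch (field and method names from the diff above; the test name and assertion body are illustrative):

	func TestGetAncestorsRequested(t *testing.T) {
		sender := ExternalSenderTest{T: t}
		sender.Default(true) // any message not explicitly stubbed fails the test
		sender.GetAncestorsF = func(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxID ids.ID) {
			// assert on vdr, chainID, requestID, and vtxID here
		}
		// ... drive the code under test so it emits a GetAncestors ...
	}
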
@ -5,14 +5,12 @@ package logging

import (
	"path"

	"github.com/ava-labs/gecko/ids"
)

// Factory ...
type Factory interface {
	Make() (Logger, error)
	MakeChain(chainID ids.ID, subdir string) (Logger, error)
	MakeChain(chainID string, subdir string) (Logger, error)
	MakeSubdir(subdir string) (Logger, error)
	Close()
}

@ -41,10 +39,10 @@ func (f *factory) Make() (Logger, error) {
}

// MakeChain ...
func (f *factory) MakeChain(chainID ids.ID, subdir string) (Logger, error) {
func (f *factory) MakeChain(chainID string, subdir string) (Logger, error) {
	config := f.config
	config.MsgPrefix = "chain " + chainID.String()
	config.Directory = path.Join(config.Directory, "chain", chainID.String(), subdir)
	config.MsgPrefix = chainID + " Chain"
	config.Directory = path.Join(config.Directory, "chain", chainID, subdir)

	log, err := New(config)
	if err == nil {

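After this change, callers hand MakeChain the chain's human-readable alias rather than an ids.ID. A one-line sketch, where logFactory is an assumed Factory instance and the alias "X" and empty subdir are illustrative:

	log, err := logFactory.MakeChain("X", "") // logs land in .../chain/X/ with prefix "X Chain"
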
@ -3,10 +3,6 @@

package logging

import (
	"github.com/ava-labs/gecko/ids"
)

// NoFactory ...
type NoFactory struct{}

@ -14,7 +10,7 @@ type NoFactory struct{}
func (NoFactory) Make() (Logger, error) { return NoLog{}, nil }

// MakeChain ...
func (NoFactory) MakeChain(ids.ID, string) (Logger, error) { return NoLog{}, nil }
func (NoFactory) MakeChain(string, string) (Logger, error) { return NoLog{}, nil }

// MakeSubdir ...
func (NoFactory) MakeSubdir(string) (Logger, error) { return NoLog{}, nil }

@ -3,9 +3,13 @@

package timer

import (
	"time"
)

// Useful latency buckets
var (
	Buckets = []float64{
	MillisecondsBuckets = []float64{
		10,  // 10 ms is ~ instant
		100, // 100 ms
		250, // 250 ms

@ -18,4 +22,15 @@ var (
		10000, // 10 seconds
		// anything larger than 10 seconds will be bucketed together
	}
	NanosecondsBuckets = []float64{
		float64(100 * time.Nanosecond),
		float64(time.Microsecond),
		float64(10 * time.Microsecond),
		float64(100 * time.Microsecond),
		float64(time.Millisecond),
		float64(10 * time.Millisecond),
		float64(100 * time.Millisecond),
		float64(time.Second),
		// anything larger than a second will be bucketed together
	}
)

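A short sketch of how these shared buckets plug into a Prometheus histogram; the metric name and namespace below are hypothetical, not from this commit:

	package metrics

	import (
		"time"

		"github.com/ava-labs/gecko/utils/timer"
		"github.com/prometheus/client_golang/prometheus"
	)

	// msgLatency buckets observations with the shared millisecond buckets,
	// so observations must be converted to milliseconds first.
	var msgLatency = prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "gecko",          // hypothetical
		Name:      "msg_latency_ms", // hypothetical
		Help:      "Message handling latency in milliseconds",
		Buckets:   timer.MillisecondsBuckets,
	})

	func init() { prometheus.MustRegister(msgLatency) }

	// Observe records the elapsed time since start, in milliseconds.
	func Observe(start time.Time) {
		msgLatency.Observe(float64(time.Since(start)) / float64(time.Millisecond))
	}
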
@ -256,6 +256,24 @@ func (p *Packer) UnpackFixedByteSlices(size int) [][]byte {
	return bytes
}

// Pack2DByteSlice appends a 2D byte slice to the byte array
func (p *Packer) Pack2DByteSlice(byteSlices [][]byte) {
	p.PackInt(uint32(len(byteSlices)))
	for _, bytes := range byteSlices {
		p.PackBytes(bytes)
	}
}

// Unpack2DByteSlice returns a 2D byte slice from the byte array.
func (p *Packer) Unpack2DByteSlice() [][]byte {
	sliceSize := p.UnpackInt()
	bytes := [][]byte(nil)
	for i := uint32(0); i < sliceSize && !p.Errored(); i++ {
		bytes = append(bytes, p.UnpackBytes())
	}
	return bytes
}

// PackStr appends a string to the byte array
func (p *Packer) PackStr(str string) {
	strSize := len(str)

@ -432,6 +450,20 @@ func TryUnpackBytes(packer *Packer) interface{} {
	return packer.UnpackBytes()
}

// TryPack2DBytes attempts to pack the value as a 2D byte slice
func TryPack2DBytes(packer *Packer, valIntf interface{}) {
	if val, ok := valIntf.([][]byte); ok {
		packer.Pack2DByteSlice(val)
	} else {
		packer.Add(errBadType)
	}
}

// TryUnpack2DBytes attempts to unpack the value as a 2D byte slice
func TryUnpack2DBytes(packer *Packer) interface{} {
	return packer.Unpack2DByteSlice()
}

// TryPackStr attempts to pack the value as a string
func TryPackStr(packer *Packer, valIntf interface{}) {
	if val, ok := valIntf.(string); ok {

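For reference, the encoding Pack2DByteSlice produces is an outer count followed by length-prefixed inner slices. Assuming PackInt writes a 4-byte big-endian value and PackBytes writes PackInt(len) followed by the raw bytes (the convention the surrounding Packer methods suggest), a round-trip looks like:

	p := Packer{MaxSize: 32}
	p.Pack2DByteSlice([][]byte{{0xAA}, {0xBB, 0xCC}})
	// Assuming 4-byte big-endian length prefixes, p.Bytes is now:
	//   00 00 00 02            outer count: 2 slices
	//   00 00 00 01 AA         slice 0: length 1, then its byte
	//   00 00 00 02 BB CC      slice 1: length 2, then its bytes
	q := Packer{MaxSize: 32, Bytes: p.Bytes}
	out := q.Unpack2DByteSlice() // [][]byte{{0xAA}, {0xBB, 0xCC}}
	_ = out
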
@ -506,3 +506,63 @@ func TestPackerUnpackBool(t *testing.T) {
		t.Fatalf("Packer.UnpackBool returned %t, expected sentinel value %t", actual, BoolSentinal)
	}
}

func TestPacker2DByteSlice(t *testing.T) {
	// Case: empty array
	p := Packer{MaxSize: 1024}
	arr := [][]byte{}
	p.Pack2DByteSlice(arr)
	if p.Errored() {
		t.Fatal(p.Err)
	}
	arrUnpacked := p.Unpack2DByteSlice()
	if len(arrUnpacked) != 0 {
		t.Fatal("should be empty")
	}

	// Case: Array has one element
	p = Packer{MaxSize: 1024}
	arr = [][]byte{
		[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
	}
	p.Pack2DByteSlice(arr)
	if p.Errored() {
		t.Fatal(p.Err)
	}
	p = Packer{MaxSize: 1024, Bytes: p.Bytes}
	arrUnpacked = p.Unpack2DByteSlice()
	if p.Errored() {
		t.Fatal(p.Err)
	}
	if l := len(arrUnpacked); l != 1 {
		t.Fatalf("should be length 1 but is length %d", l)
	}
	if !bytes.Equal(arrUnpacked[0], arr[0]) {
		t.Fatal("should match")
	}

	// Case: Array has multiple elements
	p = Packer{MaxSize: 1024}
	arr = [][]byte{
		[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		[]byte{11, 12, 3, 4, 5, 6, 7, 8, 9, 10},
	}
	p.Pack2DByteSlice(arr)
	if p.Errored() {
		t.Fatal(p.Err)
	}
	p = Packer{MaxSize: 1024, Bytes: p.Bytes}
	arrUnpacked = p.Unpack2DByteSlice()
	if p.Errored() {
		t.Fatal(p.Err)
	}
	if l := len(arrUnpacked); l != 2 {
		t.Fatalf("should be length 2 but is length %d", l)
	}
	if !bytes.Equal(arrUnpacked[0], arr[0]) {
		t.Fatal("should match")
	}
	if !bytes.Equal(arrUnpacked[1], arr[1]) {
		t.Fatal("should match")
	}
}

@ -840,6 +840,16 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	cr := codecRegistry{
		index: 1,
		typeToFxIndex: vm.typeToFxIndex,

@ -1386,6 +1396,16 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	cr := codecRegistry{
		index: 1,
		typeToFxIndex: vm.typeToFxIndex,

@ -1538,6 +1558,16 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	cr := codecRegistry{
		index: 1,
		typeToFxIndex: vm.typeToFxIndex,

@ -151,6 +151,16 @@ func TestIssueExportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	tx := &Tx{UnsignedTx: &ExportTx{

@ -297,6 +307,16 @@ func TestClearForceAcceptedExportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	tx := &Tx{UnsignedTx: &ExportTx{

@ -5,6 +5,7 @@ package avm

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
)

// ID that this VM uses when labeled

@ -19,7 +20,7 @@ type Factory struct {
}

// New ...
func (f *Factory) New() (interface{}, error) {
func (f *Factory) New(*snow.Context) (interface{}, error) {
	return &VM{
		ava: f.AVA,
		platform: f.Platform,

@ -19,6 +19,12 @@ type Fx interface {
	// return an error if the VM is incompatible.
	Initialize(vm interface{}) error

	// Notify this Fx that the VM is in bootstrapping
	Bootstrapping() error

	// Notify this Fx that the VM is bootstrapped
	Bootstrapped() error

	// VerifyTransfer verifies that the specified transaction can spend the
	// provided utxo with no restrictions on the destination. If the transaction
	// can't spend the output based on the input and credential, a non-nil error

@ -4,10 +4,12 @@

package avm

type testFx struct {
	initialize, verifyTransfer, verifyOperation error
	initialize, bootstrapping, bootstrapped, verifyTransfer, verifyOperation error
}

func (fx *testFx) Initialize(_ interface{}) error { return fx.initialize }
func (fx *testFx) Bootstrapping() error { return fx.bootstrapping }
func (fx *testFx) Bootstrapped() error { return fx.bootstrapped }
func (fx *testFx) VerifyTransfer(_, _, _, _ interface{}) error { return fx.verifyTransfer }
func (fx *testFx) VerifyOperation(_, _, _ interface{}, _ []interface{}) error {
	return fx.verifyOperation

@ -140,6 +140,16 @@ func TestIssueImportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	utxoID := ava.UTXOID{

@ -288,6 +298,16 @@ func TestForceAcceptImportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)

@ -666,13 +666,20 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I
	}

	addresses, _ := user.Addresses(db)
	addresses = append(addresses, sk.PublicKey().Address())

	newAddress := sk.PublicKey().Address()
	reply.Address = service.vm.Format(newAddress.Bytes())
	for _, address := range addresses {
		if newAddress.Equals(address) {
			return nil
		}
	}

	addresses = append(addresses, newAddress)
	if err := user.SetAddresses(db, addresses); err != nil {
		return fmt.Errorf("problem saving addresses: %w", err)
	}

	reply.Address = service.vm.Format(sk.PublicKey().Address().Bytes())
	return nil
}

@ -9,8 +9,10 @@ import (

	"github.com/stretchr/testify/assert"

	"github.com/ava-labs/gecko/api/keystore"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/utils/crypto"
	"github.com/ava-labs/gecko/utils/formatting"
)

@ -340,3 +342,113 @@ func TestCreateVariableCapAsset(t *testing.T) {
		t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID)
	}
}

func TestImportAvmKey(t *testing.T) {
	_, vm, s := setup(t)
	defer func() {
		vm.Shutdown()
		ctx.Lock.Unlock()
	}()

	userKeystore := keystore.CreateTestKeystore(t)

	username := "bobby"
	password := "StrnasfqewiurPasswdn56d"
	if err := userKeystore.AddUser(username, password); err != nil {
		t.Fatal(err)
	}

	vm.ctx.Keystore = userKeystore.NewBlockchainKeyStore(vm.ctx.ChainID)
	_, err := vm.ctx.Keystore.GetDatabase(username, password)
	if err != nil {
		t.Fatal(err)
	}

	factory := crypto.FactorySECP256K1R{}
	skIntf, err := factory.NewPrivateKey()
	if err != nil {
		t.Fatalf("problem generating private key: %s", err)
	}
	sk := skIntf.(*crypto.PrivateKeySECP256K1R)

	args := ImportKeyArgs{
		Username: username,
		Password: password,
		PrivateKey: formatting.CB58{Bytes: sk.Bytes()},
	}
	reply := ImportKeyReply{}
	if err = s.ImportKey(nil, &args, &reply); err != nil {
		t.Fatal(err)
	}
}

func TestImportAvmKeyNoDuplicates(t *testing.T) {
	_, vm, s := setup(t)
	defer func() {
		vm.Shutdown()
		ctx.Lock.Unlock()
	}()

	userKeystore := keystore.CreateTestKeystore(t)

	username := "bobby"
	password := "StrnasfqewiurPasswdn56d"
	if err := userKeystore.AddUser(username, password); err != nil {
		t.Fatal(err)
	}

	vm.ctx.Keystore = userKeystore.NewBlockchainKeyStore(vm.ctx.ChainID)
	_, err := vm.ctx.Keystore.GetDatabase(username, password)
	if err != nil {
		t.Fatal(err)
	}

	factory := crypto.FactorySECP256K1R{}
	skIntf, err := factory.NewPrivateKey()
	if err != nil {
		t.Fatalf("problem generating private key: %s", err)
	}
	sk := skIntf.(*crypto.PrivateKeySECP256K1R)

	args := ImportKeyArgs{
		Username: username,
		Password: password,
		PrivateKey: formatting.CB58{Bytes: sk.Bytes()},
	}
	reply := ImportKeyReply{}
	if err = s.ImportKey(nil, &args, &reply); err != nil {
		t.Fatal(err)
	}

	expectedAddress := vm.Format(sk.PublicKey().Address().Bytes())

	if reply.Address != expectedAddress {
		t.Fatalf("Reply address: %s did not match expected address: %s", reply.Address, expectedAddress)
	}

	reply2 := ImportKeyReply{}
	if err = s.ImportKey(nil, &args, &reply2); err != nil {
		t.Fatal(err)
	}

	if reply2.Address != expectedAddress {
		t.Fatalf("Reply address: %s did not match expected address: %s", reply2.Address, expectedAddress)
	}

	addrsArgs := ListAddressesArgs{
		Username: username,
		Password: password,
	}
	addrsReply := ListAddressesResponse{}
	if err := s.ListAddresses(nil, &addrsArgs, &addrsReply); err != nil {
		t.Fatal(err)
	}

	if len(addrsReply.Addresses) != 1 {
		t.Fatal("Importing the same key twice created duplicate addresses")
	}

	if addrsReply.Addresses[0] != expectedAddress {
		t.Fatal("List addresses returned an incorrect address")
	}
}

@ -61,8 +61,8 @@ func (tx *UniqueTx) refresh() {
		// intermediate object whose state I must reflect
		if status, err := tx.vm.state.Status(tx.ID()); err == nil {
			tx.status = status
			tx.unique = true
		}
		tx.unique = true
	} else {
		// If someone is in the cache, they must be up to date

@ -35,7 +35,7 @@ const (
	batchSize = 30
	stateCacheSize = 10000
	idCacheSize = 10000
	txCacheSize = 10000
	txCacheSize = 100000
	addressSep = "-"
)

@ -45,6 +45,7 @@ var (
	errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state")
	errInvalidAddress = errors.New("invalid address")
	errWrongBlockchainID = errors.New("wrong blockchain ID")
	errBootstrapping = errors.New("chain is currently bootstrapping")
)

// VM implements the avalanche.DAGVM interface

@ -67,6 +68,9 @@ type VM struct {
	// State management
	state *prefixedState

	// Set to true once this VM is marked as `Bootstrapped` by the engine
	bootstrapped bool

	// Transaction issuing
	timer *timer.Timer
	batchTimeout time.Duration

@ -197,6 +201,29 @@ func (vm *VM) Initialize(
	return vm.db.Commit()
}

// Bootstrapping is called by the consensus engine when it starts bootstrapping
// this chain
func (vm *VM) Bootstrapping() error {
	for _, fx := range vm.fxs {
		if err := fx.Fx.Bootstrapping(); err != nil {
			return err
		}
	}
	return nil
}

// Bootstrapped is called by the consensus engine when it is done bootstrapping
// this chain
func (vm *VM) Bootstrapped() error {
	for _, fx := range vm.fxs {
		if err := fx.Fx.Bootstrapped(); err != nil {
			return err
		}
	}
	vm.bootstrapped = true
	return nil
}

// Shutdown implements the avalanche.DAGVM interface
func (vm *VM) Shutdown() error {
	if vm.timer == nil {

@ -272,6 +299,9 @@ func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) {
// either accepted or rejected with the appropriate status. This function will
// go out of scope when the transaction is removed from memory.
func (vm *VM) IssueTx(b []byte, onDecide func(choices.Status)) (ids.ID, error) {
	if !vm.bootstrapped {
		return ids.ID{}, errBootstrapping
	}
	tx, err := vm.parseTx(b)
	if err != nil {
		return ids.ID{}, err

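The ordering contract here is: the engine calls Bootstrapping before replaying the chain's history and Bootstrapped once replay finishes; only then does IssueTx stop returning errBootstrapping. A minimal sketch of that sequence (the driver function and interface are illustrative, not engine code from this commit):

	// bootstrappable captures just the two notifications this diff adds.
	type bootstrappable interface {
		Bootstrapping() error
		Bootstrapped() error
	}

	// bringUp drives the notifications in engine order.
	func bringUp(vm bootstrappable) error {
		// Entering bootstrap: the VM forwards the notification to each fx,
		// and user-issued txs are rejected with errBootstrapping.
		if err := vm.Bootstrapping(); err != nil {
			return err
		}
		// ... the engine fetches and executes the chain's history here ...

		// Leaving bootstrap: fxs are notified and vm.bootstrapped flips to
		// true, so IssueTx accepts transactions again.
		return vm.Bootstrapped()
	}
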
@ -178,6 +178,14 @@ func GenesisVM(t *testing.T) ([]byte, chan common.Message, *VM) {
	}
	vm.batchTimeout = 0

	if err := vm.Bootstrapping(); err != nil {
		t.Fatal(err)
	}

	if err := vm.Bootstrapped(); err != nil {
		t.Fatal(err)
	}

	return genesisBytes, issuer, vm
}

@ -678,6 +686,16 @@ func TestIssueNFT(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{
		BaseTx: BaseTx{
			NetID: networkID,

@ -841,6 +859,16 @@ func TestIssueProperty(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{
		BaseTx: BaseTx{
			NetID: networkID,

@ -81,6 +81,12 @@ func (svm *SnowmanVM) GetBlock(ID ids.ID) (snowman.Block, error) {
	return nil, errBadData // Should never happen
}

// Bootstrapping marks this VM as bootstrapping
func (svm *SnowmanVM) Bootstrapping() error { return nil }

// Bootstrapped marks this VM as bootstrapped
func (svm *SnowmanVM) Bootstrapped() error { return nil }

// Shutdown this vm
func (svm *SnowmanVM) Shutdown() error {
	if svm.DB == nil {

@ -121,7 +127,7 @@ func (svm *SnowmanVM) NotifyBlockReady() {
	select {
	case svm.ToEngine <- common.PendingTxs:
	default:
		svm.Ctx.Log.Warn("dropping message to consensus engine")
		svm.Ctx.Log.Debug("dropping message to consensus engine")
	}
}

@ -8,15 +8,15 @@ import (
	"sync"

	"github.com/ava-labs/gecko/api"
	"github.com/ava-labs/gecko/snow/engine/common"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/utils/logging"
)

// A VMFactory creates new instances of a VM
type VMFactory interface {
	New() (interface{}, error)
	New(*snow.Context) (interface{}, error)
}

// Manager is a VM manager.

@ -110,7 +110,7 @@ func (m *manager) addStaticAPIEndpoints(vmID ids.ID) {
	vmFactory, err := m.GetVMFactory(vmID)
	m.log.AssertNoError(err)
	m.log.Debug("adding static API for VM with ID %s", vmID)
	vm, err := vmFactory.New()
	vm, err := vmFactory.New(nil)
	if err != nil {
		return
	}

@ -2,6 +2,7 @@ package nftfx

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
)

// ID that this Fx uses when labeled

@ -13,4 +14,4 @@ var (
type Factory struct{}

// New ...
func (f *Factory) New() (interface{}, error) { return &Fx{}, nil }
func (f *Factory) New(*snow.Context) (interface{}, error) { return &Fx{}, nil }

@ -6,7 +6,7 @@ import (

func TestFactory(t *testing.T) {
	factory := Factory{}
	if fx, err := factory.New(); err != nil {
	if fx, err := factory.New(nil); err != nil {
		t.Fatal(err)
	} else if fx == nil {
		t.Fatalf("Factory.New returned nil")

@ -23,6 +23,13 @@ func (a *Abort) Verify() error {
	parent, ok := a.parentBlock().(*ProposalBlock)
	// Abort is a decision, so its parent must be a proposal
	if !ok {
		if err := a.Reject(); err == nil {
			if err := a.vm.DB.Commit(); err != nil {
				a.vm.Ctx.Log.Error("error committing Abort block as rejected: %s", err)
			}
		} else {
			a.vm.DB.Abort()
		}
		return errInvalidBlockType
	}

@ -56,41 +56,41 @@ func (tx *addDefaultSubnetDelegatorTx) ID() ids.ID { return tx.id }

// SyntacticVerify return nil iff [tx] is valid
// If [tx] is valid, sets [tx.accountID]
func (tx *addDefaultSubnetDelegatorTx) SyntacticVerify() error {
func (tx *addDefaultSubnetDelegatorTx) SyntacticVerify() TxError {
	switch {
	case tx == nil:
		return errNilTx
		return tempError{errNilTx}
	case !tx.senderID.IsZero():
		return nil // Only verify the transaction once
	case tx.id.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.NetworkID != tx.vm.Ctx.NetworkID:
		return errWrongNetworkID
		return permError{errWrongNetworkID}
	case tx.NodeID.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.Wght < MinimumStakeAmount: // Ensure validator is staking at least the minimum amount
		return errWeightTooSmall
		return permError{errWeightTooSmall}
	}

	// Ensure staking length is not too short or long
	stakingDuration := tx.Duration()
	if stakingDuration < MinimumStakingDuration {
		return errStakeTooShort
		return permError{errStakeTooShort}
	} else if stakingDuration > MaximumStakingDuration {
		return errStakeTooLong
		return permError{errStakeTooLong}
	}

	unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx)
	// Byte representation of the unsigned transaction
	unsignedBytes, err := Codec.Marshal(&unsignedIntf)
	if err != nil {
		return err
		return permError{err}
	}

	// get account to pay tx fee from
	key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:])
	if err != nil {
		return err
		return permError{err}
	}
	tx.senderID = key.Address()

@ -98,7 +98,7 @@ func (tx *addDefaultSubnetDelegatorTx) SyntacticVerify() error {
}

// SemanticVerify this transaction is valid.
func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) {
func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), TxError) {
	if err := tx.SyntacticVerify(); err != nil {
		return nil, nil, nil, nil, err
	}

@ -106,60 +106,62 @@ func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*ve
	// Ensure the proposed validator starts after the current timestamp
	currentTimestamp, err := tx.vm.getTimestamp(db)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	validatorStartTime := tx.StartTime()
	if !currentTimestamp.Before(validatorStartTime) {
		return nil, nil, nil, nil, fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
		return nil, nil, nil, nil, permError{fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
			currentTimestamp,
			validatorStartTime)
			validatorStartTime)}
	}

	// Get the account that is paying the transaction fee and, if the proposal is to add a validator
	// to the default subnet, providing the staked $AVA.
	// The ID of this account is the address associated with the public key that signed this tx
	// Get the account that is paying the transaction fee and, if the proposal
	// is to add a validator to the default subnet, providing the staked $AVA.
	// The ID of this account is the address associated with the public key that
	// signed this tx.
	accountID := tx.senderID
	account, err := tx.vm.getAccount(db, accountID)
	if err != nil {
		return nil, nil, nil, nil, errDBAccount
		return nil, nil, nil, nil, permError{errDBAccount}
	}

	// The account if this block's proposal is committed and the validator is added
	// to the pending validator set. (Increase the account's nonce; decrease its balance.)
	// The account if this block's proposal is committed and the validator is
	// added to the pending validator set. (Increase the account's nonce;
	// decrease its balance.)
	newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}

	// Ensure that the period this validator validates the specified subnet is a subset of the time they validate the default subnet
	// First, see if they're currently validating the default subnet
	currentEvents, err := tx.vm.getCurrentValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't get current validators of default subnet: %v", err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of default subnet: %v", err)}
	}
	if dsValidator, err := currentEvents.getDefaultSubnetStaker(tx.NodeID); err == nil {
		if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) {
			return nil, nil, nil, nil, errDSValidatorSubset
			return nil, nil, nil, nil, permError{errDSValidatorSubset}
		}
	} else {
		// They aren't currently validating the default subnet.
		// See if they will validate the default subnet in the future.
		pendingDSValidators, err := tx.vm.getPendingValidators(db, DefaultSubnetID)
		if err != nil {
			return nil, nil, nil, nil, fmt.Errorf("couldn't get pending validators of default subnet: %v", err)
			return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get pending validators of default subnet: %v", err)}
		}
		dsValidator, err := pendingDSValidators.getDefaultSubnetStaker(tx.NodeID)
		if err != nil {
			return nil, nil, nil, nil, errDSValidatorSubset
			return nil, nil, nil, nil, permError{errDSValidatorSubset}
		}
		if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) {
			return nil, nil, nil, nil, errDSValidatorSubset
			return nil, nil, nil, nil, permError{errDSValidatorSubset}
		}
	}

	pendingEvents, err := tx.vm.getPendingValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}

	pendingEvents.Add(tx) // add validator to set of pending validators

@ -168,10 +170,10 @@ func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*ve
	// update the validator's account by removing the staked $AVA
	onCommitDB := versiondb.New(db)
	if err := tx.vm.putPendingValidators(onCommitDB, pendingEvents, DefaultSubnetID); err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	if err := tx.vm.putAccount(onCommitDB, newAccount); err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}

	// If this proposal is aborted, chain state doesn't change

@ -63,44 +63,44 @@ func (tx *addDefaultSubnetValidatorTx) ID() ids.ID { return tx.id }

// SyntacticVerify that this transaction is well formed
// If [tx] is valid, this method also populates [tx.accountID]
func (tx *addDefaultSubnetValidatorTx) SyntacticVerify() error {
func (tx *addDefaultSubnetValidatorTx) SyntacticVerify() TxError {
	switch {
	case tx == nil:
		return errNilTx
		return tempError{errNilTx}
	case !tx.senderID.IsZero():
		return nil // Only verify the transaction once
	case tx.id.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.NetworkID != tx.vm.Ctx.NetworkID:
		return errWrongNetworkID
		return permError{errWrongNetworkID}
	case tx.NodeID.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.Destination.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.Wght < MinimumStakeAmount: // Ensure validator is staking at least the minimum amount
		return errWeightTooSmall
		return permError{errWeightTooSmall}
	case tx.Shares > NumberOfShares: // Ensure delegators shares are in the allowed amount
		return errTooManyShares
		return permError{errTooManyShares}
	}

	// Ensure staking length is not too short or long
	stakingDuration := tx.Duration()
	if stakingDuration < MinimumStakingDuration {
		return errStakeTooShort
		return permError{errStakeTooShort}
	} else if stakingDuration > MaximumStakingDuration {
		return errStakeTooLong
		return permError{errStakeTooLong}
	}

	// Byte representation of the unsigned transaction
	unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf)
	if err != nil {
		return err
		return permError{err}
	}

	key, err := tx.vm.factory.RecoverPublicKey(unsignedBytes, tx.Sig[:]) // the public key that signed [tx]
	if err != nil {
		return err
		return permError{err}
	}
	tx.senderID = key.Address()

@ -108,7 +108,7 @@ func (tx *addDefaultSubnetValidatorTx) SyntacticVerify() error {
}

// SemanticVerify this transaction is valid.
func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) {
func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), TxError) {
	if err := tx.SyntacticVerify(); err != nil {
		return nil, nil, nil, nil, err
	}

@ -116,13 +116,13 @@ func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*ve
	// Ensure the proposed validator starts after the current time
	currentTime, err := tx.vm.getTimestamp(db)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	startTime := tx.StartTime()
	if !currentTime.Before(startTime) {
		return nil, nil, nil, nil, fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
		return nil, nil, nil, nil, permError{fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
			currentTime,
			startTime)
			startTime)}
	}

	// Get the account that is paying the transaction fee and, if the proposal is to add a validator

@ -131,7 +131,7 @@ func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*ve
	accountID := tx.senderID
	account, err := tx.vm.getAccount(db, accountID)
	if err != nil {
		return nil, nil, nil, nil, errDBAccount
		return nil, nil, nil, nil, permError{errDBAccount}
	}

	// If the transaction adds a validator to the default subnet, also deduct

@ -142,31 +142,31 @@ func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*ve
	// to the pending validator set. (Increase the account's nonce; decrease its balance.)
	newAccount, err := account.Remove(amount, tx.Nonce)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}

	// Ensure the proposed validator is not already a validator of the specified subnet
	currentEvents, err := tx.vm.getCurrentValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	currentValidators := validators.NewSet()
	currentValidators.Set(tx.vm.getValidators(currentEvents))
	if currentValidators.Contains(tx.NodeID) {
		return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the current default validator set",
			tx.NodeID)
		return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the current default validator set",
			tx.NodeID)}
	}

	// Ensure the proposed validator is not already slated to validate for the specified subnet
	pendingEvents, err := tx.vm.getPendingValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	pendingValidators := validators.NewSet()
	pendingValidators.Set(tx.vm.getValidators(pendingEvents))
	if pendingValidators.Contains(tx.NodeID) {
		return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the pending default validator set",
			tx.NodeID)
		return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the pending default validator set",
			tx.NodeID)}
	}

	pendingEvents.Add(tx) // add validator to set of pending validators

@ -175,10 +175,10 @@ func (tx *addDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*ve
	// update the validator's account by removing the staked $AVA
	onCommitDB := versiondb.New(db)
	if err := tx.vm.putPendingValidators(onCommitDB, pendingEvents, DefaultSubnetID); err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	if err := tx.vm.putAccount(onCommitDB, newAccount); err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}

	// If this proposal is aborted, chain state doesn't change

@ -77,39 +77,39 @@ func (tx *addNonDefaultSubnetValidatorTx) ID() ids.ID { return tx.id }

// SyntacticVerify return nil iff [tx] is valid
// If [tx] is valid, sets [tx.accountID]
func (tx *addNonDefaultSubnetValidatorTx) SyntacticVerify() error {
func (tx *addNonDefaultSubnetValidatorTx) SyntacticVerify() TxError {
	switch {
	case tx == nil:
		return errNilTx
		return tempError{errNilTx}
	case !tx.senderID.IsZero():
		return nil // Only verify the transaction once
	case tx.id.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.NetworkID != tx.vm.Ctx.NetworkID:
		return errWrongNetworkID
		return permError{errWrongNetworkID}
	case tx.NodeID.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.Subnet.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	case tx.Wght == 0: // Ensure the validator has some weight
		return errWeightTooSmall
		return permError{errWeightTooSmall}
	case !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs):
		return errSigsNotSorted
		return permError{errSigsNotSorted}
	}

	// Ensure staking length is not too short or long
	stakingDuration := tx.Duration()
	if stakingDuration < MinimumStakingDuration {
		return errStakeTooShort
		return permError{errStakeTooShort}
	} else if stakingDuration > MaximumStakingDuration {
		return errStakeTooLong
		return permError{errStakeTooLong}
	}

	// Byte representation of the unsigned transaction
	unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx)
	unsignedBytes, err := Codec.Marshal(&unsignedIntf)
	if err != nil {
		return err
		return permError{err}
	}
	unsignedBytesHash := hashing.ComputeHash256(unsignedBytes)

@ -118,7 +118,7 @@ func (tx *addNonDefaultSubnetValidatorTx) SyntacticVerify() error {
	for i, sig := range tx.ControlSigs {
		key, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, sig[:])
		if err != nil {
			return err
			return permError{err}
		}
		tx.controlIDs[i] = key.Address()
	}

@ -126,7 +126,7 @@ func (tx *addNonDefaultSubnetValidatorTx) SyntacticVerify() error {
	// get account to pay tx fee from
	key, err := tx.vm.factory.RecoverHashPublicKey(unsignedBytesHash, tx.PayerSig[:])
	if err != nil {
		return err
		return permError{err}
	}
	tx.senderID = key.Address()

@ -149,7 +149,7 @@ func (h *EventHeap) getDefaultSubnetStaker(id ids.ShortID) (*addDefaultSubnetVal
}

// SemanticVerify this transaction is valid.
func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) {
func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), TxError) {
	// Ensure tx is syntactically valid
	if err := tx.SyntacticVerify(); err != nil {
		return nil, nil, nil, nil, err

@ -158,7 +158,7 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (
	// Get info about the subnet we're adding a validator to
	subnets, err := tx.vm.getSubnets(db)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	var subnet *CreateSubnetTx
	for _, sn := range subnets {

@ -168,22 +168,22 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (
		}
	}
	if subnet == nil {
		return nil, nil, nil, nil, fmt.Errorf("there is no subnet with ID %s", tx.SubnetID())
		return nil, nil, nil, nil, permError{fmt.Errorf("there is no subnet with ID %s", tx.SubnetID())}
	}

	// Ensure the sigs on [tx] are valid
	if len(tx.ControlSigs) != int(subnet.Threshold) {
		return nil, nil, nil, nil, fmt.Errorf("expected tx to have %d control sigs but has %d", subnet.Threshold, len(tx.ControlSigs))
		return nil, nil, nil, nil, permError{fmt.Errorf("expected tx to have %d control sigs but has %d", subnet.Threshold, len(tx.ControlSigs))}
	}
	if !crypto.IsSortedAndUniqueSECP2561RSigs(tx.ControlSigs) {
		return nil, nil, nil, nil, errors.New("control signatures aren't sorted")
		return nil, nil, nil, nil, permError{errors.New("control signatures aren't sorted")}
	}

	controlKeys := ids.ShortSet{}
	controlKeys.Add(subnet.ControlKeys...)
	for _, controlID := range tx.controlIDs {
		if !controlKeys.Contains(controlID) {
			return nil, nil, nil, nil, errors.New("tx has control signature from key not in subnet's ControlKeys")
			return nil, nil, nil, nil, permError{errors.New("tx has control signature from key not in subnet's ControlKeys")}
		}
	}

@ -191,46 +191,46 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (
	// First, see if they're currently validating the default subnet
	currentDSValidators, err := tx.vm.getCurrentValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't get current validators of default subnet: %v", err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of default subnet: %v", err)}
	}

	if dsValidator, err := currentDSValidators.getDefaultSubnetStaker(tx.NodeID); err == nil {
		if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) {
			return nil, nil, nil, nil,
				fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]",
				permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]",
					tx.DurationValidator.StartTime(), tx.DurationValidator.EndTime(),
					dsValidator.StartTime(), dsValidator.EndTime())
					dsValidator.StartTime(), dsValidator.EndTime())}
		}
	} else {
		// They aren't currently validating the default subnet.
		// See if they will validate the default subnet in the future.
		pendingDSValidators, err := tx.vm.getPendingValidators(db, DefaultSubnetID)
		if err != nil {
			return nil, nil, nil, nil, fmt.Errorf("couldn't get pending validators of default subnet: %v", err)
			return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get pending validators of default subnet: %v", err)}
		}
		dsValidator, err := pendingDSValidators.getDefaultSubnetStaker(tx.NodeID)
		if err != nil {
			return nil, nil, nil, nil,
				fmt.Errorf("validator would not be validating default subnet while validating non-default subnet")
				permError{fmt.Errorf("validator would not be validating default subnet while validating non-default subnet")}
		}
		if !tx.DurationValidator.BoundedBy(dsValidator.StartTime(), dsValidator.EndTime()) {
			return nil, nil, nil, nil,
				fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]",
				permError{fmt.Errorf("time validating subnet [%v, %v] not subset of time validating default subnet [%v, %v]",
					tx.DurationValidator.StartTime(), tx.DurationValidator.EndTime(),
					dsValidator.StartTime(), dsValidator.EndTime())
					dsValidator.StartTime(), dsValidator.EndTime())}
		}
	}

	// Ensure the proposed validator starts after the current timestamp
	currentTimestamp, err := tx.vm.getTimestamp(db)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't get current timestamp: %v", err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current timestamp: %v", err)}
	}
	validatorStartTime := tx.StartTime()
	if !currentTimestamp.Before(validatorStartTime) {
		return nil, nil, nil, nil, fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
		return nil, nil, nil, nil, permError{fmt.Errorf("chain timestamp (%s) not before validator's start time (%s)",
			currentTimestamp,
			validatorStartTime)
			validatorStartTime)}
	}

	// Get the account that is paying the transaction fee and, if the proposal is to add a validator

@ -239,42 +239,40 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (
	accountID := tx.senderID
	account, err := tx.vm.getAccount(db, accountID)
	if err != nil {
		return nil, nil, nil, nil, errDBAccount
		return nil, nil, nil, nil, permError{errDBAccount}
	}

	// The account if this block's proposal is committed and the validator is added
	// to the pending validator set. (Increase the account's nonce; decrease its balance.)
	newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}

	// Ensure the proposed validator is not already a validator of the specified subnet
	currentEvents, err := tx.vm.getCurrentValidators(db, tx.Subnet)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't get current validators of subnet %s: %v", tx.Subnet, err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get current validators of subnet %s: %v", tx.Subnet, err)}
	}
	currentValidators := validators.NewSet()
	currentValidators.Set(tx.vm.getValidators(currentEvents))
	if currentValidators.Contains(tx.NodeID) {
		return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the current validator set for subnet with ID %s",
		return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the current validator set for subnet with ID %s",
			tx.NodeID,
			tx.Subnet,
		)
			tx.Subnet)}
	}

	// Ensure the proposed validator is not already slated to validate for the specified subnet
	pendingEvents, err := tx.vm.getPendingValidators(db, tx.Subnet)
	if err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't get pending validators of subnet %s: %v", tx.Subnet, err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't get pending validators of subnet %s: %v", tx.Subnet, err)}
	}
	pendingValidators := validators.NewSet()
	pendingValidators.Set(tx.vm.getValidators(pendingEvents))
	if pendingValidators.Contains(tx.NodeID) {
		return nil, nil, nil, nil, fmt.Errorf("validator with ID %s already in the pending validator set for subnet with ID %s",
		return nil, nil, nil, nil, permError{fmt.Errorf("validator with ID %s already in the pending validator set for subnet with ID %s",
			tx.NodeID,
			tx.Subnet,
		)
			tx.Subnet)}
	}

	pendingEvents.Add(tx) // add validator to set of pending validators

@ -283,10 +281,10 @@ func (tx *addNonDefaultSubnetValidatorTx) SemanticVerify(db database.Database) (
	// update the validator's account by removing the staked $AVA
	onCommitDB := versiondb.New(db)
	if err := tx.vm.putPendingValidators(onCommitDB, pendingEvents, tx.Subnet); err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't put current validators: %v", err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't put current validators: %v", err)}
	}
	if err := tx.vm.putAccount(onCommitDB, newAccount); err != nil {
		return nil, nil, nil, nil, fmt.Errorf("couldn't put account: %v", err)
		return nil, nil, nil, nil, permError{fmt.Errorf("couldn't put account: %v", err)}
	}

	// If this proposal is aborted, chain state doesn't change

|
|||
package platformvm
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
|
@ -35,47 +36,47 @@ func (tx *advanceTimeTx) initialize(vm *VM) error {
|
|||
func (tx *advanceTimeTx) Timestamp() time.Time { return time.Unix(int64(tx.Time), 0) }
|
||||
|
||||
// SyntacticVerify that this transaction is well formed
|
||||
func (tx *advanceTimeTx) SyntacticVerify() error {
|
||||
func (tx *advanceTimeTx) SyntacticVerify() TxError {
|
||||
switch {
|
||||
case tx == nil:
|
||||
return errNilTx
|
||||
return tempError{errNilTx}
|
||||
case tx.vm.clock.Time().Add(Delta).Before(tx.Timestamp()):
|
||||
return errTimeTooAdvanced
|
||||
return tempError{errTimeTooAdvanced}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// SemanticVerify this transaction is valid.
|
||||
func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) {
|
||||
func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), TxError) {
|
||||
if err := tx.SyntacticVerify(); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
}
|
||||
|
||||
currentTimestamp, err := tx.vm.getTimestamp(db)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
if tx.Time <= uint64(currentTimestamp.Unix()) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("proposed timestamp %s not after current timestamp %s",
|
||||
return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp %s not after current timestamp %s",
|
||||
tx.Timestamp(),
|
||||
currentTimestamp)
|
||||
currentTimestamp)}
|
||||
}
|
||||
|
||||
// Only allow timestamp to move forward as far as the next validator's end time
|
||||
nextValidatorEndTime := tx.vm.nextValidatorChangeTime(db, false)
|
||||
if tx.Time > uint64(nextValidatorEndTime.Unix()) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("proposed timestamp %v later than next validator end time %s",
|
||||
return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp %v later than next validator end time %s",
|
||||
tx.Time,
|
||||
nextValidatorEndTime)
|
||||
nextValidatorEndTime)}
|
||||
}
|
||||
|
||||
// Only allow timestamp to move forward as far as the next pending validator's start time
|
||||
nextValidatorStartTime := tx.vm.nextValidatorChangeTime(db, true)
|
||||
if tx.Time > uint64(nextValidatorStartTime.Unix()) {
|
||||
return nil, nil, nil, nil, fmt.Errorf("proposed timestamp %v later than next validator start time %s",
|
||||
return nil, nil, nil, nil, permError{fmt.Errorf("proposed timestamp %v later than next validator start time %s",
|
||||
tx.Time,
|
||||
nextValidatorStartTime)
|
||||
nextValidatorStartTime)}
|
||||
}
|
||||
|
||||
// Calculate what the validator sets will be given new timestamp
|
||||
|
@ -85,19 +86,19 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa
|
|||
// Specify what the state of the chain will be if this proposal is committed
|
||||
onCommitDB := versiondb.New(db)
|
||||
if err := tx.vm.putTimestamp(onCommitDB, tx.Timestamp()); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
|
||||
current, pending, _, _, err := tx.vm.calculateValidators(db, tx.Timestamp(), DefaultSubnetID)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
|
||||
if err := tx.vm.putCurrentValidators(onCommitDB, current, DefaultSubnetID); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
if err := tx.vm.putPendingValidators(onCommitDB, pending, DefaultSubnetID); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
|
||||
// For each Subnet, calculate what current and pending validator sets should be
|
||||
|
@ -106,23 +107,23 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa
|
|||
// Key: Subnet ID
|
||||
// Value: IDs of validators that will have started validating this Subnet when
|
||||
// timestamp is advanced to tx.Timestamp()
|
||||
startedValidating := make(map[ids.ID]ids.ShortSet, 0)
|
||||
startedValidating := make(map[[32]byte]ids.ShortSet, 0)
|
||||
subnets, err := tx.vm.getSubnets(db)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
for _, subnet := range subnets {
|
||||
current, pending, started, _, err := tx.vm.calculateValidators(db, tx.Timestamp(), subnet.id)
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
if err := tx.vm.putCurrentValidators(onCommitDB, current, subnet.id); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
if err := tx.vm.putPendingValidators(onCommitDB, pending, subnet.id); err != nil {
|
||||
return nil, nil, nil, nil, err
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
startedValidating[subnet.ID()] = started
|
||||
startedValidating[subnet.ID().Key()] = started
|
||||
}
|
||||
|
||||
// If this block is committed, update the validator sets
|
||||
|
@ -154,7 +155,7 @@ func (tx *advanceTimeTx) SemanticVerify(db database.Database) (*versiondb.Databa
|
|||
continue
|
||||
}
|
||||
for _, chain := range chains {
|
||||
if chain.SubnetID.Equals(subnetID) {
|
||||
if bytes.Equal(subnetID[:], chain.SubnetID.Bytes()) {
|
||||
tx.vm.createChain(chain)
|
||||
}
|
||||
}
|
||||
|
|
|
@ -23,6 +23,13 @@ func (c *Commit) Verify() error {
	// the parent of a Commit block should always be a proposal
	parent, ok := c.parentBlock().(*ProposalBlock)
	if !ok {
		if err := c.Reject(); err == nil {
			if err := c.vm.DB.Commit(); err != nil {
				c.vm.Ctx.Log.Error("error committing Commit block as rejected: %s", err)
			}
		} else {
			c.vm.DB.Abort()
		}
		return errInvalidBlockType
	}


@@ -0,0 +1,19 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package platformvm

// TxError provides the ability for errors to be distinguished as permanent or
// temporary
type TxError interface {
	error
	Temporary() bool
}

type tempError struct{ error }

func (tempError) Temporary() bool { return true }

type permError struct{ error }

func (permError) Temporary() bool { return false }
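
For orientation, here is a minimal sketch (not part of the diff) of the call-site pattern this interface enables; the types mirror the ones introduced above:

package main

import "errors"

// TxError mirrors the interface added in this change.
type TxError interface {
	error
	Temporary() bool
}

type permError struct{ error }

func (permError) Temporary() bool { return false }

func main() {
	var err TxError = permError{errors.New("invalid tx")}
	if err != nil && !err.Temporary() {
		// Permanent failure: the enclosing block can safely be marked rejected.
		// A temporary failure would instead be dropped and retried later.
		println("reject:", err.Error())
	}
}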

@@ -6,6 +6,7 @@ package platformvm
import (
	"github.com/ava-labs/gecko/chains"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/validators"
)

@@ -24,7 +25,7 @@ type Factory struct {
}

// New returns a new instance of the Platform Chain
func (f *Factory) New() (interface{}, error) {
func (f *Factory) New(*snow.Context) (interface{}, error) {
	return &VM{
		chainManager: f.ChainManager,
		validators:   f.Validators,

@@ -16,7 +16,13 @@ import (
type ProposalTx interface {
	initialize(vm *VM) error
	// Attempts to verify this transaction with the provided state.
	SemanticVerify(database.Database) (onCommitDB *versiondb.Database, onAbortDB *versiondb.Database, onCommitFunc func(), onAbortFunc func(), err error)
	SemanticVerify(database.Database) (
		onCommitDB *versiondb.Database,
		onAbortDB *versiondb.Database,
		onCommitFunc func(),
		onAbortFunc func(),
		err TxError,
	)
	InitiallyPrefersCommit() bool
}

@@ -98,15 +104,33 @@ func (pb *ProposalBlock) Verify() error {
	// The parent of a proposal block (ie this block) must be a decision block
	parent, ok := parentIntf.(decision)
	if !ok {
		if err := pb.Reject(); err == nil {
			if err := pb.vm.DB.Commit(); err != nil {
				pb.vm.Ctx.Log.Error("error committing Proposal block as rejected: %s", err)
			}
		} else {
			pb.vm.DB.Abort()
		}
		return errInvalidBlockType
	}

	// pdb is the database if this block's parent is accepted
	pdb := parent.onAccept()

	var err error
	var err TxError
	pb.onCommitDB, pb.onAbortDB, pb.onCommitFunc, pb.onAbortFunc, err = pb.Tx.SemanticVerify(pdb)
	if err != nil {
		// If this block's transaction proposes to advance the timestamp, the transaction may fail
		// verification now but be valid in the future, so don't (permanently) mark the block as rejected.
		if !err.Temporary() {
			if err := pb.Reject(); err == nil {
				if err := pb.vm.DB.Commit(); err != nil {
					pb.vm.Ctx.Log.Error("error committing Proposal block as rejected: %s", err)
				}
			} else {
				pb.vm.DB.Abort()
			}
		}
		return err
	}

@@ -41,12 +41,12 @@ func (tx *rewardValidatorTx) initialize(vm *VM) error {
}

// SyntacticVerify that this transaction is well formed
func (tx *rewardValidatorTx) SyntacticVerify() error {
func (tx *rewardValidatorTx) SyntacticVerify() TxError {
	switch {
	case tx == nil:
		return errNilTx
		return tempError{errNilTx}
	case tx.TxID.IsZero():
		return errInvalidID
		return tempError{errInvalidID}
	default:
		return nil
	}

@@ -58,39 +58,39 @@ func (tx *rewardValidatorTx) SyntacticVerify() error {
// The next validator to be removed must be the validator specified in this block.
// The next validator to be removed must have an end time equal to the current
// chain timestamp.
func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), error) {
func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Database, *versiondb.Database, func(), func(), TxError) {
	if err := tx.SyntacticVerify(); err != nil {
		return nil, nil, nil, nil, err
	}
	if db == nil {
		return nil, nil, nil, nil, errDBNil
		return nil, nil, nil, nil, tempError{errDBNil}
	}

	currentEvents, err := tx.vm.getCurrentValidators(db, DefaultSubnetID)
	if err != nil {
		return nil, nil, nil, nil, errDBCurrentValidators
		return nil, nil, nil, nil, permError{errDBCurrentValidators}
	}
	if currentEvents.Len() == 0 { // there is no validator to remove
		return nil, nil, nil, nil, errEmptyValidatingSet
		return nil, nil, nil, nil, permError{errEmptyValidatingSet}
	}

	vdrTx := currentEvents.Peek()

	if txID := vdrTx.ID(); !txID.Equals(tx.TxID) {
		return nil, nil, nil, nil, fmt.Errorf("attempting to remove TxID: %s. Should be removing %s",
		return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s. Should be removing %s",
			tx.TxID,
			txID)
			txID)}
	}

	// Verify that the chain's timestamp is the validator's end time
	currentTime, err := tx.vm.getTimestamp(db)
	if err != nil {
		return nil, nil, nil, nil, err
		return nil, nil, nil, nil, permError{err}
	}
	if endTime := vdrTx.EndTime(); !endTime.Equal(currentTime) {
		return nil, nil, nil, nil, fmt.Errorf("attempting to remove TxID: %s before their end time %s",
		return nil, nil, nil, nil, permError{fmt.Errorf("attempting to remove TxID: %s before their end time %s",
			tx.TxID,
			endTime)
			endTime)}
	}

	heap.Pop(currentEvents) // Remove validator from the validator set

@@ -99,14 +99,14 @@ func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Da
	// If this tx's proposal is committed, remove the validator from the validator set and update the
	// account balance to reflect the return of staked $AVA and their reward.
	if err := tx.vm.putCurrentValidators(onCommitDB, currentEvents, DefaultSubnetID); err != nil {
		return nil, nil, nil, nil, errDBPutCurrentValidators
		return nil, nil, nil, nil, permError{errDBPutCurrentValidators}
	}

	onAbortDB := versiondb.New(db)
	// If this tx's proposal is aborted, remove the validator from the validator set and update the
	// account balance to reflect the return of staked $AVA. The validator receives no reward.
	if err := tx.vm.putCurrentValidators(onAbortDB, currentEvents, DefaultSubnetID); err != nil {
		return nil, nil, nil, nil, errDBPutCurrentValidators
		return nil, nil, nil, nil, permError{errDBPutCurrentValidators}
	}

	switch vdrTx := vdrTx.(type) {

@@ -144,15 +144,15 @@ func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Da
		}

		if err := tx.vm.putAccount(onCommitDB, accountWithReward); err != nil {
			return nil, nil, nil, nil, errDBPutAccount
			return nil, nil, nil, nil, tempError{errDBPutAccount}
		}
		if err := tx.vm.putAccount(onAbortDB, accountNoReward); err != nil {
			return nil, nil, nil, nil, errDBPutAccount
			return nil, nil, nil, nil, tempError{errDBPutAccount}
		}
	case *addDefaultSubnetDelegatorTx:
		parentTx, err := currentEvents.getDefaultSubnetStaker(vdrTx.NodeID)
		if err != nil {
			return nil, nil, nil, nil, err
			return nil, nil, nil, nil, permError{err}
		}

		duration := vdrTx.Duration()

@@ -201,10 +201,10 @@ func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Da
		}

		if err := tx.vm.putAccount(onCommitDB, delegatorAccountWithReward); err != nil {
			return nil, nil, nil, nil, errDBPutAccount
			return nil, nil, nil, nil, tempError{errDBPutAccount}
		}
		if err := tx.vm.putAccount(onAbortDB, delegatorAccountNoReward); err != nil {
			return nil, nil, nil, nil, errDBPutAccount
			return nil, nil, nil, nil, tempError{errDBPutAccount}
		}

		validatorAccountID := parentTx.Destination

@@ -225,10 +225,10 @@ func (tx *rewardValidatorTx) SemanticVerify(db database.Database) (*versiondb.Da
		}

		if err := tx.vm.putAccount(onCommitDB, validatorAccountWithReward); err != nil {
			return nil, nil, nil, nil, errDBPutAccount
			return nil, nil, nil, nil, permError{errDBPutAccount}
		}
	default:
		return nil, nil, nil, nil, errShouldBeDSValidator
		return nil, nil, nil, nil, permError{errShouldBeDSValidator}
	}

	// Regardless of whether this tx is committed or aborted, update the

@@ -35,6 +35,7 @@ var (
	errGetStakeSource        = errors.New("couldn't get account specified in 'stakeSource'")
	errNoBlockchainWithAlias = errors.New("there is no blockchain with the specified alias")
	errDSCantValidate        = errors.New("new blockchain can't be validated by default Subnet")
	errNonDSUsesDS           = errors.New("add non default subnet validator attempts to use default Subnet ID")
	errNilSigner             = errors.New("nil ShortID 'signer' is not valid")
	errNilTo                 = errors.New("nil ShortID 'to' is not valid")
	errNoFunds               = errors.New("no spendable funds were found")

@@ -43,6 +44,83 @@ var (
// Service defines the API calls that can be made to the platform chain
type Service struct{ vm *VM }

// ExportKeyArgs are arguments for ExportKey
type ExportKeyArgs struct {
	Username string `json:"username"`
	Password string `json:"password"`
	Address  string `json:"address"`
}

// ExportKeyReply is the response for ExportKey
type ExportKeyReply struct {
	// The decrypted PrivateKey for the Address provided in the arguments
	PrivateKey formatting.CB58 `json:"privateKey"`
}

// ExportKey returns a private key from the provided user
func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error {
	service.vm.SnowmanVM.Ctx.Log.Verbo("ExportKey called for user '%s'", args.Username)

	addr, err := service.vm.ParseAddress(args.Address)
	if err != nil {
		return fmt.Errorf("problem parsing address: %w", err)
	}

	db, err := service.vm.SnowmanVM.Ctx.Keystore.GetDatabase(args.Username, args.Password)
	if err != nil {
		return fmt.Errorf("problem retrieving user: %w", err)
	}

	user := user{db: db}

	sk, err := user.getKey(addr)
	if err != nil {
		return fmt.Errorf("problem retrieving private key: %w", err)
	}

	reply.PrivateKey.Bytes = sk.Bytes()
	return nil
}

// ImportKeyArgs are arguments for ImportKey
type ImportKeyArgs struct {
	Username   string          `json:"username"`
	Password   string          `json:"password"`
	PrivateKey formatting.CB58 `json:"privateKey"`
}

// ImportKeyReply is the response for ImportKey
type ImportKeyReply struct {
	// The address controlled by the PrivateKey provided in the arguments
	Address string `json:"address"`
}

// ImportKey adds a private key to the provided user
func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *ImportKeyReply) error {
	service.vm.SnowmanVM.Ctx.Log.Verbo("ImportKey called for user '%s'", args.Username)

	db, err := service.vm.SnowmanVM.Ctx.Keystore.GetDatabase(args.Username, args.Password)
	if err != nil {
		return fmt.Errorf("problem retrieving data: %w", err)
	}

	user := user{db: db}

	factory := crypto.FactorySECP256K1R{}
	skIntf, err := factory.ToPrivateKey(args.PrivateKey.Bytes)
	if err != nil {
		return fmt.Errorf("problem parsing private key %s: %w", args.PrivateKey, err)
	}
	sk := skIntf.(*crypto.PrivateKeySECP256K1R)

	if err := user.putAccount(sk); err != nil {
		return fmt.Errorf("problem saving key: %w", err)
	}

	reply.Address = service.vm.FormatAddress(sk.PublicKey().Address())
	return nil
}
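
These keystore methods are served over the platform chain's JSON-RPC endpoint. A hedged sketch of calling the new method from Go (the endpoint path and method name are assumptions based on the service's "platform" namespace and the P-Chain alias added elsewhere in this change; the credentials are reused from TestExportKey later in this diff):

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// JSON-RPC 2.0 request body for the assumed platform.exportKey method.
	body := []byte(`{
		"jsonrpc": "2.0",
		"id": 1,
		"method": "platform.exportKey",
		"params": {
			"username": "ScoobyUser",
			"password": "ShaggyPassword1",
			"address": "6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV"
		}
	}`)
	// 127.0.0.1:9650/ext/P is the assumed local node endpoint for the P-Chain.
	resp, err := http.Post("http://127.0.0.1:9650/ext/P", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	reply, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(reply)) // result.privateKey holds the CB58-encoded key
}

The same payload shape, with privateKey in params instead of address, drives ImportKey.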

/*
 ******************************************************
 ******************* Get Subnets **********************

@@ -57,8 +135,8 @@ type APISubnet struct {
	// Each element of [ControlKeys] is the address of a public key.
	// A transaction to add a validator to this subnet requires
	// signatures from [Threshold] of these keys to be valid.
	ControlKeys []ids.ShortID `json:"controlKeys"`
	Threshold   json.Uint16   `json:"threshold"`
	ControlKeys []string    `json:"controlKeys"`
	Threshold   json.Uint16 `json:"threshold"`
}

// GetSubnetsArgs are the arguments to GetSubnet

@@ -76,7 +154,7 @@ type GetSubnetsResponse struct {
}

// GetSubnets returns the subnets whose ID are in [args.IDs]
// The response will not contain the default subnet
// The response will include the default subnet
func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error {
	subnets, err := service.vm.getSubnets(service.vm.DB) // all subnets
	if err != nil {

@@ -86,14 +164,24 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon
	getAll := len(args.IDs) == 0

	if getAll {
		response.Subnets = make([]APISubnet, len(subnets))
		response.Subnets = make([]APISubnet, len(subnets)+1)
		for i, subnet := range subnets {
			controlAddrs := []string{}
			for _, controlKeyID := range subnet.ControlKeys {
				controlAddrs = append(controlAddrs, service.vm.FormatAddress(controlKeyID))
			}
			response.Subnets[i] = APISubnet{
				ID:          subnet.id,
				ControlKeys: subnet.ControlKeys,
				ControlKeys: controlAddrs,
				Threshold:   json.Uint16(subnet.Threshold),
			}
		}
		// Include Default Subnet
		response.Subnets[len(subnets)] = APISubnet{
			ID:          DefaultSubnetID,
			ControlKeys: []string{},
			Threshold:   json.Uint16(0),
		}
		return nil
	}

@@ -101,15 +189,28 @@ func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, respon
	idsSet.Add(args.IDs...)
	for _, subnet := range subnets {
		if idsSet.Contains(subnet.id) {
			controlAddrs := []string{}
			for _, controlKeyID := range subnet.ControlKeys {
				controlAddrs = append(controlAddrs, service.vm.FormatAddress(controlKeyID))
			}
			response.Subnets = append(response.Subnets,
				APISubnet{
					ID:          subnet.id,
					ControlKeys: subnet.ControlKeys,
					ControlKeys: controlAddrs,
					Threshold:   json.Uint16(subnet.Threshold),
				},
			)
		}
	}
	if idsSet.Contains(DefaultSubnetID) {
		response.Subnets = append(response.Subnets,
			APISubnet{
				ID:          DefaultSubnetID,
				ControlKeys: []string{},
				Threshold:   json.Uint16(0),
			},
		)
	}
	return nil
}
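
The net effect on the wire: a getSubnets call with an empty IDs list now ends with one extra entry for the default subnet, which has no control keys and a threshold of 0. An illustrative response shape (IDs elided, values invented):

{
	"subnets": [
		{"id": "<subnet ID>", "controlKeys": ["<CB58 address>", "<CB58 address>"], "threshold": "2"},
		{"id": "<default subnet ID>", "controlKeys": [], "threshold": "0"}
	]
}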

@@ -128,7 +229,7 @@ type GetCurrentValidatorsArgs struct {

// GetCurrentValidatorsReply are the results from calling GetCurrentValidators
type GetCurrentValidatorsReply struct {
	Validators []APIValidator `json:"validators"`
	Validators []FormattedAPIValidator `json:"validators"`
}

// GetCurrentValidators returns the list of current validators

@@ -144,11 +245,11 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa
		return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID)
	}

	reply.Validators = make([]APIValidator, validators.Len())
	for i, tx := range validators.Txs {
		vdr := tx.Vdr()
		weight := json.Uint64(vdr.Weight())
		if args.SubnetID.Equals(DefaultSubnetID) {
	reply.Validators = make([]FormattedAPIValidator, validators.Len())
	if args.SubnetID.Equals(DefaultSubnetID) {
		for i, tx := range validators.Txs {
			vdr := tx.Vdr()
			weight := json.Uint64(vdr.Weight())
			var address ids.ShortID
			switch tx := tx.(type) {
			case *addDefaultSubnetValidatorTx:

@@ -159,15 +260,19 @@ func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentVa
				return fmt.Errorf("couldn't get the destination address of %s", tx.ID())
			}

			reply.Validators[i] = APIValidator{
			reply.Validators[i] = FormattedAPIValidator{
				ID:          vdr.ID(),
				StartTime:   json.Uint64(tx.StartTime().Unix()),
				EndTime:     json.Uint64(tx.EndTime().Unix()),
				StakeAmount: &weight,
				Address:     &address,
				Address:     service.vm.FormatAddress(address),
			}
		} else {
			reply.Validators[i] = APIValidator{
		}
	} else {
		for i, tx := range validators.Txs {
			vdr := tx.Vdr()
			weight := json.Uint64(vdr.Weight())
			reply.Validators[i] = FormattedAPIValidator{
				ID:        vdr.ID(),
				StartTime: json.Uint64(tx.StartTime().Unix()),
				EndTime:   json.Uint64(tx.EndTime().Unix()),

@@ -188,7 +293,7 @@ type GetPendingValidatorsArgs struct {

// GetPendingValidatorsReply are the results from calling GetPendingValidators
type GetPendingValidatorsReply struct {
	Validators []APIValidator `json:"validators"`
	Validators []FormattedAPIValidator `json:"validators"`
}

// GetPendingValidators returns the list of pending validators

@@ -204,7 +309,7 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa
		return fmt.Errorf("couldn't get validators of subnet with ID %s. Does it exist?", args.SubnetID)
	}

	reply.Validators = make([]APIValidator, validators.Len())
	reply.Validators = make([]FormattedAPIValidator, validators.Len())
	for i, tx := range validators.Txs {
		vdr := tx.Vdr()
		weight := json.Uint64(vdr.Weight())

@@ -218,15 +323,15 @@ func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingVa
		default: // Shouldn't happen
			return fmt.Errorf("couldn't get the destination address of %s", tx.ID())
		}
		reply.Validators[i] = APIValidator{
		reply.Validators[i] = FormattedAPIValidator{
			ID:          vdr.ID(),
			StartTime:   json.Uint64(tx.StartTime().Unix()),
			EndTime:     json.Uint64(tx.EndTime().Unix()),
			StakeAmount: &weight,
			Address:     &address,
			Address:     service.vm.FormatAddress(address),
		}
	} else {
		reply.Validators[i] = APIValidator{
		reply.Validators[i] = FormattedAPIValidator{
			ID:        vdr.ID(),
			StartTime: json.Uint64(tx.StartTime().Unix()),
			EndTime:   json.Uint64(tx.EndTime().Unix()),

@@ -275,8 +380,8 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators
	for i, vdr := range sample {
		reply.Validators[i] = vdr.ID()
	}

	ids.SortShortIDs(reply.Validators)

	return nil
}

@@ -289,26 +394,30 @@ func (service *Service) SampleValidators(_ *http.Request, args *SampleValidators
// GetAccountArgs are the arguments for calling GetAccount
type GetAccountArgs struct {
	// Address of the account we want the information about
	Address ids.ShortID `json:"address"`
	Address string `json:"address"`
}

// GetAccountReply is the response from calling GetAccount
type GetAccountReply struct {
	Address ids.ShortID `json:"address"`
	Address string      `json:"address"`
	Nonce   json.Uint64 `json:"nonce"`
	Balance json.Uint64 `json:"balance"`
}

// GetAccount returns the details of the given account
func (service *Service) GetAccount(_ *http.Request, args *GetAccountArgs, reply *GetAccountReply) error {
	account, err := service.vm.getAccount(service.vm.DB, args.Address)
	address, err := service.vm.ParseAddress(args.Address)
	if err != nil {
		return fmt.Errorf("problem parsing address: %w", err)
	}
	account, err := service.vm.getAccount(service.vm.DB, address)
	if err != nil && err != database.ErrNotFound {
		return fmt.Errorf("couldn't get account: %w", err)
	} else if err == database.ErrNotFound {
		account = newAccount(args.Address, 0, 0)
		account = newAccount(address, 0, 0)
	}

	reply.Address = account.Address
	reply.Address = service.vm.FormatAddress(account.Address)
	reply.Balance = json.Uint64(account.Balance)
	reply.Nonce = json.Uint64(account.Nonce)
	return nil

@@ -323,7 +432,7 @@ type ListAccountsArgs struct {

// ListAccountsReply is the reply from ListAccounts
type ListAccountsReply struct {
	Accounts []APIAccount `json:"accounts"`
	Accounts []FormattedAPIAccount `json:"accounts"`
}

// ListAccounts lists all of the accounts controlled by [args.Username]

@@ -347,7 +456,7 @@ func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, re
		return fmt.Errorf("couldn't get accounts held by user: %w", err)
	}

	reply.Accounts = []APIAccount{}
	reply.Accounts = []FormattedAPIAccount{}
	for _, accountID := range accountIDs {
		account, err := service.vm.getAccount(service.vm.DB, accountID) // Get account whose ID is [accountID]
		if err != nil && err != database.ErrNotFound {

@@ -356,8 +465,8 @@ func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, re
		} else if err == database.ErrNotFound {
			account = newAccount(accountID, 0, 0)
		}
		reply.Accounts = append(reply.Accounts, APIAccount{
			Address: accountID,
		reply.Accounts = append(reply.Accounts, FormattedAPIAccount{
			Address: service.vm.FormatAddress(accountID),
			Nonce:   json.Uint64(account.Nonce),
			Balance: json.Uint64(account.Balance),
		})

@@ -382,7 +491,7 @@ type CreateAccountArgs struct {
// CreateAccountReply are the response from calling CreateAccount
type CreateAccountReply struct {
	// Address of the newly created account
	Address ids.ShortID `json:"address"`
	Address string `json:"address"`
}

// CreateAccount creates a new account on the Platform Chain

@@ -429,7 +538,7 @@ func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs,
		return errors.New("problem saving account")
	}

	reply.Address = privKey.PublicKey().Address()
	reply.Address = service.vm.FormatAddress(privKey.PublicKey().Address())

	return nil
}

@@ -451,7 +560,7 @@ type CreateTxResponse struct {

// AddDefaultSubnetValidatorArgs are the arguments to AddDefaultSubnetValidator
type AddDefaultSubnetValidatorArgs struct {
	APIDefaultSubnetValidator
	FormattedAPIDefaultSubnetValidator

	// Next nonce of the sender
	PayerNonce json.Uint64 `json:"payerNonce"`

@@ -469,6 +578,13 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa
		return fmt.Errorf("sender's next nonce not specified")
	case int64(args.StartTime) < time.Now().Unix():
		return fmt.Errorf("start time must be in the future")
	case args.Destination == "":
		return fmt.Errorf("destination not specified")
	}

	destination, err := service.vm.ParseAddress(args.Destination)
	if err != nil {
		return fmt.Errorf("problem while parsing destination: %w", err)
	}

	// Create the transaction

@@ -482,7 +598,7 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa
			End:   uint64(args.EndTime),
		},
		Nonce:       uint64(args.PayerNonce),
		Destination: args.Destination,
		Destination: destination,
		NetworkID:   service.vm.Ctx.NetworkID,
		Shares:      uint32(args.DelegationFeeRate),
	}}

@@ -500,7 +616,7 @@ func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefa
type AddDefaultSubnetDelegatorArgs struct {
	APIValidator

	Destination ids.ShortID `json:"destination"`
	Destination string `json:"destination"`

	// Next unused nonce of the account the staked $AVA and tx fee are paid from
	PayerNonce json.Uint64 `json:"payerNonce"`

@@ -519,6 +635,13 @@ func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefa
		return fmt.Errorf("sender's next unused nonce not specified")
	case int64(args.StartTime) < time.Now().Unix():
		return fmt.Errorf("start time must be in the future")
	case args.Destination == "":
		return fmt.Errorf("destination must be non-empty string")
	}

	destination, err := service.vm.ParseAddress(args.Destination)
	if err != nil {
		return fmt.Errorf("problem parsing destination address: %w", err)
	}

	// Create the transaction

@@ -533,7 +656,7 @@ func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefa
		},
		NetworkID:   service.vm.Ctx.NetworkID,
		Nonce:       uint64(args.PayerNonce),
		Destination: args.Destination,
		Destination: destination,
	}}

	txBytes, err := Codec.Marshal(genericTx{Tx: &tx})

@@ -550,7 +673,7 @@ type AddNonDefaultSubnetValidatorArgs struct {
	APIValidator

	// ID of subnet to validate
	SubnetID ids.ID `json:"subnetID"`
	SubnetID string `json:"subnetID"`

	// Next unused nonce of the account the tx fee is paid from
	PayerNonce json.Uint64 `json:"payerNonce"`

@@ -559,6 +682,20 @@ type AddNonDefaultSubnetValidatorArgs struct {
// AddNonDefaultSubnetValidator adds a validator to a subnet other than the default subnet
// Returns the unsigned transaction, which must be signed using Sign
func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddNonDefaultSubnetValidatorArgs, response *CreateTxResponse) error {
	switch {
	case args.SubnetID == "":
		return errors.New("'subnetID' not given")
	}

	subnetID, err := ids.FromString(args.SubnetID)
	if err != nil {
		return fmt.Errorf("problem parsing subnetID '%s': %w", args.SubnetID, err)
	}

	if subnetID.Equals(DefaultSubnetID) {
		return errNonDSUsesDS
	}

	tx := addNonDefaultSubnetValidatorTx{
		UnsignedAddNonDefaultSubnetValidatorTx: UnsignedAddNonDefaultSubnetValidatorTx{
			SubnetValidator: SubnetValidator{

@@ -570,7 +707,7 @@ func (service *Service) AddNonDefaultSubnetValidator(_ *http.Request, args *AddN
				Start: uint64(args.StartTime),
				End:   uint64(args.EndTime),
			},
			Subnet: args.SubnetID,
			Subnet: subnetID,
		},
		NetworkID: service.vm.Ctx.NetworkID,
		Nonce:     uint64(args.PayerNonce),

@@ -611,12 +748,21 @@ func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, re
		return fmt.Errorf("sender's next nonce not specified")
	}

	controlKeys := []ids.ShortID{}
	for _, controlKey := range args.ControlKeys {
		controlKeyID, err := service.vm.ParseAddress(controlKey)
		if err != nil {
			return fmt.Errorf("problem parsing control key: %w", err)
		}
		controlKeys = append(controlKeys, controlKeyID)
	}

	// Create the transaction
	tx := CreateSubnetTx{
		UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{
			NetworkID:   service.vm.Ctx.NetworkID,
			Nonce:       uint64(args.PayerNonce),
			ControlKeys: args.ControlKeys,
			ControlKeys: controlKeys,
			Threshold:   uint16(args.Threshold),
		},
		key: nil,

@@ -697,7 +843,7 @@ type SignArgs struct {
	Tx formatting.CB58 `json:"tx"`

	// The address of the key signing the bytes
	Signer ids.ShortID `json:"signer"`
	Signer string `json:"signer"`

	// User that controls Signer
	Username string `json:"username"`

@@ -714,10 +860,15 @@ type SignResponse struct {
func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error {
	service.vm.Ctx.Log.Debug("sign called")

	if args.Signer.IsZero() {
	if args.Signer == "" {
		return errNilSigner
	}

	signer, err := service.vm.ParseAddress(args.Signer)
	if err != nil {
		return fmt.Errorf("problem parsing address: %w", err)
	}

	// Get the key of the Signer
	db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password)
	if err != nil {

@@ -725,11 +876,11 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons
	}
	user := user{db: db}

	key, err := user.getKey(args.Signer) // Key of [args.Signer]
	key, err := user.getKey(signer) // Key of [args.Signer]
	if err != nil {
		return errDB
	}
	if !bytes.Equal(key.PublicKey().Address().Bytes(), args.Signer.Bytes()) { // sanity check
	if !bytes.Equal(key.PublicKey().Address().Bytes(), signer.Bytes()) { // sanity check
		return errors.New("got unexpected key from database")
	}

@@ -910,7 +1061,7 @@ func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubn
// ImportAVAArgs are the arguments to ImportAVA
type ImportAVAArgs struct {
	// ID of the account that will receive the imported funds, and pay the transaction fee
	To ids.ShortID `json:"to"`
	To string `json:"to"`

	// Next nonce of the sender
	PayerNonce json.Uint64 `json:"payerNonce"`

@@ -927,12 +1078,17 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response
	service.vm.Ctx.Log.Debug("platform.ImportAVA called")

	switch {
	case args.To.IsZero():
	case args.To == "":
		return errNilTo
	case args.PayerNonce == 0:
		return fmt.Errorf("sender's next nonce not specified")
	}

	toID, err := service.vm.ParseAddress(args.To)
	if err != nil {
		return fmt.Errorf("problem parsing address in 'to' field: %w", err)
	}

	// Get the key of the Signer
	db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password)
	if err != nil {

@@ -941,14 +1097,14 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response
	user := user{db: db}

	kc := secp256k1fx.NewKeychain()
	key, err := user.getKey(args.To)
	key, err := user.getKey(toID)
	if err != nil {
		return errDB
	}
	kc.Add(key)

	addrSet := ids.Set{}
	addrSet.Add(ids.NewID(hashing.ComputeHash256Array(args.To.Bytes())))
	addrSet.Add(ids.NewID(hashing.ComputeHash256Array(toID.Bytes())))

	utxos, err := service.vm.GetAtomicUTXOs(addrSet)
	if err != nil {

@@ -998,7 +1154,7 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response
	tx := ImportTx{UnsignedImportTx: UnsignedImportTx{
		NetworkID: service.vm.Ctx.NetworkID,
		Nonce:     uint64(args.PayerNonce),
		Account:   args.To,
		Account:   toID,
		Ins:       ins,
	}}

@@ -1119,7 +1275,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is
		if err := tx.initialize(service.vm); err != nil {
			return fmt.Errorf("error initializing tx: %s", err)
		}
		service.vm.unissuedEvents.Push(tx)
		service.vm.unissuedEvents.Add(tx)
		response.TxID = tx.ID()
	case DecisionTx:
		if err := tx.initialize(service.vm); err != nil {

@@ -1134,7 +1290,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is
		service.vm.unissuedAtomicTxs = append(service.vm.unissuedAtomicTxs, tx)
		response.TxID = tx.ID()
	default:
		return errors.New("Could not parse given tx. Must be a TimedTx, DecisionTx, or AtomicTx")
		return errors.New("Could not parse given tx. Provided tx needs to be a TimedTx, DecisionTx, or AtomicTx")
	}

	service.vm.resetTimer()

@@ -1150,7 +1306,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is
// CreateBlockchainArgs is the arguments for calling CreateBlockchain
type CreateBlockchainArgs struct {
	// ID of Subnet that validates the new blockchain
	SubnetID ids.ID `json:"subnetID"`
	SubnetID string `json:"subnetID"`

	// ID of the VM the new blockchain is running
	VMID string `json:"vmID"`

@@ -1178,10 +1334,15 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain
		return errors.New("sender's next nonce not specified")
	case args.VMID == "":
		return errors.New("VM not specified")
	case args.SubnetID.Equals(ids.Empty):
	case args.SubnetID == "":
		return errors.New("subnet not specified")
	}

	subnetID, err := ids.FromString(args.SubnetID)
	if err != nil {
		return fmt.Errorf("problem parsing subnetID '%s': %w", args.SubnetID, err)
	}

	vmID, err := service.vm.chainManager.LookupVM(args.VMID)
	if err != nil {
		return fmt.Errorf("no VM with ID '%s' found", args.VMID)

@@ -1203,14 +1364,14 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain
		fxIDs = append(fxIDs, secp256k1fx.ID)
	}

	if args.SubnetID.Equals(DefaultSubnetID) {
	if subnetID.Equals(DefaultSubnetID) {
		return errDSCantValidate
	}

	tx := CreateChainTx{
		UnsignedCreateChainTx: UnsignedCreateChainTx{
			NetworkID: service.vm.Ctx.NetworkID,
			SubnetID:  args.SubnetID,
			SubnetID:  subnetID,
			Nonce:     uint64(args.PayerNonce),
			ChainName: args.Name,
			VMID:      vmID,

@@ -1318,7 +1479,7 @@ func (service *Service) chainExists(blockID ids.ID, chainID ids.ID) (bool, error
// ValidatedByArgs is the arguments for calling ValidatedBy
type ValidatedByArgs struct {
	// ValidatedBy returns the ID of the Subnet validating the blockchain with this ID
	BlockchainID ids.ID `json:"blockchainID"`
	BlockchainID string `json:"blockchainID"`
}

// ValidatedByResponse is the reply from calling ValidatedBy

@@ -1332,11 +1493,16 @@ func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, resp
	service.vm.Ctx.Log.Debug("validatedBy called")

	switch {
	case args.BlockchainID.Equals(ids.Empty):
		return errors.New("'blockchainID' not specified")
	case args.BlockchainID == "":
		return errors.New("'blockchainID' not given")
	}

	chain, err := service.vm.getChain(service.vm.DB, args.BlockchainID)
	blockchainID, err := ids.FromString(args.BlockchainID)
	if err != nil {
		return fmt.Errorf("problem parsing blockchainID '%s': %w", args.BlockchainID, err)
	}

	chain, err := service.vm.getChain(service.vm.DB, blockchainID)
	if err != nil {
		return err
	}

@@ -1346,7 +1512,7 @@ func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, resp

// ValidatesArgs are the arguments to Validates
type ValidatesArgs struct {
	SubnetID ids.ID `json:"subnetID"`
	SubnetID string `json:"subnetID"`
}

// ValidatesResponse is the response from calling Validates

@@ -1359,12 +1525,18 @@ func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response
	service.vm.Ctx.Log.Debug("validates called")

	switch {
	case args.SubnetID.Equals(ids.Empty):
		return errors.New("'subnetID' not specified")
	case args.SubnetID == "":
		return errors.New("'subnetID' not given")
	}

	subnetID, err := ids.FromString(args.SubnetID)
	if err != nil {
		return fmt.Errorf("problem parsing subnetID '%s': %w", args.SubnetID, err)
	}

	// Verify that the Subnet exists
	if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil {
	// Ignore lookup error if it's the DefaultSubnetID
	if _, err := service.vm.getSubnet(service.vm.DB, subnetID); err != nil && !subnetID.Equals(DefaultSubnetID) {
		return err
	}
	// Get the chains that exist

@@ -1374,7 +1546,7 @@ func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response
	}
	// Filter to get the chains validated by the specified Subnet
	for _, chain := range chains {
		if chain.SubnetID.Equals(args.SubnetID) {
		if chain.SubnetID.Equals(subnetID) {
			response.BlockchainIDs = append(response.BlockchainIDs, chain.ID())
		}
	}
@@ -6,10 +6,13 @@ package platformvm
import (
	"encoding/json"
	"testing"
	"time"

	"github.com/ava-labs/gecko/utils/formatting"
)

func TestAddDefaultSubnetValidator(t *testing.T) {
	expectedJSONString := `{"startTime":"0","endTime":"0","id":null,"destination":null,"delegationFeeRate":"0","payerNonce":"0"}`
	expectedJSONString := `{"startTime":"0","endTime":"0","id":null,"destination":"","delegationFeeRate":"0","payerNonce":"0"}`
	args := AddDefaultSubnetValidatorArgs{}
	bytes, err := json.Marshal(&args)
	if err != nil {

@@ -32,3 +35,202 @@ func TestCreateBlockchainArgsParsing(t *testing.T) {
		t.Fatal(err)
	}
}

func TestExportKey(t *testing.T) {
	jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1","address":"6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV"}`
	args := ExportKeyArgs{}
	err := json.Unmarshal([]byte(jsonString), &args)
	if err != nil {
		t.Fatal(err)
	}
}

func TestImportKey(t *testing.T) {
	jsonString := `{"username":"ScoobyUser","password":"ShaggyPassword1","privateKey":"ewoqjP7PxY4yr3iLTpLisriqt94hdyDFNgchSxGGztUrTXtNN"}`
	args := ImportKeyArgs{}
	err := json.Unmarshal([]byte(jsonString), &args)
	if err != nil {
		t.Fatal(err)
	}
}

func TestIssueTxKeepsTimedEventsSorted(t *testing.T) {
	vm := defaultVM()
	vm.Ctx.Lock.Lock()
	defer func() {
		vm.Shutdown()
		vm.Ctx.Lock.Unlock()
	}()

	service := Service{vm: vm}

	pendingValidatorStartTime1 := defaultGenesisTime.Add(3 * time.Second)
	pendingValidatorEndTime1 := pendingValidatorStartTime1.Add(MinimumStakingDuration)
	nodeIDKey1, _ := vm.factory.NewPrivateKey()
	nodeID1 := nodeIDKey1.PublicKey().Address()
	addPendingValidatorTx1, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(pendingValidatorStartTime1.Unix()),
		uint64(pendingValidatorEndTime1.Unix()),
		nodeID1,
		nodeID1,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	txBytes1, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx1})
	if err != nil {
		t.Fatal(err)
	}

	args1 := &IssueTxArgs{}
	args1.Tx = formatting.CB58{Bytes: txBytes1}
	reply1 := IssueTxResponse{}

	err = service.IssueTx(nil, args1, &reply1)
	if err != nil {
		t.Fatal(err)
	}

	pendingValidatorStartTime2 := defaultGenesisTime.Add(2 * time.Second)
	pendingValidatorEndTime2 := pendingValidatorStartTime2.Add(MinimumStakingDuration)
	nodeIDKey2, _ := vm.factory.NewPrivateKey()
	nodeID2 := nodeIDKey2.PublicKey().Address()
	addPendingValidatorTx2, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(pendingValidatorStartTime2.Unix()),
		uint64(pendingValidatorEndTime2.Unix()),
		nodeID2,
		nodeID2,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	txBytes2, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx2})
	if err != nil {
		t.Fatal(err)
	}

	args2 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes2}}
	reply2 := IssueTxResponse{}

	err = service.IssueTx(nil, &args2, &reply2)
	if err != nil {
		t.Fatal(err)
	}

	pendingValidatorStartTime3 := defaultGenesisTime.Add(10 * time.Second)
	pendingValidatorEndTime3 := pendingValidatorStartTime3.Add(MinimumStakingDuration)
	nodeIDKey3, _ := vm.factory.NewPrivateKey()
	nodeID3 := nodeIDKey3.PublicKey().Address()
	addPendingValidatorTx3, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(pendingValidatorStartTime3.Unix()),
		uint64(pendingValidatorEndTime3.Unix()),
		nodeID3,
		nodeID3,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	txBytes3, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx3})
	if err != nil {
		t.Fatal(err)
	}

	args3 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes3}}
	reply3 := IssueTxResponse{}

	err = service.IssueTx(nil, &args3, &reply3)
	if err != nil {
		t.Fatal(err)
	}

	pendingValidatorStartTime4 := defaultGenesisTime.Add(1 * time.Second)
	pendingValidatorEndTime4 := pendingValidatorStartTime4.Add(MinimumStakingDuration)
	nodeIDKey4, _ := vm.factory.NewPrivateKey()
	nodeID4 := nodeIDKey4.PublicKey().Address()
	addPendingValidatorTx4, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(pendingValidatorStartTime4.Unix()),
		uint64(pendingValidatorEndTime4.Unix()),
		nodeID4,
		nodeID4,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	txBytes4, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx4})
	if err != nil {
		t.Fatal(err)
	}

	args4 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes4}}
	reply4 := IssueTxResponse{}

	err = service.IssueTx(nil, &args4, &reply4)
	if err != nil {
		t.Fatal(err)
	}

	pendingValidatorStartTime5 := defaultGenesisTime.Add(50 * time.Second)
	pendingValidatorEndTime5 := pendingValidatorStartTime5.Add(MinimumStakingDuration)
	nodeIDKey5, _ := vm.factory.NewPrivateKey()
	nodeID5 := nodeIDKey5.PublicKey().Address()
	addPendingValidatorTx5, err := vm.newAddDefaultSubnetValidatorTx(
		defaultNonce+1,
		defaultStakeAmount,
		uint64(pendingValidatorStartTime5.Unix()),
		uint64(pendingValidatorEndTime5.Unix()),
		nodeID5,
		nodeID5,
		NumberOfShares,
		testNetworkID,
		defaultKey,
	)
	if err != nil {
		t.Fatal(err)
	}

	txBytes5, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx5})
	if err != nil {
		t.Fatal(err)
	}

	args5 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes5}}
	reply5 := IssueTxResponse{}

	err = service.IssueTx(nil, &args5, &reply5)
	if err != nil {
		t.Fatal(err)
	}

	currentEvent := vm.unissuedEvents.Remove()
	for vm.unissuedEvents.Len() > 0 {
		nextEvent := vm.unissuedEvents.Remove()
		if !currentEvent.StartTime().Before(nextEvent.StartTime()) {
			t.Fatal("IssueTx does not keep event heap ordered")
		}
		currentEvent = nextEvent
	}
}
@@ -54,6 +54,13 @@ func (sb *StandardBlock) Verify() error {
	// be a decision.
	parent, ok := parentBlock.(decision)
	if !ok {
		if err := sb.Reject(); err == nil {
			if err := sb.vm.DB.Commit(); err != nil {
				sb.vm.Ctx.Log.Error("error committing Standard block as rejected: %s", err)
			}
		} else {
			sb.vm.DB.Abort()
		}
		return errInvalidBlockType
	}

@@ -64,6 +71,13 @@ func (sb *StandardBlock) Verify() error {
	for _, tx := range sb.Txs {
		onAccept, err := tx.SemanticVerify(sb.onAcceptDB)
		if err != nil {
			if err := sb.Reject(); err == nil {
				if err := sb.vm.DB.Commit(); err != nil {
					sb.vm.Ctx.Log.Error("error committing Standard block as rejected: %s", err)
				}
			} else {
				sb.vm.DB.Abort()
			}
			return err
		}
		if onAccept != nil {

@@ -4,7 +4,6 @@
package platformvm

import (
	"container/heap"
	"errors"
	"net/http"

@@ -36,6 +35,13 @@ type APIAccount struct {
	Balance json.Uint64 `json:"balance"`
}

// FormattedAPIAccount is an APIAccount but allows for a formatted Address
type FormattedAPIAccount struct {
	Address string      `json:"address"`
	Nonce   json.Uint64 `json:"nonce"`
	Balance json.Uint64 `json:"balance"`
}

// APIValidator is a validator.
// [Amount] is the amount of $AVA being staked.
// [Endtime] is the Unix time repr. of when they are done staking

@@ -70,6 +76,35 @@ type APIDefaultSubnetValidator struct {
	DelegationFeeRate json.Uint32 `json:"delegationFeeRate"`
}

// FormattedAPIValidator allows for a formatted address
type FormattedAPIValidator struct {
	StartTime   json.Uint64  `json:"startTime"`
	EndTime     json.Uint64  `json:"endTime"`
	Weight      *json.Uint64 `json:"weight,omitempty"`
	StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"`
	Address     string       `json:"address,omitempty"`
	ID          ids.ShortID  `json:"id"`
}

func (v *FormattedAPIValidator) weight() uint64 {
	switch {
	case v.Weight != nil:
		return uint64(*v.Weight)
	case v.StakeAmount != nil:
		return uint64(*v.StakeAmount)
	default:
		return 0
	}
}
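
The two optional fields exist because default-subnet validators report stakeAmount while other subnets report weight; weight() collapses the two. A minimal illustration (hypothetical helper, usable inside this package, not part of the diff):

// sameWeightEitherWay shows that weight() reads whichever field is set.
func sameWeightEitherWay() bool {
	w := json.Uint64(100)
	v1 := FormattedAPIValidator{StakeAmount: &w} // default-subnet style
	v2 := FormattedAPIValidator{Weight: &w}      // other subnets
	return v1.weight() == v2.weight() // both report 100; with neither field set, weight() is 0
}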

// FormattedAPIDefaultSubnetValidator is a formatted validator of the default subnet
type FormattedAPIDefaultSubnetValidator struct {
	FormattedAPIValidator

	Destination       string      `json:"destination"`
	DelegationFeeRate json.Uint32 `json:"delegationFeeRate"`
}

// APIChain defines a chain that exists
// at the network's genesis.
// [GenesisData] is the initial state of the chain.

@@ -174,7 +209,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl
		return err
	}

	heap.Push(validators, tx)
	validators.Add(tx)
}

// Specify the chains that exist at genesis.
@@ -111,3 +111,77 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) {
		t.Fatalf("Should have errored due to an invalid end time")
	}
}

func TestBuildGenesisReturnsSortedValidators(t *testing.T) {
	id := ids.NewShortID([20]byte{1})
	account := APIAccount{
		Address: id,
		Balance: 123456789,
	}

	weight := json.Uint64(987654321)
	validator1 := APIDefaultSubnetValidator{
		APIValidator: APIValidator{
			StartTime: 0,
			EndTime:   20,
			Weight:    &weight,
			ID:        id,
		},
		Destination: id,
	}

	validator2 := APIDefaultSubnetValidator{
		APIValidator: APIValidator{
			StartTime: 3,
			EndTime:   15,
			Weight:    &weight,
			ID:        id,
		},
		Destination: id,
	}

	validator3 := APIDefaultSubnetValidator{
		APIValidator: APIValidator{
			StartTime: 1,
			EndTime:   10,
			Weight:    &weight,
			ID:        id,
		},
		Destination: id,
	}

	args := BuildGenesisArgs{
		Accounts: []APIAccount{
			account,
		},
		Validators: []APIDefaultSubnetValidator{
			validator1,
			validator2,
			validator3,
		},
		Time: 5,
	}
	reply := BuildGenesisReply{}

	ss := StaticService{}
	if err := ss.BuildGenesis(nil, &args, &reply); err != nil {
		t.Fatalf("BuildGenesis should not have errored")
	}

	genesis := &Genesis{}
	if err := Codec.Unmarshal(reply.Bytes.Bytes, genesis); err != nil {
		t.Fatal(err)
	}
	validators := genesis.Validators
	if validators.Len() == 0 {
		t.Fatal("Validators should contain 3 validators")
	}
	currentValidator := validators.Remove()
	for validators.Len() > 0 {
		nextValidator := validators.Remove()
		if currentValidator.EndTime().Unix() > nextValidator.EndTime().Unix() {
			t.Fatalf("Validators returned by genesis should be a min heap sorted by end time")
		}
		currentValidator = nextValidator
	}
}
@@ -4,7 +4,6 @@
package platformvm

import (
	"container/heap"
	"errors"
	"fmt"
	"time"

@@ -20,6 +19,7 @@ import (
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/validators"
	"github.com/ava-labs/gecko/utils/crypto"
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/ava-labs/gecko/utils/logging"
	"github.com/ava-labs/gecko/utils/math"
	"github.com/ava-labs/gecko/utils/timer"

@@ -39,6 +39,9 @@ const (
	blockTypeID
	subnetsTypeID

	platformAlias = "P"
	addressSep    = "-"

	// Delta is the synchrony bound used for safe decision making
	Delta = 10 * time.Second

@@ -98,6 +101,8 @@ var (
	errRegisteringType          = errors.New("error registering type with database")
	errMissingBlock             = errors.New("missing block")
	errInvalidLastAcceptedBlock = errors.New("last accepted block must be a decision block")
	errInvalidAddress           = errors.New("invalid address")
	errEmptyAddress             = errors.New("empty address")
)

// Codec does serialization and deserialization

@@ -399,6 +404,12 @@ func (vm *VM) createChain(tx *CreateChainTx) {
	vm.chainManager.CreateChain(chainParams)
}

// Bootstrapping marks this VM as bootstrapping
func (vm *VM) Bootstrapping() error { return nil }

// Bootstrapped marks this VM as bootstrapped
func (vm *VM) Bootstrapped() error { return nil }

// Shutdown this blockchain
func (vm *VM) Shutdown() error {
	if vm.timer == nil {

@@ -686,7 +697,7 @@ func (vm *VM) resetTimer() {
		vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeAddValidator
		return
	}
	// If the tx doesn't meet the syncrony bound, drop it
	// If the tx doesn't meet the synchrony bound, drop it
	vm.unissuedEvents.Remove()
	vm.Ctx.Log.Debug("dropping tx to add validator because its start time has passed")
}

@@ -768,8 +779,8 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub
		if timestamp.Before(nextTx.StartTime()) {
			break
		}
		heap.Push(current, nextTx)
		heap.Pop(pending)
		current.Add(nextTx)
		pending.Remove()
		started.Add(nextTx.Vdr().ID())
	}
	return current, pending, started, stopped, nil

@@ -854,3 +865,19 @@ func (vm *VM) GetAtomicUTXOs(addrs ids.Set) ([]*ava.UTXO, error) {
	}
	return utxos, nil
}

// ParseAddress ...
func (vm *VM) ParseAddress(addrStr string) (ids.ShortID, error) {
	cb58 := formatting.CB58{}
	err := cb58.FromString(addrStr)
	if err != nil {
		return ids.ShortID{}, err
	}
	return ids.ToShortID(cb58.Bytes)
}

// FormatAddress ...
// Assumes addrID is not empty
func (vm *VM) FormatAddress(addrID ids.ShortID) string {
	return addrID.String()
}
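
A round-trip sketch of the two new helpers (hypothetical function, usable inside this package; the sample string is the CB58 address that also appears in the service tests):

// roundTripAddress parses a CB58 address and formats it back.
func roundTripAddress(vm *VM) (string, error) {
	addr, err := vm.ParseAddress("6Y3kysjF9jnHnYkdS9yGAuoHyae2eNmeV")
	if err != nil {
		return "", err // not valid CB58, or not 20 bytes once decoded
	}
	return vm.FormatAddress(addr), nil // yields the same CB58 string back
}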
|
||||
|
|
|
@ -5,7 +5,6 @@ package platformvm
|
|||
|
||||
import (
|
||||
"bytes"
|
||||
"container/heap"
|
||||
"errors"
|
||||
"testing"
|
||||
"time"
|
||||
|
@ -193,6 +192,8 @@ func defaultVM() *VM {
|
|||
panic("no subnets found")
|
||||
} // end delete
|
||||
|
||||
vm.registerDBTypes()
|
||||
|
||||
return vm
|
||||
}
|
||||
|
||||
|
@ -226,7 +227,7 @@ func GenesisCurrentValidators() *EventHeap {
|
|||
testNetworkID, // network ID
|
||||
key, // key paying tx fee and stake
|
||||
)
|
||||
heap.Push(validators, validator)
|
||||
validators.Add(validator)
|
||||
}
|
||||
return validators
|
||||
}
|
||||
|
@ -376,6 +377,65 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
// verify invalid proposal to add validator to default subnet
|
||||
func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) {
|
||||
vm := defaultVM()
|
||||
vm.Ctx.Lock.Lock()
|
||||
defer func() {
|
||||
vm.Shutdown()
|
||||
vm.Ctx.Lock.Unlock()
|
||||
}()
|
||||
|
||||
startTime := defaultGenesisTime.Add(-Delta).Add(-1 * time.Second)
|
||||
endTime := startTime.Add(MinimumStakingDuration)
|
||||
key, _ := vm.factory.NewPrivateKey()
|
||||
ID := key.PublicKey().Address()
|
||||
|
||||
// create invalid tx
|
||||
tx, err := vm.newAddDefaultSubnetValidatorTx(
|
||||
defaultNonce+1,
|
||||
defaultStakeAmount,
|
||||
uint64(startTime.Unix()),
|
||||
uint64(endTime.Unix()),
|
||||
ID,
|
||||
ID,
|
||||
NumberOfShares,
|
||||
testNetworkID,
|
||||
defaultKey,
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
blk, err := vm.newProposalBlock(vm.LastAccepted(), tx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := vm.State.PutBlock(vm.DB, blk); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if err := vm.DB.Commit(); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if err := blk.Verify(); err == nil {
|
||||
t.Fatalf("Should have errored during verification")
|
||||
}
|
||||
|
||||
if status := blk.Status(); status != choices.Rejected {
|
||||
t.Fatalf("Should have marked the block as rejected")
|
||||
}
|
||||
|
||||
parsedBlk, err := vm.GetBlock(blk.ID())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if status := parsedBlk.Status(); status != choices.Rejected {
|
||||
t.Fatalf("Should have marked the block as rejected")
|
||||
}
|
||||
}
|
||||
|
||||
// Reject proposal to add validator to default subnet
|
||||
func TestAddDefaultSubnetValidatorReject(t *testing.T) {
|
||||
vm := defaultVM()
|
||||
|
@ -952,7 +1012,7 @@ func TestCreateSubnet(t *testing.T) {
|
|||
t.Fatal(err)
|
||||
}
|
||||
|
||||
vm.unissuedEvents.Push(addValidatorTx)
|
||||
vm.unissuedEvents.Add(addValidatorTx)
|
||||
blk, err = vm.BuildBlock() // should add validator to the new subnet
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
|
@ -1551,8 +1611,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
|
|||
|
||||
advanceTimePreference := advanceTimeBlk.Options()[0]
|
||||
|
||||
peerID := ids.NewShortID([20]byte{1, 2, 3, 4, 5, 4, 3, 2, 1})
|
||||
vdrs := validators.NewSet()
|
||||
vdrs.Add(validators.NewValidator(ctx.NodeID, 1))
|
||||
vdrs.Add(validators.NewValidator(peerID, 1))
|
||||
beacons := vdrs
|
||||
|
||||
timeoutManager := timeout.Manager{}
|
||||
|
@@ -1597,7 +1658,13 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {

	// Asynchronously passes messages from the network to the consensus engine
	handler := &router.Handler{}
	handler.Initialize(&engine, msgChan, 1000)
	handler.Initialize(
		&engine,
		msgChan,
		1000,
		"",
		prometheus.NewRegistry(),
	)

	// Allow incoming messages to be routed to the new chain
	chainRouter.AddChain(handler)
@@ -1617,23 +1684,23 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {

	frontier := ids.Set{}
	frontier.Add(advanceTimeBlkID)
	engine.AcceptedFrontier(ctx.NodeID, *reqID, frontier)
	engine.AcceptedFrontier(peerID, *reqID, frontier)

	externalSender.GetAcceptedF = nil
	externalSender.GetF = func(_ ids.ShortID, _ ids.ID, requestID uint32, containerID ids.ID) {
	externalSender.GetAncestorsF = func(_ ids.ShortID, _ ids.ID, requestID uint32, containerID ids.ID) {
		*reqID = requestID
		if !containerID.Equals(advanceTimeBlkID) {
			t.Fatalf("wrong block requested")
		}
	}

	engine.Accepted(ctx.NodeID, *reqID, frontier)
	engine.Accepted(peerID, *reqID, frontier)

	externalSender.GetF = nil
	externalSender.CantPushQuery = false
	externalSender.CantPullQuery = false

	engine.Put(ctx.NodeID, *reqID, advanceTimeBlkID, advanceTimeBlkBytes)
	engine.MultiPut(peerID, *reqID, [][]byte{advanceTimeBlkBytes})

	externalSender.CantPushQuery = true
@@ -2,6 +2,7 @@ package propertyfx

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
)

// ID that this Fx uses when labeled

@@ -13,4 +14,4 @@ var (
type Factory struct{}

// New ...
func (f *Factory) New() (interface{}, error) { return &Fx{}, nil }
func (f *Factory) New(*snow.Context) (interface{}, error) { return &Fx{}, nil }
@@ -6,7 +6,7 @@ import (

func TestFactory(t *testing.T) {
	factory := Factory{}
	if fx, err := factory.New(); err != nil {
	if fx, err := factory.New(nil); err != nil {
		t.Fatal(err)
	} else if fx == nil {
		t.Fatalf("Factory.New returned nil")
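Both propertyfx hunks above move `Factory.New` to a `New(*snow.Context)` signature. A minimal compile-and-run sketch of the new call shape follows; the import path `github.com/ava-labs/gecko/vms/propertyfx` and the empty context value are illustrative assumptions, not part of the diff:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/vms/propertyfx" // assumed package location
)

func main() {
	factory := propertyfx.Factory{}

	// propertyfx ignores the context (the test above passes nil), but a
	// chain would normally hand in its own *snow.Context.
	fx, err := factory.New(&snow.Context{})
	if err != nil {
		fmt.Println("factory error:", err)
		return
	}
	fmt.Printf("constructed fx of type %T\n", fx)
}
```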
@@ -8,6 +8,8 @@ import (
	"os/exec"

	"github.com/hashicorp/go-plugin"

	"github.com/ava-labs/gecko/snow"
)

var (

@@ -15,13 +17,11 @@ var (
)

// Factory ...
type Factory struct {
	Path string
}
type Factory struct{ Path string }

// New ...
func (f *Factory) New() (interface{}, error) {
	client := plugin.NewClient(&plugin.ClientConfig{
func (f *Factory) New(ctx *snow.Context) (interface{}, error) {
	config := &plugin.ClientConfig{
		HandshakeConfig: Handshake,
		Plugins:         PluginMap,
		Cmd:             exec.Command("sh", "-c", f.Path),

@@ -29,7 +29,13 @@ func (f *Factory) New() (interface{}, error) {
			plugin.ProtocolNetRPC,
			plugin.ProtocolGRPC,
		},
	})
	}
	if ctx != nil {
		config.Stderr = ctx.Log
		config.SyncStdout = ctx.Log
		config.SyncStderr = ctx.Log
	}
	client := plugin.NewClient(config)

	rpcClient, err := client.Client()
	if err != nil {
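A hedged usage sketch for the plugin factory: the `Path` value is a placeholder, and the package is assumed to live at `github.com/ava-labs/gecko/vms/rpcchainvm`. Per the hunk above, passing a non-nil context is what wires the subprocess's output into the chain's logger; passing nil skips that redirection:

```go
package main

import (
	"fmt"

	"github.com/ava-labs/gecko/vms/rpcchainvm" // assumed package location
)

func main() {
	// Placeholder plugin path; Factory.New launches it via `sh -c <Path>`
	// and connects to it over go-plugin.
	factory := rpcchainvm.Factory{Path: "/usr/local/bin/example-vm-plugin"}

	// nil context: plugin stdout/stderr are not redirected to a chain logger.
	vm, err := factory.New(nil)
	if err != nil {
		fmt.Println("failed to start VM plugin:", err)
		return
	}
	fmt.Printf("created VM client: %T\n", vm)
}
```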
@@ -129,6 +129,18 @@ func (vm *VMClient) startMessengerServer(opts []grpc.ServerOption) *grpc.Server
	return server
}

// Bootstrapping ...
func (vm *VMClient) Bootstrapping() error {
	_, err := vm.client.Bootstrapping(context.Background(), &vmproto.BootstrappingRequest{})
	return err
}

// Bootstrapped ...
func (vm *VMClient) Bootstrapped() error {
	_, err := vm.client.Bootstrapped(context.Background(), &vmproto.BootstrappedRequest{})
	return err
}

// Shutdown ...
func (vm *VMClient) Shutdown() error {
	vm.lock.Lock()
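The two new client methods forward state-transition notifications over gRPC. To make the presumed call order concrete, here is a self-contained sketch; `bootstrapper`, `runBootstrap`, and `noopVM` are illustrative names for this sketch, not engine code:

```go
package main

import "fmt"

// bootstrapper is the subset of the VM interface exercised by this sketch.
type bootstrapper interface {
	Bootstrapping() error
	Bootstrapped() error
}

// runBootstrap sketches the presumed sequence: Bootstrapping is signaled
// before ancestor blocks are fetched and executed, Bootstrapped once the
// chain has caught up.
func runBootstrap(vm bootstrapper) error {
	if err := vm.Bootstrapping(); err != nil {
		return err
	}
	// ... fetch and execute ancestor blocks here ...
	return vm.Bootstrapped()
}

type noopVM struct{}

func (noopVM) Bootstrapping() error { return nil }
func (noopVM) Bootstrapped() error  { return nil }

func main() {
	fmt.Println(runBootstrap(noopVM{}))
}
```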
@@ -84,8 +84,18 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmproto.InitializeRequest
	return &vmproto.InitializeResponse{}, nil
}

// Bootstrapping ...
func (vm *VMServer) Bootstrapping(context.Context, *vmproto.BootstrappingRequest) (*vmproto.BootstrappingResponse, error) {
	return &vmproto.BootstrappingResponse{}, vm.vm.Bootstrapping()
}

// Bootstrapped ...
func (vm *VMServer) Bootstrapped(context.Context, *vmproto.BootstrappedRequest) (*vmproto.BootstrappedResponse, error) {
	return &vmproto.BootstrappedResponse{}, vm.vm.Bootstrapped()
}

// Shutdown ...
func (vm *VMServer) Shutdown(_ context.Context, _ *vmproto.ShutdownRequest) (*vmproto.ShutdownResponse, error) {
func (vm *VMServer) Shutdown(context.Context, *vmproto.ShutdownRequest) (*vmproto.ShutdownResponse, error) {
	vm.lock.Lock()
	defer vm.lock.Unlock()
@@ -110,6 +110,130 @@ func (m *InitializeResponse) XXX_DiscardUnknown() {

var xxx_messageInfo_InitializeResponse proto.InternalMessageInfo

type BootstrappingRequest struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BootstrappingRequest) Reset()         { *m = BootstrappingRequest{} }
func (m *BootstrappingRequest) String() string { return proto.CompactTextString(m) }
func (*BootstrappingRequest) ProtoMessage()    {}
func (*BootstrappingRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{2}
}

func (m *BootstrappingRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BootstrappingRequest.Unmarshal(m, b)
}
func (m *BootstrappingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BootstrappingRequest.Marshal(b, m, deterministic)
}
func (m *BootstrappingRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BootstrappingRequest.Merge(m, src)
}
func (m *BootstrappingRequest) XXX_Size() int {
	return xxx_messageInfo_BootstrappingRequest.Size(m)
}
func (m *BootstrappingRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BootstrappingRequest.DiscardUnknown(m)
}

var xxx_messageInfo_BootstrappingRequest proto.InternalMessageInfo

type BootstrappingResponse struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BootstrappingResponse) Reset()         { *m = BootstrappingResponse{} }
func (m *BootstrappingResponse) String() string { return proto.CompactTextString(m) }
func (*BootstrappingResponse) ProtoMessage()    {}
func (*BootstrappingResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{3}
}

func (m *BootstrappingResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BootstrappingResponse.Unmarshal(m, b)
}
func (m *BootstrappingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BootstrappingResponse.Marshal(b, m, deterministic)
}
func (m *BootstrappingResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BootstrappingResponse.Merge(m, src)
}
func (m *BootstrappingResponse) XXX_Size() int {
	return xxx_messageInfo_BootstrappingResponse.Size(m)
}
func (m *BootstrappingResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BootstrappingResponse.DiscardUnknown(m)
}

var xxx_messageInfo_BootstrappingResponse proto.InternalMessageInfo

type BootstrappedRequest struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BootstrappedRequest) Reset()         { *m = BootstrappedRequest{} }
func (m *BootstrappedRequest) String() string { return proto.CompactTextString(m) }
func (*BootstrappedRequest) ProtoMessage()    {}
func (*BootstrappedRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{4}
}

func (m *BootstrappedRequest) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BootstrappedRequest.Unmarshal(m, b)
}
func (m *BootstrappedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BootstrappedRequest.Marshal(b, m, deterministic)
}
func (m *BootstrappedRequest) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BootstrappedRequest.Merge(m, src)
}
func (m *BootstrappedRequest) XXX_Size() int {
	return xxx_messageInfo_BootstrappedRequest.Size(m)
}
func (m *BootstrappedRequest) XXX_DiscardUnknown() {
	xxx_messageInfo_BootstrappedRequest.DiscardUnknown(m)
}

var xxx_messageInfo_BootstrappedRequest proto.InternalMessageInfo

type BootstrappedResponse struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
	XXX_sizecache        int32    `json:"-"`
}

func (m *BootstrappedResponse) Reset()         { *m = BootstrappedResponse{} }
func (m *BootstrappedResponse) String() string { return proto.CompactTextString(m) }
func (*BootstrappedResponse) ProtoMessage()    {}
func (*BootstrappedResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{5}
}

func (m *BootstrappedResponse) XXX_Unmarshal(b []byte) error {
	return xxx_messageInfo_BootstrappedResponse.Unmarshal(m, b)
}
func (m *BootstrappedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
	return xxx_messageInfo_BootstrappedResponse.Marshal(b, m, deterministic)
}
func (m *BootstrappedResponse) XXX_Merge(src proto.Message) {
	xxx_messageInfo_BootstrappedResponse.Merge(m, src)
}
func (m *BootstrappedResponse) XXX_Size() int {
	return xxx_messageInfo_BootstrappedResponse.Size(m)
}
func (m *BootstrappedResponse) XXX_DiscardUnknown() {
	xxx_messageInfo_BootstrappedResponse.DiscardUnknown(m)
}

var xxx_messageInfo_BootstrappedResponse proto.InternalMessageInfo

type ShutdownRequest struct {
	XXX_NoUnkeyedLiteral struct{} `json:"-"`
	XXX_unrecognized     []byte   `json:"-"`
@@ -120,7 +244,7 @@ func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} }
func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) }
func (*ShutdownRequest) ProtoMessage()    {}
func (*ShutdownRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{2}
	return fileDescriptor_cab246c8c7c5372d, []int{6}
}

func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error {

@@ -151,7 +275,7 @@ func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} }
func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) }
func (*ShutdownResponse) ProtoMessage()    {}
func (*ShutdownResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{3}
	return fileDescriptor_cab246c8c7c5372d, []int{7}
}

func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error {

@@ -182,7 +306,7 @@ func (m *CreateHandlersRequest) Reset() { *m = CreateHandlersRequest{} }
func (m *CreateHandlersRequest) String() string { return proto.CompactTextString(m) }
func (*CreateHandlersRequest) ProtoMessage()    {}
func (*CreateHandlersRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{4}
	return fileDescriptor_cab246c8c7c5372d, []int{8}
}

func (m *CreateHandlersRequest) XXX_Unmarshal(b []byte) error {

@@ -214,7 +338,7 @@ func (m *CreateHandlersResponse) Reset() { *m = CreateHandlersResponse{}
func (m *CreateHandlersResponse) String() string { return proto.CompactTextString(m) }
func (*CreateHandlersResponse) ProtoMessage()    {}
func (*CreateHandlersResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{5}
	return fileDescriptor_cab246c8c7c5372d, []int{9}
}

func (m *CreateHandlersResponse) XXX_Unmarshal(b []byte) error {

@@ -255,7 +379,7 @@ func (m *Handler) Reset() { *m = Handler{} }
func (m *Handler) String() string { return proto.CompactTextString(m) }
func (*Handler) ProtoMessage()    {}
func (*Handler) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{6}
	return fileDescriptor_cab246c8c7c5372d, []int{10}
}

func (m *Handler) XXX_Unmarshal(b []byte) error {

@@ -307,7 +431,7 @@ func (m *BuildBlockRequest) Reset() { *m = BuildBlockRequest{} }
func (m *BuildBlockRequest) String() string { return proto.CompactTextString(m) }
func (*BuildBlockRequest) ProtoMessage()    {}
func (*BuildBlockRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{7}
	return fileDescriptor_cab246c8c7c5372d, []int{11}
}

func (m *BuildBlockRequest) XXX_Unmarshal(b []byte) error {

@@ -341,7 +465,7 @@ func (m *BuildBlockResponse) Reset() { *m = BuildBlockResponse{} }
func (m *BuildBlockResponse) String() string { return proto.CompactTextString(m) }
func (*BuildBlockResponse) ProtoMessage()    {}
func (*BuildBlockResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{8}
	return fileDescriptor_cab246c8c7c5372d, []int{12}
}

func (m *BuildBlockResponse) XXX_Unmarshal(b []byte) error {

@@ -394,7 +518,7 @@ func (m *ParseBlockRequest) Reset() { *m = ParseBlockRequest{} }
func (m *ParseBlockRequest) String() string { return proto.CompactTextString(m) }
func (*ParseBlockRequest) ProtoMessage()    {}
func (*ParseBlockRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{9}
	return fileDescriptor_cab246c8c7c5372d, []int{13}
}

func (m *ParseBlockRequest) XXX_Unmarshal(b []byte) error {

@@ -435,7 +559,7 @@ func (m *ParseBlockResponse) Reset() { *m = ParseBlockResponse{} }
func (m *ParseBlockResponse) String() string { return proto.CompactTextString(m) }
func (*ParseBlockResponse) ProtoMessage()    {}
func (*ParseBlockResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{10}
	return fileDescriptor_cab246c8c7c5372d, []int{14}
}

func (m *ParseBlockResponse) XXX_Unmarshal(b []byte) error {

@@ -488,7 +612,7 @@ func (m *GetBlockRequest) Reset() { *m = GetBlockRequest{} }
func (m *GetBlockRequest) String() string { return proto.CompactTextString(m) }
func (*GetBlockRequest) ProtoMessage()    {}
func (*GetBlockRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{11}
	return fileDescriptor_cab246c8c7c5372d, []int{15}
}

func (m *GetBlockRequest) XXX_Unmarshal(b []byte) error {

@@ -529,7 +653,7 @@ func (m *GetBlockResponse) Reset() { *m = GetBlockResponse{} }
func (m *GetBlockResponse) String() string { return proto.CompactTextString(m) }
func (*GetBlockResponse) ProtoMessage()    {}
func (*GetBlockResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{12}
	return fileDescriptor_cab246c8c7c5372d, []int{16}
}

func (m *GetBlockResponse) XXX_Unmarshal(b []byte) error {

@@ -582,7 +706,7 @@ func (m *SetPreferenceRequest) Reset() { *m = SetPreferenceRequest{} }
func (m *SetPreferenceRequest) String() string { return proto.CompactTextString(m) }
func (*SetPreferenceRequest) ProtoMessage()    {}
func (*SetPreferenceRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{13}
	return fileDescriptor_cab246c8c7c5372d, []int{17}
}

func (m *SetPreferenceRequest) XXX_Unmarshal(b []byte) error {

@@ -620,7 +744,7 @@ func (m *SetPreferenceResponse) Reset() { *m = SetPreferenceResponse{} }
func (m *SetPreferenceResponse) String() string { return proto.CompactTextString(m) }
func (*SetPreferenceResponse) ProtoMessage()    {}
func (*SetPreferenceResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{14}
	return fileDescriptor_cab246c8c7c5372d, []int{18}
}

func (m *SetPreferenceResponse) XXX_Unmarshal(b []byte) error {

@@ -651,7 +775,7 @@ func (m *LastAcceptedRequest) Reset() { *m = LastAcceptedRequest{} }
func (m *LastAcceptedRequest) String() string { return proto.CompactTextString(m) }
func (*LastAcceptedRequest) ProtoMessage()    {}
func (*LastAcceptedRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{15}
	return fileDescriptor_cab246c8c7c5372d, []int{19}
}

func (m *LastAcceptedRequest) XXX_Unmarshal(b []byte) error {

@@ -683,7 +807,7 @@ func (m *LastAcceptedResponse) Reset() { *m = LastAcceptedResponse{} }
func (m *LastAcceptedResponse) String() string { return proto.CompactTextString(m) }
func (*LastAcceptedResponse) ProtoMessage()    {}
func (*LastAcceptedResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{16}
	return fileDescriptor_cab246c8c7c5372d, []int{20}
}

func (m *LastAcceptedResponse) XXX_Unmarshal(b []byte) error {

@@ -722,7 +846,7 @@ func (m *BlockVerifyRequest) Reset() { *m = BlockVerifyRequest{} }
func (m *BlockVerifyRequest) String() string { return proto.CompactTextString(m) }
func (*BlockVerifyRequest) ProtoMessage()    {}
func (*BlockVerifyRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{17}
	return fileDescriptor_cab246c8c7c5372d, []int{21}
}

func (m *BlockVerifyRequest) XXX_Unmarshal(b []byte) error {

@@ -760,7 +884,7 @@ func (m *BlockVerifyResponse) Reset() { *m = BlockVerifyResponse{} }
func (m *BlockVerifyResponse) String() string { return proto.CompactTextString(m) }
func (*BlockVerifyResponse) ProtoMessage()    {}
func (*BlockVerifyResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{18}
	return fileDescriptor_cab246c8c7c5372d, []int{22}
}

func (m *BlockVerifyResponse) XXX_Unmarshal(b []byte) error {

@@ -792,7 +916,7 @@ func (m *BlockAcceptRequest) Reset() { *m = BlockAcceptRequest{} }
func (m *BlockAcceptRequest) String() string { return proto.CompactTextString(m) }
func (*BlockAcceptRequest) ProtoMessage()    {}
func (*BlockAcceptRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{19}
	return fileDescriptor_cab246c8c7c5372d, []int{23}
}

func (m *BlockAcceptRequest) XXX_Unmarshal(b []byte) error {

@@ -830,7 +954,7 @@ func (m *BlockAcceptResponse) Reset() { *m = BlockAcceptResponse{} }
func (m *BlockAcceptResponse) String() string { return proto.CompactTextString(m) }
func (*BlockAcceptResponse) ProtoMessage()    {}
func (*BlockAcceptResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{20}
	return fileDescriptor_cab246c8c7c5372d, []int{24}
}

func (m *BlockAcceptResponse) XXX_Unmarshal(b []byte) error {

@@ -862,7 +986,7 @@ func (m *BlockRejectRequest) Reset() { *m = BlockRejectRequest{} }
func (m *BlockRejectRequest) String() string { return proto.CompactTextString(m) }
func (*BlockRejectRequest) ProtoMessage()    {}
func (*BlockRejectRequest) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{21}
	return fileDescriptor_cab246c8c7c5372d, []int{25}
}

func (m *BlockRejectRequest) XXX_Unmarshal(b []byte) error {

@@ -900,7 +1024,7 @@ func (m *BlockRejectResponse) Reset() { *m = BlockRejectResponse{} }
func (m *BlockRejectResponse) String() string { return proto.CompactTextString(m) }
func (*BlockRejectResponse) ProtoMessage()    {}
func (*BlockRejectResponse) Descriptor() ([]byte, []int) {
	return fileDescriptor_cab246c8c7c5372d, []int{22}
	return fileDescriptor_cab246c8c7c5372d, []int{26}
}

func (m *BlockRejectResponse) XXX_Unmarshal(b []byte) error {
@@ -924,6 +1048,10 @@ var xxx_messageInfo_BlockRejectResponse proto.InternalMessageInfo
func init() {
	proto.RegisterType((*InitializeRequest)(nil), "vmproto.InitializeRequest")
	proto.RegisterType((*InitializeResponse)(nil), "vmproto.InitializeResponse")
	proto.RegisterType((*BootstrappingRequest)(nil), "vmproto.BootstrappingRequest")
	proto.RegisterType((*BootstrappingResponse)(nil), "vmproto.BootstrappingResponse")
	proto.RegisterType((*BootstrappedRequest)(nil), "vmproto.BootstrappedRequest")
	proto.RegisterType((*BootstrappedResponse)(nil), "vmproto.BootstrappedResponse")
	proto.RegisterType((*ShutdownRequest)(nil), "vmproto.ShutdownRequest")
	proto.RegisterType((*ShutdownResponse)(nil), "vmproto.ShutdownResponse")
	proto.RegisterType((*CreateHandlersRequest)(nil), "vmproto.CreateHandlersRequest")
@@ -950,46 +1078,49 @@ func init() {
func init() { proto.RegisterFile("vm.proto", fileDescriptor_cab246c8c7c5372d) }

var fileDescriptor_cab246c8c7c5372d = []byte{
	// 617 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x6d, 0x6f, 0xd2, 0x50,
	0x14, 0x4e, 0x21, 0x32, 0x3c, 0xc0, 0x06, 0x17, 0xd8, 0xb0, 0x6e, 0x13, 0x1b, 0xb3, 0x60, 0x62,
	0xf8, 0x30, 0x7f, 0xc0, 0x22, 0x8a, 0x6e, 0xf1, 0x6d, 0x96, 0x84, 0x98, 0xe8, 0x97, 0x42, 0x0f,
	0x5b, 0x95, 0xb5, 0xf5, 0xde, 0x0b, 0x73, 0xfe, 0x23, 0xff, 0xa5, 0xa1, 0xbd, 0x6d, 0xef, 0xbd,
	0xb4, 0x59, 0xe2, 0xb7, 0x9e, 0x73, 0x9e, 0xf3, 0x9c, 0x97, 0x7b, 0x9e, 0x42, 0x75, 0x7d, 0x33,
	0x0c, 0x69, 0xc0, 0x03, 0xb2, 0xb3, 0xbe, 0x89, 0x3e, 0xac, 0x5b, 0x68, 0x5d, 0xf8, 0x1e, 0xf7,
	0x9c, 0xa5, 0xf7, 0x07, 0x6d, 0xfc, 0xb5, 0x42, 0xc6, 0x89, 0x09, 0x55, 0x77, 0x36, 0x41, 0xba,
	0x46, 0xda, 0x33, 0xfa, 0xc6, 0xa0, 0x61, 0xa7, 0x36, 0xb1, 0xa0, 0x7e, 0x85, 0x3e, 0x32, 0x8f,
	0x8d, 0xee, 0x38, 0xb2, 0x5e, 0xa9, 0x6f, 0x0c, 0xea, 0xb6, 0xe2, 0xdb, 0x60, 0xd0, 0xbf, 0xf2,
	0x7c, 0x14, 0x1c, 0xe5, 0x88, 0x43, 0xf1, 0x59, 0x1d, 0x20, 0x72, 0x61, 0x16, 0x06, 0x3e, 0x43,
	0xab, 0x05, 0x7b, 0x93, 0xeb, 0x15, 0x77, 0x83, 0x5b, 0x5f, 0x34, 0x63, 0x11, 0x68, 0x66, 0x2e,
	0x01, 0x3b, 0x80, 0xee, 0x6b, 0x8a, 0x0e, 0xc7, 0x73, 0xc7, 0x77, 0x97, 0x48, 0x59, 0x02, 0x7e,
	0x0b, 0xfb, 0x7a, 0x20, 0x4e, 0x21, 0x2f, 0xa0, 0x7a, 0x2d, 0x7c, 0x3d, 0xa3, 0x5f, 0x1e, 0xd4,
	0x4e, 0x9b, 0x43, 0xb1, 0x84, 0xa1, 0x00, 0xdb, 0x29, 0xc2, 0xfa, 0x06, 0x3b, 0xc2, 0x49, 0xf6,
	0xa1, 0x12, 0x52, 0x5c, 0x78, 0xbf, 0xa3, 0x55, 0x3c, 0xb4, 0x85, 0x45, 0xfa, 0x50, 0x5b, 0x06,
	0xf3, 0x9f, 0x9f, 0x43, 0xee, 0x05, 0x7e, 0xbc, 0x87, 0x86, 0x2d, 0xbb, 0x36, 0x99, 0x4c, 0x5e,
	0x80, 0xb0, 0xac, 0x36, 0xb4, 0x46, 0x2b, 0x6f, 0xe9, 0x8e, 0x36, 0xe0, 0xa4, 0xf3, 0x29, 0x10,
	0xd9, 0x29, 0xba, 0xde, 0x85, 0x92, 0xe7, 0x46, 0x85, 0xeb, 0x76, 0xc9, 0x73, 0x37, 0x2f, 0x13,
	0x3a, 0x14, 0x7d, 0x7e, 0xf1, 0x46, 0x6c, 0x3e, 0xb5, 0x49, 0x07, 0x1e, 0xcc, 0xa2, 0x27, 0x29,
	0x47, 0x81, 0xd8, 0xb0, 0x9e, 0x43, 0xeb, 0xd2, 0xa1, 0x0c, 0xe5, 0x62, 0x19, 0xd4, 0x90, 0xa1,
	0x5f, 0x81, 0xc8, 0xd0, 0xff, 0x68, 0x61, 0x33, 0x31, 0x77, 0xf8, 0x8a, 0xa5, 0x13, 0x47, 0x96,
	0xf5, 0x14, 0xf6, 0xde, 0x21, 0x57, 0x5a, 0xd0, 0x68, 0xad, 0xef, 0xd0, 0xcc, 0x20, 0xa2, 0xb4,
	0x5c, 0xca, 0x28, 0x9a, 0xb6, 0x24, 0x8d, 0x50, 0xd8, 0xc0, 0x09, 0x74, 0x26, 0xc8, 0x2f, 0x29,
	0x2e, 0x90, 0xa2, 0x3f, 0xc7, 0xa2, 0x2e, 0x0e, 0xa0, 0xab, 0xe1, 0xc4, 0xc5, 0x75, 0xa1, 0xfd,
	0xc1, 0x61, 0xfc, 0xd5, 0x7c, 0x8e, 0x21, 0x47, 0x37, 0x79, 0xb5, 0x13, 0xe8, 0xa8, 0xee, 0xfc,
	0xa5, 0x59, 0xcf, 0x80, 0x44, 0xa3, 0x4d, 0x91, 0x7a, 0x8b, 0xbb, 0xa2, 0xea, 0x5d, 0x68, 0x2b,
	0x28, 0x51, 0x3b, 0x49, 0x8e, 0xab, 0xdc, 0x97, 0x9c, 0xa0, 0xb4, 0x64, 0x1b, 0x7f, 0xe0, 0xfc,
	0xde, 0xe4, 0x04, 0x15, 0x27, 0x9f, 0xfe, 0xad, 0x40, 0x69, 0xfa, 0x91, 0x8c, 0x01, 0x32, 0xad,
	0x12, 0x33, 0xd5, 0xcd, 0xd6, 0x9f, 0xc3, 0x7c, 0x9c, 0x1b, 0x13, 0x4b, 0x39, 0x83, 0x6a, 0xa2,
	0x64, 0xd2, 0x4b, 0x81, 0x9a, 0xde, 0xcd, 0x47, 0x39, 0x11, 0x41, 0xf0, 0x05, 0x76, 0x55, 0x75,
	0x93, 0xe3, 0x14, 0x9c, 0xfb, 0x3f, 0x30, 0x9f, 0x14, 0xc6, 0x05, 0xe5, 0x18, 0x20, 0x93, 0x9d,
	0x34, 0xda, 0x96, 0x40, 0xa5, 0xd1, 0x72, 0x74, 0x3a, 0x06, 0xc8, 0xa4, 0x23, 0xd1, 0x6c, 0x49,
	0x4f, 0xa2, 0xc9, 0xd1, 0xda, 0x19, 0x54, 0x13, 0x11, 0x48, 0x1b, 0xd2, 0xa4, 0x23, 0x6d, 0x68,
	0x4b, 0x31, 0x9f, 0xa0, 0xa1, 0xdc, 0x2f, 0x39, 0xca, 0xb6, 0x99, 0x73, 0xff, 0xe6, 0x71, 0x51,
	0x58, 0xf0, 0xbd, 0x87, 0xba, 0x7c, 0xdf, 0xe4, 0x30, 0xc5, 0xe7, 0xa8, 0xc1, 0x3c, 0x2a, 0x88,
	0x0a, 0xb2, 0x73, 0xa8, 0x49, 0xe7, 0x4d, 0xa4, 0x85, 0x6e, 0x49, 0xc3, 0x3c, 0xcc, 0x0f, 0x6a,
	0x4c, 0x71, 0x09, 0x9d, 0x49, 0xd1, 0x89, 0xce, 0xa4, 0xca, 0x23, 0x65, 0x8a, 0x0f, 0x5f, 0x67,
	0x52, 0x44, 0xa3, 0x33, 0xa9, 0x5a, 0x99, 0x55, 0xa2, 0xd0, 0xcb, 0x7f, 0x01, 0x00, 0x00, 0xff,
	0xff, 0xbb, 0xac, 0x5b, 0xc8, 0x65, 0x07, 0x00, 0x00,
	// 672 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x61, 0x4f, 0x13, 0x41,
	0x10, 0x4d, 0x4b, 0x84, 0x3a, 0xb4, 0x40, 0x97, 0x16, 0xea, 0x09, 0x5a, 0x2f, 0x86, 0x60, 0x62,
	0xf8, 0x80, 0x3f, 0x80, 0x58, 0x45, 0x21, 0x2a, 0xe2, 0x91, 0x10, 0x13, 0xfd, 0x72, 0xf4, 0x06,
	0x38, 0x2d, 0x77, 0xe7, 0xee, 0xb6, 0x88, 0x3f, 0xd0, 0xdf, 0x65, 0xee, 0x6e, 0xef, 0x6e, 0x76,
	0xbb, 0x17, 0x12, 0xbf, 0x75, 0x67, 0xde, 0xbc, 0x99, 0x9b, 0xe9, 0x7b, 0xd0, 0x9a, 0xdd, 0xec,
	0x25, 0x3c, 0x96, 0x31, 0x5b, 0x9a, 0xdd, 0x64, 0x3f, 0xdc, 0x5b, 0xe8, 0x1e, 0x47, 0xa1, 0x0c,
	0xfd, 0x49, 0xf8, 0x07, 0x3d, 0xfc, 0x35, 0x45, 0x21, 0x99, 0x03, 0xad, 0xe0, 0xe2, 0x0c, 0xf9,
	0x0c, 0xf9, 0xa0, 0x31, 0x6c, 0xec, 0x76, 0xbc, 0xf2, 0xcd, 0x5c, 0x68, 0x5f, 0x61, 0x84, 0x22,
	0x14, 0xa3, 0x3b, 0x89, 0x62, 0xd0, 0x1c, 0x36, 0x76, 0xdb, 0x9e, 0x16, 0x4b, 0x31, 0x18, 0x5d,
	0x85, 0x11, 0x2a, 0x8e, 0x85, 0x8c, 0x43, 0x8b, 0xb9, 0x3d, 0x60, 0xb4, 0xb1, 0x48, 0xe2, 0x48,
	0xa0, 0xbb, 0x01, 0xbd, 0x51, 0x1c, 0x4b, 0x21, 0xb9, 0x9f, 0x24, 0x61, 0x74, 0xa5, 0x26, 0x72,
	0x37, 0xa1, 0x6f, 0xc4, 0x55, 0x41, 0x1f, 0xd6, 0xab, 0x04, 0x06, 0x05, 0x5e, 0xe3, 0x49, 0xc3,
	0x0a, 0xde, 0x85, 0xd5, 0xb3, 0xeb, 0xa9, 0x0c, 0xe2, 0xdb, 0xa8, 0x80, 0x32, 0x58, 0xab, 0x42,
	0x0a, 0xb6, 0x09, 0xfd, 0x37, 0x1c, 0x7d, 0x89, 0x47, 0x7e, 0x14, 0x4c, 0x90, 0x8b, 0x02, 0xfc,
	0x0e, 0x36, 0xcc, 0x44, 0x5e, 0xc2, 0x5e, 0x42, 0xeb, 0x5a, 0xc5, 0x06, 0x8d, 0xe1, 0xc2, 0xee,
	0xf2, 0xfe, 0xda, 0x9e, 0x5a, 0xf2, 0x9e, 0x02, 0x7b, 0x25, 0xc2, 0xfd, 0x06, 0x4b, 0x2a, 0xc8,
	0x36, 0x60, 0x31, 0xe1, 0x78, 0x19, 0xfe, 0xce, 0x56, 0xfd, 0xd0, 0x53, 0x2f, 0x36, 0x84, 0xe5,
	0x49, 0x3c, 0xfe, 0xf9, 0x39, 0x91, 0x61, 0x1c, 0xe5, 0x7b, 0xee, 0x78, 0x34, 0x94, 0x56, 0x0a,
	0xba, 0x60, 0xf5, 0x72, 0xd7, 0xa1, 0x3b, 0x9a, 0x86, 0x93, 0x60, 0x94, 0x82, 0x8b, 0xc9, 0xcf,
	0x81, 0xd1, 0xa0, 0x9a, 0x7a, 0x05, 0x9a, 0x61, 0x90, 0x35, 0x6e, 0x7b, 0xcd, 0x30, 0x48, 0x2f,
	0x9f, 0xf8, 0x1c, 0x23, 0x79, 0xfc, 0x56, 0x5d, 0xb6, 0x7c, 0xb3, 0x1e, 0x3c, 0xb8, 0xc8, 0x4e,
	0xbe, 0x90, 0x25, 0xf2, 0x87, 0xfb, 0x02, 0xba, 0xa7, 0x3e, 0x17, 0x48, 0x9b, 0x55, 0xd0, 0x06,
	0x85, 0x7e, 0x05, 0x46, 0xa1, 0xff, 0x31, 0x42, 0xfa, 0xc5, 0xd2, 0x97, 0x53, 0x51, 0x7e, 0x71,
	0xf6, 0x72, 0x9f, 0xc1, 0xea, 0x7b, 0x94, 0xda, 0x08, 0x06, 0xad, 0xfb, 0x1d, 0xd6, 0x2a, 0x88,
	0x6a, 0x4d, 0x5b, 0x35, 0xea, 0xbe, 0xb6, 0x49, 0x3e, 0xa1, 0x76, 0x80, 0x1d, 0xe8, 0x9d, 0xa1,
	0x3c, 0xe5, 0x78, 0x89, 0x1c, 0xa3, 0x31, 0xd6, 0x4d, 0xb1, 0x09, 0x7d, 0x03, 0x57, 0xfd, 0x8f,
	0x3f, 0xfa, 0x42, 0xbe, 0x1e, 0x8f, 0x31, 0x91, 0xd5, 0xff, 0x78, 0x07, 0x7a, 0x7a, 0xd8, 0xbe,
	0x34, 0xf7, 0x39, 0xb0, 0xec, 0xd3, 0xce, 0x91, 0x87, 0x97, 0x77, 0x75, 0xdd, 0x53, 0xb1, 0x50,
	0x94, 0xea, 0x5d, 0x14, 0xe7, 0x5d, 0xee, 0x2b, 0x2e, 0x50, 0x46, 0xb1, 0x87, 0x3f, 0x70, 0x7c,
	0x6f, 0x71, 0x81, 0xca, 0x8b, 0xf7, 0xff, 0x2e, 0x41, 0xf3, 0xfc, 0x13, 0x3b, 0x04, 0xa8, 0xbc,
	0x80, 0x39, 0xa5, 0x6e, 0xe6, 0x9c, 0xc9, 0x79, 0x6c, 0xcd, 0xa9, 0xa5, 0x9c, 0x40, 0x47, 0x33,
	0x09, 0xb6, 0x5d, 0xa2, 0x6d, 0xa6, 0xe2, 0x3c, 0xa9, 0x4b, 0x2b, 0xbe, 0x0f, 0xd0, 0xa6, 0x26,
	0xc2, 0xb6, 0x2c, 0xf8, 0xf2, 0x54, 0xce, 0x76, 0x4d, 0x56, 0x91, 0x1d, 0x40, 0xab, 0xb0, 0x19,
	0x36, 0x28, 0xa1, 0x86, 0x19, 0x39, 0x8f, 0x2c, 0x19, 0x45, 0xf0, 0x05, 0x56, 0x74, 0xeb, 0x61,
	0xd5, 0xfc, 0x56, 0xb3, 0x72, 0x9e, 0xd6, 0xe6, 0x15, 0xe5, 0x21, 0x40, 0xe5, 0x09, 0x64, 0xef,
	0x73, 0xee, 0x41, 0xf6, 0x6e, 0x31, 0x91, 0x43, 0x80, 0x4a, 0xd7, 0x84, 0x66, 0xce, 0x17, 0x08,
	0x8d, 0xc5, 0x08, 0x0e, 0xa0, 0x55, 0x28, 0x94, 0x6c, 0xc8, 0xd0, 0x35, 0xd9, 0xd0, 0x9c, 0x9c,
	0x4f, 0xa0, 0xa3, 0x89, 0x8b, 0xdc, 0xdf, 0x26, 0x4e, 0x72, 0x7f, 0xab, 0x26, 0xd3, 0xfb, 0x53,
	0xf1, 0x91, 0xfb, 0x5b, 0xa4, 0x4a, 0xee, 0x6f, 0x55, 0xec, 0x11, 0x2c, 0x13, 0xed, 0x31, 0xb2,
	0xd0, 0x39, 0xdd, 0x3a, 0x5b, 0xf6, 0xa4, 0xc1, 0x94, 0xb7, 0x30, 0x99, 0x34, 0x11, 0x9b, 0x4c,
	0xba, 0x76, 0x4b, 0xa6, 0x5c, 0x95, 0x26, 0x93, 0xa6, 0x68, 0x93, 0x49, 0x17, 0xf2, 0xc5, 0x62,
	0x96, 0x7a, 0xf5, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x92, 0xa7, 0x99, 0xc8, 0x62, 0x08, 0x00, 0x00,
}

// Reference imports to suppress errors if they are not otherwise used.
@@ -1005,6 +1136,8 @@ const _ = grpc.SupportPackageIsVersion6
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VMClient interface {
	Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error)
	Bootstrapping(ctx context.Context, in *BootstrappingRequest, opts ...grpc.CallOption) (*BootstrappingResponse, error)
	Bootstrapped(ctx context.Context, in *BootstrappedRequest, opts ...grpc.CallOption) (*BootstrappedResponse, error)
	Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error)
	CreateHandlers(ctx context.Context, in *CreateHandlersRequest, opts ...grpc.CallOption) (*CreateHandlersResponse, error)
	BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error)
@@ -1034,6 +1167,24 @@ func (c *vMClient) Initialize(ctx context.Context, in *InitializeRequest, opts .
	return out, nil
}

func (c *vMClient) Bootstrapping(ctx context.Context, in *BootstrappingRequest, opts ...grpc.CallOption) (*BootstrappingResponse, error) {
	out := new(BootstrappingResponse)
	err := c.cc.Invoke(ctx, "/vmproto.VM/Bootstrapping", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) Bootstrapped(ctx context.Context, in *BootstrappedRequest, opts ...grpc.CallOption) (*BootstrappedResponse, error) {
	out := new(BootstrappedResponse)
	err := c.cc.Invoke(ctx, "/vmproto.VM/Bootstrapped", in, out, opts...)
	if err != nil {
		return nil, err
	}
	return out, nil
}

func (c *vMClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) {
	out := new(ShutdownResponse)
	err := c.cc.Invoke(ctx, "/vmproto.VM/Shutdown", in, out, opts...)
@@ -1127,6 +1278,8 @@ func (c *vMClient) BlockReject(ctx context.Context, in *BlockRejectRequest, opts
// VMServer is the server API for VM service.
type VMServer interface {
	Initialize(context.Context, *InitializeRequest) (*InitializeResponse, error)
	Bootstrapping(context.Context, *BootstrappingRequest) (*BootstrappingResponse, error)
	Bootstrapped(context.Context, *BootstrappedRequest) (*BootstrappedResponse, error)
	Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error)
	CreateHandlers(context.Context, *CreateHandlersRequest) (*CreateHandlersResponse, error)
	BuildBlock(context.Context, *BuildBlockRequest) (*BuildBlockResponse, error)
@@ -1146,6 +1299,12 @@ type UnimplementedVMServer struct {
func (*UnimplementedVMServer) Initialize(ctx context.Context, req *InitializeRequest) (*InitializeResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented")
}
func (*UnimplementedVMServer) Bootstrapping(ctx context.Context, req *BootstrappingRequest) (*BootstrappingResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Bootstrapping not implemented")
}
func (*UnimplementedVMServer) Bootstrapped(ctx context.Context, req *BootstrappedRequest) (*BootstrappedResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Bootstrapped not implemented")
}
func (*UnimplementedVMServer) Shutdown(ctx context.Context, req *ShutdownRequest) (*ShutdownResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented")
}
@@ -1199,6 +1358,42 @@ func _VM_Initialize_Handler(srv interface{}, ctx context.Context, dec func(inter
	return interceptor(ctx, in, info, handler)
}

func _VM_Bootstrapping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BootstrappingRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(VMServer).Bootstrapping(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/vmproto.VM/Bootstrapping",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(VMServer).Bootstrapping(ctx, req.(*BootstrappingRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _VM_Bootstrapped_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(BootstrappedRequest)
	if err := dec(in); err != nil {
		return nil, err
	}
	if interceptor == nil {
		return srv.(VMServer).Bootstrapped(ctx, in)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: "/vmproto.VM/Bootstrapped",
	}
	handler := func(ctx context.Context, req interface{}) (interface{}, error) {
		return srv.(VMServer).Bootstrapped(ctx, req.(*BootstrappedRequest))
	}
	return interceptor(ctx, in, info, handler)
}

func _VM_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	in := new(ShutdownRequest)
	if err := dec(in); err != nil {
@@ -1387,6 +1582,14 @@ var _VM_serviceDesc = grpc.ServiceDesc{
			MethodName: "Initialize",
			Handler:    _VM_Initialize_Handler,
		},
		{
			MethodName: "Bootstrapping",
			Handler:    _VM_Bootstrapping_Handler,
		},
		{
			MethodName: "Bootstrapped",
			Handler:    _VM_Bootstrapped_Handler,
		},
		{
			MethodName: "Shutdown",
			Handler:    _VM_Shutdown_Handler,
		},
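For orientation, here is a sketch of how a plugin process might register a server implementing the extended interface. `RegisterVMServer` is the registration helper this style of generated file normally exports, and the import paths are assumptions:

```go
package main

import (
	"google.golang.org/grpc"

	"github.com/ava-labs/gecko/vms/rpcchainvm/vmproto" // assumed package location
)

func main() {
	server := grpc.NewServer()

	// UnimplementedVMServer satisfies VMServer (every method, including the
	// new Bootstrapping/Bootstrapped, returns codes.Unimplemented); a real
	// plugin would register its *VMServer wrapping a concrete VM instead.
	vmproto.RegisterVMServer(server, &vmproto.UnimplementedVMServer{})

	_ = server // a real plugin would now serve on go-plugin's listener
}
```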
Some files were not shown because too many files have changed in this diff.