mirror of https://github.com/poanetwork/gecko.git

Merge branch 'master' into lower-log-level

commit 893e3383e8
@@ -1,27 +0,0 @@
-// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
-// See the file LICENSE for licensing terms.
-
-package admin
-
-import (
-	"sort"
-
-	"github.com/ava-labs/gecko/utils"
-)
-
-// Peerable can return a group of peers
-type Peerable interface{ IPs() []utils.IPDesc }
-
-// Networking provides helper methods for tracking the current network state
-type Networking struct{ peers Peerable }
-
-// Peers returns the current peers
-func (n *Networking) Peers() ([]string, error) {
-	ipDescs := n.peers.IPs()
-	ips := make([]string, len(ipDescs))
-	for i, ipDesc := range ipDescs {
-		ips[i] = ipDesc.String()
-	}
-	sort.Strings(ips)
-	return ips, nil
-}
@@ -10,45 +10,58 @@ import (

	"github.com/ava-labs/gecko/api"
	"github.com/ava-labs/gecko/chains"
	"github.com/ava-labs/gecko/genesis"
	"github.com/ava-labs/gecko/ids"
+	"github.com/ava-labs/gecko/network"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/utils/logging"
+	"github.com/ava-labs/gecko/version"

	cjson "github.com/ava-labs/gecko/utils/json"
)

// Admin is the API service for node admin management
type Admin struct {
+	version      version.Version
	nodeID       ids.ShortID
	networkID    uint32
	log          logging.Logger
-	networking   Networking
+	networking   network.Network
	performance  Performance
	chainManager chains.Manager
	httpServer   *api.Server
}

// NewService returns a new admin API service
-func NewService(nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers Peerable, httpServer *api.Server) *common.HTTPHandler {
+func NewService(version version.Version, nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler {
	newServer := rpc.NewServer()
	codec := cjson.NewCodec()
	newServer.RegisterCodec(codec, "application/json")
	newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
	newServer.RegisterService(&Admin{
+		version:      version,
		nodeID:       nodeID,
		networkID:    networkID,
		log:          log,
		chainManager: chainManager,
-		networking: Networking{
-			peers: peers,
-		},
-		httpServer: httpServer,
+		networking:   peers,
+		httpServer:   httpServer,
	}, "admin")
	return &common.HTTPHandler{Handler: newServer}
}

-// GetNodeIDArgs are the arguments for calling GetNodeID
-type GetNodeIDArgs struct{}
+// GetNodeVersionReply are the results from calling GetNodeVersion
+type GetNodeVersionReply struct {
+	Version string `json:"version"`
+}
+
+// GetNodeVersion returns the version this node is running
+func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
+	service.log.Debug("Admin: GetNodeVersion called")
+
+	reply.Version = service.version.String()
+	return nil
+}

// GetNodeIDReply are the results from calling GetNodeID
type GetNodeIDReply struct {
@@ -56,29 +69,39 @@ type GetNodeIDReply struct {
}

// GetNodeID returns the node ID of this node
-func (service *Admin) GetNodeID(r *http.Request, args *GetNodeIDArgs, reply *GetNodeIDReply) error {
+func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
	service.log.Debug("Admin: GetNodeID called")

	reply.NodeID = service.nodeID
	return nil
}

-// GetNetworkIDArgs are the arguments for calling GetNetworkID
-type GetNetworkIDArgs struct{}
-
// GetNetworkIDReply are the results from calling GetNetworkID
type GetNetworkIDReply struct {
	NetworkID cjson.Uint32 `json:"networkID"`
}

// GetNetworkID returns the network ID this node is running on
-func (service *Admin) GetNetworkID(r *http.Request, args *GetNetworkIDArgs, reply *GetNetworkIDReply) error {
+func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
	service.log.Debug("Admin: GetNetworkID called")

	reply.NetworkID = cjson.Uint32(service.networkID)
	return nil
}

// GetNetworkNameReply is the result from calling GetNetworkName
type GetNetworkNameReply struct {
	NetworkName string `json:"networkName"`
}

// GetNetworkName returns the network name this node is running on
func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
	service.log.Debug("Admin: GetNetworkName called")

	reply.NetworkName = genesis.NetworkName(service.networkID)
	return nil
}

// GetBlockchainIDArgs are the arguments for calling GetBlockchainID
type GetBlockchainIDArgs struct {
	Alias string `json:"alias"`
@@ -90,7 +113,7 @@ type GetBlockchainIDReply struct {
}

// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
-func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
+func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
	service.log.Debug("Admin: GetBlockchainID called")

	bID, err := service.chainManager.Lookup(args.Alias)
@@ -98,21 +121,16 @@ func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs
		return err
	}

-// PeersArgs are the arguments for calling Peers
-type PeersArgs struct{}
-
// PeersReply are the results from calling Peers
type PeersReply struct {
-	Peers []string `json:"peers"`
+	Peers []network.PeerID `json:"peers"`
}

// Peers returns the list of current validators
-func (service *Admin) Peers(r *http.Request, args *PeersArgs, reply *PeersReply) error {
+func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
	service.log.Debug("Admin: Peers called")

-	peers, err := service.networking.Peers()
-	reply.Peers = peers
-	return err
+	reply.Peers = service.networking.Peers()
+	return nil
}
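With the Networking wrapper gone, the service now returns the network's own peer descriptions. As a hedged usage sketch — the /ext/admin mount point and the 9650 default HTTP port are assumptions from gecko's standard configuration, not shown in this diff — a client could exercise the new reply shape like this:

package main

import (
	"fmt"
	"io/ioutil"
	"net/http"
	"strings"
)

func main() {
	// JSON-RPC 2.0 request for the Peers method of the service
	// registered above under the "admin" namespace.
	body := strings.NewReader(`{"jsonrpc":"2.0","id":1,"method":"admin.peers","params":{}}`)
	resp, err := http.Post("http://127.0.0.1:9650/ext/admin", "application/json", body)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// The reply's "peers" field is now a list of peer descriptions
	// ([]network.PeerID) rather than bare IP strings.
	out, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(out))
}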

// StartCPUProfilerArgs are the arguments for calling StartCPUProfiler
@@ -126,22 +144,19 @@ type StartCPUProfilerReply struct {
}

// StartCPUProfiler starts a cpu profile writing to the specified file
-func (service *Admin) StartCPUProfiler(r *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
+func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
	service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename)
	reply.Success = true
	return service.performance.StartCPUProfiler(args.Filename)
}

-// StopCPUProfilerArgs are the arguments for calling StopCPUProfiler
-type StopCPUProfilerArgs struct{}
-
// StopCPUProfilerReply are the results from calling StopCPUProfiler
type StopCPUProfilerReply struct {
	Success bool `json:"success"`
}

// StopCPUProfiler stops the cpu profile
-func (service *Admin) StopCPUProfiler(r *http.Request, args *StopCPUProfilerArgs, reply *StopCPUProfilerReply) error {
+func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopCPUProfilerReply) error {
	service.log.Debug("Admin: StopCPUProfiler called")
	reply.Success = true
	return service.performance.StopCPUProfiler()
@@ -158,7 +173,7 @@ type MemoryProfileReply struct {
}

// MemoryProfile runs a memory profile writing to the specified file
-func (service *Admin) MemoryProfile(r *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
+func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
	service.log.Debug("Admin: MemoryProfile called with %s", args.Filename)
	reply.Success = true
	return service.performance.MemoryProfile(args.Filename)
@@ -175,7 +190,7 @@ type LockProfileReply struct {
}

// LockProfile runs a mutex profile writing to the specified file
-func (service *Admin) LockProfile(r *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
+func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
	service.log.Debug("Admin: LockProfile called with %s", args.Filename)
	reply.Success = true
	return service.performance.LockProfile(args.Filename)
@@ -193,7 +208,7 @@ type AliasReply struct {
}

// Alias attempts to alias an HTTP endpoint to a new name
-func (service *Admin) Alias(r *http.Request, args *AliasArgs, reply *AliasReply) error {
+func (service *Admin) Alias(_ *http.Request, args *AliasArgs, reply *AliasReply) error {
	service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
	reply.Success = true
	return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias)
@@ -236,7 +251,7 @@ type StacktraceReply struct {
}

// Stacktrace returns the current global stacktrace
-func (service *Admin) Stacktrace(_ *http.Request, _ *StacktraceArgs, reply *StacktraceReply) error {
+func (service *Admin) Stacktrace(_ *http.Request, _ *struct{}, reply *StacktraceReply) error {
	reply.Stacktrace = logging.Stacktrace{Global: true}.String()
	return nil
}
@@ -8,12 +8,14 @@ import (
	"fmt"
	"net/http"
	"sync"
+	"testing"

	"github.com/gorilla/rpc/v2"

	"github.com/ava-labs/gecko/chains/atomic"
	"github.com/ava-labs/gecko/database"
	"github.com/ava-labs/gecko/database/encdb"
+	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/database/prefixdb"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/engine/common"
@@ -29,8 +31,17 @@ const (
	// maxUserPassLen is the maximum length of the username or password allowed
	maxUserPassLen = 1024

-	// requiredPassScore defines the score a password must achieve to be accepted
-	// as a password with strong characteristics by the zxcvbn package
+	// maxCheckedPassLen limits the length of the password that should be
+	// strength checked.
+	//
+	// As per issue https://github.com/ava-labs/gecko/issues/195 it was found
+	// the longer the length of password the slower zxcvbn.PasswordStrength()
+	// performs. To avoid performance issues, and a DoS vector, we only check
+	// the first 50 characters of the password.
+	maxCheckedPassLen = 50
+
+	// requiredPassScore defines the score a password must achieve to be
+	// accepted as a password with strong characteristics by the zxcvbn package
	//
	// The scoring mechanism defined is as follows;
	//
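As an illustration of the truncate-then-score pattern these comments describe, here is a minimal hedged sketch. The score threshold below is an assumption for the example (the real requiredPassScore is defined in the omitted tail of this const block); zxcvbn-go is the scoring package gecko imports:

package main

import (
	"fmt"

	zxcvbn "github.com/nbutton23/zxcvbn-go"
)

const (
	maxCheckedPassLen = 50 // value from the const block above
	requiredPassScore = 2  // assumed threshold, for illustration only
)

func strongEnough(password string) bool {
	checkPass := password
	if len(checkPass) > maxCheckedPassLen {
		// zxcvbn.PasswordStrength slows down sharply on long inputs
		// (see issue #195), so only the first 50 characters are scored.
		checkPass = checkPass[:maxCheckedPassLen]
	}
	return zxcvbn.PasswordStrength(checkPass, nil).Score >= requiredPassScore
}

func main() {
	fmt.Println(strongEnough("password1"))                // false: highly guessable
	fmt.Println(strongEnough("correct-horse-battery-st")) // likely true
}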
@@ -136,36 +147,10 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre
	defer ks.lock.Unlock()

	ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username)

-	if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen {
-		return errUserPassMaxLength
-	}
-
-	if args.Username == "" {
-		return errEmptyUsername
-	}
-	if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
-		return fmt.Errorf("user already exists: %s", args.Username)
-	}
-
-	if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore {
-		return errWeakPassword
-	}
-
-	usr := &User{}
-	if err := usr.Initialize(args.Password); err != nil {
+	if err := ks.AddUser(args.Username, args.Password); err != nil {
		return err
	}

-	usrBytes, err := ks.codec.Marshal(usr)
-	if err != nil {
-		return err
-	}
-
-	if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil {
-		return err
-	}
-	ks.users[args.Username] = usr
	reply.Success = true
	return nil
}
@@ -266,6 +251,10 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp

	ks.log.Verbo("ImportUser called for %s", args.Username)

+	if args.Username == "" {
+		return errEmptyUsername
+	}
+
	if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
		return fmt.Errorf("user already exists: %s", args.Username)
	}
@@ -399,3 +388,51 @@ func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database

	return encDB, nil
}
+
+// AddUser attempts to register this username and password as a new user of the
+// keystore.
+func (ks *Keystore) AddUser(username, password string) error {
+	if len(username) > maxUserPassLen || len(password) > maxUserPassLen {
+		return errUserPassMaxLength
+	}
+
+	if username == "" {
+		return errEmptyUsername
+	}
+	if usr, err := ks.getUser(username); err == nil || usr != nil {
+		return fmt.Errorf("user already exists: %s", username)
+	}
+
+	checkPass := password
+	if len(password) > maxCheckedPassLen {
+		checkPass = password[:maxCheckedPassLen]
+	}
+
+	if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore {
+		return errWeakPassword
+	}
+
+	usr := &User{}
+	if err := usr.Initialize(password); err != nil {
+		return err
+	}
+
+	usrBytes, err := ks.codec.Marshal(usr)
+	if err != nil {
+		return err
+	}
+
+	if err := ks.userDB.Put([]byte(username), usrBytes); err != nil {
+		return err
+	}
+	ks.users[username] = usr
+
+	return nil
+}
+
+// CreateTestKeystore returns a new keystore that can be utilized for testing
+func CreateTestKeystore(t *testing.T) *Keystore {
+	ks := &Keystore{}
+	ks.Initialize(logging.NoLog{}, memdb.New())
+	return ks
+}
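As a usage sketch of the two new helpers (a hypothetical test, not part of this commit; it assumes the sample password clears the zxcvbn score threshold):

func TestAddUserSketch(t *testing.T) {
	ks := CreateTestKeystore(t)

	// Register a user directly, without going through the RPC layer.
	if err := ks.AddUser("bob", "N0tAW3akPassw0rd!"); err != nil {
		t.Fatal(err)
	}

	// Registering the same username again must fail.
	if err := ks.AddUser("bob", "N0tAW3akPassw0rd!"); err == nil {
		t.Fatal("expected a duplicate-user error")
	}
}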
@@ -10,9 +10,7 @@ import (
	"reflect"
	"testing"

-	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/ids"
-	"github.com/ava-labs/gecko/utils/logging"
)
@@ -22,8 +20,7 @@ var (
)

func TestServiceListNoUsers(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	reply := ListUsersReply{}
	if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil {

@@ -35,8 +32,7 @@ func TestServiceListNoUsers(t *testing.T) {
}

func TestServiceCreateUser(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	{
		reply := CreateUserReply{}
@@ -75,8 +71,7 @@ func genStr(n int) string {
// TestServiceCreateUserArgsChecks generates excessively long usernames or
// passwords to ensure the sanity checks on string length are not exceeded
func TestServiceCreateUserArgsCheck(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	{
		reply := CreateUserReply{}
@@ -117,8 +112,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) {
// TestServiceCreateUserWeakPassword tests creating a new user with a weak
// password to ensure the password strength check is working
func TestServiceCreateUserWeakPassword(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	{
		reply := CreateUserReply{}

@@ -138,8 +132,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) {
}

func TestServiceCreateDuplicate(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	{
		reply := CreateUserReply{}

@@ -166,8 +159,7 @@ func TestServiceCreateDuplicate(t *testing.T) {
}

func TestServiceCreateUserNoName(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	reply := CreateUserReply{}
	if err := ks.CreateUser(nil, &CreateUserArgs{

@@ -178,8 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) {
}

func TestServiceUseBlockchainDB(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	{
		reply := CreateUserReply{}

@@ -218,8 +209,7 @@ func TestServiceUseBlockchainDB(t *testing.T) {
}

func TestServiceExportImport(t *testing.T) {
-	ks := Keystore{}
-	ks.Initialize(logging.NoLog{}, memdb.New())
+	ks := CreateTestKeystore(t)

	{
		reply := CreateUserReply{}

@@ -252,8 +242,7 @@ func TestServiceExportImport(t *testing.T) {
		t.Fatal(err)
	}

-	newKS := Keystore{}
-	newKS.Initialize(logging.NoLog{}, memdb.New())
+	newKS := CreateTestKeystore(t)

	{
		reply := ImportUserReply{}

@@ -266,6 +255,17 @@ func TestServiceExportImport(t *testing.T) {
		}
	}

+	{
+		reply := ImportUserReply{}
+		if err := newKS.ImportUser(nil, &ImportUserArgs{
+			Username: "",
+			Password: "strongPassword",
+			User:     exportReply.User,
+		}, &reply); err == nil {
+			t.Fatal("Should have errored due to empty username")
+		}
+	}
+
	{
		reply := ImportUserReply{}
		if err := newKS.ImportUser(nil, &ImportUserArgs{

@@ -347,11 +347,10 @@ func TestServiceDeleteUser(t *testing.T) {

	for _, tt := range tests {
		t.Run(tt.desc, func(t *testing.T) {
-			ks := Keystore{}
-			ks.Initialize(logging.NoLog{}, memdb.New())
+			ks := CreateTestKeystore(t)

			if tt.setup != nil {
-				if err := tt.setup(&ks); err != nil {
+				if err := tt.setup(ks); err != nil {
					t.Fatalf("failed to create user setup in keystore: %v", err)
				}
			}
@@ -75,8 +75,9 @@ func (s *Server) RegisterChain(ctx *snow.Context, vmIntf interface{}) {
	}

	// all subroutes to a chain begin with "bc/<the chain's ID>"
-	defaultEndpoint := "bc/" + ctx.ChainID.String()
-	httpLogger, err := s.factory.MakeChain(ctx.ChainID, "http")
+	chainID := ctx.ChainID.String()
+	defaultEndpoint := "bc/" + chainID
+	httpLogger, err := s.factory.MakeChain(chainID, "http")
	if err != nil {
		s.log.Error("Failed to create new http logger: %s", err)
		return
@@ -20,7 +20,6 @@ import (
	"github.com/ava-labs/gecko/snow/engine/avalanche/state"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/engine/common/queue"
-	"github.com/ava-labs/gecko/snow/networking/handler"
	"github.com/ava-labs/gecko/snow/networking/router"
	"github.com/ava-labs/gecko/snow/networking/sender"
	"github.com/ava-labs/gecko/snow/networking/timeout"
@@ -39,8 +38,9 @@ import (

const (
	defaultChannelSize = 1000
-	requestTimeout     = 2 * time.Second
+	requestTimeout     = 4 * time.Second
	gossipFrequency    = 10 * time.Second
+	shutdownTimeout    = 1 * time.Second
)

// Manager manages the chains running on this node.
@@ -145,7 +145,7 @@ func New(
	timeoutManager.Initialize(requestTimeout)
	go log.RecoverAndPanic(timeoutManager.Dispatch)

-	router.Initialize(log, &timeoutManager, gossipFrequency)
+	router.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout)

	m := &manager{
		stakingEnabled: stakingEnabled,
@@ -204,6 +204,31 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
		return
	}

+	primaryAlias, err := m.PrimaryAlias(chain.ID)
+	if err != nil {
+		primaryAlias = chain.ID.String()
+	}
+
+	// Create the log and context of the chain
+	chainLog, err := m.logFactory.MakeChain(primaryAlias, "")
+	if err != nil {
+		m.log.Error("error while creating chain's log %s", err)
+		return
+	}
+
+	ctx := &snow.Context{
+		NetworkID:           m.networkID,
+		ChainID:             chain.ID,
+		Log:                 chainLog,
+		DecisionDispatcher:  m.decisionEvents,
+		ConsensusDispatcher: m.consensusEvents,
+		NodeID:              m.nodeID,
+		HTTP:                m.server,
+		Keystore:            m.keystore.NewBlockchainKeyStore(chain.ID),
+		SharedMemory:        m.sharedMemory.NewBlockchainSharedMemory(chain.ID),
+		BCLookup:            m,
+	}
+
	// Get a factory for the vm we want to use on our chain
	vmFactory, err := m.vmManager.GetVMFactory(vmID)
	if err != nil {
@@ -212,7 +237,7 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
	}

	// Create the chain
-	vm, err := vmFactory.New()
+	vm, err := vmFactory.New(ctx)
	if err != nil {
		m.log.Error("error while creating vm: %s", err)
		return
@@ -234,7 +259,7 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
		return
	}

-	fx, err := fxFactory.New()
+	fx, err := fxFactory.New(ctx)
	if err != nil {
		m.log.Error("error while creating fx: %s", err)
		return
@@ -247,31 +272,8 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
		}
	}

-	// Create the log and context of the chain
-	chainLog, err := m.logFactory.MakeChain(chain.ID, "")
-	if err != nil {
-		m.log.Error("error while creating chain's log %s", err)
-		return
-	}
-
-	ctx := &snow.Context{
-		NetworkID:           m.networkID,
-		ChainID:             chain.ID,
-		Log:                 chainLog,
-		DecisionDispatcher:  m.decisionEvents,
-		ConsensusDispatcher: m.consensusEvents,
-		NodeID:              m.nodeID,
-		HTTP:                m.server,
-		Keystore:            m.keystore.NewBlockchainKeyStore(chain.ID),
-		SharedMemory:        m.sharedMemory.NewBlockchainSharedMemory(chain.ID),
-		BCLookup:            m,
-	}
	consensusParams := m.consensusParams
-	if alias, err := m.PrimaryAlias(ctx.ChainID); err == nil {
-		consensusParams.Namespace = fmt.Sprintf("gecko_%s", alias)
-	} else {
-		consensusParams.Namespace = fmt.Sprintf("gecko_%s", ctx.ChainID)
-	}
+	consensusParams.Namespace = fmt.Sprintf("gecko_%s", primaryAlias)

	// The validators of this blockchain
	var validators validators.Set // Validators validating this blockchain
@@ -360,8 +362,8 @@ func (m *manager) createAvalancheChain(
	db := prefixdb.New(ctx.ChainID.Bytes(), m.db)
	vmDB := prefixdb.New([]byte("vm"), db)
	vertexDB := prefixdb.New([]byte("vertex"), db)
-	vertexBootstrappingDB := prefixdb.New([]byte("vertex_bootstrapping"), db)
-	txBootstrappingDB := prefixdb.New([]byte("tx_bootstrapping"), db)
+	vertexBootstrappingDB := prefixdb.New([]byte("vertex_bs"), db)
+	txBootstrappingDB := prefixdb.New([]byte("tx_bs"), db)

	vtxBlocker, err := queue.New(vertexBootstrappingDB)
	if err != nil {
@@ -428,8 +430,14 @@ func (m *manager) createAvalancheChain(
	})

	// Asynchronously passes messages from the network to the consensus engine
-	handler := &handler.Handler{}
-	handler.Initialize(&engine, msgChan, defaultChannelSize)
+	handler := &router.Handler{}
+	handler.Initialize(
+		&engine,
+		msgChan,
+		defaultChannelSize,
+		fmt.Sprintf("%s_handler", consensusParams.Namespace),
+		consensusParams.Metrics,
+	)

	// Allows messages to be routed to the new chain
	m.chainRouter.AddChain(handler)
@@ -465,7 +473,7 @@ func (m *manager) createSnowmanChain(

	db := prefixdb.New(ctx.ChainID.Bytes(), m.db)
	vmDB := prefixdb.New([]byte("vm"), db)
-	bootstrappingDB := prefixdb.New([]byte("bootstrapping"), db)
+	bootstrappingDB := prefixdb.New([]byte("bs"), db)

	blocked, err := queue.New(bootstrappingDB)
	if err != nil {
@@ -514,8 +522,14 @@ func (m *manager) createSnowmanChain(
	})

	// Asynchronously passes messages from the network to the consensus engine
-	handler := &handler.Handler{}
-	handler.Initialize(&engine, msgChan, defaultChannelSize)
+	handler := &router.Handler{}
+	handler.Initialize(
+		&engine,
+		msgChan,
+		defaultChannelSize,
+		fmt.Sprintf("%s_handler", consensusParams.Namespace),
+		consensusParams.Metrics,
+	)

	// Allow incoming messages to be routed to the new chain
	m.chainRouter.AddChain(handler)
@@ -50,6 +50,122 @@ func (c *Config) init() error {

// Hard coded genesis constants
var (
+	DenaliConfig = Config{
+		MintAddresses: []string{
+			"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
+		},
+		FundedAddresses: []string{
+			"9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17",
+			"JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn",
+			"7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM",
+			"77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6",
+			"4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt",
+			"CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi",
+			"4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa",
+			"DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9",
+			"ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h",
+			"6cesTteH62Y5mLoDBUASaBvCXuL2AthL",
+		},
+		StakerIDs: []string{
+			"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
+			"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
+			"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
+			"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
+			"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
+		},
+		EVMBytes: []byte{
+			0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
+			0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69,
+			0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31,
+			0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65,
+			0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f,
+			0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64,
+			0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c,
+			0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
+			0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53,
+			0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a,
+			0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69,
+			0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63,
+			0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69,
+			0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68,
+			0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38,
+			0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62,
+			0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32,
+			0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31,
+			0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35,
+			0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34,
+			0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66,
+			0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36,
+			0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22,
+			0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c,
+			0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
+			0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c,
+			0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
+			0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75,
+			0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
+			0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74,
+			0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c,
+			0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
+			0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72,
+			0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f,
+			0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22,
+			0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22,
+			0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69,
+			0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22,
+			0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
+			0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74,
+			0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30,
+			0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69,
+			0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78,
+			0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22,
+			0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63,
+			0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30,
+			0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78,
+			0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30,
+			0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x22, 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e,
+			0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30,
+			0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f,
+			0x63, 0x22, 0x3a, 0x7b, 0x22, 0x35, 0x37, 0x32,
+			0x66, 0x34, 0x64, 0x38, 0x30, 0x66, 0x31, 0x30,
+			0x66, 0x36, 0x36, 0x33, 0x62, 0x35, 0x30, 0x34,
+			0x39, 0x66, 0x37, 0x38, 0x39, 0x35, 0x34, 0x36,
+			0x66, 0x32, 0x35, 0x66, 0x37, 0x30, 0x62, 0x62,
+			0x36, 0x32, 0x61, 0x37, 0x66, 0x22, 0x3a, 0x7b,
+			0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
+			0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62,
+			0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30,
+			0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c,
+			0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22,
+			0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
+			0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22,
+			0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
+			0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61,
+			0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
+			0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22,
+			0x7d,
+		},
+	}
	CascadeConfig = Config{
		MintAddresses: []string{
			"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
@@ -277,6 +393,8 @@ var (
// GetConfig ...
func GetConfig(networkID uint32) *Config {
	switch networkID {
+	case DenaliID:
+		return &DenaliConfig
	case CascadeID:
		return &CascadeConfig
	default:
@@ -17,12 +17,15 @@ func TestNetworkName(t *testing.T) {
	if name := NetworkName(MainnetID); name != MainnetName {
		t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, MainnetName)
	}
-	if name := NetworkName(TestnetID); name != CascadeName {
-		t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, CascadeName)
-	}
	if name := NetworkName(CascadeID); name != CascadeName {
		t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, CascadeName)
	}
+	if name := NetworkName(DenaliID); name != DenaliName {
+		t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
+	}
+	if name := NetworkName(TestnetID); name != DenaliName {
+		t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
+	}
	if name := NetworkName(4294967295); name != "network-4294967295" {
		t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, "network-4294967295")
	}
@@ -37,26 +40,42 @@ func TestNetworkID(t *testing.T) {
		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", MainnetID, id)
	}

-	id, err = NetworkID(TestnetName)
-	if err != nil {
-		t.Fatal(err)
-	}
-	if id != TestnetID {
-		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id)
-	}
-
	id, err = NetworkID(CascadeName)
	if err != nil {
		t.Fatal(err)
	}
-	if id != TestnetID {
-		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id)
+	if id != CascadeID {
+		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", CascadeID, id)
	}

	id, err = NetworkID("cAsCaDe")
	if err != nil {
		t.Fatal(err)
	}
	if id != CascadeID {
		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", CascadeID, id)
	}

+	id, err = NetworkID(DenaliName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id != DenaliID {
+		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", DenaliID, id)
+	}
+
+	id, err = NetworkID("dEnAlI")
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id != DenaliID {
+		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", DenaliID, id)
+	}
+
+	id, err = NetworkID(TestnetName)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if id != TestnetID {
+		t.Fatalf("Returned wrong network. Expected: %d ; Returned %d", TestnetID, id)
+	}
|
|
@@ -14,24 +14,32 @@ import (
// Hardcoded network IDs
var (
	MainnetID uint32 = 1
-	TestnetID uint32 = 2
+	CascadeID uint32 = 2
+	DenaliID  uint32 = 3
+
+	TestnetID uint32 = 3
	LocalID   uint32 = 12345

	MainnetName = "mainnet"
-	TestnetName = "testnet"
	CascadeName = "cascade"
+	DenaliName  = "denali"
+
+	TestnetName = "testnet"
	LocalName   = "local"

	NetworkIDToNetworkName = map[uint32]string{
		MainnetID: MainnetName,
-		TestnetID: CascadeName,
-		LocalID:   LocalName,
+		CascadeID: CascadeName,
+		DenaliID:  DenaliName,
+
+		LocalID: LocalName,
	}
	NetworkNameToNetworkID = map[string]uint32{
		MainnetName: MainnetID,
-		TestnetName: TestnetID,
		CascadeName: CascadeID,
+		DenaliName:  DenaliID,
+
+		TestnetName: TestnetID,
		LocalName:   LocalID,
	}
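To make the remapping concrete, here is a small hedged sketch (a hypothetical main package that only uses the constants and functions exercised by the tests above): "testnet" now aliases the newest test network, Denali, while Cascade keeps its old numeric ID.

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/genesis"
)

func main() {
	fmt.Println(genesis.NetworkName(genesis.CascadeID)) // "cascade" (ID 2, the old testnet)
	fmt.Println(genesis.NetworkName(genesis.TestnetID)) // "denali", since TestnetID now equals DenaliID (3)

	// The reverse lookup follows the same aliasing.
	if id, err := genesis.NetworkID("testnet"); err == nil {
		fmt.Println(id) // 3
	}
}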
go.mod

@@ -6,7 +6,7 @@ require (
	github.com/AppsFlyer/go-sundheit v0.2.0
	github.com/allegro/bigcache v1.2.1 // indirect
	github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f // indirect
-	github.com/ava-labs/coreth v0.1.0 // Added manually; don't delete
+	github.com/ava-labs/coreth v0.2.4 // Added manually; don't delete
	github.com/ava-labs/go-ethereum v1.9.3 // indirect
	github.com/deckarep/golang-set v1.7.1 // indirect
	github.com/decred/dcrd/dcrec/secp256k1 v1.0.3

go.sum

@@ -17,8 +17,8 @@ github.com/aristanetworks/glog v0.0.0-20191112221043-67e8567f59f3/go.mod h1:KASm
github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f h1:uM6lu1fpmCwf54zb6Ckkvphioq8MLlyFb/TlTgPpCKc=
github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f/go.mod h1:QZe5Yh80Hp1b6JxQdpfSEEe8X7hTyTEZSosSrFf/oJE=
github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc=
-github.com/ava-labs/coreth v0.1.0 h1:Cx9dkhkQ7Bc7jD07OTbFJRlNTeHbSSLUwsCQlduTupg=
-github.com/ava-labs/coreth v0.1.0/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
+github.com/ava-labs/coreth v0.2.4 h1:MhnbuRyMcij7WU4+frayp40quc44AMPc4IrxXhmucWw=
+github.com/ava-labs/coreth v0.2.4/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY=
github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -62,6 +62,9 @@ func (b *UniqueBag) Difference(diff *UniqueBag) {
// GetSet ...
func (b *UniqueBag) GetSet(id ID) BitSet { return (*b)[*id.ID] }

+// RemoveSet ...
+func (b *UniqueBag) RemoveSet(id ID) { delete(*b, id.Key()) }
+
// List ...
func (b *UniqueBag) List() []ID {
	idList := []ID(nil)
main/main.go

@@ -41,11 +41,14 @@ func main() {
	defer Config.DB.Close()

	if Config.StakingIP.IsZero() {
-		log.Warn("NAT traversal has failed. If this node becomes a staker, it may lose its reward due to being unreachable.")
+		log.Warn("NAT traversal has failed. It will be able to connect to less nodes.")
	}

	// Track if sybil control is enforced
-	if !Config.EnableStaking {
+	if !Config.EnableStaking && Config.EnableP2PTLS {
		log.Warn("Staking is disabled. Sybil control is not enforced.")
	}
+	if !Config.EnableStaking && !Config.EnableP2PTLS {
+		log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.")
+	}

@@ -62,7 +65,7 @@ func main() {

	// Track if assertions should be executed
	if Config.LoggingConfig.Assertions {
-		log.Warn("assertions are enabled. This may slow down execution")
+		log.Debug("assertions are enabled. This may slow down execution")
	}

	mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko")

@@ -81,6 +84,7 @@ func main() {

	defer node.Shutdown()

-	log.Debug("Dispatching node handlers")
-	node.Dispatch()
+	log.Debug("dispatching node handlers")
+	err = node.Dispatch()
+	log.Debug("node dispatching returned with %s", err)
}
main/params.go

@@ -25,29 +25,61 @@ import (
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/ava-labs/gecko/utils/hashing"
	"github.com/ava-labs/gecko/utils/logging"
+	"github.com/ava-labs/gecko/utils/random"
	"github.com/ava-labs/gecko/utils/wrappers"
)

const (
-	dbVersion = "v0.3.0"
+	dbVersion = "v0.5.0"
)

// Results of parsing the CLI
var (
	Config = node.Config{}
	Err    error
+
+	defaultNetworkName = genesis.TestnetName
+
	defaultDbDir           = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
	defaultStakingKeyPath  = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
	defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))
+
+	defaultPluginDirs = []string{
+		"./build/plugins",
+		"./plugins",
+		os.ExpandEnv(filepath.Join("$HOME", ".gecko", "plugins")),
+	}
)

var (
-	errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
+	errBootstrapMismatch  = errors.New("more bootstrap IDs provided than bootstrap IPs")
+	errStakingRequiresTLS = errors.New("if staking is enabled, network TLS must also be enabled")
)

// GetIPs returns the default IPs for each network
func GetIPs(networkID uint32) []string {
	switch networkID {
+	case genesis.DenaliID:
+		return []string{
+			"18.188.121.35:21001",
+			"3.133.83.66:21001",
+			"3.15.206.239:21001",
+			"18.224.140.156:21001",
+			"3.133.131.39:21001",
+			"18.191.29.54:21001",
+			"18.224.172.110:21001",
+			"18.223.211.203:21001",
+			"18.216.130.143:21001",
+			"18.223.184.147:21001",
+			"52.15.48.84:21001",
+			"18.189.194.220:21001",
+			"18.223.119.104:21001",
+			"3.133.155.41:21001",
+			"13.58.170.174:21001",
+			"3.21.245.246:21001",
+			"52.15.190.149:21001",
+			"18.188.95.241:21001",
+			"3.12.197.248:21001",
+			"3.17.39.236:21001",
+		}
	case genesis.CascadeID:
		return []string{
			"3.227.207.132:21001",
|
|||
}
|
||||
}
|
||||
|
||||
// GetIDs returns the default IDs for each network
|
||||
func GetIDs(networkID uint32) []string {
|
||||
switch networkID {
|
||||
case genesis.DenaliID:
|
||||
return []string{
|
||||
"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
|
||||
"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
|
||||
"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
|
||||
"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
|
||||
"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
|
||||
"HGZ8ae74J3odT8ESreAdCtdnvWG1J4X5n",
|
||||
"4KXitMCoE9p2BHA6VzXtaTxLoEjNDo2Pt",
|
||||
"JyE4P8f4cTryNV8DCz2M81bMtGhFFHexG",
|
||||
"EzGaipqomyK9UKx9DBHV6Ky3y68hoknrF",
|
||||
"CYKruAjwH1BmV3m37sXNuprbr7dGQuJwG",
|
||||
"LegbVf6qaMKcsXPnLStkdc1JVktmmiDxy",
|
||||
"FesGqwKq7z5nPFHa5iwZctHE5EZV9Lpdq",
|
||||
"BFa1padLXBj7VHa2JYvYGzcTBPQGjPhUy",
|
||||
"4B4rc5vdD1758JSBYL1xyvE5NHGzz6xzH",
|
||||
"EDESh4DfZFC15i613pMtWniQ9arbBZRnL",
|
||||
"CZmZ9xpCzkWqjAyS7L4htzh5Lg6kf1k18",
|
||||
"CTtkcXvVdhpNp6f97LEUXPwsRD3A2ZHqP",
|
||||
"84KbQHSDnojroCVY7vQ7u9Tx7pUonPaS",
|
||||
"JjvzhxnLHLUQ5HjVRkvG827ivbLXPwA9u",
|
||||
"4CWTbdvgXHY1CLXqQNAp22nJDo5nAmts6",
|
||||
}
|
||||
case genesis.CascadeID:
|
||||
return []string{
|
||||
"NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co",
|
||||
"CMsa8cMw4eib1Hb8GG4xiUKAq5eE1BwUX",
|
||||
"DsMP6jLhi1MkDVc3qx9xx9AAZWx8e87Jd",
|
||||
"N86eodVZja3GEyZJTo3DFUPGpxEEvjGHs",
|
||||
"EkKeGSLUbHrrtuayBtbwgWDRUiAziC3ao",
|
||||
}
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// GetDefaultBootstraps returns the default bootstraps this node should connect
|
||||
// to
|
||||
func GetDefaultBootstraps(networkID uint32, count int) ([]string, []string) {
|
||||
ips := GetIPs(networkID)
|
||||
ids := GetIDs(networkID)
|
||||
|
||||
if numIPs := len(ips); numIPs < count {
|
||||
count = numIPs
|
||||
}
|
||||
|
||||
sampledIPs := make([]string, 0, count)
|
||||
sampledIDs := make([]string, 0, count)
|
||||
|
||||
sampler := random.Uniform{N: len(ips)}
|
||||
for i := 0; i < count; i++ {
|
||||
i := sampler.Sample()
|
||||
sampledIPs = append(sampledIPs, ips[i])
|
||||
sampledIDs = append(sampledIDs, ids[i])
|
||||
}
|
||||
|
||||
return sampledIPs, sampledIDs
|
||||
}
|
||||
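A hedged usage sketch of the new sampling helper (a hypothetical caller in the same main package, since GetDefaultBootstraps is not exported from a library): the two returned slices are parallel, drawn with the same random indices, so ips[i] always pairs with ids[i].

// hypothetical snippet inside package main
ips, ids := GetDefaultBootstraps(genesis.DenaliID, 5)
for i, ip := range ips {
	fmt.Printf("bootstrapping from %s (staker ID %s)\n", ip, ids[i])
}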

// Parse the CLI arguments
func init() {
	errs := &wrappers.Errs{}
@@ -73,8 +167,11 @@ func init() {

	fs := flag.NewFlagSet("gecko", flag.ContinueOnError)

+	// If this is true, print the version and quit.
+	version := fs.Bool("version", false, "If true, print version and quit")
+
	// NetworkID:
-	networkName := fs.String("network-id", genesis.CascadeName, "Network ID this node will connect to")
+	networkName := fs.String("network-id", defaultNetworkName, "Network ID this node will connect to")

	// Ava fees:
	fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva")
@@ -105,12 +202,14 @@ func init() {

	// Staking:
	consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
-	fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
+	// TODO - keeping same flag for backwards compatibility, should be changed to "staking-enabled"
+	fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Enable staking. If enabled, Network TLS is required.")
+	fs.BoolVar(&Config.EnableP2PTLS, "p2p-tls-enabled", true, "Require TLS to authenticate network communication")
	fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
	fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")

	// Plugins:
-	fs.StringVar(&Config.PluginDir, "plugin-dir", "./build/plugins", "Plugin directory for Ava VMs")
+	fs.StringVar(&Config.PluginDir, "plugin-dir", defaultPluginDirs[0], "Plugin directory for Ava VMs")

	// Logging:
	logsDir := fs.String("log-dir", "", "Logging directory for Ava")
@@ -138,6 +237,19 @@ func init() {

	ferr := fs.Parse(os.Args[1:])

+	if *version { // If --version used, print version and exit
+		networkID, err := genesis.NetworkID(defaultNetworkName)
+		if errs.Add(err); err != nil {
+			return
+		}
+		networkGeneration := genesis.NetworkName(networkID)
+		fmt.Printf(
+			"%s [database=%s, network=%s/%s]\n",
+			node.Version, dbVersion, defaultNetworkName, networkGeneration,
+		)
+		os.Exit(0)
+	}
+
	if ferr == flag.ErrHelp {
		// display usage/help text and exit successfully
		os.Exit(0)
@@ -192,9 +304,11 @@ func init() {
		Port: uint16(*consensusPort),
	}

+	defaultBootstrapIPs, defaultBootstrapIDs := GetDefaultBootstraps(networkID, 5)
+
	// Bootstrapping:
	if *bootstrapIPs == "default" {
-		*bootstrapIPs = strings.Join(GetIPs(networkID), ",")
+		*bootstrapIPs = strings.Join(defaultBootstrapIPs, ",")
	}
	for _, ip := range strings.Split(*bootstrapIPs, ",") {
		if ip != "" {
@@ -213,10 +327,16 @@ func init() {
		if *bootstrapIPs == "" {
			*bootstrapIDs = ""
		} else {
-			*bootstrapIDs = strings.Join(genesis.GetConfig(networkID).StakerIDs, ",")
+			*bootstrapIDs = strings.Join(defaultBootstrapIDs, ",")
		}
	}
-	if Config.EnableStaking {
+
+	if Config.EnableStaking && !Config.EnableP2PTLS {
+		errs.Add(errStakingRequiresTLS)
+		return
+	}
+
+	if Config.EnableP2PTLS {
		i := 0
		cb58 := formatting.CB58{}
		for _, id := range strings.Split(*bootstrapIDs, ",") {
|
@ -249,6 +369,16 @@ func init() {
|
|||
}
|
||||
}
|
||||
|
||||
// Plugins
|
||||
if _, err := os.Stat(Config.PluginDir); os.IsNotExist(err) {
|
||||
for _, dir := range defaultPluginDirs {
|
||||
if _, err := os.Stat(dir); !os.IsNotExist(err) {
|
||||
Config.PluginDir = dir
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Staking
|
||||
Config.StakingCertFile = os.ExpandEnv(Config.StakingCertFile) // parse any env variable
|
||||
Config.StakingKeyFile = os.ExpandEnv(Config.StakingKeyFile)
|
||||
|
|
|
@@ -89,6 +89,15 @@ func (m Builder) Get(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg,
	})
}

+// GetAncestors message
+func (m Builder) GetAncestors(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg, error) {
+	return m.Pack(GetAncestors, map[Field]interface{}{
+		ChainID:     chainID.Bytes(),
+		RequestID:   requestID,
+		ContainerID: containerID.Bytes(),
+	})
+}
+
// Put message
func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) {
	return m.Pack(Put, map[Field]interface{}{

@@ -99,6 +108,15 @@ func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, conta
	})
}

+// MultiPut message
+func (m Builder) MultiPut(chainID ids.ID, requestID uint32, containers [][]byte) (Msg, error) {
+	return m.Pack(MultiPut, map[Field]interface{}{
+		ChainID:             chainID.Bytes(),
+		RequestID:           requestID,
+		MultiContainerBytes: containers,
+	})
+}
+
// PushQuery message
func (m Builder) PushQuery(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) {
	return m.Pack(PushQuery, map[Field]interface{}{
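As a hedged sketch of how a bootstrapping peer might use the two new constructors (builder and send are hypothetical stand-ins; only the signatures above come from this diff):

// Ask a peer for the ancestors of a container we just learned about.
msg, err := builder.GetAncestors(chainID, requestID, containerID)
if err != nil {
	return err
}
send(peer, msg)

// The peer answers with a single MultiPut carrying many containers at
// once, packed into the MultiContainerBytes field as [][]byte.
reply, err := builder.MultiPut(chainID, requestID, containers)
if err != nil {
	return err
}
send(peer, reply)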
@@ -79,14 +79,8 @@ func TestBuildGetPeerList(t *testing.T) {

func TestBuildPeerList(t *testing.T) {
	ips := []utils.IPDesc{
-		utils.IPDesc{
-			IP:   net.IPv6loopback,
-			Port: 12345,
-		},
-		utils.IPDesc{
-			IP:   net.IPv6loopback,
-			Port: 54321,
-		},
+		{IP: net.IPv6loopback, Port: 12345},
+		{IP: net.IPv6loopback, Port: 54321},
	}

	msg, err := TestBuilder.PeerList(ips)
@@ -12,17 +12,18 @@ type Field uint32

// Fields that may be packed. These values are not sent over the wire.
const (
-	VersionStr     Field = iota // Used in handshake
-	NetworkID                   // Used in handshake
-	NodeID                      // Used in handshake
-	MyTime                      // Used in handshake
-	IP                          // Used in handshake
-	Peers                       // Used in handshake
-	ChainID                     // Used for dispatching
-	RequestID                   // Used for all messages
-	ContainerID                 // Used for querying
-	ContainerBytes              // Used for gossiping
-	ContainerIDs                // Used for querying
+	VersionStr          Field = iota // Used in handshake
+	NetworkID                        // Used in handshake
+	NodeID                           // Used in handshake
+	MyTime                           // Used in handshake
+	IP                               // Used in handshake
+	Peers                            // Used in handshake
+	ChainID                          // Used for dispatching
+	RequestID                        // Used for all messages
+	ContainerID                      // Used for querying
+	ContainerBytes                   // Used for gossiping
+	ContainerIDs                     // Used for querying
+	MultiContainerBytes              // Used in MultiPut
)

// Packer returns the packer function that can be used to pack this field.
@@ -50,6 +51,8 @@ func (f Field) Packer() func(*wrappers.Packer, interface{}) {
		return wrappers.TryPackBytes
	case ContainerIDs:
		return wrappers.TryPackHashes
+	case MultiContainerBytes:
+		return wrappers.TryPack2DBytes
	default:
		return nil
	}
@@ -80,6 +83,8 @@ func (f Field) Unpacker() func(*wrappers.Packer) interface{} {
		return wrappers.TryUnpackBytes
	case ContainerIDs:
		return wrappers.TryUnpackHashes
+	case MultiContainerBytes:
+		return wrappers.TryUnpack2DBytes
	default:
		return nil
	}
@@ -107,6 +112,8 @@ func (f Field) String() string {
		return "Container Bytes"
	case ContainerIDs:
		return "Container IDs"
+	case MultiContainerBytes:
+		return "MultiContainerBytes"
	default:
		return "Unknown Field"
	}
@@ -135,8 +142,12 @@ func (op Op) String() string {
		return "accepted"
	case Get:
		return "get"
+	case GetAncestors:
+		return "get_ancestors"
	case Put:
		return "put"
+	case MultiPut:
+		return "multi_put"
	case PushQuery:
		return "push_query"
	case PullQuery:
@@ -166,26 +177,33 @@ const (
	PushQuery
	PullQuery
	Chits
+	// Bootstrapping:
+	// TODO: Move GetAncestors and MultiPut with the rest of the bootstrapping
+	// commands when we do non-backwards compatible upgrade
+	GetAncestors
+	MultiPut
)

// Defines the messages that can be sent/received with this network
var (
	Messages = map[Op][]Field{
		// Handshake:
-		GetVersion:  []Field{},
-		Version:     []Field{NetworkID, NodeID, MyTime, IP, VersionStr},
-		GetPeerList: []Field{},
-		PeerList:    []Field{Peers},
+		GetVersion:  {},
+		Version:     {NetworkID, NodeID, MyTime, IP, VersionStr},
+		GetPeerList: {},
+		PeerList:    {Peers},
		// Bootstrapping:
-		GetAcceptedFrontier: []Field{ChainID, RequestID},
-		AcceptedFrontier:    []Field{ChainID, RequestID, ContainerIDs},
-		GetAccepted:         []Field{ChainID, RequestID, ContainerIDs},
-		Accepted:            []Field{ChainID, RequestID, ContainerIDs},
+		GetAcceptedFrontier: {ChainID, RequestID},
+		AcceptedFrontier:    {ChainID, RequestID, ContainerIDs},
+		GetAccepted:         {ChainID, RequestID, ContainerIDs},
+		Accepted:            {ChainID, RequestID, ContainerIDs},
+		GetAncestors:        {ChainID, RequestID, ContainerID},
+		MultiPut:            {ChainID, RequestID, MultiContainerBytes},
		// Consensus:
-		Get:       []Field{ChainID, RequestID, ContainerID},
-		Put:       []Field{ChainID, RequestID, ContainerID, ContainerBytes},
-		PushQuery: []Field{ChainID, RequestID, ContainerID, ContainerBytes},
-		PullQuery: []Field{ChainID, RequestID, ContainerID},
-		Chits:     []Field{ChainID, RequestID, ContainerIDs},
+		Get:       {ChainID, RequestID, ContainerID},
+		Put:       {ChainID, RequestID, ContainerID, ContainerBytes},
+		PushQuery: {ChainID, RequestID, ContainerID, ContainerBytes},
+		PullQuery: {ChainID, RequestID, ContainerID},
+		Chits:     {ChainID, RequestID, ContainerIDs},
	}
)
@@ -56,6 +56,7 @@ type metrics struct {
	getPeerlist, peerlist,
	getAcceptedFrontier, acceptedFrontier,
	getAccepted, accepted,
-	get, put,
+	get, getAncestors, put, multiPut,
	pushQuery, pullQuery, chits messageMetrics
}
@@ -83,7 +83,9 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error {
	errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
	errs.Add(m.accepted.initialize(Accepted, registerer))
	errs.Add(m.get.initialize(Get, registerer))
+	errs.Add(m.getAncestors.initialize(GetAncestors, registerer))
	errs.Add(m.put.initialize(Put, registerer))
+	errs.Add(m.multiPut.initialize(MultiPut, registerer))
	errs.Add(m.pushQuery.initialize(PushQuery, registerer))
	errs.Add(m.pullQuery.initialize(PullQuery, registerer))
	errs.Add(m.chits.initialize(Chits, registerer))
@@ -111,8 +113,12 @@ func (m *metrics) message(msgType Op) *messageMetrics {
		return &m.accepted
	case Get:
		return &m.get
+	case GetAncestors:
+		return &m.getAncestors
	case Put:
		return &m.put
+	case MultiPut:
+		return &m.multiPut
	case PushQuery:
		return &m.pushQuery
	case PullQuery:
@@ -21,24 +21,28 @@ import (
	"github.com/ava-labs/gecko/snow/triggers"
	"github.com/ava-labs/gecko/snow/validators"
	"github.com/ava-labs/gecko/utils"
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/ava-labs/gecko/utils/logging"
	"github.com/ava-labs/gecko/utils/random"
	"github.com/ava-labs/gecko/utils/timer"
	"github.com/ava-labs/gecko/version"
)

// reasonable default values
const (
-	defaultInitialReconnectDelay        = time.Second
-	defaultMaxReconnectDelay            = time.Hour
-	defaultMaxMessageSize        uint32 = 1 << 21
-	defaultSendQueueSize                = 1 << 10
-	defaultMaxClockDifference           = time.Minute
-	defaultPeerListGossipSpacing        = time.Minute
-	defaultPeerListGossipSize           = 100
-	defaultPeerListStakerGossipFraction = 2
-	defaultGetVersionTimeout            = 2 * time.Second
-	defaultAllowPrivateIPs              = true
-	defaultGossipSize                   = 50
+	defaultInitialReconnectDelay              = time.Second
+	defaultMaxReconnectDelay                  = time.Hour
+	DefaultMaxMessageSize              uint32 = 1 << 21
+	defaultSendQueueSize                      = 1 << 10
+	defaultMaxNetworkPendingSendBytes         = 1 << 29 // 512MB
+	defaultNetworkPendingSendBytesToRateLimit = defaultMaxNetworkPendingSendBytes / 4
+	defaultMaxClockDifference                 = time.Minute
+	defaultPeerListGossipSpacing              = time.Minute
+	defaultPeerListGossipSize                 = 100
+	defaultPeerListStakerGossipFraction       = 2
+	defaultGetVersionTimeout                  = 2 * time.Second
+	defaultAllowPrivateIPs                    = true
+	defaultGossipSize                         = 50
)

// Network defines the functionality of the networking library.
||||
|
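Worth pausing on the two constants introduced here. Below is a minimal sketch (not part of the commit) checking the byte budgets they imply; the /20 per-connection divisor is the one used later in peer.send:

package main

import "fmt"

func main() {
	const (
		maxNetworkPendingSendBytes         = 1 << 29                        // 512 MiB shared by all peers
		networkPendingSendBytesToRateLimit = maxNetworkPendingSendBytes / 4  // 128 MiB: begin rate limiting
		perConnPendingCap                  = maxNetworkPendingSendBytes / 20 // ~25.6 MiB for any single connection
	)
	fmt.Println(maxNetworkPendingSendBytes, networkPendingSendBytesToRateLimit, perConnPendingCap)
	// Prints: 536870912 134217728 26843545
}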
@@ -70,9 +74,9 @@ type Network interface {
	// The handler will initially be called with this local node's ID.
	RegisterHandler(h Handler)

	// Returns the IPs of nodes this network is currently connected to
	// externally. Thread safety must be managed internally to the network.
	IPs() []utils.IPDesc
	// Returns the description of the nodes this network is currently connected
	// to externally. Thread safety must be managed internally to the network.
	Peers() []PeerID

	// Close this network and all existing connections it has. Thread safety
	// must be managed internally to the network. Calling close multiple times

@@ -102,26 +106,30 @@ type network struct {
	clock timer.Clock
	lastHeartbeat int64

	initialReconnectDelay time.Duration
	maxReconnectDelay time.Duration
	maxMessageSize uint32
	sendQueueSize int
	maxClockDifference time.Duration
	peerListGossipSpacing time.Duration
	peerListGossipSize int
	peerListStakerGossipFraction int
	getVersionTimeout time.Duration
	allowPrivateIPs bool
	gossipSize int
	initialReconnectDelay time.Duration
	maxReconnectDelay time.Duration
	maxMessageSize uint32
	sendQueueSize int
	maxNetworkPendingSendBytes int
	networkPendingSendBytesToRateLimit int
	maxClockDifference time.Duration
	peerListGossipSpacing time.Duration
	peerListGossipSize int
	peerListStakerGossipFraction int
	getVersionTimeout time.Duration
	allowPrivateIPs bool
	gossipSize int

	executor timer.Executor

	b Builder

	stateLock sync.Mutex
	pendingBytes int
	closed bool
	disconnectedIPs map[string]struct{}
	connectedIPs map[string]struct{}
	retryDelay map[string]time.Duration
	// TODO: bound the size of [myIPs] to avoid DoS. LRU caching would be ideal
	myIPs map[string]struct{} // set of IPs that resulted in my ID.
	peers map[[20]byte]*peer
@@ -161,8 +169,10 @@ func NewDefaultNetwork(
	router,
	defaultInitialReconnectDelay,
	defaultMaxReconnectDelay,
	defaultMaxMessageSize,
	DefaultMaxMessageSize,
	defaultSendQueueSize,
	defaultMaxNetworkPendingSendBytes,
	defaultNetworkPendingSendBytesToRateLimit,
	defaultMaxClockDifference,
	defaultPeerListGossipSpacing,
	defaultPeerListGossipSize,

@@ -192,6 +202,8 @@ func NewNetwork(
	maxReconnectDelay time.Duration,
	maxMessageSize uint32,
	sendQueueSize int,
	maxNetworkPendingSendBytes int,
	networkPendingSendBytesToRateLimit int,
	maxClockDifference time.Duration,
	peerListGossipSpacing time.Duration,
	peerListGossipSize int,

@@ -201,34 +213,37 @@ func NewNetwork(
	gossipSize int,
) Network {
	net := &network{
	log: log,
	id: id,
	ip: ip,
	networkID: networkID,
	version: version,
	parser: parser,
	listener: listener,
	dialer: dialer,
	serverUpgrader: serverUpgrader,
	clientUpgrader: clientUpgrader,
	vdrs: vdrs,
	router: router,
	nodeID: rand.Uint32(),
	initialReconnectDelay: initialReconnectDelay,
	maxReconnectDelay: maxReconnectDelay,
	maxMessageSize: maxMessageSize,
	sendQueueSize: sendQueueSize,
	maxClockDifference: maxClockDifference,
	peerListGossipSpacing: peerListGossipSpacing,
	peerListGossipSize: peerListGossipSize,
	peerListStakerGossipFraction: peerListStakerGossipFraction,
	getVersionTimeout: getVersionTimeout,
	allowPrivateIPs: allowPrivateIPs,
	gossipSize: gossipSize,
	log: log,
	id: id,
	ip: ip,
	networkID: networkID,
	version: version,
	parser: parser,
	listener: listener,
	dialer: dialer,
	serverUpgrader: serverUpgrader,
	clientUpgrader: clientUpgrader,
	vdrs: vdrs,
	router: router,
	nodeID: rand.Uint32(),
	initialReconnectDelay: initialReconnectDelay,
	maxReconnectDelay: maxReconnectDelay,
	maxMessageSize: maxMessageSize,
	sendQueueSize: sendQueueSize,
	maxNetworkPendingSendBytes: maxNetworkPendingSendBytes,
	networkPendingSendBytesToRateLimit: networkPendingSendBytesToRateLimit,
	maxClockDifference: maxClockDifference,
	peerListGossipSpacing: peerListGossipSpacing,
	peerListGossipSize: peerListGossipSize,
	peerListStakerGossipFraction: peerListStakerGossipFraction,
	getVersionTimeout: getVersionTimeout,
	allowPrivateIPs: allowPrivateIPs,
	gossipSize: gossipSize,

	disconnectedIPs: make(map[string]struct{}),
	connectedIPs: make(map[string]struct{}),
	myIPs: map[string]struct{}{ip.String(): struct{}{}},
	retryDelay: make(map[string]time.Duration),
	myIPs: map[string]struct{}{ip.String(): {}},
	peers: make(map[[20]byte]*peer),
	}
	net.initialize(registerer)

@@ -264,8 +279,11 @@ func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
	msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs)
	if err != nil {
		n.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d",
			containerIDs.Len())
		n.log.Error("failed to build AcceptedFrontier(%s, %d, %s): %s",
			chainID,
			requestID,
			containerIDs,
			err)
		return // Packing message failed
	}

@@ -277,7 +295,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID)
		n.log.Debug("failed to send AcceptedFrontier(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerIDs)
		n.acceptedFrontier.numFailed.Inc()
	} else {
		n.acceptedFrontier.numSent.Inc()

@@ -288,6 +310,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
	msg, err := n.b.GetAccepted(chainID, requestID, containerIDs)
	if err != nil {
		n.log.Error("failed to build GetAccepted(%s, %d, %s): %s",
			chainID,
			requestID,
			containerIDs,
			err)
		for _, validatorID := range validatorIDs.List() {
			vID := validatorID
			n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })

@@ -305,6 +332,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send GetAccepted(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerIDs)
		n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
		n.getAccepted.numFailed.Inc()
	} else {

@@ -317,8 +349,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
	msg, err := n.b.Accepted(chainID, requestID, containerIDs)
	if err != nil {
		n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d",
			containerIDs.Len())
		n.log.Error("failed to build Accepted(%s, %d, %s): %s",
			chainID,
			requestID,
			containerIDs,
			err)
		return // Packing message failed
	}

@@ -330,13 +365,72 @@ func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID ui
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send an Accepted message to: %s", validatorID)
		n.log.Debug("failed to send Accepted(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerIDs)
		n.accepted.numFailed.Inc()
	} else {
		n.accepted.numSent.Inc()
	}
}

// GetAncestors implements the Sender interface.
func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
	msg, err := n.b.GetAncestors(chainID, requestID, containerID)
	if err != nil {
		n.log.Error("failed to build GetAncestors message: %s", err)
		return
	}

	n.stateLock.Lock()
	defer n.stateLock.Unlock()

	peer, sent := n.peers[validatorID.Key()]
	if sent {
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send GetAncestors(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerID)
		n.executor.Add(func() { n.router.GetAncestorsFailed(validatorID, chainID, requestID) })
		n.getAncestors.numFailed.Inc()
	} else {
		n.getAncestors.numSent.Inc()
	}
}

// MultiPut implements the Sender interface.
func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
	msg, err := n.b.MultiPut(chainID, requestID, containers)
	if err != nil {
		n.log.Error("failed to build MultiPut message because of container of size %d", len(containers))
		return
	}

	n.stateLock.Lock()
	defer n.stateLock.Unlock()

	peer, sent := n.peers[validatorID.Key()]
	if sent {
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send MultiPut(%s, %s, %d, %d)",
			validatorID,
			chainID,
			requestID,
			len(containers))
		n.multiPut.numFailed.Inc()
	} else {
		n.multiPut.numSent.Inc()
	}
}

// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
	msg, err := n.b.Get(chainID, requestID, containerID)
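Every sender method added or touched in this file follows the same build-message / lock / send / count-metric shape. A hypothetical helper (sendToPeer and onFail are illustrative names, not from the commit) makes the shared pattern explicit:

// Sketch only; assumes the same network fields (stateLock, peers) shown in the diff.
func (n *network) sendToPeer(validatorID ids.ShortID, msg Msg, metrics *messageMetrics, onFail func()) {
	n.stateLock.Lock()
	defer n.stateLock.Unlock()

	peer, sent := n.peers[validatorID.Key()]
	if sent {
		sent = peer.send(msg)
	}
	if !sent {
		if onFail != nil {
			onFail() // e.g. schedule router.GetFailed via n.executor
		}
		metrics.numFailed.Inc()
		return
	}
	metrics.numSent.Inc()
}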
@@ -350,7 +444,12 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send a Get message to: %s", validatorID)
		n.log.Debug("failed to send Get(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerID)
		n.executor.Add(func() { n.router.GetFailed(validatorID, chainID, requestID) })
		n.get.numFailed.Inc()
	} else {
		n.get.numSent.Inc()

@@ -361,7 +460,12 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
	msg, err := n.b.Put(chainID, requestID, containerID, container)
	if err != nil {
		n.log.Error("failed to build Put message because of container of size %d", len(container))
		n.log.Error("failed to build Put(%s, %d, %s): %s. len(container): %d",
			chainID,
			requestID,
			containerID,
			err,
			len(container))
		return
	}

@@ -373,7 +477,12 @@ func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send a Put message to: %s", validatorID)
		n.log.Debug("failed to send Put(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerID)
		n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
		n.put.numFailed.Inc()
	} else {
		n.put.numSent.Inc()

@@ -384,11 +493,17 @@ func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
	msg, err := n.b.PushQuery(chainID, requestID, containerID, container)
	if err != nil {
		n.log.Error("failed to build PushQuery(%s, %d, %s): %s. len(container): %d",
			chainID,
			requestID,
			containerID,
			err,
			len(container))
		n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
		for _, validatorID := range validatorIDs.List() {
			vID := validatorID
			n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
		}
		n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
		return // Packing message failed
	}

@@ -402,7 +517,12 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed sending a PushQuery message to: %s", vID)
		n.log.Debug("failed to send PushQuery(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerID)
		n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
		n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
		n.pushQuery.numFailed.Inc()
	} else {

@@ -426,7 +546,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed sending a PullQuery message to: %s", vID)
		n.log.Debug("failed to send PullQuery(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			containerID)
		n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
		n.pullQuery.numFailed.Inc()
	} else {

@@ -439,7 +563,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
	msg, err := n.b.Chits(chainID, requestID, votes)
	if err != nil {
		n.log.Error("failed to build Chits message because of %d votes", votes.Len())
		n.log.Error("failed to build Chits(%s, %d, %s): %s",
			chainID,
			requestID,
			votes,
			err)
		return
	}

@@ -451,7 +579,11 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
		sent = peer.send(msg)
	}
	if !sent {
		n.log.Debug("failed to send a Chits message to: %s", validatorID)
		n.log.Debug("failed to send Chits(%s, %s, %d, %s)",
			validatorID,
			chainID,
			requestID,
			votes)
		n.chits.numFailed.Inc()
	} else {
		n.chits.numSent.Inc()

@@ -461,7 +593,8 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
// Gossip attempts to gossip the container to the network
func (n *network) Gossip(chainID, containerID ids.ID, container []byte) {
	if err := n.gossipContainer(chainID, containerID, container); err != nil {
		n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
		n.log.Debug("failed to Gossip(%s, %s): %s", chainID, containerID, err)
		n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
	}
}

@@ -483,7 +616,15 @@ func (n *network) Dispatch() error {
	for {
		conn, err := n.listener.Accept()
		if err != nil {
			return err
			n.stateLock.Lock()
			closed := n.closed
			n.stateLock.Unlock()

			if closed {
				return err
			}
			n.log.Debug("error during server accept: %s", err)
			continue
		}
		go n.upgrade(&peer{
			net: n,

@@ -511,17 +652,24 @@ func (n *network) RegisterHandler(h Handler) {
}

// IPs implements the Network interface
func (n *network) IPs() []utils.IPDesc {
func (n *network) Peers() []PeerID {
	n.stateLock.Lock()
	defer n.stateLock.Unlock()

	ips := []utils.IPDesc(nil)
	peers := []PeerID{}
	for _, peer := range n.peers {
		if peer.connected {
			ips = append(ips, peer.ip)
			peers = append(peers, PeerID{
				IP: peer.conn.RemoteAddr().String(),
				PublicIP: peer.ip.String(),
				ID: peer.id,
				Version: peer.versionStr,
				LastSent: time.Unix(atomic.LoadInt64(&peer.lastSent), 0),
				LastReceived: time.Unix(atomic.LoadInt64(&peer.lastReceived), 0),
			})
		}
	}
	return ips
	return peers
}

// Close implements the Network interface
@@ -620,7 +768,9 @@ func (n *network) gossip() {
	}
	msg, err := n.b.PeerList(ips)
	if err != nil {
		n.log.Warn("failed to gossip PeerList message due to %s", err)
		n.log.Error("failed to build peer list to gossip: %s. len(ips): %d",
			err,
			len(ips))
		continue
	}

@@ -666,16 +816,29 @@ func (n *network) gossip() {
// the network is closed
func (n *network) connectTo(ip utils.IPDesc) {
	str := ip.String()
	delay := n.initialReconnectDelay
	n.stateLock.Lock()
	delay := n.retryDelay[str]
	n.stateLock.Unlock()

	for {
		time.Sleep(delay)

		if delay == 0 {
			delay = n.initialReconnectDelay
		}

		delay = time.Duration(float64(delay) * (1 + rand.Float64()))
		if delay > n.maxReconnectDelay {
			// set the timeout to [.75, 1) * maxReconnectDelay
			delay = time.Duration(float64(n.maxReconnectDelay) * (3 + rand.Float64()) / 4)
		}

		n.stateLock.Lock()
		_, isDisconnected := n.disconnectedIPs[str]
		_, isConnected := n.connectedIPs[str]
		_, isMyself := n.myIPs[str]
		closed := n.closed

		n.stateLock.Unlock()

		if !isDisconnected || isConnected || isMyself || closed {
			// If the IP was discovered by the peer connecting to us, we don't
			// need to attempt to connect anymore

@@ -685,8 +848,12 @@ func (n *network) connectTo(ip utils.IPDesc) {

			// If the network was closed, we should stop attempting to connect
			// to the peer

			n.stateLock.Unlock()
			return
		}
		n.retryDelay[str] = delay
		n.stateLock.Unlock()

		err := n.attemptConnect(ip)
		if err == nil {

@@ -694,12 +861,6 @@ func (n *network) connectTo(ip utils.IPDesc) {
		}
		n.log.Verbo("error attempting to connect to %s: %s. Reattempting in %s",
			ip, err, delay)

		time.Sleep(delay)
		delay *= 2
		if delay > n.maxReconnectDelay {
			delay = n.maxReconnectDelay
		}
	}
}
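The reconnect loop above replaces fixed doubling with jittered growth, and persists the per-IP delay in retryDelay across calls. Pulled out as a standalone sketch (assuming math/rand and time are imported; not code from the commit), the schedule is:

// nextRetryDelay mirrors the delay computation in connectTo above.
func nextRetryDelay(delay, initial, max time.Duration) time.Duration {
	if delay == 0 {
		delay = initial // first retry for this IP
	}
	// Grow by a random factor in [1, 2) so reconnect attempts de-synchronize.
	delay = time.Duration(float64(delay) * (1 + rand.Float64()))
	if delay > max {
		// Clamp to a random point in [0.75, 1) * max rather than a fixed cap.
		delay = time.Duration(float64(max) * (3 + rand.Float64()) / 4)
	}
	return delay
}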
@@ -737,6 +898,7 @@ func (n *network) upgrade(p *peer, upgrader Upgrader) error {
	defer n.stateLock.Unlock()

	if n.closed {
		p.conn.Close()
		return nil
	}

@@ -752,6 +914,7 @@ func (n *network) upgrade(p *peer, upgrader Upgrader) error {
		}
		str := p.ip.String()
		delete(n.disconnectedIPs, str)
		delete(n.retryDelay, str)
		n.myIPs[str] = struct{}{}
	}
	p.conn.Close()

@@ -760,7 +923,9 @@ func (n *network) upgrade(p *peer, upgrader Upgrader) error {

	if _, ok := n.peers[key]; ok {
		if !p.ip.IsZero() {
			delete(n.disconnectedIPs, p.ip.String())
			str := p.ip.String()
			delete(n.disconnectedIPs, str)
			delete(n.retryDelay, str)
		}
		p.conn.Close()
		return nil

@@ -798,6 +963,7 @@ func (n *network) connected(p *peer) {
		str := p.ip.String()

		delete(n.disconnectedIPs, str)
		delete(n.retryDelay, str)
		n.connectedIPs[str] = struct{}{}
	}
@@ -8,6 +8,7 @@ import (
	"math"
	"net"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ava-labs/gecko/ids"

@@ -30,6 +31,10 @@ type peer struct {
	// state lock held.
	closed bool

	// number of bytes currently in the send queue; only modified when the
	// network state lock is held.
	pendingBytes int

	// queue of messages this connection is attempting to send the peer. Is
	// closed when the connection is closed.
	sender chan []byte

@@ -43,6 +48,12 @@ type peer struct {

	// the connection object that is used to read/write messages from
	conn net.Conn

	// version that the peer reported during the handshake
	versionStr string

	// unix time of the last message sent and received respectively
	lastSent, lastReceived int64
}

// assume the stateLock is held

@@ -148,6 +159,10 @@ func (p *peer) WriteMessages() {
			p.id,
			formatting.DumpBytes{Bytes: msg})

		p.net.stateLock.Lock()
		p.pendingBytes -= len(msg)
		p.net.stateLock.Unlock()

		packer := wrappers.Packer{Bytes: make([]byte, len(msg)+wrappers.IntLen)}
		packer.PackBytes(msg)
		msg = packer.Bytes

@@ -159,6 +174,7 @@ func (p *peer) WriteMessages() {
			}
			msg = msg[written:]
		}
		atomic.StoreInt64(&p.lastSent, p.net.clock.Time().Unix())
	}
}

@@ -176,8 +192,22 @@ func (p *peer) send(msg Msg) bool {
		p.net.log.Debug("dropping message to %s due to a closed connection", p.id)
		return false
	}

	msgBytes := msg.Bytes()
	newPendingBytes := p.net.pendingBytes + len(msgBytes)
	newConnPendingBytes := p.pendingBytes + len(msgBytes)
	if newPendingBytes > p.net.networkPendingSendBytesToRateLimit && // Check to see if we should be enforcing any rate limiting
		uint32(p.pendingBytes) > p.net.maxMessageSize && // this connection should have a minimum allowed bandwidth
		(newPendingBytes > p.net.maxNetworkPendingSendBytes || // Check to see if this message would put too much memory into the network
			newConnPendingBytes > p.net.maxNetworkPendingSendBytes/20) { // Check to see if this connection is using too much memory
		p.net.log.Debug("dropping message to %s due to a send queue with too many bytes", p.id)
		return false
	}

	select {
	case p.sender <- msg.Bytes():
	case p.sender <- msgBytes:
		p.net.pendingBytes = newPendingBytes
		p.pendingBytes = newConnPendingBytes
		return true
	default:
		p.net.log.Debug("dropping message to %s due to a full send queue", p.id)
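The new drop condition in peer.send is the core of the rate limiting. Restated as a standalone predicate (a sketch for readability; the field meanings follow the diff above):

// shouldDrop reports whether a queued message would exceed the send budgets.
func shouldDrop(netPending, connPending, msgLen, rateLimitAt, maxNetPending int, maxMessageSize uint32) bool {
	newNetPending := netPending + msgLen
	newConnPending := connPending + msgLen
	return newNetPending > rateLimitAt && // only rate limit once the network is busy
		uint32(connPending) > maxMessageSize && // each connection keeps a minimum allowance
		(newNetPending > maxNetPending || // total queued bytes would exceed the global budget
			newConnPending > maxNetPending/20) // or this one connection uses more than 5% of it
}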
@@ -188,11 +218,12 @@ func (p *peer) send(msg Msg) bool {
// assumes the stateLock is not held
func (p *peer) handle(msg Msg) {
	p.net.heartbeat()
	atomic.StoreInt64(&p.lastReceived, p.net.clock.Time().Unix())

	op := msg.Op()
	msgMetrics := p.net.message(op)
	if msgMetrics == nil {
		p.net.log.Debug("dropping an unknown message from %s with op %d", p.id, op)
		p.net.log.Debug("dropping an unknown message from %s with op %s", p.id, op.String())
		return
	}
	msgMetrics.numReceived.Inc()

@@ -227,14 +258,20 @@ func (p *peer) handle(msg Msg) {
		p.accepted(msg)
	case Get:
		p.get(msg)
	case GetAncestors:
		p.getAncestors(msg)
	case Put:
		p.put(msg)
	case MultiPut:
		p.multiPut(msg)
	case PushQuery:
		p.pushQuery(msg)
	case PullQuery:
		p.pullQuery(msg)
	case Chits:
		p.chits(msg)
	default:
		p.net.log.Debug("dropping an unknown message from %s with op %s", p.id, op.String())
	}
}

@@ -415,6 +452,8 @@ func (p *peer) version(msg Msg) {
		return
	}

	p.versionStr = peerVersion.String()

	p.connected = true
	p.net.connected(p)
}

@@ -526,6 +565,16 @@ func (p *peer) get(msg Msg) {
	p.net.router.Get(p.id, chainID, requestID, containerID)
}

func (p *peer) getAncestors(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(RequestID).(uint32)
	containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
	p.net.log.AssertNoError(err)

	p.net.router.GetAncestors(p.id, chainID, requestID, containerID)
}

// assumes the stateLock is not held
func (p *peer) put(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))

@@ -538,6 +587,16 @@ func (p *peer) put(msg Msg) {
	p.net.router.Put(p.id, chainID, requestID, containerID, container)
}

// assumes the stateLock is not held
func (p *peer) multiPut(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
	p.net.log.AssertNoError(err)
	requestID := msg.Get(RequestID).(uint32)
	containers := msg.Get(MultiContainerBytes).([][]byte)

	p.net.router.MultiPut(p.id, chainID, requestID, containers)
}

// assumes the stateLock is not held
func (p *peer) pushQuery(msg Msg) {
	chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
@@ -0,0 +1,20 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package network

import (
	"time"

	"github.com/ava-labs/gecko/ids"
)

// PeerID ...
type PeerID struct {
	IP string `json:"ip"`
	PublicIP string `json:"publicIP"`
	ID ids.ShortID `json:"id"`
	Version string `json:"version"`
	LastSent time.Time `json:"lastSent"`
	LastReceived time.Time `json:"lastReceived"`
}
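For reference, a hedged illustration (not from the commit) of how one of these entries marshals to JSON; the addresses and version below are invented, and the zero-valued ID and timestamps are left at their defaults:

package network

import (
	"encoding/json"
	"fmt"
)

func ExamplePeerIDJSON() {
	p := PeerID{
		IP:       "10.0.0.7:9651",    // hypothetical remote address
		PublicIP: "203.0.113.7:9651", // hypothetical advertised address
		Version:  "avalanche/0.5.5",
	}
	out, _ := json.MarshalIndent(p, "", "  ")
	fmt.Println(string(out))
}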
@@ -34,6 +34,7 @@ type Config struct {

	// Staking configuration
	StakingIP utils.IPDesc
	EnableP2PTLS bool
	EnableStaking bool
	StakingKeyFile string
	StakingCertFile string

node/node.go
@@ -55,7 +55,8 @@ const (
var (
	genesisHashKey = []byte("genesisID")

	nodeVersion = version.NewDefaultVersion("avalanche", 0, 3, 0)
	// Version is the version of this code
	Version = version.NewDefaultVersion("avalanche", 0, 5, 5)
	versionParser = version.NewDefaultParser()
)

@@ -118,7 +119,7 @@ func (n *Node) initNetworking() error {
	dialer := network.NewDialer(TCP)

	var serverUpgrader, clientUpgrader network.Upgrader
	if n.Config.EnableStaking {
	if n.Config.EnableP2PTLS {
		cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile)
		if err != nil {
			return err

@@ -156,7 +157,7 @@ func (n *Node) initNetworking() error {
		n.ID,
		n.Config.StakingIP,
		n.Config.NetworkID,
		nodeVersion,
		Version,
		versionParser,
		listener,
		dialer,

@@ -195,7 +196,7 @@ func (i *insecureValidatorManager) Disconnected(vdrID ids.ShortID) bool {

// Dispatch starts the node's servers.
// Returns when the node exits.
func (n *Node) Dispatch() {
func (n *Node) Dispatch() error {
	// Add bootstrap nodes to the peer network
	for _, peer := range n.Config.BootstrapPeers {
		if !peer.IP.Equal(n.Config.StakingIP) {

@@ -205,7 +206,7 @@ func (n *Node) Dispatch() {
		}
	}

	n.Net.Dispatch()
	return n.Net.Dispatch()
}

/*
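Since Dispatch now returns an error, a caller such as main can report why the networking loop exited rather than returning silently. A minimal sketch (the node and log variables are assumed to exist; this is not code from the commit):

if err := node.Dispatch(); err != nil {
	log.Fatal("node dispatch returned: %s", err) // distinguish a crash from a clean shutdown
}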
@@ -252,7 +253,7 @@ func (n *Node) initDatabase() error {
// Otherwise, it is a hash of the TLS certificate that this node
// uses for P2P communication
func (n *Node) initNodeID() error {
	if !n.Config.EnableStaking {
	if !n.Config.EnableP2PTLS {
		n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String())))
		n.Log.Info("Set the node's ID to %s", n.ID)
		return nil

@@ -460,7 +461,7 @@ func (n *Node) initMetricsAPI() {
func (n *Node) initAdminAPI() {
	if n.Config.AdminAPIEnabled {
		n.Log.Info("initializing Admin API")
		service := admin.NewService(n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
		service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
		n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
	}
}

@@ -525,6 +526,7 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
	n.Log = logger
	n.LogFactory = logFactory
	n.Config = Config
	n.Log.Info("Gecko version is: %s", Version)

	httpLog, err := logFactory.MakeSubdir("http")
	if err != nil {

@@ -575,4 +577,5 @@ func (n *Node) Shutdown() {
	n.Net.Close()
	n.chainManager.Shutdown()
	utils.ClearSignals(n.nodeCloser)
	n.Log.Info("node shut down successfully")
}
@@ -1,3 +1,7 @@
- name: Kill Node
  command: killall -SIGINT ava
  command: killall -SIGTERM ava
  ignore_errors: true

- name: Kill EVM
  command: killall -SIGTERM evm
  ignore_errors: true

@@ -15,7 +15,7 @@ GECKO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Directory
BUILD_DIR=$GECKO_PATH/build # Where binaries go
PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go

CORETH_VER="0.1.0" # Should match coreth version in go.mod
CORETH_VER="0.2.4" # Should match coreth version in go.mod
CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER"

# Build Gecko
@@ -22,12 +22,12 @@ type Decidable interface {
	// Accept this element.
	//
	// This element will be accepted by every correct node in the network.
	Accept()
	Accept() error

	// Reject this element.
	//
	// This element will not be accepted by any correct node in the network.
	Reject()
	Reject() error

	// Status returns this element's current status.
	//
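To see what the signature change asks of implementers, here is a hedged sketch (the dbBlock type and its database field are hypothetical, not from the commit): a type that persists state on accept can now surface a failed write instead of panicking:

type dbBlock struct {
	status choices.Status
	id     ids.ID
	bytes  []byte
	db     database.Database // hypothetical persistence layer
}

func (b *dbBlock) Accept() error {
	b.status = choices.Accepted
	// A failed write is now reported to consensus, which treats it as critical.
	return b.db.Put(b.id.Bytes(), b.bytes)
}

func (b *dbBlock) Reject() error {
	b.status = choices.Rejected
	return nil
}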
@@ -34,8 +34,9 @@ type Consensus interface {
	IsVirtuous(snowstorm.Tx) bool

	// Adds a new decision. Assumes the dependencies have already been added.
	// Assumes that mutations don't conflict with themselves.
	Add(Vertex)
	// Assumes that mutations don't conflict with themselves. Returns if a
	// critical error has occurred.
	Add(Vertex) error

	// VertexIssued returns true iff Vertex has been added
	VertexIssued(Vertex) bool

@@ -54,8 +55,9 @@ type Consensus interface {
	Preferences() ids.Set

	// RecordPoll collects the results of a network poll. If a result has not
	// been added, the result is dropped.
	RecordPoll(ids.UniqueBag)
	// been added, the result is dropped. Returns if a critical error has
	// occurred.
	RecordPoll(ids.UniqueBag) error

	// Quiesce returns true iff all vertices that have been added but not been accepted or rejected are rogue.
	// Note, it is possible that after returning quiesce, a new decision may be added such

@@ -75,6 +77,10 @@ type Vertex interface {
	// Returns the vertices this vertex depends on
	Parents() []Vertex

	// Returns the height of this vertex. A vertex's height is defined by one
	// greater than the maximum height of the parents.
	Height() uint64

	// Returns a series of state transitions to be performed on acceptance
	Txs() []snowstorm.Tx
@@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
	Namespace: namespace,
	Name: "vtx_accepted",
	Help: "Latency of accepting from the time the vertex was issued in milliseconds",
	Buckets: timer.Buckets,
	Buckets: timer.MillisecondsBuckets,
	})
	m.latRejected = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name: "vtx_rejected",
			Help: "Latency of rejecting from the time the vertex was issued in milliseconds",
			Buckets: timer.Buckets,
			Buckets: timer.MillisecondsBuckets,
		})

	if err := registerer.Register(m.numProcessing); err != nil {
@@ -74,7 +74,7 @@ func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier
	for _, vtx := range frontier {
		ta.frontier[vtx.ID().Key()] = vtx
	}
	ta.updateFrontiers()
	ctx.Log.AssertNoError(ta.updateFrontiers())
}

// Parameters implements the Avalanche interface

@@ -84,15 +84,15 @@ func (ta *Topological) Parameters() Parameters { return ta.params }
func (ta *Topological) IsVirtuous(tx snowstorm.Tx) bool { return ta.cg.IsVirtuous(tx) }

// Add implements the Avalanche interface
func (ta *Topological) Add(vtx Vertex) {
func (ta *Topological) Add(vtx Vertex) error {
	ta.ctx.Log.AssertTrue(vtx != nil, "Attempting to insert nil vertex")

	vtxID := vtx.ID()
	key := vtxID.Key()
	if vtx.Status().Decided() {
		return // Already decided this vertex
		return nil // Already decided this vertex
	} else if _, exists := ta.nodes[key]; exists {
		return // Already inserted this vertex
		return nil // Already inserted this vertex
	}

	ta.ctx.ConsensusDispatcher.Issue(ta.ctx.ChainID, vtxID, vtx.Bytes())

@@ -100,14 +100,16 @@ func (ta *Topological) Add(vtx Vertex) {
	for _, tx := range vtx.Txs() {
		if !tx.Status().Decided() {
			// Add the consumers to the conflict graph.
			ta.cg.Add(tx)
			if err := ta.cg.Add(tx); err != nil {
				return err
			}
		}
	}

	ta.nodes[key] = vtx // Add this vertex to the set of nodes
	ta.metrics.Issued(vtxID)

	ta.update(vtx) // Update the vertex and its ancestry
	return ta.update(vtx) // Update the vertex and its ancestry
}

// VertexIssued implements the Avalanche interface

@@ -132,7 +134,7 @@ func (ta *Topological) Virtuous() ids.Set { return ta.virtuous }
func (ta *Topological) Preferences() ids.Set { return ta.preferred }

// RecordPoll implements the Avalanche interface
func (ta *Topological) RecordPoll(responses ids.UniqueBag) {
func (ta *Topological) RecordPoll(responses ids.UniqueBag) error {
	// Set up the topological sort: O(|Live Set|)
	kahns, leaves := ta.calculateInDegree(responses)
	// Collect the votes for each transaction: O(|Live Set|)

@@ -141,7 +143,7 @@ func (ta *Topological) RecordPoll(responses ids.UniqueBag) {
	ta.ctx.Log.Verbo("Updating consumer confidences based on:\n%s", &votes)
	ta.cg.RecordPoll(votes)
	// Update the dag: O(|Live Set|)
	ta.updateFrontiers()
	return ta.updateFrontiers()
}

// Quiesce implements the Avalanche interface

@@ -275,11 +277,11 @@ func (ta *Topological) pushVotes(
// If I'm preferred, remove all my ancestors from the preferred frontier, add
// myself to the preferred frontier
// If all my parents are accepted and I'm acceptable, accept myself
func (ta *Topological) update(vtx Vertex) {
func (ta *Topological) update(vtx Vertex) error {
	vtxID := vtx.ID()
	vtxKey := vtxID.Key()
	if _, cached := ta.preferenceCache[vtxKey]; cached {
		return // This vertex has already been updated
		return nil // This vertex has already been updated
	}

	switch vtx.Status() {

@@ -291,12 +293,12 @@ func (ta *Topological) update(vtx Vertex) {

		ta.preferenceCache[vtxKey] = true
		ta.virtuousCache[vtxKey] = true
		return
		return nil
	case choices.Rejected:
		// I'm rejected
		ta.preferenceCache[vtxKey] = false
		ta.virtuousCache[vtxKey] = false
		return
		return nil
	}

	acceptable := true // If the batch is accepted, this vertex is acceptable

@@ -327,7 +329,9 @@ func (ta *Topological) update(vtx Vertex) {
	deps := vtx.Parents()
	// Update all of my dependencies
	for _, dep := range deps {
		ta.update(dep)
		if err := ta.update(dep); err != nil {
			return err
		}

		depID := dep.ID()
		key := depID.Key()

@@ -338,13 +342,17 @@ func (ta *Topological) update(vtx Vertex) {
	// Check my parent statuses
	for _, dep := range deps {
		if status := dep.Status(); status == choices.Rejected {
			vtx.Reject() // My parent is rejected, so I should be rejected
			// My parent is rejected, so I should be rejected
			if err := vtx.Reject(); err != nil {
				return err
			}
			ta.ctx.ConsensusDispatcher.Reject(ta.ctx.ChainID, vtxID, vtx.Bytes())
			delete(ta.nodes, vtxKey)
			ta.metrics.Rejected(vtxID)

			ta.preferenceCache[vtxKey] = false
			ta.virtuousCache[vtxKey] = false
			return
			return nil
		} else if status != choices.Accepted {
			acceptable = false // My parent isn't accepted, so I can't be
		}

@@ -389,21 +397,26 @@ func (ta *Topological) update(vtx Vertex) {
	switch {
	case acceptable:
		// I'm acceptable, why not accept?
		if err := vtx.Accept(); err != nil {
			return err
		}
		ta.ctx.ConsensusDispatcher.Accept(ta.ctx.ChainID, vtxID, vtx.Bytes())
		vtx.Accept()
		delete(ta.nodes, vtxKey)
		ta.metrics.Accepted(vtxID)
	case rejectable:
		// I'm rejectable, why not reject?
		vtx.Reject()
		if err := vtx.Reject(); err != nil {
			return err
		}
		ta.ctx.ConsensusDispatcher.Reject(ta.ctx.ChainID, vtxID, vtx.Bytes())
		delete(ta.nodes, vtxKey)
		ta.metrics.Rejected(vtxID)
	}
	return nil
}

// Update the frontier sets
func (ta *Topological) updateFrontiers() {
func (ta *Topological) updateFrontiers() error {
	vts := ta.frontier

	ta.preferred.Clear()

@@ -417,6 +430,9 @@ func (ta *Topological) updateFrontiers() {

	for _, vtx := range vts {
		// Update all the vertices that were in my previous frontier
		ta.update(vtx)
		if err := ta.update(vtx); err != nil {
			return err
		}
	}
	return nil
}
@@ -16,7 +16,7 @@ type Vtx struct {
	id ids.ID
	txs []snowstorm.Tx

	height int
	height uint64
	status choices.Status

	bytes []byte

@@ -25,11 +25,12 @@ type Vtx struct {
func (v *Vtx) ID() ids.ID { return v.id }
func (v *Vtx) ParentIDs() []ids.ID { return nil }
func (v *Vtx) Parents() []Vertex { return v.dependencies }
func (v *Vtx) Height() uint64 { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx { return v.txs }
func (v *Vtx) Status() choices.Status { return v.status }
func (v *Vtx) Live() {}
func (v *Vtx) Accept() { v.status = choices.Accepted }
func (v *Vtx) Reject() { v.status = choices.Rejected }
func (v *Vtx) Accept() error { v.status = choices.Accepted; return nil }
func (v *Vtx) Reject() error { v.status = choices.Rejected; return nil }
func (v *Vtx) Bytes() []byte { return v.bytes }

type sortVts []*Vtx
@@ -4,6 +4,7 @@
package snowman

import (
	"errors"
	"sort"

	"github.com/ava-labs/gecko/ids"

@@ -21,17 +22,19 @@ type TestBlock struct {
func (b *TestBlock) Parent() Block { return b.parent }
func (b *TestBlock) ID() ids.ID { return b.id }
func (b *TestBlock) Status() choices.Status { return b.status }
func (b *TestBlock) Accept() {
func (b *TestBlock) Accept() error {
	if b.status.Decided() && b.status != choices.Accepted {
		panic("Dis-agreement")
		return errors.New("Dis-agreement")
	}
	b.status = choices.Accepted
	return nil
}
func (b *TestBlock) Reject() {
func (b *TestBlock) Reject() error {
	if b.status.Decided() && b.status != choices.Rejected {
		panic("Dis-agreement")
		return errors.New("Dis-agreement")
	}
	b.status = choices.Rejected
	return nil
}
func (b *TestBlock) Verify() error { return nil }
func (b *TestBlock) Bytes() []byte { return b.bytes }
@@ -19,7 +19,8 @@ type Consensus interface {
	Parameters() snowball.Parameters

	// Adds a new decision. Assumes the dependency has already been added.
	Add(Block)
	// Returns if a critical error has occurred.
	Add(Block) error

	// Issued returns true if the block has been issued into consensus
	Issued(Block) bool

@@ -29,8 +30,8 @@ type Consensus interface {
	Preference() ids.ID

	// RecordPoll collects the results of a network poll. Assumes all decisions
	// have been previously added.
	RecordPoll(ids.Bag)
	// have been previously added. Returns if a critical error has occurred.
	RecordPoll(ids.Bag) error

	// Finalized returns true if all decisions that have been added have been
	// finalized. Note, it is possible that after returning finalized, a new
@@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
	Namespace: namespace,
	Name: "accepted",
	Help: "Latency of accepting from the time the block was issued in milliseconds",
	Buckets: timer.Buckets,
	Buckets: timer.MillisecondsBuckets,
	})
	m.latRejected = prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name: "rejected",
			Help: "Latency of rejecting from the time the block was issued in milliseconds",
			Buckets: timer.Buckets,
			Buckets: timer.MillisecondsBuckets,
		})

	if err := registerer.Register(m.numProcessing); err != nil {
@@ -77,7 +77,7 @@ func (ts *Topological) Initialize(ctx *snow.Context, params snowball.Parameters,
func (ts *Topological) Parameters() snowball.Parameters { return ts.params }

// Add implements the Snowman interface
func (ts *Topological) Add(blk Block) {
func (ts *Topological) Add(blk Block) error {
	parent := blk.Parent()
	parentID := parent.ID()
	parentKey := parentID.Key()

@@ -95,13 +95,15 @@ func (ts *Topological) Add(blk Block) {
		// If the ancestor is missing, this means the ancestor must have already
		// been pruned. Therefore, the dependent should be transitively
		// rejected.
		blk.Reject()
		if err := blk.Reject(); err != nil {
			return err
		}

		// Notify anyone listening that this block was rejected.
		ts.ctx.DecisionDispatcher.Reject(ts.ctx.ChainID, blkID, blkBytes)
		ts.ctx.ConsensusDispatcher.Reject(ts.ctx.ChainID, blkID, blkBytes)
		ts.metrics.Rejected(blkID)
		return
		return nil
	}

	// add the block as a child of its parent, and add the block to the tree

@@ -115,6 +117,7 @@ func (ts *Topological) Add(blk Block) {
	if ts.tail.Equals(parentID) {
		ts.tail = blkID
	}
	return nil
}

// Issued implements the Snowman interface

@@ -154,7 +157,7 @@ func (ts *Topological) Preference() ids.ID { return ts.tail }
// The complexity of this function is:
// - Runtime = 3 * |live set| + |votes|
// - Space = 2 * |live set| + |votes|
func (ts *Topological) RecordPoll(votes ids.Bag) {
func (ts *Topological) RecordPoll(votes ids.Bag) error {
	// Runtime = |live set| + |votes| ; Space = |live set| + |votes|
	kahnGraph, leaves := ts.calculateInDegree(votes)

@@ -162,10 +165,14 @@ func (ts *Topological) RecordPoll(votes ids.Bag) {
	voteStack := ts.pushVotes(kahnGraph, leaves)

	// Runtime = |live set| ; Space = Constant
	preferred := ts.vote(voteStack)
	preferred, err := ts.vote(voteStack)
	if err != nil {
		return err
	}

	// Runtime = |live set| ; Space = Constant
	ts.tail = ts.getPreferredDecendent(preferred)
	return nil
}

// Finalized implements the Snowman interface

@@ -292,7 +299,7 @@ func (ts *Topological) pushVotes(
}

// apply votes to the branch that received an Alpha threshold
func (ts *Topological) vote(voteStack []votes) ids.ID {
func (ts *Topological) vote(voteStack []votes) (ids.ID, error) {
	// If the voteStack is empty, then the full tree should falter. This won't
	// change the preferred branch.
	if len(voteStack) == 0 {

@@ -301,7 +308,7 @@ func (ts *Topological) vote(voteStack []votes) ids.ID {
		headKey := ts.head.Key()
		headBlock := ts.blocks[headKey]
		headBlock.shouldFalter = true
		return ts.tail
		return ts.tail, nil
	}

	// keep track of the new preferred block

@@ -341,7 +348,9 @@ func (ts *Topological) vote(voteStack []votes) ids.ID {

		// Only accept when you are finalized and the head.
		if parentBlock.sb.Finalized() && ts.head.Equals(vote.parentID) {
			ts.accept(parentBlock)
			if err := ts.accept(parentBlock); err != nil {
				return ids.ID{}, err
			}

			// by accepting the child of parentBlock, the last accepted block is
			// no longer voteParentID, but its child. So, voteParentID can be

@@ -393,7 +402,7 @@ func (ts *Topological) vote(voteStack []votes) ids.ID {
			}
		}
	}
	return newPreferred
	return newPreferred, nil
}

// Get the preferred decendent of the provided block ID

@@ -409,7 +418,7 @@ func (ts *Topological) getPreferredDecendent(blkID ids.ID) ids.ID {
// accept the preferred child of the provided snowman block. By accepting the
// preferred child, all other children will be rejected. When these children are
// rejected, all their descendants will be rejected.
func (ts *Topological) accept(n *snowmanBlock) {
func (ts *Topological) accept(n *snowmanBlock) error {
	// We are finalizing the block's child, so we need to get the preference
	pref := n.sb.Preference()

@@ -417,7 +426,9 @@ func (ts *Topological) accept(n *snowmanBlock) {

	// Get the child and accept it
	child := n.children[pref.Key()]
	child.Accept()
	if err := child.Accept(); err != nil {
		return err
	}

	// Notify anyone listening that this block was accepted.
	bytes := child.Bytes()

@@ -439,7 +450,9 @@ func (ts *Topological) accept(n *snowmanBlock) {
			continue
		}

		child.Reject()
		if err := child.Reject(); err != nil {
			return err
		}

		// Notify anyone listening that this block was rejected.
		bytes := child.Bytes()

@@ -452,11 +465,11 @@ func (ts *Topological) accept(n *snowmanBlock) {
	}

	// reject all the descendants of the blocks we just rejected
	ts.rejectTransitively(rejects)
	return ts.rejectTransitively(rejects)
}

// Takes in a list of rejected ids and rejects all descendants of these IDs
func (ts *Topological) rejectTransitively(rejected []ids.ID) {
func (ts *Topological) rejectTransitively(rejected []ids.ID) error {
	// the rejected array is treated as a queue, with the next element at index
	// 0 and the last element at the end of the slice.
	for len(rejected) > 0 {

@@ -471,7 +484,9 @@ func (ts *Topological) rejectTransitively(rejected []ids.ID) {
		delete(ts.blocks, rejectedKey)

		for childIDKey, child := range rejectedNode.children {
			child.Reject()
			if err := child.Reject(); err != nil {
				return err
			}

			// Notify anyone listening that this block was rejected.
			childID := ids.NewID(childIDKey)

@@ -484,4 +499,5 @@ func (ts *Topological) rejectTransitively(rejected []ids.ID) {
			rejected = append(rejected, childID)
		}
	}
	return nil
}
@@ -28,8 +28,9 @@ type Consensus interface {
	// That is, no transaction has been added that conflicts with <Tx>
	IsVirtuous(Tx) bool

	// Adds a new transaction to vote on
	Add(Tx)
	// Adds a new transaction to vote on. Returns if a critical error has
	// occurred.
	Add(Tx) error

	// Returns true iff transaction <Tx> has been added
	Issued(Tx) bool

@@ -45,8 +46,8 @@ type Consensus interface {
	Conflicts(Tx) ids.Set

	// Collects the results of a network poll. Assumes all transactions
	// have been previously added
	RecordPoll(ids.Bag)
	// have been previously added. Returns if a critical error has occurred.
	RecordPoll(ids.Bag) error

	// Returns true iff all remaining transactions are rogue. Note, it is
	// possible that after returning quiesce, a new decision may be added such
@@ -14,6 +14,7 @@ import (
	"github.com/ava-labs/gecko/snow/consensus/snowball"
	"github.com/ava-labs/gecko/snow/events"
	"github.com/ava-labs/gecko/utils/formatting"
	"github.com/ava-labs/gecko/utils/wrappers"
)

// DirectedFactory implements Factory by returning a directed struct

@@ -54,6 +55,8 @@ type Directed struct {

	// Number of times RecordPoll has been called
	currentVote int

	errs wrappers.Errs
}

type flatNode struct {
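The new errs field is a wrappers.Errs accumulator. A small sketch of the pattern (step is a hypothetical fallible operation; the Add, Errored, and Err calls are the same ones used verbatim in the hunks below): it records the first non-nil error so deeply nested Update/Fulfill callbacks can fail without threading an error return through the events system.

func runPolls() error {
	var errs wrappers.Errs
	errs.Add(step()) // nil adds are no-ops
	if errs.Errored() {
		return errs.Err // the first recorded error wins
	}
	return nil
}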
@@ -118,9 +121,9 @@ func (dg *Directed) Conflicts(tx Tx) ids.Set {
}

// Add implements the Consensus interface
func (dg *Directed) Add(tx Tx) {
func (dg *Directed) Add(tx Tx) error {
	if dg.Issued(tx) {
		return // Already inserted
		return nil // Already inserted
	}

	txID := tx.ID()

@@ -130,11 +133,13 @@ func (dg *Directed) Add(tx Tx) {
	inputs := tx.InputIDs()
	// If there are no inputs, Tx is vacuously accepted
	if inputs.Len() == 0 {
		tx.Accept()
		if err := tx.Accept(); err != nil {
			return err
		}
		dg.ctx.DecisionDispatcher.Accept(dg.ctx.ChainID, txID, bytes)
		dg.metrics.Issued(txID)
		dg.metrics.Accepted(txID)
		return
		return nil
	}

	fn := &flatNode{tx: tx}

@@ -195,6 +200,7 @@ func (dg *Directed) Add(tx Tx) {
		}
	}
	dg.pendingReject.Register(toReject)
	return dg.errs.Err
}

// Issued implements the Consensus interface

@@ -213,7 +219,7 @@ func (dg *Directed) Virtuous() ids.Set { return dg.virtuous }
func (dg *Directed) Preferences() ids.Set { return dg.preferences }

// RecordPoll implements the Consensus interface
func (dg *Directed) RecordPoll(votes ids.Bag) {
func (dg *Directed) RecordPoll(votes ids.Bag) error {
	dg.currentVote++

	votes.SetThreshold(dg.params.Alpha)

@@ -231,7 +237,8 @@ func (dg *Directed) RecordPoll(votes ids.Bag) {
		}
		fn.lastVote = dg.currentVote

		dg.ctx.Log.Verbo("Increasing (bias, confidence) of %s from (%d, %d) to (%d, %d)", toInc, fn.bias, fn.confidence, fn.bias+1, fn.confidence+1)
		dg.ctx.Log.Verbo("Increasing (bias, confidence) of %s from (%d, %d) to (%d, %d)",
			toInc, fn.bias, fn.confidence, fn.bias+1, fn.confidence+1)

		fn.bias++
		fn.confidence++

@@ -240,17 +247,22 @@ func (dg *Directed) RecordPoll(votes ids.Bag) {
			((!fn.rogue && fn.confidence >= dg.params.BetaVirtuous) ||
				fn.confidence >= dg.params.BetaRogue) {
			dg.deferAcceptance(fn)
			if dg.errs.Errored() {
				return dg.errs.Err
			}
		}
		if !fn.accepted {
			dg.redirectEdges(fn)
		}
	}
	return dg.errs.Err
}

// Quiesce implements the Consensus interface
func (dg *Directed) Quiesce() bool {
	numVirtuous := dg.virtuousVoting.Len()
	dg.ctx.Log.Verbo("Conflict graph has %d voting virtuous transactions and %d transactions", numVirtuous, len(dg.nodes))
	dg.ctx.Log.Verbo("Conflict graph has %d voting virtuous transactions and %d transactions",
		numVirtuous, len(dg.nodes))
	return numVirtuous == 0
}

@@ -311,7 +323,7 @@ func (dg *Directed) deferAcceptance(fn *flatNode) {
	dg.pendingAccept.Register(toAccept)
}

func (dg *Directed) reject(ids ...ids.ID) {
func (dg *Directed) reject(ids ...ids.ID) error {
	for _, conflict := range ids {
		conflictKey := conflict.Key()
		conf := dg.nodes[conflictKey]

@@ -324,13 +336,16 @@ func (dg *Directed) reject(ids ...ids.ID) {
		dg.removeConflict(conflict, conf.outs.List()...)

		// Mark it as rejected
		conf.tx.Reject()
		if err := conf.tx.Reject(); err != nil {
			return err
		}
		dg.ctx.DecisionDispatcher.Reject(dg.ctx.ChainID, conf.tx.ID(), conf.tx.Bytes())
		dg.metrics.Rejected(conflict)

		dg.pendingAccept.Abandon(conflict)
		dg.pendingReject.Fulfill(conflict)
	}
	return nil
}

func (dg *Directed) redirectEdges(fn *flatNode) {

@@ -396,7 +411,7 @@ func (a *directedAccepter) Abandon(id ids.ID) { a.rejected = true }

func (a *directedAccepter) Update() {
	// If I was rejected or I am still waiting on dependencies to finish do nothing.
	if a.rejected || a.deps.Len() != 0 {
	if a.rejected || a.deps.Len() != 0 || a.dg.errs.Errored() {
		return
	}

@@ -410,12 +425,22 @@ func (a *directedAccepter) Update() {
	a.dg.preferences.Remove(id)

	// Reject the conflicts
	a.dg.reject(a.fn.ins.List()...)
	a.dg.reject(a.fn.outs.List()...) // Should normally be empty
	if err := a.dg.reject(a.fn.ins.List()...); err != nil {
		a.dg.errs.Add(err)
		return
	}
	// Should normally be empty
	if err := a.dg.reject(a.fn.outs.List()...); err != nil {
		a.dg.errs.Add(err)
		return
	}

	// Mark it as accepted
	if err := a.fn.tx.Accept(); err != nil {
		a.dg.errs.Add(err)
		return
	}
	a.fn.accepted = true
	a.fn.tx.Accept()
	a.dg.ctx.DecisionDispatcher.Accept(a.dg.ctx.ChainID, id, a.fn.tx.Bytes())
	a.dg.metrics.Accepted(id)

@@ -434,11 +459,11 @@ type directedRejector struct {
func (r *directedRejector) Dependencies() ids.Set { return r.deps }

func (r *directedRejector) Fulfill(id ids.ID) {
	if r.rejected {
	if r.rejected || r.dg.errs.Errored() {
		return
	}
	r.rejected = true
	r.dg.reject(r.fn.tx.ID())
	r.dg.errs.Add(r.dg.reject(r.fn.tx.ID()))
}

func (*directedRejector) Abandon(id ids.ID) {}
@@ -14,6 +14,7 @@ import (
    "github.com/ava-labs/gecko/snow/consensus/snowball"
    "github.com/ava-labs/gecko/snow/events"
    "github.com/ava-labs/gecko/utils/formatting"
+    "github.com/ava-labs/gecko/utils/wrappers"
)

// InputFactory implements Factory by returning an input struct

@@ -43,6 +44,8 @@ type Input struct {

    // Number of times RecordPoll has been called
    currentVote int
+
+    errs wrappers.Errs
}

type txNode struct {

@@ -92,9 +95,9 @@ func (ig *Input) IsVirtuous(tx Tx) bool {
}

// Add implements the ConflictGraph interface
-func (ig *Input) Add(tx Tx) {
+func (ig *Input) Add(tx Tx) error {
    if ig.Issued(tx) {
-        return // Already inserted
+        return nil // Already inserted
    }

    txID := tx.ID()

@@ -104,11 +107,13 @@ func (ig *Input) Add(tx Tx) {
    inputs := tx.InputIDs()
    // If there are no inputs, they are vacuously accepted
    if inputs.Len() == 0 {
-        tx.Accept()
+        if err := tx.Accept(); err != nil {
+            return err
+        }
        ig.ctx.DecisionDispatcher.Accept(ig.ctx.ChainID, txID, bytes)
        ig.metrics.Issued(txID)
        ig.metrics.Accepted(txID)
-        return
+        return nil
    }

    cn := txNode{tx: tx}

@@ -155,6 +160,7 @@ func (ig *Input) Add(tx Tx) {
        }
    }
    ig.pendingReject.Register(toReject)
+    return ig.errs.Err
}

// Issued implements the ConflictGraph interface

@@ -187,7 +193,7 @@ func (ig *Input) Conflicts(tx Tx) ids.Set {
}

// RecordPoll implements the ConflictGraph interface
-func (ig *Input) RecordPoll(votes ids.Bag) {
+func (ig *Input) RecordPoll(votes ids.Bag) error {
    ig.currentVote++

    votes.SetThreshold(ig.params.Alpha)

@@ -261,11 +267,15 @@ func (ig *Input) RecordPoll(votes ids.Bag) {
        if (!rogue && confidence >= ig.params.BetaVirtuous) ||
            confidence >= ig.params.BetaRogue {
            ig.deferAcceptance(tx)
+            if ig.errs.Errored() {
+                return ig.errs.Err
+            }
            continue
        }

        ig.txs[incKey] = tx
    }
+    return ig.errs.Err
}

func (ig *Input) deferAcceptance(tn txNode) {

@@ -285,7 +295,7 @@ func (ig *Input) deferAcceptance(tn txNode) {
}

// reject all the ids and remove them from their conflict sets
-func (ig *Input) reject(ids ...ids.ID) {
+func (ig *Input) reject(ids ...ids.ID) error {
    for _, conflict := range ids {
        conflictKey := conflict.Key()
        cn := ig.txs[conflictKey]

@@ -296,12 +306,15 @@ func (ig *Input) reject(ids ...ids.ID) {
        ig.removeConflict(conflict, cn.tx.InputIDs().List()...)

        // Mark it as rejected
-        cn.tx.Reject()
+        if err := cn.tx.Reject(); err != nil {
+            return err
+        }
        ig.ctx.DecisionDispatcher.Reject(ig.ctx.ChainID, cn.tx.ID(), cn.tx.Bytes())
        ig.metrics.Rejected(conflict)
        ig.pendingAccept.Abandon(conflict)
        ig.pendingReject.Fulfill(conflict)
    }
+    return nil
}

// Remove id from all of its conflict sets

@@ -458,7 +471,7 @@ func (a *inputAccepter) Fulfill(id ids.ID) {
func (a *inputAccepter) Abandon(id ids.ID) { a.rejected = true }

func (a *inputAccepter) Update() {
-    if a.rejected || a.deps.Len() != 0 {
+    if a.rejected || a.deps.Len() != 0 || a.ig.errs.Errored() {
        return
    }

@@ -480,10 +493,16 @@ func (a *inputAccepter) Update() {
            conflicts.Union(inputNode.conflicts)
        }
    }
-    a.ig.reject(conflicts.List()...)
+    if err := a.ig.reject(conflicts.List()...); err != nil {
+        a.ig.errs.Add(err)
+        return
+    }

    // Mark it as accepted
-    a.tn.tx.Accept()
+    if err := a.tn.tx.Accept(); err != nil {
+        a.ig.errs.Add(err)
+        return
+    }
    a.ig.ctx.DecisionDispatcher.Accept(a.ig.ctx.ChainID, id, a.tn.tx.Bytes())
    a.ig.metrics.Accepted(id)

@@ -502,11 +521,11 @@ type inputRejector struct {

func (r *inputRejector) Dependencies() ids.Set { return r.deps }

func (r *inputRejector) Fulfill(id ids.ID) {
-    if r.rejected {
+    if r.rejected || r.ig.errs.Errored() {
        return
    }
    r.rejected = true
-    r.ig.reject(r.tn.tx.ID())
+    r.ig.errs.Add(r.ig.reject(r.tn.tx.ID()))
}

func (*inputRejector) Abandon(id ids.ID) {}
@@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) {
            Namespace: namespace,
            Name:      "tx_accepted",
            Help:      "Latency of accepting from the time the transaction was issued in milliseconds",
-            Buckets:   timer.Buckets,
+            Buckets:   timer.MillisecondsBuckets,
        })
    m.latRejected = prometheus.NewHistogram(
        prometheus.HistogramOpts{
            Namespace: namespace,
            Name:      "tx_rejected",
            Help:      "Latency of rejecting from the time the transaction was issued in milliseconds",
-            Buckets:   timer.Buckets,
+            Buckets:   timer.MillisecondsBuckets,
        })

    if err := registerer.Register(m.numProcessing); err != nil {
@@ -31,10 +31,10 @@ func (tx *TestTx) InputIDs() ids.Set { return tx.Ins }
func (tx *TestTx) Status() choices.Status { return tx.Stat }

// Accept implements the Consumer interface
-func (tx *TestTx) Accept() { tx.Stat = choices.Accepted }
+func (tx *TestTx) Accept() error { tx.Stat = choices.Accepted; return nil }

// Reject implements the Consumer interface
-func (tx *TestTx) Reject() { tx.Stat = choices.Rejected }
+func (tx *TestTx) Reject() error { tx.Stat = choices.Rejected; return nil }

// Reset sets the status to pending
func (tx *TestTx) Reset() { tx.Stat = choices.Processing }
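Because Accept and Reject now return errors, every test double has to grow the same signature, as TestTx does above. A hedged sketch of that idea in isolation follows; testTx and Status are illustrative stand-ins, not the repo's types.

package main

import "fmt"

type Status int

const (
    Processing Status = iota
    Accepted
    Rejected
)

// testTx mirrors the shape of the TestTx stub above: state transitions
// simply record the new status and report no error.
type testTx struct{ stat Status }

func (tx *testTx) Accept() error { tx.stat = Accepted; return nil }
func (tx *testTx) Reject() error { tx.stat = Rejected; return nil }

func main() {
    tx := &testTx{}
    if err := tx.Accept(); err != nil { // callers must now handle the error
        panic(err)
    }
    fmt.Println(tx.stat == Accepted) // true
}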
@@ -4,6 +4,9 @@
package avalanche

import (
+    "fmt"
+
+    "github.com/ava-labs/gecko/cache"
    "github.com/ava-labs/gecko/ids"
+    "github.com/ava-labs/gecko/snow/choices"
    "github.com/ava-labs/gecko/snow/consensus/avalanche"

@@ -13,6 +16,10 @@ import (
    "github.com/prometheus/client_golang/prometheus"
)

+const (
+    cacheSize = 100000
+)
+
// BootstrapConfig ...
type BootstrapConfig struct {
    common.Config

@@ -30,39 +37,47 @@ type bootstrapper struct {
    metrics
    common.Bootstrapper

-    // IDs of vertices that we're already in the process of getting
-    // TODO: Find a better way to track; this keeps every single vertex's ID in memory when bootstrapping from nothing
-    seen ids.Set
+    // true if all of the vertices in the original accepted frontier have been processed
+    processedStartingAcceptedFrontier bool

-    numFetched uint64 // number of vertices that have been fetched from validators
+    // number of vertices fetched so far
+    numFetched uint32

-    // vtxReqs prevents asking validators for the same vertex
-    vtxReqs common.Requests
+    // tracks which validators were asked for which containers in which requests
+    outstandingRequests common.Requests

-    // IDs of vertices that we have requested from other validators but haven't received
-    pending    ids.Set
-    finished   bool
-    onFinished func()
+    // Contains IDs of vertices that have recently been processed
+    processedCache *cache.LRU
+
+    // true if bootstrapping is done
+    finished bool
+
+    // Called when bootstrapping is done
+    onFinished func() error
}

// Initialize this engine.
-func (b *bootstrapper) Initialize(config BootstrapConfig) {
+func (b *bootstrapper) Initialize(config BootstrapConfig) error {
    b.BootstrapConfig = config
+    b.processedCache = &cache.LRU{Size: cacheSize}

    b.VtxBlocked.SetParser(&vtxParser{
-        numAccepted: b.numBootstrappedVtx,
-        numDropped:  b.numDroppedVtx,
+        log:         config.Context.Log,
+        numAccepted: b.numBSVtx,
+        numDropped:  b.numBSDroppedVtx,
        state:       b.State,
    })

    b.TxBlocked.SetParser(&txParser{
-        numAccepted: b.numBootstrappedTx,
-        numDropped:  b.numDroppedTx,
+        log:         config.Context.Log,
+        numAccepted: b.numBSTx,
+        numDropped:  b.numBSDroppedTx,
        vm:          b.VM,
    })

    config.Bootstrapable = b
    b.Bootstrapper.Initialize(config.Config)
+    return nil
}

// CurrentAcceptedFrontier ...

@@ -83,171 +98,214 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
    return acceptedVtxIDs
}

-// ForceAccepted ...
-func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) {
-    for _, vtxID := range acceptedContainerIDs.List() {
-        b.fetch(vtxID)
-    }
-
-    if numPending := b.pending.Len(); numPending == 0 {
-        // TODO: This typically indicates bootstrapping has failed, so this
-        // should be handled appropriately
-        b.finish()
-    }
-}
-
-// Put ...
-func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) {
-    vtx, err := b.State.ParseVertex(vtxBytes)
-    if err != nil {
-        b.BootstrapConfig.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
-            err,
-            formatting.DumpBytes{Bytes: vtxBytes})
-
-        b.GetFailed(vdr, requestID)
-        return
-    }
-
-    if !b.pending.Contains(vtx.ID()) {
-        b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested vertex:\n%s",
-            vdr,
-            formatting.DumpBytes{Bytes: vtxBytes})
-
-        b.GetFailed(vdr, requestID)
-        return
-    }
-
-    b.addVertex(vtx)
-}
-
-// GetFailed ...
-func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) {
-    vtxID, ok := b.vtxReqs.Remove(vdr, requestID)
-    if !ok {
-        b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
-            vdr)
-        return
-    }
-
-    b.sendRequest(vtxID)
-}
-
-func (b *bootstrapper) fetch(vtxID ids.ID) {
-    if b.pending.Contains(vtxID) {
-        return
-    }
-
-    vtx, err := b.State.GetVertex(vtxID)
-    if err != nil {
-        b.sendRequest(vtxID)
-        return
-    }
-    b.storeVertex(vtx)
-}
-
-func (b *bootstrapper) sendRequest(vtxID ids.ID) {
-    validators := b.BootstrapConfig.Validators.Sample(1)
-    if len(validators) == 0 {
-        b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", vtxID)
-        return
-    }
-    validatorID := validators[0].ID()
-    b.RequestID++
-
-    b.vtxReqs.RemoveAny(vtxID)
-    b.vtxReqs.Add(validatorID, b.RequestID, vtxID)
-
-    b.pending.Add(vtxID)
-    b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, vtxID)
-
-    b.numPendingRequests.Set(float64(b.pending.Len()))
-}
+// Get vertex [vtxID] and its ancestors
+func (b *bootstrapper) fetch(vtxID ids.ID) error {
+    // Make sure we haven't already requested this block
+    if b.outstandingRequests.Contains(vtxID) {
+        return nil
+    }
+
+    // Make sure we don't already have this vertex
+    if _, err := b.State.GetVertex(vtxID); err == nil {
+        return nil
+    }
+
+    validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
+    if len(validators) == 0 {
+        return fmt.Errorf("Dropping request for %s as there are no validators", vtxID)
+    }
+    validatorID := validators[0].ID()
+    b.RequestID++
+
+    b.outstandingRequests.Add(validatorID, b.RequestID, vtxID)
+    b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors
+    return nil
+}

-func (b *bootstrapper) addVertex(vtx avalanche.Vertex) {
-    b.storeVertex(vtx)
-
-    if numPending := b.pending.Len(); numPending == 0 {
-        b.finish()
-    }
-}
-
-func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) {
-    vts := []avalanche.Vertex{vtx}
-    b.numFetched++
-    if b.numFetched%2500 == 0 { // periodically inform user of progress
-        b.BootstrapConfig.Context.Log.Info("bootstrapping has fetched %d vertices", b.numFetched)
-    }
-
-    for len(vts) > 0 {
-        newLen := len(vts) - 1
-        vtx := vts[newLen]
-        vts = vts[:newLen]
-
-        vtxID := vtx.ID()
-        switch status := vtx.Status(); status {
+// Process vertices
+func (b *bootstrapper) process(vtx avalanche.Vertex) error {
+    toProcess := []avalanche.Vertex{vtx}
+    for len(toProcess) > 0 {
+        newLen := len(toProcess) - 1
+        vtx := toProcess[newLen]
+        toProcess = toProcess[:newLen]
+        if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this
+            continue
+        }
+
+        switch vtx.Status() {
        case choices.Unknown:
-            b.sendRequest(vtxID)
+            if err := b.fetch(vtx.ID()); err != nil {
+                return err
+            }
+        case choices.Rejected:
+            return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID())
        case choices.Processing:
-            b.pending.Remove(vtxID)
-
            if err := b.VtxBlocked.Push(&vertexJob{
-                numAccepted: b.numBootstrappedVtx,
-                numDropped:  b.numDroppedVtx,
+                log:         b.BootstrapConfig.Context.Log,
+                numAccepted: b.numBSVtx,
+                numDropped:  b.numBSDroppedVtx,
                vtx:         vtx,
            }); err == nil {
-                b.numBlockedVtx.Inc()
+                b.numBSBlockedVtx.Inc()
+                b.numFetched++ // Progress tracker
+                if b.numFetched%common.StatusUpdateFrequency == 0 {
+                    b.BootstrapConfig.Context.Log.Info("fetched %d vertices", b.numFetched)
+                }
            } else {
-                b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked")
+                b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked: %s", err)
            }
            for _, tx := range vtx.Txs() {
                if err := b.TxBlocked.Push(&txJob{
-                    numAccepted: b.numBootstrappedVtx,
-                    numDropped:  b.numDroppedVtx,
+                    log:         b.BootstrapConfig.Context.Log,
+                    numAccepted: b.numBSTx,
+                    numDropped:  b.numBSDroppedTx,
                    tx:          tx,
                }); err == nil {
-                    b.numBlockedTx.Inc()
+                    b.numBSBlockedTx.Inc()
                } else {
-                    b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked")
+                    b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked: %s", err)
                }
            }
            for _, parent := range vtx.Parents() {
-                if parentID := parent.ID(); !b.seen.Contains(parentID) {
-                    b.seen.Add(parentID)
-                    vts = append(vts, parent)
-                }
+                toProcess = append(toProcess, parent)
            }
-        case choices.Accepted:
-            b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", vtxID)
-        case choices.Rejected:
-            b.BootstrapConfig.Context.Log.Error("Bootstrapping wants to accept %s, however it was previously rejected", vtxID)
+            b.processedCache.Put(vtx.ID(), nil)
        }
    }

-    numPending := b.pending.Len()
-    b.numPendingRequests.Set(float64(numPending))
+    if err := b.VtxBlocked.Commit(); err != nil {
+        return err
+    }
+    if err := b.TxBlocked.Commit(); err != nil {
+        return err
+    }
+
+    if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
+        return b.finish()
+    }
+    return nil
}

-func (b *bootstrapper) finish() {
-    if b.finished {
-        return
-    }
-    b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching vertices. executing state transitions...")
-
-    b.executeAll(b.TxBlocked, b.numBlockedTx)
-    b.executeAll(b.VtxBlocked, b.numBlockedVtx)
-
-    // Start consensus
-    b.onFinished()
-    b.seen = ids.Set{}
-    b.finished = true
-}
+// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
+// with request ID [requestID]. Expects vtxs[0] to be the vertex requested in the corresponding GetAncestors.
+func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte) error {
+    if lenVtxs := len(vtxs); lenVtxs > common.MaxContainersPerMultiPut {
+        b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains more than maximum number of vertices", vdr, requestID)
+        return b.GetAncestorsFailed(vdr, requestID)
+    } else if lenVtxs == 0 {
+        b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains no vertices", vdr, requestID)
+        return b.GetAncestorsFailed(vdr, requestID)
+    }
+
+    // Make sure this is in response to a request we made
+    neededVtxID, needed := b.outstandingRequests.Remove(vdr, requestID)
+    if !needed { // this message isn't in response to a request we made
+        b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
+        return nil
+    }
+
+    neededVtx, err := b.State.ParseVertex(vtxs[0]) // the vertex we requested
+    if err != nil {
+        b.BootstrapConfig.Context.Log.Debug("Failed to parse requested vertex %s: %s", neededVtxID, err)
+        b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxs[0]})
+        return b.fetch(neededVtxID)
+    } else if actualID := neededVtx.ID(); !actualID.Equals(neededVtxID) {
+        b.BootstrapConfig.Context.Log.Debug("expected the first block to be the requested block, %s, but is %s", neededVtxID, actualID)
+        return b.fetch(neededVtxID)
+    }
+
+    for _, vtxBytes := range vtxs { // Parse/persist all the vertices
+        if _, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx
+            b.BootstrapConfig.Context.Log.Debug("Failed to parse vertex: %s", err)
+            b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes})
+        }
+    }
+
+    return b.process(neededVtx)
+}
+
+// GetAncestorsFailed is called when a GetAncestors message we sent fails
+func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) error {
+    vtxID, ok := b.outstandingRequests.Remove(vdr, requestID)
+    if !ok {
+        b.BootstrapConfig.Context.Log.Debug("GetAncestorsFailed(%s, %d) called but there was no outstanding request to this validator with this ID", vdr, requestID)
+        return nil
+    }
+    // Send another request for the vertex
+    return b.fetch(vtxID)
+}
+
+// ForceAccepted ...
+func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
+    if err := b.VM.Bootstrapping(); err != nil {
+        return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
+            err)
+    }
+
+    for _, vtxID := range acceptedContainerIDs.List() {
+        if vtx, err := b.State.GetVertex(vtxID); err == nil {
+            if err := b.process(vtx); err != nil {
+                return err
+            }
+        } else if err := b.fetch(vtxID); err != nil {
+            return err
+        }
+    }
+    b.processedStartingAcceptedFrontier = true
+
+    if numPending := b.outstandingRequests.Len(); numPending == 0 {
+        return b.finish()
+    }
+    return nil
+}
+
+// Finish bootstrapping
+func (b *bootstrapper) finish() error {
+    if b.finished {
+        return nil
+    }
+    b.BootstrapConfig.Context.Log.Info("finished fetching vertices. executing transaction state transitions...")
+
+    if err := b.executeAll(b.TxBlocked, b.numBSBlockedTx); err != nil {
+        return err
+    }
+
+    b.BootstrapConfig.Context.Log.Info("executing vertex state transitions...")
+
+    if err := b.executeAll(b.VtxBlocked, b.numBSBlockedVtx); err != nil {
+        return err
+    }
+
+    if err := b.VM.Bootstrapped(); err != nil {
+        return fmt.Errorf("failed to notify VM that bootstrapping has finished: %w",
+            err)
+    }
+
+    // Start consensus
+    if err := b.onFinished(); err != nil {
+        return err
+    }
+    b.finished = true
+    return nil
+}

-func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) {
+func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) error {
+    numExecuted := 0
    for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() {
        numBlocked.Dec()
        b.BootstrapConfig.Context.Log.Debug("Executing: %s", job.ID())
        if err := jobs.Execute(job); err != nil {
-            b.BootstrapConfig.Context.Log.Warn("Error executing: %s", err)
+            b.BootstrapConfig.Context.Log.Error("Error executing: %s", err)
+            return err
        }
+        if err := jobs.Commit(); err != nil {
+            return err
+        }
+        numExecuted++
+        if numExecuted%common.StatusUpdateFrequency == 0 { // Periodically print progress
+            b.BootstrapConfig.Context.Log.Info("executed %d operations", numExecuted)
+        }
    }
+    return nil
}
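The rewritten bootstrapper replaces the recursive seen-set walk with an explicit stack plus a bounded "processed" cache. The sketch below shows just that traversal shape in isolation; the vertex type and the map standing in for the engine's cache.LRU are illustrative, not code from the repo.

package main

import "fmt"

// vertex is a hypothetical DAG node with parent links.
type vertex struct {
    id      string
    parents []*vertex
}

// process walks the DAG iteratively, as process() above does: an explicit
// stack replaces recursion, and a processed set keeps re-announced
// ancestors from being walked twice.
func process(root *vertex) {
    processed := map[string]bool{} // stand-in for b.processedCache
    toProcess := []*vertex{root}
    for len(toProcess) > 0 {
        n := len(toProcess) - 1
        vtx := toProcess[n]
        toProcess = toProcess[:n]
        if processed[vtx.id] {
            continue
        }
        fmt.Println("processing", vtx.id)
        toProcess = append(toProcess, vtx.parents...)
        processed[vtx.id] = true
    }
}

func main() {
    g := &vertex{id: "genesis"}
    a := &vertex{id: "a", parents: []*vertex{g}}
    b := &vertex{id: "b", parents: []*vertex{g, a}}
    process(b) // each vertex printed once, despite the shared parent
}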
(File diff suppressed because it is too large)
@@ -7,6 +7,7 @@ import (
    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/snow/consensus/avalanche"
    "github.com/ava-labs/gecko/snow/engine/common"
+    "github.com/ava-labs/gecko/utils/wrappers"
)

type convincer struct {

@@ -16,6 +17,7 @@ type convincer struct {
    requestID uint32
    abandoned bool
    deps      ids.Set
+    errs      *wrappers.Errs
}

func (c *convincer) Dependencies() ids.Set { return c.deps }

@@ -28,7 +30,7 @@ func (c *convincer) Fulfill(id ids.ID) {
func (c *convincer) Abandon(ids.ID) { c.abandoned = true }

func (c *convincer) Update() {
-    if c.abandoned || c.deps.Len() != 0 {
+    if c.abandoned || c.deps.Len() != 0 || c.errs.Errored() {
        return
    }
@@ -27,7 +27,7 @@ type Vtx struct {
    id  ids.ID
    txs []snowstorm.Tx

-    height int
+    height uint64
    status choices.Status

    bytes []byte

@@ -36,10 +36,11 @@ type Vtx struct {
func (v *Vtx) ID() ids.ID                  { return v.id }
func (v *Vtx) DependencyIDs() []ids.ID     { return nil }
func (v *Vtx) Parents() []avalanche.Vertex { return v.parents }
+func (v *Vtx) Height() uint64              { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx         { return v.txs }
func (v *Vtx) Status() choices.Status      { return v.status }
-func (v *Vtx) Accept()                     { v.status = choices.Accepted }
-func (v *Vtx) Reject()                     { v.status = choices.Rejected }
+func (v *Vtx) Accept() error               { v.status = choices.Accepted; return nil }
+func (v *Vtx) Reject() error               { v.status = choices.Rejected; return nil }
func (v *Vtx) Bytes() []byte               { return v.bytes }

type sortVts []*Vtx
@@ -37,7 +37,7 @@ func (i *issuer) Abandon() {
}

func (i *issuer) Update() {
-    if i.abandoned || i.issued || i.vtxDeps.Len() != 0 || i.txDeps.Len() != 0 || i.t.Consensus.VertexIssued(i.vtx) {
+    if i.abandoned || i.issued || i.vtxDeps.Len() != 0 || i.txDeps.Len() != 0 || i.t.Consensus.VertexIssued(i.vtx) || i.t.errs.Errored() {
        return
    }
    i.issued = true

@@ -65,7 +65,10 @@ func (i *issuer) Update() {

    i.t.Config.Context.Log.Verbo("Adding vertex to consensus:\n%s", i.vtx)

-    i.t.Consensus.Add(i.vtx)
+    if err := i.t.Consensus.Add(i.vtx); err != nil {
+        i.t.errs.Add(err)
+        return
+    }

    p := i.t.Consensus.Parameters()
    vdrs := i.t.Config.Validators.Sample(p.K) // Validators to sample

@@ -87,7 +90,7 @@ func (i *issuer) Update() {
        i.t.txBlocked.Fulfill(tx.ID())
    }

-    i.t.repoll()
+    i.t.errs.Add(i.t.repoll())
}

type vtxIssuer struct{ i *issuer }
@@ -10,52 +10,52 @@ import (
)

type metrics struct {
-    numPendingRequests, numBlockedVtx, numBlockedTx prometheus.Gauge
-    numBootstrappedVtx, numDroppedVtx,
-    numBootstrappedTx, numDroppedTx prometheus.Counter
+    numBSPendingRequests, numBSBlockedVtx, numBSBlockedTx prometheus.Gauge
+    numBSVtx, numBSDroppedVtx,
+    numBSTx, numBSDroppedTx prometheus.Counter

    numPolls, numVtxRequests, numTxRequests, numPendingVtx prometheus.Gauge
}

// Initialize implements the Engine interface
func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) {
-    m.numPendingRequests = prometheus.NewGauge(
+    m.numBSPendingRequests = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: namespace,
            Name:      "av_bs_vtx_requests",
            Help:      "Number of pending bootstrap vertex requests",
        })
-    m.numBlockedVtx = prometheus.NewGauge(
+    m.numBSBlockedVtx = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: namespace,
            Name:      "av_bs_blocked_vts",
            Help:      "Number of blocked bootstrap vertices",
        })
-    m.numBlockedTx = prometheus.NewGauge(
+    m.numBSBlockedTx = prometheus.NewGauge(
        prometheus.GaugeOpts{
            Namespace: namespace,
            Name:      "av_bs_blocked_txs",
            Help:      "Number of blocked bootstrap txs",
        })
-    m.numBootstrappedVtx = prometheus.NewCounter(
+    m.numBSVtx = prometheus.NewCounter(
        prometheus.CounterOpts{
            Namespace: namespace,
            Name:      "av_bs_accepted_vts",
            Help:      "Number of accepted vertices",
        })
-    m.numDroppedVtx = prometheus.NewCounter(
+    m.numBSDroppedVtx = prometheus.NewCounter(
        prometheus.CounterOpts{
            Namespace: namespace,
            Name:      "av_bs_dropped_vts",
            Help:      "Number of dropped vertices",
        })
-    m.numBootstrappedTx = prometheus.NewCounter(
+    m.numBSTx = prometheus.NewCounter(
        prometheus.CounterOpts{
            Namespace: namespace,
            Name:      "av_bs_accepted_txs",
            Help:      "Number of accepted txs",
        })
-    m.numDroppedTx = prometheus.NewCounter(
+    m.numBSDroppedTx = prometheus.NewCounter(
        prometheus.CounterOpts{
            Namespace: namespace,
            Name:      "av_bs_dropped_txs",

@@ -86,25 +86,25 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer prometheus.Registerer) {
            Help: "Number of blocked vertices",
        })

-    if err := registerer.Register(m.numPendingRequests); err != nil {
+    if err := registerer.Register(m.numBSPendingRequests); err != nil {
        log.Error("Failed to register av_bs_vtx_requests statistics due to %s", err)
    }
-    if err := registerer.Register(m.numBlockedVtx); err != nil {
+    if err := registerer.Register(m.numBSBlockedVtx); err != nil {
        log.Error("Failed to register av_bs_blocked_vts statistics due to %s", err)
    }
-    if err := registerer.Register(m.numBlockedTx); err != nil {
+    if err := registerer.Register(m.numBSBlockedTx); err != nil {
        log.Error("Failed to register av_bs_blocked_txs statistics due to %s", err)
    }
-    if err := registerer.Register(m.numBootstrappedVtx); err != nil {
+    if err := registerer.Register(m.numBSVtx); err != nil {
        log.Error("Failed to register av_bs_accepted_vts statistics due to %s", err)
    }
-    if err := registerer.Register(m.numDroppedVtx); err != nil {
+    if err := registerer.Register(m.numBSDroppedVtx); err != nil {
        log.Error("Failed to register av_bs_dropped_vts statistics due to %s", err)
    }
-    if err := registerer.Register(m.numBootstrappedTx); err != nil {
+    if err := registerer.Register(m.numBSTx); err != nil {
        log.Error("Failed to register av_bs_accepted_txs statistics due to %s", err)
    }
-    if err := registerer.Register(m.numDroppedTx); err != nil {
+    if err := registerer.Register(m.numBSDroppedTx); err != nil {
        log.Error("Failed to register av_bs_dropped_txs statistics due to %s", err)
    }
    if err := registerer.Register(m.numPolls); err != nil {
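The rename above only touches metric field names; the registration shape is unchanged. For reference, here is a minimal sketch of registering one such bootstrap gauge against a fresh registry; the namespace string is illustrative, not the node's actual namespace.

package main

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

func main() {
    reg := prometheus.NewRegistry()
    // Bootstrap-time gauges carry a "bs" marker in their name so they cannot
    // collide with engine-time gauges registered under the same namespace.
    numBSPendingRequests := prometheus.NewGauge(prometheus.GaugeOpts{
        Namespace: "gecko_X", // illustrative chain namespace
        Name:      "av_bs_vtx_requests",
        Help:      "Number of pending bootstrap vertex requests",
    })
    if err := reg.Register(numBSPendingRequests); err != nil {
        fmt.Println("Failed to register av_bs_vtx_requests due to", err)
        return
    }
    numBSPendingRequests.Set(42)
    fmt.Println("registered")
}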
@@ -76,7 +76,7 @@ func (vtx *uniqueVertex) setStatus(status choices.Status) {

func (vtx *uniqueVertex) ID() ids.ID { return vtx.vtxID }

-func (vtx *uniqueVertex) Accept() {
+func (vtx *uniqueVertex) Accept() error {
    vtx.setStatus(choices.Accepted)

    vtx.serializer.edge.Add(vtx.vtxID)

@@ -90,17 +90,17 @@ func (vtx *uniqueVertex) Accept() {
    // parents to be garbage collected
    vtx.v.parents = nil

-    vtx.serializer.db.Commit()
+    return vtx.serializer.db.Commit()
}

-func (vtx *uniqueVertex) Reject() {
+func (vtx *uniqueVertex) Reject() error {
    vtx.setStatus(choices.Rejected)

    // Should never traverse into parents of a decided vertex. Allows for the
    // parents to be garbage collected
    vtx.v.parents = nil

-    vtx.serializer.db.Commit()
+    return vtx.serializer.db.Commit()
}

func (vtx *uniqueVertex) Status() choices.Status { vtx.refresh(); return vtx.v.status }

@@ -121,6 +121,12 @@ func (vtx *uniqueVertex) Parents() []avalanche.Vertex {
    return vtx.v.parents
}

+func (vtx *uniqueVertex) Height() uint64 {
+    vtx.refresh()
+
+    return vtx.v.vtx.height
+}
+
func (vtx *uniqueVertex) Txs() []snowstorm.Tx {
    vtx.refresh()
@@ -4,14 +4,25 @@
package avalanche

import (
+    "time"
+
    "github.com/ava-labs/gecko/ids"
+    "github.com/ava-labs/gecko/network"
    "github.com/ava-labs/gecko/snow"
+    "github.com/ava-labs/gecko/snow/choices"
    "github.com/ava-labs/gecko/snow/consensus/avalanche"
    "github.com/ava-labs/gecko/snow/consensus/snowstorm"
    "github.com/ava-labs/gecko/snow/engine/common"
    "github.com/ava-labs/gecko/snow/events"
    "github.com/ava-labs/gecko/utils/formatting"
    "github.com/ava-labs/gecko/utils/random"
+    "github.com/ava-labs/gecko/utils/wrappers"
)

+const (
+    // TODO define this constant in one place rather than here and in snowman
+    // Max containers size in a MultiPut message
+    maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
+)
+
// Transitive implements the Engine interface by attempting to fetch all

@@ -33,108 +44,156 @@ type Transitive struct {
    vtxBlocked, txBlocked events.Blocker

    bootstrapped bool
+
+    errs wrappers.Errs
}

// Initialize implements the Engine interface
-func (t *Transitive) Initialize(config Config) {
-    config.Context.Log.Info("Initializing Avalanche consensus")
+func (t *Transitive) Initialize(config Config) error {
+    config.Context.Log.Info("Initializing consensus engine")

    t.Config = config
    t.metrics.Initialize(config.Context.Log, config.Params.Namespace, config.Params.Metrics)

    t.onFinished = t.finishBootstrapping
-    t.bootstrapper.Initialize(config.BootstrapConfig)

    t.polls.log = config.Context.Log
    t.polls.numPolls = t.numPolls
    t.polls.m = make(map[uint32]poll)
+
+    return t.bootstrapper.Initialize(config.BootstrapConfig)
}

-func (t *Transitive) finishBootstrapping() {
+func (t *Transitive) finishBootstrapping() error {
    // Load the vertices that were last saved as the accepted frontier
    frontier := []avalanche.Vertex(nil)
    for _, vtxID := range t.Config.State.Edge() {
        if vtx, err := t.Config.State.GetVertex(vtxID); err == nil {
            frontier = append(frontier, vtx)
        } else {
-            t.Config.Context.Log.Error("Vertex %s failed to be loaded from the frontier with %s", vtxID, err)
+            t.Config.Context.Log.Error("vertex %s failed to be loaded from the frontier with %s", vtxID, err)
        }
    }
    t.Consensus.Initialize(t.Config.Context, t.Params, frontier)
    t.bootstrapped = true

-    t.Config.Context.Log.Info("Bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
+    t.Config.Context.Log.Info("bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
+    return nil
}

// Gossip implements the Engine interface
-func (t *Transitive) Gossip() {
+func (t *Transitive) Gossip() error {
    edge := t.Config.State.Edge()
    if len(edge) == 0 {
-        t.Config.Context.Log.Debug("Dropping gossip request as no vertices have been accepted")
-        return
+        t.Config.Context.Log.Verbo("dropping gossip request as no vertices have been accepted")
+        return nil
    }

    sampler := random.Uniform{N: len(edge)}
    vtxID := edge[sampler.Sample()]
    vtx, err := t.Config.State.GetVertex(vtxID)
    if err != nil {
-        t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", vtxID, err)
-        return
+        t.Config.Context.Log.Warn("dropping gossip request as %s couldn't be loaded due to: %s", vtxID, err)
+        return nil
    }

-    t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", vtxID)
+    t.Config.Context.Log.Verbo("gossiping %s as accepted to the network", vtxID)
    t.Config.Sender.Gossip(vtxID, vtx.Bytes())
+    return nil
}

// Shutdown implements the Engine interface
-func (t *Transitive) Shutdown() {
-    t.Config.Context.Log.Info("Shutting down Avalanche consensus")
-    t.Config.VM.Shutdown()
+func (t *Transitive) Shutdown() error {
+    t.Config.Context.Log.Info("shutting down consensus engine")
+    return t.Config.VM.Shutdown()
}

// Context implements the Engine interface
func (t *Transitive) Context() *snow.Context { return t.Config.Context }

// Get implements the Engine interface
-func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
+func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
    // If this engine has access to the requested vertex, provide it
    if vtx, err := t.Config.State.GetVertex(vtxID); err == nil {
        t.Config.Sender.Put(vdr, requestID, vtxID, vtx.Bytes())
    }
+    return nil
}

+// GetAncestors implements the Engine interface
+func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
+    startTime := time.Now()
+    t.Config.Context.Log.Verbo("GetAncestors(%s, %d, %s) called", vdr, requestID, vtxID)
+    vertex, err := t.Config.State.GetVertex(vtxID)
+    if err != nil || vertex.Status() == choices.Unknown {
+        t.Config.Context.Log.Verbo("dropping getAncestors")
+        return nil // Don't have the requested vertex. Drop message.
+    }
+
+    queue := make([]avalanche.Vertex, 1, common.MaxContainersPerMultiPut) // for BFS
+    queue[0] = vertex
+    ancestorsBytesLen := 0                                               // length, in bytes, of vertex and its ancestors
+    ancestorsBytes := make([][]byte, 0, common.MaxContainersPerMultiPut) // vertex and its ancestors in BFS order
+    visited := ids.Set{}                                                 // IDs of vertices that have been in queue before
+    visited.Add(vertex.ID())
+
+    for len(ancestorsBytes) < common.MaxContainersPerMultiPut && len(queue) > 0 && time.Since(startTime) < common.MaxTimeFetchingAncestors {
+        var vtx avalanche.Vertex
+        vtx, queue = queue[0], queue[1:] // pop
+        vtxBytes := vtx.Bytes()
+        // Ensure response size isn't too large. Include wrappers.IntLen because the size of the message
+        // is included with each container, and the size is represented by an int.
+        if newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes); newLen < maxContainersLen {
+            ancestorsBytes = append(ancestorsBytes, vtxBytes)
+            ancestorsBytesLen = newLen
+        } else { // reached maximum response size
+            break
+        }
+        for _, parent := range vtx.Parents() {
+            if parent.Status() == choices.Unknown { // Don't have this vertex; ignore
+                continue
+            }
+            if parentID := parent.ID(); !visited.Contains(parentID) { // If already visited, ignore
+                queue = append(queue, parent)
+                visited.Add(parentID)
+            }
+        }
+    }
+
+    t.Config.Sender.MultiPut(vdr, requestID, ancestorsBytes)
+    return nil
+}
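GetAncestors above bounds its BFS three ways: by container count, by an encoded byte budget that counts each container's length prefix, and by elapsed time. Below is a self-contained sketch of the first two bounds with illustrative types and limits; the time bound is omitted for brevity, and nothing here is the engine's actual API.

package main

import "fmt"

// vtx is a hypothetical DAG node with serialized bytes and parent links.
type vtx struct {
    id      string
    bytes   []byte
    parents []*vtx
}

const (
    maxContainers = 3      // illustrative stand-in for MaxContainersPerMultiPut
    maxBytesLen   = 1 << 10 // illustrative byte budget
    intLen        = 4       // per-container length prefix, as in wrappers.IntLen
)

// ancestors collects a vertex and its ancestors in BFS order, stopping at
// either the container cap or the byte budget, as GetAncestors does above.
func ancestors(root *vtx) [][]byte {
    queue := []*vtx{root}
    visited := map[string]bool{root.id: true}
    out := make([][]byte, 0, maxContainers)
    outLen := 0
    for len(out) < maxContainers && len(queue) > 0 {
        v := queue[0]
        queue = queue[1:] // pop in BFS order
        if newLen := intLen + outLen + len(v.bytes); newLen < maxBytesLen {
            out = append(out, v.bytes)
            outLen = newLen
        } else {
            break // response would grow past the byte budget
        }
        for _, p := range v.parents {
            if !visited[p.id] {
                visited[p.id] = true
                queue = append(queue, p)
            }
        }
    }
    return out
}

func main() {
    g := &vtx{id: "g", bytes: []byte("g")}
    a := &vtx{id: "a", bytes: []byte("a"), parents: []*vtx{g}}
    b := &vtx{id: "b", bytes: []byte("b"), parents: []*vtx{a, g}}
    fmt.Println(len(ancestors(b))) // 3: b, a, g
}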
// Put implements the Engine interface
-func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) {
-    t.Config.Context.Log.Verbo("Put called for vertexID %s", vtxID)
+func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
+    t.Config.Context.Log.Verbo("Put(%s, %d, %s) called", vdr, requestID, vtxID)

-    if !t.bootstrapped {
-        t.bootstrapper.Put(vdr, requestID, vtxID, vtxBytes)
-        return
+    if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
+        t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
+        return nil
    }

    vtx, err := t.Config.State.ParseVertex(vtxBytes)
    if err != nil {
-        t.Config.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
-            err,
-            formatting.DumpBytes{Bytes: vtxBytes})
-        t.GetFailed(vdr, requestID)
-        return
+        t.Config.Context.Log.Debug("failed to parse vertex %s due to: %s", vtxID, err)
+        t.Config.Context.Log.Verbo("vertex:\n%s", formatting.DumpBytes{Bytes: vtxBytes})
+        return t.GetFailed(vdr, requestID)
    }
-    t.insertFrom(vdr, vtx)
+    _, err = t.insertFrom(vdr, vtx)
+    return err
}

// GetFailed implements the Engine interface
-func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) {
-    if !t.bootstrapped {
-        t.bootstrapper.GetFailed(vdr, requestID)
-        return
+func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
+    if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
+        t.Config.Context.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
+        return nil
    }

    vtxID, ok := t.vtxReqs.Remove(vdr, requestID)
    if !ok {
-        t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
-            vdr)
-        return
+        t.Config.Context.Log.Debug("GetFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
+        return nil
    }

    t.vtxBlocked.Abandon(vtxID)

@@ -149,13 +208,14 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) {
    // Track performance statistics
    t.numVtxRequests.Set(float64(t.vtxReqs.Len()))
    t.numTxRequests.Set(float64(t.missingTxs.Len()))
+    return t.errs.Err
}

// PullQuery implements the Engine interface
-func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
+func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
    if !t.bootstrapped {
-        t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping", vtxID)
-        return
+        t.Config.Context.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
+        return nil
    }

    c := &convincer{

@@ -163,39 +223,48 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
        sender:    t.Config.Sender,
        vdr:       vdr,
        requestID: requestID,
+        errs:      &t.errs,
    }

-    if !t.reinsertFrom(vdr, vtxID) {
+    added, err := t.reinsertFrom(vdr, vtxID)
+    if err != nil {
+        return err
+    }
+
+    if !added {
        c.deps.Add(vtxID)
    }

    t.vtxBlocked.Register(c)
+    return t.errs.Err
}

// PushQuery implements the Engine interface
-func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) {
+func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
    if !t.bootstrapped {
-        t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", vtxID)
-        return
+        t.Config.Context.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
+        return nil
    }

    vtx, err := t.Config.State.ParseVertex(vtxBytes)
    if err != nil {
-        t.Config.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s",
-            err,
-            formatting.DumpBytes{Bytes: vtxBytes})
-        return
+        t.Config.Context.Log.Debug("failed to parse vertex %s due to: %s", vtxID, err)
+        t.Config.Context.Log.Verbo("vertex:\n%s", formatting.DumpBytes{Bytes: vtxBytes})
+        return nil
    }
-    t.insertFrom(vdr, vtx)
-
-    t.PullQuery(vdr, requestID, vtx.ID())
+    if _, err := t.insertFrom(vdr, vtx); err != nil {
+        return err
+    }
+
+    return t.PullQuery(vdr, requestID, vtx.ID())
}

// Chits implements the Engine interface
-func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
+func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) error {
    if !t.bootstrapped {
-        t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
-        return
+        t.Config.Context.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
+        return nil
    }

    v := &voter{

@@ -206,56 +275,65 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
    }
    voteList := votes.List()
    for _, vote := range voteList {
-        if !t.reinsertFrom(vdr, vote) {
+        if added, err := t.reinsertFrom(vdr, vote); err != nil {
+            return err
+        } else if !added {
            v.deps.Add(vote)
        }
    }

    t.vtxBlocked.Register(v)
+    return t.errs.Err
}

// QueryFailed implements the Engine interface
-func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) {
-    t.Chits(vdr, requestID, ids.Set{})
+func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
+    return t.Chits(vdr, requestID, ids.Set{})
}

// Notify implements the Engine interface
-func (t *Transitive) Notify(msg common.Message) {
+func (t *Transitive) Notify(msg common.Message) error {
    if !t.bootstrapped {
-        t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
-        return
+        t.Config.Context.Log.Debug("dropping Notify due to bootstrapping")
+        return nil
    }

    switch msg {
    case common.PendingTxs:
        txs := t.Config.VM.PendingTxs()
-        t.batch(txs, false /*=force*/, false /*=empty*/)
+        return t.batch(txs, false /*=force*/, false /*=empty*/)
    }
+    return nil
}

-func (t *Transitive) repoll() {
-    if len(t.polls.m) >= t.Params.ConcurrentRepolls {
-        return
+func (t *Transitive) repoll() error {
+    if len(t.polls.m) >= t.Params.ConcurrentRepolls || t.errs.Errored() {
+        return nil
    }

    txs := t.Config.VM.PendingTxs()
-    t.batch(txs, false /*=force*/, true /*=empty*/)
+    if err := t.batch(txs, false /*=force*/, true /*=empty*/); err != nil {
+        return err
+    }

    for i := len(t.polls.m); i < t.Params.ConcurrentRepolls; i++ {
-        t.batch(nil, false /*=force*/, true /*=empty*/)
+        if err := t.batch(nil, false /*=force*/, true /*=empty*/); err != nil {
+            return err
+        }
    }
+    return nil
}

-func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) bool {
+func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) (bool, error) {
    vtx, err := t.Config.State.GetVertex(vtxID)
    if err != nil {
        t.sendRequest(vdr, vtxID)
-        return false
+        return false, nil
    }
    return t.insertFrom(vdr, vtx)
}

-func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) bool {
+func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, error) {
    issued := true
    vts := []avalanche.Vertex{vtx}
    for len(vts) > 0 {

@@ -279,12 +357,14 @@ func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, error) {
            }
        }

-        t.insert(vtx)
+        if err := t.insert(vtx); err != nil {
+            return false, err
+        }
    }
-    return issued
+    return issued, nil
}

-func (t *Transitive) insert(vtx avalanche.Vertex) {
+func (t *Transitive) insert(vtx avalanche.Vertex) error {
    vtxID := vtx.ID()

    t.pending.Add(vtxID)

@@ -318,7 +398,7 @@ func (t *Transitive) insert(vtx avalanche.Vertex) {
        }
    }

-    t.Config.Context.Log.Verbo("Vertex: %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len())
+    t.Config.Context.Log.Verbo("vertex %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len())

    t.vtxBlocked.Register(&vtxIssuer{i: i})
    t.txBlocked.Register(&txIssuer{i: i})

@@ -333,14 +413,16 @@ func (t *Transitive) insert(vtx avalanche.Vertex) {
    // Track performance statistics
    t.numVtxRequests.Set(float64(t.vtxReqs.Len()))
    t.numTxRequests.Set(float64(t.missingTxs.Len()))
-    t.numBlockedVtx.Set(float64(t.pending.Len()))
+    t.numPendingVtx.Set(float64(t.pending.Len()))
+    return t.errs.Err
}

-func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) {
+func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) error {
    batch := []snowstorm.Tx(nil)
    issuedTxs := ids.Set{}
    consumed := ids.Set{}
    issued := false
+    orphans := t.Consensus.Orphans()
    for _, tx := range txs {
        inputs := tx.InputIDs()
        overlaps := consumed.Overlaps(inputs)

@@ -352,8 +434,10 @@ func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) {
            overlaps = false
        }

-        // Force allows for a conflict to be issued
-        if txID := tx.ID(); !overlaps && !issuedTxs.Contains(txID) && (force || t.Consensus.IsVirtuous(tx)) && !tx.Status().Decided() {
+        if txID := tx.ID(); !overlaps && // should never allow conflicting txs in the same vertex
+            !issuedTxs.Contains(txID) && // shouldn't issue duplicated transactions to the same vertex
+            (force || t.Consensus.IsVirtuous(tx)) && // force allows for a conflict to be issued
+            (!t.Consensus.TxIssued(tx) || orphans.Contains(txID)) { // should only reissue orphaned txs
            batch = append(batch, tx)
            issuedTxs.Add(txID)
            consumed.Union(inputs)

@@ -361,17 +445,18 @@ func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) {
    }

    if len(batch) > 0 {
-        t.issueBatch(batch)
+        return t.issueBatch(batch)
    } else if empty && !issued {
        t.issueRepoll()
    }
+    return nil
}

func (t *Transitive) issueRepoll() {
    preferredIDs := t.Consensus.Preferences().List()
    numPreferredIDs := len(preferredIDs)
    if numPreferredIDs == 0 {
-        t.Config.Context.Log.Error("Re-query attempt was dropped due to no pending vertices")
+        t.Config.Context.Log.Error("re-query attempt was dropped due to no pending vertices")
        return
    }

@@ -390,12 +475,12 @@ func (t *Transitive) issueRepoll() {
    if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
        t.Config.Sender.PullQuery(vdrSet, t.RequestID, vtxID)
    } else if numVdrs < p.K {
-        t.Config.Context.Log.Error("Re-query for %s was dropped due to an insufficient number of validators", vtxID)
+        t.Config.Context.Log.Error("re-query for %s was dropped due to an insufficient number of validators", vtxID)
    }
}

-func (t *Transitive) issueBatch(txs []snowstorm.Tx) {
-    t.Config.Context.Log.Verbo("Batching %d transactions into a new vertex", len(txs))
+func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {
+    t.Config.Context.Log.Verbo("batching %d transactions into a new vertex", len(txs))

    virtuousIDs := t.Consensus.Virtuous().List()
    sampler := random.Uniform{N: len(virtuousIDs)}

@@ -404,16 +489,17 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) {
        parentIDs.Add(virtuousIDs[sampler.Sample()])
    }

-    if vtx, err := t.Config.State.BuildVertex(parentIDs, txs); err == nil {
-        t.insert(vtx)
-    } else {
-        t.Config.Context.Log.Warn("Error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs))
+    vtx, err := t.Config.State.BuildVertex(parentIDs, txs)
+    if err != nil {
+        t.Config.Context.Log.Warn("error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs))
+        return nil
    }
+    return t.insert(vtx)
}

func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) {
    if t.vtxReqs.Contains(vtxID) {
-        t.Config.Context.Log.Debug("Not requesting a vertex because we have recently sent a request")
+        t.Config.Context.Log.Debug("not requesting a vertex because we have recently sent a request")
        return
    }
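The four-clause condition in batch() above is the heart of duplicate-issuance prevention. Here is a sketch of the same predicate over plain sets, with illustrative types; in the engine, virtuousness and issuance actually come from Consensus.IsVirtuous, Consensus.TxIssued, and Consensus.Orphans.

package main

import "fmt"

// tx is a hypothetical transaction with named UTXO inputs.
type tx struct {
    id     string
    inputs []string
}

// shouldBatch mirrors the issuance filter above: a tx joins the next vertex
// only if it doesn't conflict with already-batched inputs, wasn't already put
// in this batch, is virtuous (unless forced), and is either not yet issued or
// an orphan being reissued.
func shouldBatch(t tx, consumed, issuedTxs, orphans, issued map[string]bool, force, virtuous bool) bool {
    overlaps := false
    for _, in := range t.inputs {
        overlaps = overlaps || consumed[in]
    }
    return !overlaps && // never two conflicting txs in one vertex
        !issuedTxs[t.id] && // no duplicates within the batch
        (force || virtuous) && // force admits a known conflict
        (!issued[t.id] || orphans[t.id]) // only reissue orphaned txs
}

func main() {
    consumed := map[string]bool{"utxo1": true}
    ok := shouldBatch(tx{id: "t2", inputs: []string{"utxo2"}},
        consumed, map[string]bool{}, map[string]bool{}, map[string]bool{}, false, true)
    fmt.Println(ok) // true: disjoint inputs, virtuous, not yet issued
}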
@@ -28,7 +28,7 @@ func TestEngineShutdown(t *testing.T) {
    config := DefaultConfig()
    vmShutdownCalled := false
    vm := &VMTest{}
-    vm.ShutdownF = func() { vmShutdownCalled = true }
+    vm.ShutdownF = func() error { vmShutdownCalled = true; return nil }
    config.VM = vm

    transitive := &Transitive{}

@@ -2167,6 +2167,9 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
    vm.Default(true)

+    vm.CantBootstrapping = false
+    vm.CantBootstrapped = false
+
    utxos := []ids.ID{GenerateID(), GenerateID()}

    txID0 := GenerateID()

@@ -2272,7 +2275,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
        panic("Unknown vertex requested")
    }

-    sender.GetF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) {
+    sender.GetAncestorsF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) {
        if !vdrID.Equals(inVdr) {
            t.Fatalf("Asking wrong validator for vertex")
        }

@@ -2315,7 +2318,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
        panic("Unknown bytes provided")
    }

-    te.Put(vdrID, *requestID, vtxID0, vtxBytes0)
+    te.MultiPut(vdrID, *requestID, [][]byte{vtxBytes0})

    vm.ParseTxF = nil
    st.parseVertex = nil

@@ -2975,3 +2978,110 @@ func TestEngineAggressivePolling(t *testing.T) {
        t.Fatalf("should have issued one pull query")
    }
}
+
+func TestEngineDuplicatedIssuance(t *testing.T) {
+    config := DefaultConfig()
+    config.Params.BatchSize = 1
+    config.Params.BetaVirtuous = 5
+    config.Params.BetaRogue = 5
+
+    sender := &common.SenderTest{}
+    sender.T = t
+    config.Sender = sender
+
+    sender.Default(true)
+    sender.CantGetAcceptedFrontier = false
+
+    vdr := validators.GenerateRandomValidator(1)
+
+    vals := validators.NewSet()
+    config.Validators = vals
+
+    vals.Add(vdr)
+
+    st := &stateTest{t: t}
+    config.State = st
+
+    st.Default(true)
+
+    vm := &VMTest{}
+    vm.T = t
+    config.VM = vm
+
+    vm.Default(true)
+
+    gVtx := &Vtx{
+        id:     GenerateID(),
+        status: choices.Accepted,
+    }
+    mVtx := &Vtx{
+        id:     GenerateID(),
+        status: choices.Accepted,
+    }
+
+    gTx := &TestTx{
+        TestTx: snowstorm.TestTx{
+            Identifier: GenerateID(),
+            Stat:       choices.Accepted,
+        },
+    }
+
+    utxos := []ids.ID{GenerateID(), GenerateID()}
+
+    tx := &TestTx{
+        TestTx: snowstorm.TestTx{
+            Identifier: GenerateID(),
+            Deps:       []snowstorm.Tx{gTx},
+            Stat:       choices.Processing,
+        },
+    }
+    tx.Ins.Add(utxos[0])
+
+    st.edge = func() []ids.ID { return []ids.ID{gVtx.ID(), mVtx.ID()} }
+    st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
+        switch {
+        case id.Equals(gVtx.ID()):
+            return gVtx, nil
+        case id.Equals(mVtx.ID()):
+            return mVtx, nil
+        }
+        t.Fatalf("Unknown vertex")
+        panic("Should have errored")
+    }
+
+    te := &Transitive{}
+    te.Initialize(config)
+    te.finishBootstrapping()
+
+    lastVtx := new(Vtx)
+    st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) {
+        consumers := []snowstorm.Tx{}
+        for _, tx := range txs {
+            consumers = append(consumers, tx)
+        }
+        lastVtx = &Vtx{
+            parents: []avalanche.Vertex{gVtx, mVtx},
+            id:      GenerateID(),
+            txs:     consumers,
+            status:  choices.Processing,
+            bytes:   []byte{1},
+        }
+        return lastVtx, nil
+    }
+
+    sender.CantPushQuery = false
+
+    vm.PendingTxsF = func() []snowstorm.Tx { return []snowstorm.Tx{tx} }
+    te.Notify(common.PendingTxs)
+
+    if len(lastVtx.txs) != 1 || !lastVtx.txs[0].ID().Equals(tx.ID()) {
+        t.Fatalf("Should have issued txs differently")
+    }
+
+    st.buildVertex = func(ids.Set, []snowstorm.Tx) (avalanche.Vertex, error) {
+        t.Fatalf("shouldn't have attempted to issue a duplicated tx")
+        return nil, nil
+    }
+
+    te.Notify(common.PendingTxs)
+}
@@ -4,15 +4,20 @@
package avalanche

import (
+    "errors"
+    "fmt"
+
    "github.com/prometheus/client_golang/prometheus"

    "github.com/ava-labs/gecko/ids"
    "github.com/ava-labs/gecko/snow/choices"
    "github.com/ava-labs/gecko/snow/consensus/snowstorm"
    "github.com/ava-labs/gecko/snow/engine/common/queue"
+    "github.com/ava-labs/gecko/utils/logging"
)

type txParser struct {
+    log                     logging.Logger
    numAccepted, numDropped prometheus.Counter
    vm                      DAGVM
}

@@ -23,6 +28,7 @@ func (p *txParser) Parse(txBytes []byte) (queue.Job, error) {
        return nil, err
    }
    return &txJob{
+        log:         p.log,
        numAccepted: p.numAccepted,
        numDropped:  p.numDropped,
        tx:          tx,

@@ -30,6 +36,7 @@ func (p *txParser) Parse(txBytes []byte) (queue.Job, error) {
}

type txJob struct {
+    log                     logging.Logger
    numAccepted, numDropped prometheus.Counter
    tx                      snowstorm.Tx
}

@@ -44,19 +51,31 @@ func (t *txJob) MissingDependencies() ids.Set {
    }
    return missing
}

-func (t *txJob) Execute() {
+func (t *txJob) Execute() error {
    if t.MissingDependencies().Len() != 0 {
        t.numDropped.Inc()
-        return
+        return errors.New("attempting to accept a transaction with missing dependencies")
    }

-    switch t.tx.Status() {
+    status := t.tx.Status()
+    switch status {
    case choices.Unknown, choices.Rejected:
        t.numDropped.Inc()
+        return fmt.Errorf("attempting to execute transaction with status %s", status)
    case choices.Processing:
-        t.tx.Verify()
-        t.tx.Accept()
+        if err := t.tx.Verify(); err != nil {
+            t.log.Debug("transaction %s failed verification during bootstrapping due to %s",
+                t.tx.ID(), err)
+        }

        t.numAccepted.Inc()
+        if err := t.tx.Accept(); err != nil {
+            t.log.Error("transaction %s failed to accept during bootstrapping due to %s",
+                t.tx.ID(), err)
+            return fmt.Errorf("failed to accept transaction in bootstrapping: %w", err)
+        }
    }
+    return nil
}
func (t *txJob) Bytes() []byte { return t.tx.Bytes() }
@@ -0,0 +1,117 @@
package avalanche

import (
	"container/heap"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
)

// A vertexItem is a Vertex managed by the priority queue.
type vertexItem struct {
	vertex avalanche.Vertex
	index  int // The index of the item in the heap.
}

// A priorityQueue implements heap.Interface and holds vertexItems.
type priorityQueue []*vertexItem

func (pq priorityQueue) Len() int { return len(pq) }

// Returns true if the vertex at index i has greater height than the vertex at
// index j.
func (pq priorityQueue) Less(i, j int) bool {
	statusI := pq[i].vertex.Status()
	statusJ := pq[j].vertex.Status()

	// Put unknown vertices at the front of the heap so that once the traversal
	// has made it below a certain height in the DAG, it does not need to reset
	if !statusI.Fetched() {
		return true
	}
	if !statusJ.Fetched() {
		return false
	}
	return pq[i].vertex.Height() > pq[j].vertex.Height()
}

func (pq priorityQueue) Swap(i, j int) {
	pq[i], pq[j] = pq[j], pq[i]
	pq[i].index = i
	pq[j].index = j
}

// Push adds an item to this priority queue. x must have type *vertexItem
func (pq *priorityQueue) Push(x interface{}) {
	n := len(*pq)
	item := x.(*vertexItem)
	item.index = n
	*pq = append(*pq, item)
}

// Pop returns the last item in this priorityQueue
func (pq *priorityQueue) Pop() interface{} {
	old := *pq
	n := len(old)
	item := old[n-1]
	old[n-1] = nil
	item.index = -1
	*pq = old[0 : n-1]
	return item
}

// vertexHeap defines the functionality of a heap of vertices
// with unique VertexIDs ordered by height
type vertexHeap interface {
	Clear()
	Push(avalanche.Vertex) bool
	Pop() avalanche.Vertex // Requires that there be at least one element
	Contains(ids.ID) bool
	Len() int
}

type maxHeightVertexHeap struct {
	heap       *priorityQueue
	elementIDs ids.Set
}

func newMaxVertexHeap() *maxHeightVertexHeap {
	return &maxHeightVertexHeap{
		heap:       &priorityQueue{},
		elementIDs: ids.Set{},
	}
}

func (vh *maxHeightVertexHeap) Clear() {
	vh.heap = &priorityQueue{}
	vh.elementIDs.Clear()
}

// Push adds an element to this heap. Returns true if the element was added.
// Returns false if it was already in the heap.
func (vh *maxHeightVertexHeap) Push(vtx avalanche.Vertex) bool {
	vtxID := vtx.ID()
	if vh.elementIDs.Contains(vtxID) {
		return false
	}

	vh.elementIDs.Add(vtxID)
	item := &vertexItem{
		vertex: vtx,
	}
	heap.Push(vh.heap, item)
	return true
}

// If there are any vertices in this heap with status Unknown, removes one such
// vertex and returns it. Otherwise, removes and returns the vertex in this heap
// with the greatest height.
func (vh *maxHeightVertexHeap) Pop() avalanche.Vertex {
	vtx := heap.Pop(vh.heap).(*vertexItem).vertex
	vh.elementIDs.Remove(vtx.ID())
	return vtx
}

func (vh *maxHeightVertexHeap) Len() int { return vh.heap.Len() }

func (vh *maxHeightVertexHeap) Contains(vtxID ids.ID) bool { return vh.elementIDs.Contains(vtxID) }

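For readers less familiar with Go's container/heap: the package calls Less and Swap to maintain the heap invariant, while the Push and Pop methods only grow and shrink the backing slice, exactly as priorityQueue does above. A self-contained toy max-heap showing the same mechanics (illustrative only, not part of the change):

package main

import (
	"container/heap"
	"fmt"
)

// intMaxHeap is a minimal analogue of priorityQueue: container/heap
// drives the ordering through Less/Swap; Push/Pop only touch the
// underlying slice.
type intMaxHeap []int

func (h intMaxHeap) Len() int           { return len(h) }
func (h intMaxHeap) Less(i, j int) bool { return h[i] > h[j] } // max-heap: greatest first
func (h intMaxHeap) Swap(i, j int)      { h[i], h[j] = h[j], h[i] }

func (h *intMaxHeap) Push(x interface{}) { *h = append(*h, x.(int)) }

func (h *intMaxHeap) Pop() interface{} {
	old := *h
	n := len(old)
	item := old[n-1]
	*h = old[:n-1]
	return item
}

func main() {
	h := &intMaxHeap{1, 3}
	heap.Init(h)
	heap.Push(h, 2)
	for h.Len() > 0 {
		fmt.Println(heap.Pop(h)) // prints 3, 2, 1 — highest first
	}
}
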
@@ -0,0 +1,130 @@
package avalanche

import (
	"testing"

	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
)

// This test inserts several vertices into a max-height vertex heap
// and removes them, checking that they come back in order of priority.
func TestUniqueVertexHeapReturnsOrdered(t *testing.T) {
	h := newMaxVertexHeap()

	vtx0 := &Vtx{
		id:     GenerateID(),
		height: 0,
		status: choices.Processing,
	}

	vtx1 := &Vtx{
		id:     GenerateID(),
		height: 1,
		status: choices.Processing,
	}

	vtx2 := &Vtx{
		id:     GenerateID(),
		height: 1,
		status: choices.Processing,
	}

	vtx3 := &Vtx{
		id:     GenerateID(),
		height: 3,
		status: choices.Processing,
	}

	vtx4 := &Vtx{
		id:     GenerateID(),
		status: choices.Unknown,
	}

	vts := []avalanche.Vertex{vtx0, vtx1, vtx2, vtx3, vtx4}

	for _, vtx := range vts {
		h.Push(vtx)
	}

	vtxZ := h.Pop()
	if !vtxZ.ID().Equals(vtx4.ID()) {
		t.Fatalf("Heap did not pop unknown element first")
	}

	vtxA := h.Pop()
	if vtxA.Height() != 3 {
		t.Fatalf("First height from heap was incorrect")
	} else if !vtxA.ID().Equals(vtx3.ID()) {
		t.Fatalf("Incorrect ID on vertex popped from heap")
	}

	vtxB := h.Pop()
	if vtxB.Height() != 1 {
		t.Fatalf("Second height from heap was incorrect")
	} else if !vtxB.ID().Equals(vtx1.ID()) && !vtxB.ID().Equals(vtx2.ID()) {
		t.Fatalf("Incorrect ID on vertex popped from heap")
	}

	vtxC := h.Pop()
	if vtxC.Height() != 1 {
		t.Fatalf("Third height from heap was incorrect")
	} else if !vtxC.ID().Equals(vtx1.ID()) && !vtxC.ID().Equals(vtx2.ID()) {
		t.Fatalf("Incorrect ID on vertex popped from heap")
	}

	if vtxB.ID().Equals(vtxC.ID()) {
		t.Fatalf("Heap returned same element more than once")
	}

	vtxD := h.Pop()
	if vtxD.Height() != 0 {
		t.Fatalf("Last height returned was incorrect")
	} else if !vtxD.ID().Equals(vtx0.ID()) {
		t.Fatalf("Last item from heap had incorrect ID")
	}

	if h.Len() != 0 {
		t.Fatalf("Heap was not empty after popping all of its elements")
	}
}

func TestUniqueVertexHeapRemainsUnique(t *testing.T) {
	h := newMaxVertexHeap()

	vtx0 := &Vtx{
		height: 0,
		id:     GenerateID(),
		status: choices.Processing,
	}
	vtx1 := &Vtx{
		height: 1,
		id:     GenerateID(),
		status: choices.Processing,
	}

	sharedID := GenerateID()
	vtx2 := &Vtx{
		height: 1,
		id:     sharedID,
		status: choices.Processing,
	}

	vtx3 := &Vtx{
		height: 2,
		id:     sharedID,
		status: choices.Processing,
	}

	pushed1 := h.Push(vtx0)
	pushed2 := h.Push(vtx1)
	pushed3 := h.Push(vtx2)
	pushed4 := h.Push(vtx3)
	if h.Len() != 3 {
		t.Fatalf("Unique Vertex Heap has incorrect length: %d", h.Len())
	} else if !(pushed1 && pushed2 && pushed3) {
		t.Fatalf("Failed to push a new unique element")
	} else if pushed4 {
		t.Fatalf("Pushed non-unique element to the unique vertex heap")
	}
}

@@ -4,15 +4,20 @@
package avalanche

import (
	"errors"
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
	"github.com/ava-labs/gecko/snow/engine/common/queue"
	"github.com/ava-labs/gecko/utils/logging"
)

type vtxParser struct {
	log                     logging.Logger
	numAccepted, numDropped prometheus.Counter
	state                   State
}

@@ -23,6 +28,7 @@ func (p *vtxParser) Parse(vtxBytes []byte) (queue.Job, error) {
		return nil, err
	}
	return &vertexJob{
		log:         p.log,
		numAccepted: p.numAccepted,
		numDropped:  p.numDropped,
		vtx:         vtx,

@@ -30,6 +36,7 @@ func (p *vtxParser) Parse(vtxBytes []byte) (queue.Job, error) {
}

type vertexJob struct {
	log                     logging.Logger
	numAccepted, numDropped prometheus.Counter
	vtx                     avalanche.Vertex
}

@@ -44,23 +51,29 @@ func (v *vertexJob) MissingDependencies() ids.Set {
	}
	return missing
}

func (v *vertexJob) Execute() {
func (v *vertexJob) Execute() error {
	if v.MissingDependencies().Len() != 0 {
		v.numDropped.Inc()
		return
		return errors.New("attempting to execute blocked vertex")
	}
	for _, tx := range v.vtx.Txs() {
		if tx.Status() != choices.Accepted {
			v.numDropped.Inc()
			return
			v.log.Warn("attempting to execute vertex with non-accepted transactions")
			return nil
		}
	}
	switch v.vtx.Status() {
	status := v.vtx.Status()
	switch status {
	case choices.Unknown, choices.Rejected:
		v.numDropped.Inc()
		return fmt.Errorf("attempting to execute vertex with status %s", status)
	case choices.Processing:
		v.vtx.Accept()
		v.numAccepted.Inc()
		if err := v.vtx.Accept(); err != nil {
			return fmt.Errorf("failed to accept vertex in bootstrapping: %w", err)
		}
	}
	return nil
}

func (v *vertexJob) Bytes() []byte { return v.vtx.Bytes() }

@@ -5,7 +5,6 @@ package avalanche

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
	"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)

@@ -27,7 +26,7 @@ func (v *voter) Fulfill(id ids.ID) {
func (v *voter) Abandon(id ids.ID) { v.Fulfill(id) }

func (v *voter) Update() {
	if v.deps.Len() != 0 {
	if v.deps.Len() != 0 || v.t.errs.Errored() {
		return
	}

@@ -38,7 +37,10 @@ func (v *voter) Update() {
	results = v.bubbleVotes(results)

	v.t.Config.Context.Log.Debug("Finishing poll with:\n%s", &results)
	v.t.Consensus.RecordPoll(results)
	if err := v.t.Consensus.RecordPoll(results); err != nil {
		v.t.errs.Add(err)
		return
	}

	txs := []snowstorm.Tx(nil)
	for _, orphanID := range v.t.Consensus.Orphans().List() {

@@ -51,50 +53,62 @@ func (v *voter) Update() {
	if len(txs) > 0 {
		v.t.Config.Context.Log.Debug("Re-issuing %d transactions", len(txs))
	}
	v.t.batch(txs, true /*=force*/, false /*empty*/)

	if v.t.Consensus.Quiesce() {
		v.t.Config.Context.Log.Verbo("Avalanche engine can quiesce")
	if err := v.t.batch(txs, true /*=force*/, false /*empty*/); err != nil {
		v.t.errs.Add(err)
		return
	}

	v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce")
	v.t.repoll()
	if v.t.Consensus.Quiesce() {
		v.t.Config.Context.Log.Debug("Avalanche engine can quiesce")
		return
	}

	v.t.Config.Context.Log.Debug("Avalanche engine can't quiesce")
	v.t.errs.Add(v.t.repoll())
}

func (v *voter) bubbleVotes(votes ids.UniqueBag) ids.UniqueBag {
	bubbledVotes := ids.UniqueBag{}
	vertexHeap := newMaxVertexHeap()
	for _, vote := range votes.List() {
		set := votes.GetSet(vote)
		vtx, err := v.t.Config.State.GetVertex(vote)
		if err != nil {
			continue
		}

		vts := []avalanche.Vertex{vtx}
		for len(vts) > 0 {
			vtx := vts[0]
			vts = vts[1:]
		vertexHeap.Push(vtx)
	}

			status := vtx.Status()
			if !status.Fetched() {
				v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is unknown", set.Len(), vtx.ID())
				continue
			}
	for vertexHeap.Len() > 0 {
		vtx := vertexHeap.Pop()
		vtxID := vtx.ID()
		set := votes.GetSet(vtxID)
		status := vtx.Status()

			if status.Decided() {
				v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is decided", set.Len(), vtx.ID())
				continue
			}
		if !status.Fetched() {
			v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is unknown", set.Len(), vtxID)
			bubbledVotes.RemoveSet(vtx.ID())
			continue
		}

			if v.t.Consensus.VertexIssued(vtx) {
				v.t.Config.Context.Log.Verbo("Applying %d vote(s) for %s", set.Len(), vtx.ID())
				bubbledVotes.UnionSet(vtx.ID(), set)
			} else {
				v.t.Config.Context.Log.Verbo("Bubbling %d vote(s) for %s because the vertex isn't issued", set.Len(), vtx.ID())
				vts = append(vts, vtx.Parents()...)
		if status.Decided() {
			v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is decided", set.Len(), vtxID)
			bubbledVotes.RemoveSet(vtx.ID())
			continue
		}

		if v.t.Consensus.VertexIssued(vtx) {
			v.t.Config.Context.Log.Verbo("Applying %d vote(s) for %s", set.Len(), vtx.ID())
			bubbledVotes.UnionSet(vtx.ID(), set)
		} else {
			v.t.Config.Context.Log.Verbo("Bubbling %d vote(s) for %s because the vertex isn't issued", set.Len(), vtx.ID())
			bubbledVotes.RemoveSet(vtx.ID()) // Remove votes for this vertex because it hasn't been issued
			for _, parentVtx := range vtx.Parents() {
				bubbledVotes.UnionSet(parentVtx.ID(), set)
				vertexHeap.Push(parentVtx)
			}
		}
	}

	return bubbledVotes
}

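The reworked bubbleVotes replaces the per-vote BFS with a single walk of the DAG in strictly decreasing height, so each vertex is popped from the heap at most once and votes for unissued vertices are moved onto their parents. A toy sketch of that traversal pattern, using hypothetical node types and plain integer vote counts rather than the engine's real UniqueBag:

package main

import (
	"container/heap"
	"fmt"
)

// node is a toy stand-in for a vertex: height, parents, and an
// "issued" flag.
type node struct {
	name    string
	height  int
	issued  bool
	parents []*node
}

// nodeHeap pops the greatest height first, mirroring maxHeightVertexHeap.
type nodeHeap []*node

func (h nodeHeap) Len() int            { return len(h) }
func (h nodeHeap) Less(i, j int) bool  { return h[i].height > h[j].height }
func (h nodeHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *nodeHeap) Push(x interface{}) { *h = append(*h, x.(*node)) }
func (h *nodeHeap) Pop() interface{} {
	old := *h
	n := old[len(old)-1]
	*h = old[:len(old)-1]
	return n
}

// bubble moves votes from unissued nodes to their parents, visiting
// nodes in decreasing height so every node is handled at most once.
func bubble(votes map[string]int, start []*node) map[string]int {
	h := &nodeHeap{}
	seen := map[string]bool{} // dedupe, like the heap's elementIDs set
	for _, n := range start {
		heap.Push(h, n)
		seen[n.name] = true
	}
	applied := map[string]int{}
	for h.Len() > 0 {
		n := heap.Pop(h).(*node)
		if n.issued {
			applied[n.name] += votes[n.name] // apply votes directly
			continue
		}
		for _, p := range n.parents {
			votes[p.name] += votes[n.name] // bubble the vote upward
			if !seen[p.name] {
				heap.Push(h, p)
				seen[p.name] = true
			}
		}
	}
	return applied
}

func main() {
	root := &node{name: "root", height: 0, issued: true}
	child := &node{name: "child", height: 1, parents: []*node{root}}
	fmt.Println(bubble(map[string]int{"child": 1}, []*node{child})) // map[root:1]
}
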
@@ -16,6 +16,7 @@ type Bootstrapable interface {
	// Returns the subset of containerIDs that are accepted by this chain.
	FilterAccepted(containerIDs ids.Set) (acceptedContainerIDs ids.Set)

	// Force the provided containers to be accepted.
	ForceAccepted(acceptedContainerIDs ids.Set)
	// Force the provided containers to be accepted. Only returns fatal errors
	// if they occur.
	ForceAccepted(acceptedContainerIDs ids.Set) error
}

@@ -5,15 +5,31 @@ package common

import (
	stdmath "math"
	"time"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/math"
)

const (
	// MaxContainersPerMultiPut is the maximum number of containers that can be sent in a MultiPut
	MaxContainersPerMultiPut = 2000

	// StatusUpdateFrequency is how often the bootstrapper logs "processed X blocks/vertices":
	// once every [StatusUpdateFrequency] blocks/vertices
	StatusUpdateFrequency = 2500
)

var (
	// MaxTimeFetchingAncestors is the maximum amount of time to spend fetching vertices during a call to GetAncestors
	MaxTimeFetchingAncestors = 100 * time.Millisecond
)

// Bootstrapper implements the Engine interface.
type Bootstrapper struct {
	Config

	// IDs of validators we have requested the accepted frontier from but haven't
	// received a reply from
	pendingAcceptedFrontier ids.ShortSet
	acceptedFrontier        ids.Set

@@ -37,40 +53,50 @@ func (b *Bootstrapper) Initialize(config Config) {
}

// Startup implements the Engine interface.
func (b *Bootstrapper) Startup() {
func (b *Bootstrapper) Startup() error {
	if b.pendingAcceptedFrontier.Len() == 0 {
		b.Context.Log.Info("Bootstrapping skipped due to no provided bootstraps")
		b.Bootstrapable.ForceAccepted(ids.Set{})
		return
		return b.Bootstrapable.ForceAccepted(ids.Set{})
	}

	// Ask each of the bootstrap validators to send their accepted frontier
	vdrs := ids.ShortSet{}
	vdrs.Union(b.pendingAcceptedFrontier)

	b.RequestID++
	b.Sender.GetAcceptedFrontier(vdrs, b.RequestID)
	return nil
}

// GetAcceptedFrontier implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) {
func (b *Bootstrapper) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) error {
	b.Sender.AcceptedFrontier(validatorID, requestID, b.Bootstrapable.CurrentAcceptedFrontier())
	return nil
}

// GetAcceptedFrontierFailed implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) {
func (b *Bootstrapper) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) error {
	// If we can't get a response from [validatorID], act as though they said their accepted frontier is empty
	b.AcceptedFrontier(validatorID, requestID, ids.Set{})
	return nil
}

// AcceptedFrontier implements the Engine interface.
func (b *Bootstrapper) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
func (b *Bootstrapper) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
	if !b.pendingAcceptedFrontier.Contains(validatorID) {
		b.Context.Log.Debug("Received an AcceptedFrontier message from %s unexpectedly", validatorID)
		return
		return nil
	}
	// Mark that we received a response from [validatorID]
	b.pendingAcceptedFrontier.Remove(validatorID)

	// Union the reported accepted frontier from [validatorID] with the accepted frontier we got from others
	b.acceptedFrontier.Union(containerIDs)

	// We've received the accepted frontier from every bootstrap validator
	// Ask each bootstrap validator to filter the list of containers that we were
	// told are on the accepted frontier such that the list only contains containers
	// they think are accepted
	if b.pendingAcceptedFrontier.Len() == 0 {
		vdrs := ids.ShortSet{}
		vdrs.Union(b.pendingAccepted)

@@ -78,28 +104,33 @@ func (b *Bootstrapper) AcceptedFrontier(validatorID ids.ShortID, requestID uint3
		b.RequestID++
		b.Sender.GetAccepted(vdrs, b.RequestID, b.acceptedFrontier)
	}
	return nil
}

// GetAccepted implements the Engine interface.
func (b *Bootstrapper) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
func (b *Bootstrapper) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
	b.Sender.Accepted(validatorID, requestID, b.Bootstrapable.FilterAccepted(containerIDs))
	return nil
}

// GetAcceptedFailed implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
	b.Accepted(validatorID, requestID, ids.Set{})
func (b *Bootstrapper) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) error {
	// If we can't get a response from [validatorID], act as though they said
	// that they think none of the containers we sent them in GetAccepted are accepted
	return b.Accepted(validatorID, requestID, ids.Set{})
}

// Accepted implements the Engine interface.
func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
	if !b.pendingAccepted.Contains(validatorID) {
		b.Context.Log.Debug("Received an Accepted message from %s unexpectedly", validatorID)
		return
		return nil
	}
	// Mark that we received a response from [validatorID]
	b.pendingAccepted.Remove(validatorID)

	weight := uint64(0)
	if vdr, ok := b.Validators.Get(validatorID); ok {
	if vdr, ok := b.Beacons.Get(validatorID); ok {
		weight = vdr.Weight()
	}

@@ -113,20 +144,24 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta
		b.acceptedVotes[key] = newWeight
	}

	if b.pendingAccepted.Len() == 0 {
		accepted := ids.Set{}
		for key, weight := range b.acceptedVotes {
			if weight >= b.Config.Alpha {
				accepted.Add(ids.NewID(key))
			}
		}

		if size := accepted.Len(); size == 0 && b.Config.Beacons.Len() > 0 {
			b.Context.Log.Warn("Bootstrapping finished with no accepted frontier. This is likely the result of failing to connect to the specified bootstraps, or no transactions have been issued on this chain yet")
		} else {
			b.Context.Log.Info("Bootstrapping started syncing with %d vertices in the accepted frontier", size)
		}

		b.Bootstrapable.ForceAccepted(accepted)
	if b.pendingAccepted.Len() != 0 {
		return nil
	}

	// We've received the filtered accepted frontier from every bootstrap validator
	// Accept all containers that have a sufficient weight behind them
	accepted := ids.Set{}
	for key, weight := range b.acceptedVotes {
		if weight >= b.Config.Alpha {
			accepted.Add(ids.NewID(key))
		}
	}

	if size := accepted.Len(); size == 0 && b.Config.Beacons.Len() > 0 {
		b.Context.Log.Info("Bootstrapping finished with no accepted frontier. This is likely the result of failing to connect to the specified bootstraps, or no transactions have been issued on this chain yet")
	} else {
		b.Context.Log.Info("Bootstrapping started syncing with %d vertices in the accepted frontier", size)
	}

	return b.Bootstrapable.ForceAccepted(accepted)
}

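Once every beacon has answered, the bootstrapper keeps only the containers whose supporting stake reaches the threshold Alpha, as the tail of Accepted above shows. A toy version of that tally, using hypothetical names and plain maps instead of ids.Set:

package main

import "fmt"

// filterByWeight keeps the container IDs whose total backing weight
// meets the alpha threshold — the same filter the bootstrapper runs
// after the last beacon replies (toy types, not the engine's).
func filterByWeight(votes map[string]uint64, alpha uint64) []string {
	accepted := []string(nil)
	for id, weight := range votes {
		if weight >= alpha {
			accepted = append(accepted, id)
		}
	}
	return accepted
}

func main() {
	votes := map[string]uint64{"vtxA": 30, "vtxB": 10}
	fmt.Println(filterByWeight(votes, 20)) // [vtxA]
}
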
@@ -32,7 +32,7 @@ type ExternalHandler interface {
}

// FrontierHandler defines how a consensus engine reacts to frontier messages
// from other validators
// from other validators. Functions only return fatal errors if they occur.
type FrontierHandler interface {
	// Notify this engine of a request for the accepted frontier of vertices.
	//

@@ -45,19 +45,19 @@ type FrontierHandler interface {
	//
	// This engine should respond with an AcceptedFrontier message with the same
	// requestID, and the engine's current accepted frontier.
	GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32)
	GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) error

	// Notify this engine of an accepted frontier.
	//
	// This function can be called by any validator. It is not safe to assume
	// this message is in response to a GetAcceptedFrontier message, is utilizing a
	// unique requestID, or that the containerIDs are from a valid frontier.
	// However, the validatorID is assumed to be authenticated.
	// this message is in response to a GetAcceptedFrontier message, is
	// utilizing a unique requestID, or that the containerIDs are from a valid
	// frontier. However, the validatorID is assumed to be authenticated.
	AcceptedFrontier(
		validatorID ids.ShortID,
		requestID uint32,
		containerIDs ids.Set,
	)
	) error

	// Notify this engine that a get accepted frontier request it issued has
	// failed.

@@ -69,11 +69,12 @@ type FrontierHandler interface {
	//
	// The validatorID, and requestID, are assumed to be the same as those sent
	// in the GetAcceptedFrontier message.
	GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32)
	GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) error
}

// AcceptedHandler defines how a consensus engine reacts to messages pertaining
// to accepted containers from other validators
// to accepted containers from other validators. Functions only return fatal
// errors if they occur.
type AcceptedHandler interface {
	// Notify this engine of a request to filter non-accepted vertices.
	//

@@ -84,7 +85,11 @@ type AcceptedHandler interface {
	// This engine should respond with an Accepted message with the same
	// requestID, and the subset of the containerIDs that this node has decided
	// are accepted.
	GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
	GetAccepted(
		validatorID ids.ShortID,
		requestID uint32,
		containerIDs ids.Set,
	) error

	// Notify this engine of a set of accepted vertices.
	//

@@ -93,7 +98,11 @@ type AcceptedHandler interface {
	// unique requestID, or that the containerIDs are a subset of the
	// containerIDs from a GetAccepted message. However, the validatorID is
	// assumed to be authenticated.
	Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
	Accepted(
		validatorID ids.ShortID,
		requestID uint32,
		containerIDs ids.Set,
	) error

	// Notify this engine that a get accepted request it issued has failed.
	//

@@ -104,11 +113,11 @@ type AcceptedHandler interface {
	//
	// The validatorID, and requestID, are assumed to be the same as those sent
	// in the GetAccepted message.
	GetAcceptedFailed(validatorID ids.ShortID, requestID uint32)
	GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) error
}

// FetchHandler defines how a consensus engine reacts to retrieval messages from
// other validators
// other validators. Functions only return fatal errors if they occur.
type FetchHandler interface {
	// Notify this engine of a request for a container.
	//

@@ -124,7 +133,22 @@ type FetchHandler interface {
	// This engine should respond with a Put message with the same requestID if
	// the container was locally available. Otherwise, the message can be safely
	// dropped.
	Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
	Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error

	// Notify this engine of a request for a container and its ancestors.
	// The request is from validator [validatorID]. The requested container is [containerID].
	//
	// This function can be called by any validator. It is not safe to assume
	// this message is utilizing a unique requestID. It is also not safe to
	// assume the requested containerID exists. However, the validatorID is
	// assumed to be authenticated.
	//
	// This engine should respond with a MultiPut message with the same requestID,
	// which contains [containerID] as well as its ancestors. See MultiPut's documentation.
	//
	// If this engine doesn't have some ancestors, it should reply with its best effort attempt at getting them.
	// If this engine doesn't have [containerID] it can ignore this message.
	GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error

	// Notify this engine of a container.
	//

@@ -141,7 +165,25 @@ type FetchHandler interface {
		requestID uint32,
		containerID ids.ID,
		container []byte,
	)
	) error

	// Notify this engine of multiple containers.
	// Each element of [containers] is the byte representation of a container.
	//
	// This should only be called during bootstrapping, and in response to a GetAncestors message to
	// [validatorID] with request ID [requestID]. This call should contain the container requested in
	// that message, along with its ancestors.
	// The containers should be in BFS order (i.e. the first container must be the container
	// requested in the GetAncestors message and further back ancestors are later in [containers]).
	//
	// It is not safe to assume this message is in response to a GetAncestors message, that this
	// message has a unique requestID or that any of the containers in [containers] are valid.
	// However, the validatorID is assumed to be authenticated.
	MultiPut(
		validatorID ids.ShortID,
		requestID uint32,
		containers [][]byte,
	) error

	// Notify this engine that a get request it issued has failed.
	//

@@ -151,11 +193,21 @@ type FetchHandler interface {
	//
	// The validatorID and requestID are assumed to be the same as those sent in
	// the Get message.
	GetFailed(validatorID ids.ShortID, requestID uint32)
	GetFailed(validatorID ids.ShortID, requestID uint32) error

	// Notify this engine that a GetAncestors request it issued has failed.
	//
	// This function will be called if the engine sent a GetAncestors message that is not
	// expected to receive a response. This could be because the recipient of
	// the message is unknown or because the request has timed out.
	//
	// The validatorID and requestID are assumed to be the same as those sent in
	// the GetAncestors message.
	GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) error
}

// QueryHandler defines how a consensus engine reacts to query messages from
// other validators
// other validators. Functions only return fatal errors if they occur.
type QueryHandler interface {
	// Notify this engine of a request for our preferences.
	//

@@ -168,7 +220,11 @@ type QueryHandler interface {
	// is complete, this engine should send this validator the current
	// preferences in a Chits message. The Chits message should have the same
	// requestID that was passed in here.
	PullQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
	PullQuery(
		validatorID ids.ShortID,
		requestID uint32,
		containerID ids.ID,
	) error

	// Notify this engine of a request for our preferences.
	//

@@ -191,14 +247,14 @@ type QueryHandler interface {
		requestID uint32,
		containerID ids.ID,
		container []byte,
	)
	) error

	// Notify this engine of the specified validator's preferences.
	//
	// This function can be called by any validator. It is not safe to assume
	// this message is in response to a PullQuery or a PushQuery message.
	// However, the validatorID is assumed to be authenticated.
	Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
	Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error

	// Notify this engine that a query it issued has failed.
	//

@@ -209,26 +265,27 @@ type QueryHandler interface {
	//
	// The validatorID and the requestID are assumed to be the same as those
	// sent in the Query message.
	QueryFailed(validatorID ids.ShortID, requestID uint32)
	QueryFailed(validatorID ids.ShortID, requestID uint32) error
}

// InternalHandler defines how this consensus engine reacts to messages from
// other components of this validator
// other components of this validator. Functions only return fatal errors if
// they occur.
type InternalHandler interface {
	// Startup this engine.
	//
	// This function will be called once the environment is configured to be
	// able to run the engine.
	Startup()
	Startup() error

	// Gossip to the network a container on the accepted frontier
	Gossip()
	Gossip() error

	// Shutdown this engine.
	//
	// This function will be called when the environment is exiting.
	Shutdown()
	Shutdown() error

	// Notify this engine of a message from the virtual machine.
	Notify(Message)
	Notify(Message) error
}

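With every handler in these interfaces returning only fatal errors, whatever drives the engine can apply a single rule: any non-nil result means the chain should stop. A minimal sketch of such a dispatch step, using hypothetical types (the real router lives elsewhere in the codebase):

package main

import (
	"errors"
	"fmt"
)

// engine is a cut-down view of the handler interfaces above: every
// callback reports only fatal errors.
type engine interface {
	Gossip() error
	Shutdown() error
}

type noopEngine struct{ fail bool }

func (e *noopEngine) Gossip() error {
	if e.fail {
		return errors.New("fatal: datastore corrupted")
	}
	return nil
}

func (e *noopEngine) Shutdown() error { return nil }

// drive runs one message and shuts the engine down on a fatal error,
// which is how a router would react to these return values.
func drive(e engine) {
	if err := e.Gossip(); err != nil {
		fmt.Println("stopping chain:", err)
		_ = e.Shutdown()
	}
}

func main() {
	drive(&noopEngine{fail: true}) // stopping chain: fatal: datastore corrupted
}
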
@@ -12,7 +12,7 @@ type Job interface {
	ID() ids.ID

	MissingDependencies() ids.Set
	Execute()
	Execute() error

	Bytes() []byte
}

@@ -79,14 +79,19 @@ func (j *Jobs) HasNext() (bool, error) {

// Execute ...
func (j *Jobs) Execute(job Job) error {
	job.Execute()
	if err := job.Execute(); err != nil {
		return err
	}

	jobID := job.ID()

	blocking, _ := j.state.Blocking(j.db, jobID)
	j.state.DeleteBlocking(j.db, jobID)
	blocking, err := j.state.Blocking(j.db, jobID)
	if err != nil {
		return err
	}
	j.state.DeleteBlocking(j.db, jobID, blocking)

	for _, blockedID := range blocking.List() {
	for _, blockedID := range blocking {
		job, err := j.state.Job(j.db, blockedID)
		if err != nil {
			return err

@@ -128,15 +133,19 @@ func (j *Jobs) push(job Job) error {
}

func (j *Jobs) block(job Job, deps ids.Set) error {
	if has, err := j.state.HasJob(j.db, job.ID()); err != nil {
		return err
	} else if has {
		return errDuplicate
	}

	if err := j.state.SetJob(j.db, job); err != nil {
		return err
	}

	jobID := job.ID()
	for _, depID := range deps.List() {
		blocking, _ := j.state.Blocking(j.db, depID)
		blocking.Add(jobID)
		if err := j.state.SetBlocking(j.db, depID, blocking); err != nil {
		if err := j.state.AddBlocking(j.db, depID, jobID); err != nil {
			return err
		}
	}

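The reworked Execute releases dependents through the persisted blocking index: run the job, read the IDs it was blocking, delete those index entries, and re-check each dependent for readiness. A toy in-memory version of that release step, using hypothetical map-based storage in place of the database-backed state:

package main

import "fmt"

// release mimics Jobs.Execute's unblocking step: when job id runs,
// every job it was blocking is re-checked and, if nothing else is
// missing, moved onto the runnable queue (toy representation).
func release(id string, blocking map[string][]string, missing map[string]map[string]bool, runnable *[]string) {
	for _, blocked := range blocking[id] {
		delete(missing[blocked], id) // this dependency is now satisfied
		if len(missing[blocked]) == 0 {
			*runnable = append(*runnable, blocked)
		}
	}
	delete(blocking, id) // drop the index entry, like DeleteBlocking
}

func main() {
	blocking := map[string][]string{"dep": {"job1"}}
	missing := map[string]map[string]bool{"job1": {"dep": true}}
	runnable := []string{}
	release("dep", blocking, missing, &runnable)
	fmt.Println(runnable) // [job1]
}
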
@@ -5,12 +5,14 @@ package queue

import (
	"bytes"
	"errors"
	"testing"

	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/ids"
)

// Test that a new queue can be created and that it is initially empty.
func TestNew(t *testing.T) {
	parser := &TestParser{T: t}
	db := memdb.New()

@@ -29,6 +31,8 @@ func TestNew(t *testing.T) {
	}
}

// Test that a job can be added to a queue, and then the job can be removed from
// the queue after a shutdown.
func TestPushPop(t *testing.T) {
	parser := &TestParser{T: t}
	db := memdb.New()

@@ -46,7 +50,7 @@ func TestPushPop(t *testing.T) {
		IDF:                  func() ids.ID { return id },
		MissingDependenciesF: func() ids.Set { return ids.Set{} },
		ExecuteF:             func() {},
		ExecuteF:             func() error { return nil },
		BytesF:               func() []byte { return []byte{0} },
	}

@@ -94,6 +98,8 @@ func TestPushPop(t *testing.T) {
	}
}

// Test that executing a job will cause a dependent job to be placed on to the
// ready queue
func TestExecute(t *testing.T) {
	parser := &TestParser{T: t}
	db := memdb.New()

@@ -112,18 +118,18 @@ func TestExecute(t *testing.T) {
		IDF:                  func() ids.ID { return id0 },
		MissingDependenciesF: func() ids.Set { return ids.Set{} },
		ExecuteF:             func() { *executed0 = true },
		ExecuteF:             func() error { *executed0 = true; return nil },
		BytesF:               func() []byte { return []byte{0} },
	}

	id1 := ids.Empty.Prefix(0)
	id1 := ids.Empty.Prefix(1)
	executed1 := new(bool)
	job1 := &TestJob{
		T: t,

		IDF:                  func() ids.ID { return id1 },
		MissingDependenciesF: func() ids.Set { return ids.Set{id0.Key(): true} },
		ExecuteF:             func() { *executed1 = true },
		ExecuteF:             func() error { *executed1 = true; return nil },
		BytesF:               func() []byte { return []byte{1} },
	}

@@ -182,7 +188,8 @@ func TestExecute(t *testing.T) {
	}
}

func TestDuplicatedPush(t *testing.T) {
// Test that a job that is ready to be executed can only be added once
func TestDuplicatedExecutablePush(t *testing.T) {
	parser := &TestParser{T: t}
	db := memdb.New()

@@ -199,7 +206,7 @@ func TestDuplicatedPush(t *testing.T) {
		IDF:                  func() ids.ID { return id },
		MissingDependenciesF: func() ids.Set { return ids.Set{} },
		ExecuteF:             func() {},
		ExecuteF:             func() error { return nil },
		BytesF:               func() []byte { return []byte{0} },
	}

@@ -250,3 +257,116 @@ func TestDuplicatedPush(t *testing.T) {
		t.Fatalf("Shouldn't have a container ready to pop")
	}
}

// Test that a job that isn't ready to be executed can only be added once
func TestDuplicatedNotExecutablePush(t *testing.T) {
	parser := &TestParser{T: t}
	db := memdb.New()

	jobs, err := New(db)
	if err != nil {
		t.Fatal(err)
	}

	jobs.SetParser(parser)

	id0 := ids.Empty.Prefix(0)
	id1 := ids.Empty.Prefix(1)
	job1 := &TestJob{
		T: t,

		IDF: func() ids.ID { return id1 },
		MissingDependenciesF: func() ids.Set {
			s := ids.Set{}
			s.Add(id0)
			return s
		},
		ExecuteF: func() error { return nil },
		BytesF:   func() []byte { return []byte{1} },
	}
	job0 := &TestJob{
		T: t,

		IDF:                  func() ids.ID { return id0 },
		MissingDependenciesF: func() ids.Set { return ids.Set{} },
		ExecuteF: func() error {
			job1.MissingDependenciesF = func() ids.Set { return ids.Set{} }
			return nil
		},
		BytesF: func() []byte { return []byte{0} },
	}

	if err := jobs.Push(job1); err != nil {
		t.Fatal(err)
	}

	if err := jobs.Push(job1); err == nil {
		t.Fatalf("should have errored on pushing a duplicate job")
	}

	if err := jobs.Commit(); err != nil {
		t.Fatal(err)
	}

	jobs, err = New(db)
	if err != nil {
		t.Fatal(err)
	}

	jobs.SetParser(parser)

	if err := jobs.Push(job0); err != nil {
		t.Fatal(err)
	}

	if hasNext, err := jobs.HasNext(); err != nil {
		t.Fatal(err)
	} else if !hasNext {
		t.Fatalf("Should have a container ready to pop")
	}

	parser.ParseF = func(b []byte) (Job, error) {
		if bytes.Equal(b, []byte{0}) {
			return job0, nil
		}
		if bytes.Equal(b, []byte{1}) {
			return job1, nil
		}
		t.Fatalf("Unknown job")
		return nil, errors.New("Unknown job")
	}

	returnedBlockable, err := jobs.Pop()
	if err != nil {
		t.Fatal(err)
	}

	if returnedBlockable != job0 {
		t.Fatalf("Returned wrong job")
	}

	if err := jobs.Execute(job0); err != nil {
		t.Fatal(err)
	}

	if hasNext, err := jobs.HasNext(); err != nil {
		t.Fatal(err)
	} else if !hasNext {
		t.Fatalf("Should have a container ready to pop")
	}

	returnedBlockable, err = jobs.Pop()
	if err != nil {
		t.Fatal(err)
	}

	if returnedBlockable != job1 {
		t.Fatalf("Returned wrong job")
	}

	if hasNext, err := jobs.HasNext(); err != nil {
		t.Fatal(err)
	} else if hasNext {
		t.Fatalf("Shouldn't have a container ready to pop")
	}
}

@@ -95,25 +95,31 @@ func (ps *prefixedState) Job(db database.Database, id ids.ID) (Job, error) {
	return ps.state.Job(db, p.Bytes)
}

func (ps *prefixedState) SetBlocking(db database.Database, id ids.ID, blocking ids.Set) error {
func (ps *prefixedState) AddBlocking(db database.Database, id ids.ID, blocking ids.ID) error {
	p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)}

	p.PackByte(blockingID)
	p.PackFixedBytes(id.Bytes())

	return ps.state.SetIDs(db, p.Bytes, blocking)
	return ps.state.AddID(db, p.Bytes, blocking)
}

func (ps *prefixedState) DeleteBlocking(db database.Database, id ids.ID) error {
func (ps *prefixedState) DeleteBlocking(db database.Database, id ids.ID, blocking []ids.ID) error {
	p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)}

	p.PackByte(blockingID)
	p.PackFixedBytes(id.Bytes())

	return db.Delete(p.Bytes)
	for _, blocked := range blocking {
		if err := ps.state.RemoveID(db, p.Bytes, blocked); err != nil {
			return err
		}
	}

	return nil
}

func (ps *prefixedState) Blocking(db database.Database, id ids.ID) (ids.Set, error) {
func (ps *prefixedState) Blocking(db database.Database, id ids.ID) ([]ids.ID, error) {
	p := wrappers.Packer{Bytes: make([]byte, 1+hashing.HashLen)}

	p.PackByte(blockingID)

@@ -4,12 +4,18 @@
package queue

import (
	"errors"

	"github.com/ava-labs/gecko/database"
	"github.com/ava-labs/gecko/database/prefixdb"
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/utils/hashing"
	"github.com/ava-labs/gecko/utils/wrappers"
)

var (
	errZeroID = errors.New("zero id")
)

type state struct{ jobs *Jobs }

func (s *state) SetInt(db database.Database, key []byte, size uint32) error {

@@ -42,30 +48,37 @@ func (s *state) Job(db database.Database, key []byte) (Job, error) {
	return s.jobs.parser.Parse(value)
}

func (s *state) SetIDs(db database.Database, key []byte, blocking ids.Set) error {
	p := wrappers.Packer{Bytes: make([]byte, wrappers.IntLen+hashing.HashLen*blocking.Len())}
// IDs returns a slice of IDs from storage
func (s *state) IDs(db database.Database, prefix []byte) ([]ids.ID, error) {
	idSlice := []ids.ID(nil)
	iter := prefixdb.NewNested(prefix, db).NewIterator()
	defer iter.Release()

	p.PackInt(uint32(blocking.Len()))
	for _, id := range blocking.List() {
		p.PackFixedBytes(id.Bytes())
	for iter.Next() {
		keyID, err := ids.ToID(iter.Key())
		if err != nil {
			return nil, err
		}

		idSlice = append(idSlice, keyID)
	}

	return db.Put(key, p.Bytes)
	return idSlice, nil
}

func (s *state) IDs(db database.Database, key []byte) (ids.Set, error) {
	bytes, err := db.Get(key)
	if err != nil {
		return nil, err
// AddID saves an ID to the prefixed database
func (s *state) AddID(db database.Database, prefix []byte, key ids.ID) error {
	if key.IsZero() {
		return errZeroID
	}

	p := wrappers.Packer{Bytes: bytes}

	blocking := ids.Set{}
	for i := p.UnpackInt(); i > 0 && !p.Errored(); i-- {
		id, _ := ids.ToID(p.UnpackFixedBytes(hashing.HashLen))
		blocking.Add(id)
	}

	return blocking, p.Err
	pdb := prefixdb.NewNested(prefix, db)
	return pdb.Put(key.Bytes(), nil)
}

// RemoveID removes an ID from the prefixed database
func (s *state) RemoveID(db database.Database, prefix []byte, key ids.ID) error {
	if key.IsZero() {
		return errZeroID
	}
	pdb := prefixdb.NewNested(prefix, db)
	return pdb.Delete(key.Bytes())
}

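The storage change above replaces one packed blob per dependency set with one database key per ID under a nested prefix, so adding or removing a single blocked job no longer requires rewriting the whole serialized set. A rough sketch of the same layout over a plain map-backed store (hypothetical store type, not the real prefixdb):

package main

import "fmt"

// kv is a stand-in for the nested prefixdb: each (prefix, id) pair
// becomes its own key, so membership updates touch one entry.
type kv map[string]map[string]bool

func (s kv) addID(prefix, id string) {
	if s[prefix] == nil {
		s[prefix] = map[string]bool{}
	}
	s[prefix][id] = true // one key written; the set is never re-serialized
}

func (s kv) removeID(prefix, id string) { delete(s[prefix], id) }

func (s kv) ids(prefix string) []string {
	out := []string(nil)
	for id := range s[prefix] {
		out = append(out, id)
	}
	return out
}

func main() {
	s := kv{}
	s.addID("blocking/dep", "job1")
	s.addID("blocking/dep", "job2")
	s.removeID("blocking/dep", "job1")
	fmt.Println(s.ids("blocking/dep")) // [job2]
}
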
@@ -4,6 +4,7 @@
package queue

import (
	"errors"
	"testing"

	"github.com/ava-labs/gecko/ids"

@@ -20,7 +21,7 @@ type TestJob struct {
	IDF                  func() ids.ID
	MissingDependenciesF func() ids.Set
	ExecuteF             func()
	ExecuteF             func() error
	BytesF               func() []byte
}

@@ -55,12 +56,13 @@ func (j *TestJob) MissingDependencies() ids.Set {
}

// Execute ...
func (j *TestJob) Execute() {
func (j *TestJob) Execute() error {
	if j.ExecuteF != nil {
		j.ExecuteF()
		return j.ExecuteF()
	} else if j.CantExecute && j.T != nil {
		j.T.Fatalf("Unexpectedly called Execute")
	}
	return errors.New("Unexpectedly called Execute")
}

// Bytes ...

@@ -50,9 +50,17 @@ type FetchSender interface {
	// to this validator
	Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID)

	// GetAncestors requests that the validator with ID [validatorID] send container [containerID] and its
	// ancestors. The maximum number of ancestors to send in response is defined in snow/engine/common/bootstrapper.go
	GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID)

	// Tell the specified validator that the container whose ID is <containerID>
	// has body <container>
	Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)

	// Give the specified validator several containers at once
	// Should be in response to a GetAncestors message with request ID [requestID] from the validator
	MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte)
}

// QuerySender defines how a consensus engine sends query messages to other

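GetAncestors and MultiPut pair up: the responder walks back from the requested container in BFS order and ships up to MaxContainersPerMultiPut ancestors in a single message. A toy sketch of assembling such a response, using a hypothetical parent map in place of real byte-encoded containers (and assuming an acyclic ancestry, so no deduplication is needed):

package main

import "fmt"

// ancestors gathers a container and its ancestors in BFS order — the
// shape a MultiPut response is expected to take — capped the way
// MaxContainersPerMultiPut caps the real message.
func ancestors(parents map[string][]string, root string, limit int) []string {
	out := []string{root} // the requested container comes first
	queue := []string{root}
	for len(queue) > 0 && len(out) < limit {
		next := queue[0]
		queue = queue[1:]
		for _, p := range parents[next] {
			if len(out) == limit {
				break
			}
			out = append(out, p)
			queue = append(queue, p)
		}
	}
	return out
}

func main() {
	parents := map[string][]string{"c": {"b"}, "b": {"a"}}
	fmt.Println(ancestors(parents, "c", 2000)) // [c b a]
}
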
@@ -4,6 +4,7 @@
package common

import (
	"errors"
	"testing"

	"github.com/ava-labs/gecko/ids"

@@ -19,7 +20,7 @@ type BootstrapableTest struct {
	CurrentAcceptedFrontierF func() (acceptedContainerIDs ids.Set)
	FilterAcceptedF          func(containerIDs ids.Set) (acceptedContainerIDs ids.Set)
	ForceAcceptedF           func(acceptedContainerIDs ids.Set)
	ForceAcceptedF           func(acceptedContainerIDs ids.Set) error
}

// Default sets the default on call handling

@@ -52,10 +53,14 @@ func (b *BootstrapableTest) FilterAccepted(containerIDs ids.Set) ids.Set {
}

// ForceAccepted implements the Bootstrapable interface
func (b *BootstrapableTest) ForceAccepted(containerIDs ids.Set) {
func (b *BootstrapableTest) ForceAccepted(containerIDs ids.Set) error {
	if b.ForceAcceptedF != nil {
		b.ForceAcceptedF(containerIDs)
	} else if b.CantForceAccepted && b.T != nil {
		b.T.Fatalf("Unexpectedly called ForceAccepted")
		return b.ForceAcceptedF(containerIDs)
	} else if b.CantForceAccepted {
		if b.T != nil {
			b.T.Fatalf("Unexpectedly called ForceAccepted")
		}
		return errors.New("Unexpectedly called ForceAccepted")
	}
	return nil
}

@@ -4,6 +4,7 @@
package common

import (
	"errors"
	"testing"

	"github.com/ava-labs/gecko/ids"

@@ -31,24 +32,30 @@ type EngineTest struct {
	CantAccepted,

	CantGet,
	CantGetAncestors,
	CantGetFailed,
	CantGetAncestorsFailed,
	CantPut,
	CantMultiPut,

	CantPushQuery,
	CantPullQuery,
	CantQueryFailed,
	CantChits bool

	StartupF, GossipF, ShutdownF func()
	ContextF                     func() *snow.Context
	NotifyF                      func(Message)
	GetF, PullQueryF             func(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
	GetFailedF                   func(validatorID ids.ShortID, requestID uint32)
	PutF, PushQueryF             func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)
	GetAcceptedFrontierF, GetAcceptedFrontierFailedF, GetAcceptedFailedF, QueryFailedF func(validatorID ids.ShortID, requestID uint32)
	AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
	ContextF                        func() *snow.Context
	StartupF, GossipF, ShutdownF    func() error
	NotifyF                         func(Message) error
	GetF, GetAncestorsF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
	PutF, PushQueryF                func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error
	MultiPutF                       func(validatorID ids.ShortID, requestID uint32, containers [][]byte) error
	AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error
	GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF,
	QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(validatorID ids.ShortID, requestID uint32) error
}

var _ Engine = &EngineTest{}

// Default ...
func (e *EngineTest) Default(cant bool) {
	e.CantStartup = cant

@@ -68,8 +75,11 @@ func (e *EngineTest) Default(cant bool) {
	e.CantAccepted = cant

	e.CantGet = cant
	e.CantGetAncestors = cant
	e.CantGetAncestorsFailed = cant
	e.CantGetFailed = cant
	e.CantPut = cant
	e.CantMultiPut = cant

	e.CantPushQuery = cant
	e.CantPullQuery = cant

@@ -77,33 +87,6 @@ func (e *EngineTest) Default(cant bool) {
	e.CantChits = cant
}

// Startup ...
func (e *EngineTest) Startup() {
	if e.StartupF != nil {
		e.StartupF()
	} else if e.CantStartup && e.T != nil {
		e.T.Fatalf("Unexpectedly called Startup")
	}
}

// Gossip ...
func (e *EngineTest) Gossip() {
	if e.GossipF != nil {
		e.GossipF()
	} else if e.CantGossip && e.T != nil {
		e.T.Fatalf("Unexpectedly called Gossip")
	}
}

// Shutdown ...
func (e *EngineTest) Shutdown() {
	if e.ShutdownF != nil {
		e.ShutdownF()
	} else if e.CantShutdown && e.T != nil {
		e.T.Fatalf("Unexpectedly called Shutdown")
	}
}

// Context ...
func (e *EngineTest) Context() *snow.Context {
	if e.ContextF != nil {

@@ -115,128 +98,259 @@ func (e *EngineTest) Context() *snow.Context {
	return nil
}

// Notify ...
func (e *EngineTest) Notify(msg Message) {
	if e.NotifyF != nil {
		e.NotifyF(msg)
	} else if e.CantNotify && e.T != nil {
		e.T.Fatalf("Unexpectedly called Notify")
// Startup ...
func (e *EngineTest) Startup() error {
	if e.StartupF != nil {
		return e.StartupF()
	} else if e.CantStartup {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Startup")
		}
		return errors.New("Unexpectedly called Startup")
	}
	return nil
}

// Gossip ...
func (e *EngineTest) Gossip() error {
	if e.GossipF != nil {
		return e.GossipF()
	} else if e.CantGossip {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Gossip")
		}
		return errors.New("Unexpectedly called Gossip")
	}
	return nil
}

// Shutdown ...
func (e *EngineTest) Shutdown() error {
	if e.ShutdownF != nil {
		return e.ShutdownF()
	} else if e.CantShutdown {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Shutdown")
		}
		return errors.New("Unexpectedly called Shutdown")
	}
	return nil
}

// Notify ...
func (e *EngineTest) Notify(msg Message) error {
	if e.NotifyF != nil {
		return e.NotifyF(msg)
	} else if e.CantNotify {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Notify")
		}
		return errors.New("Unexpectedly called Notify")
	}
	return nil
}

// GetAcceptedFrontier ...
func (e *EngineTest) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) {
func (e *EngineTest) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) error {
	if e.GetAcceptedFrontierF != nil {
		e.GetAcceptedFrontierF(validatorID, requestID)
	} else if e.CantGetAcceptedFrontier && e.T != nil {
		e.T.Fatalf("Unexpectedly called GetAcceptedFrontier")
		return e.GetAcceptedFrontierF(validatorID, requestID)
	} else if e.CantGetAcceptedFrontier {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called GetAcceptedFrontier")
		}
		return errors.New("Unexpectedly called GetAcceptedFrontier")
	}
	return nil
}

// GetAcceptedFrontierFailed ...
func (e *EngineTest) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) {
func (e *EngineTest) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) error {
	if e.GetAcceptedFrontierFailedF != nil {
		e.GetAcceptedFrontierFailedF(validatorID, requestID)
	} else if e.CantGetAcceptedFrontierFailed && e.T != nil {
		e.T.Fatalf("Unexpectedly called GetAcceptedFrontierFailed")
		return e.GetAcceptedFrontierFailedF(validatorID, requestID)
	} else if e.CantGetAcceptedFrontierFailed {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called GetAcceptedFrontierFailed")
		}
		return errors.New("Unexpectedly called GetAcceptedFrontierFailed")
	}
	return nil
}

// AcceptedFrontier ...
func (e *EngineTest) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
func (e *EngineTest) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
	if e.AcceptedFrontierF != nil {
		e.AcceptedFrontierF(validatorID, requestID, containerIDs)
	} else if e.CantAcceptedFrontier && e.T != nil {
		e.T.Fatalf("Unexpectedly called AcceptedFrontierF")
		return e.AcceptedFrontierF(validatorID, requestID, containerIDs)
	} else if e.CantAcceptedFrontier {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called AcceptedFrontierF")
		}
		return errors.New("Unexpectedly called AcceptedFrontierF")
	}
	return nil
}

// GetAccepted ...
func (e *EngineTest) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
func (e *EngineTest) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
	if e.GetAcceptedF != nil {
		e.GetAcceptedF(validatorID, requestID, containerIDs)
	} else if e.CantGetAccepted && e.T != nil {
		e.T.Fatalf("Unexpectedly called GetAccepted")
		return e.GetAcceptedF(validatorID, requestID, containerIDs)
	} else if e.CantGetAccepted {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called GetAccepted")
		}
		return errors.New("Unexpectedly called GetAccepted")
	}
	return nil
}

// GetAcceptedFailed ...
func (e *EngineTest) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
func (e *EngineTest) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) error {
	if e.GetAcceptedFailedF != nil {
		e.GetAcceptedFailedF(validatorID, requestID)
	} else if e.CantGetAcceptedFailed && e.T != nil {
		e.T.Fatalf("Unexpectedly called GetAcceptedFailed")
		return e.GetAcceptedFailedF(validatorID, requestID)
	} else if e.CantGetAcceptedFailed {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called GetAcceptedFailed")
		}
		return errors.New("Unexpectedly called GetAcceptedFailed")
	}
	return nil
}

// Accepted ...
func (e *EngineTest) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
func (e *EngineTest) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
	if e.AcceptedF != nil {
		e.AcceptedF(validatorID, requestID, containerIDs)
	} else if e.CantAccepted && e.T != nil {
		e.T.Fatalf("Unexpectedly called Accepted")
		return e.AcceptedF(validatorID, requestID, containerIDs)
	} else if e.CantAccepted {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Accepted")
		}
		return errors.New("Unexpectedly called Accepted")
	}
	return nil
}

// Get ...
func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error {
	if e.GetF != nil {
		e.GetF(validatorID, requestID, containerID)
	} else if e.CantGet && e.T != nil {
		e.T.Fatalf("Unexpectedly called Get")
		return e.GetF(validatorID, requestID, containerID)
	} else if e.CantGet {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Get")
		}
		return errors.New("Unexpectedly called Get")
	}
	return nil
}

// GetAncestors ...
func (e *EngineTest) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error {
	if e.GetAncestorsF != nil {
		return e.GetAncestorsF(validatorID, requestID, containerID)
	} else if e.CantGetAncestors && e.T != nil {
		e.T.Fatalf("Unexpectedly called GetAncestors")
	}
	return nil
}

// GetFailed ...
func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) {
func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error {
	if e.GetFailedF != nil {
		e.GetFailedF(validatorID, requestID)
	} else if e.CantGetFailed && e.T != nil {
		e.T.Fatalf("Unexpectedly called GetFailed")
		return e.GetFailedF(validatorID, requestID)
	} else if e.CantGetFailed {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called GetFailed")
		}
		return errors.New("Unexpectedly called GetFailed")
	}
	return nil
}

// GetAncestorsFailed ...
func (e *EngineTest) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) error {
	if e.GetAncestorsFailedF != nil {
		return e.GetAncestorsFailedF(validatorID, requestID)
	} else if e.CantGetAncestorsFailed {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called GetAncestorsFailed")
		}
		return errors.New("Unexpectedly called GetAncestorsFailed")
	}
	return nil
}

// Put ...
func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) {
func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error {
	if e.PutF != nil {
		e.PutF(validatorID, requestID, containerID, container)
	} else if e.CantPut && e.T != nil {
		e.T.Fatalf("Unexpectedly called Put")
		return e.PutF(validatorID, requestID, containerID, container)
	} else if e.CantPut {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called Put")
		}
		return errors.New("Unexpectedly called Put")
	}
	return nil
}

// MultiPut ...
func (e *EngineTest) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) error {
	if e.MultiPutF != nil {
		return e.MultiPutF(validatorID, requestID, containers)
	} else if e.CantMultiPut {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called MultiPut")
		}
		return errors.New("Unexpectedly called MultiPut")
	}
	return nil
}

// PushQuery ...
func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) {
func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error {
	if e.PushQueryF != nil {
		e.PushQueryF(validatorID, requestID, containerID, container)
	} else if e.CantPushQuery && e.T != nil {
		e.T.Fatalf("Unexpectedly called PushQuery")
		return e.PushQueryF(validatorID, requestID, containerID, container)
	} else if e.CantPushQuery {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called PushQuery")
		}
		return errors.New("Unexpectedly called PushQuery")
	}
	return nil
}

// PullQuery ...
func (e *EngineTest) PullQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
func (e *EngineTest) PullQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error {
	if e.PullQueryF != nil {
		e.PullQueryF(validatorID, requestID, containerID)
	} else if e.CantPullQuery && e.T != nil {
		e.T.Fatalf("Unexpectedly called PullQuery")
		return e.PullQueryF(validatorID, requestID, containerID)
	} else if e.CantPullQuery {
		if e.T != nil {
			e.T.Fatalf("Unexpectedly called PullQuery")
		}
		return errors.New("Unexpectedly called PullQuery")
	}
	return nil
}

// QueryFailed ...
func (e *EngineTest) QueryFailed(validatorID ids.ShortID, requestID uint32) {
|
||||
func (e *EngineTest) QueryFailed(validatorID ids.ShortID, requestID uint32) error {
|
||||
if e.QueryFailedF != nil {
|
||||
e.QueryFailedF(validatorID, requestID)
|
||||
} else if e.CantQueryFailed && e.T != nil {
|
||||
e.T.Fatalf("Unexpectedly called QueryFailed")
|
||||
return e.QueryFailedF(validatorID, requestID)
|
||||
} else if e.CantQueryFailed {
|
||||
if e.T != nil {
|
||||
e.T.Fatalf("Unexpectedly called QueryFailed")
|
||||
}
|
||||
return errors.New("Unexpectedly called QueryFailed")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Chits ...
|
||||
func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
|
||||
func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error {
|
||||
if e.ChitsF != nil {
|
||||
e.ChitsF(validatorID, requestID, containerIDs)
|
||||
} else if e.CantChits && e.T != nil {
|
||||
e.T.Fatalf("Unexpectedly called Chits")
|
||||
return e.ChitsF(validatorID, requestID, containerIDs)
|
||||
} else if e.CantChits {
|
||||
if e.T != nil {
|
||||
e.T.Fatalf("Unexpectedly called Chits")
|
||||
}
|
||||
return errors.New("Unexpectedly called Chits")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
|
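Every handler above follows the same mock contract: run the stubbed *F callback when one is set, otherwise fail the test and return an error when the matching Cant* flag is up. A minimal usage sketch (the test wiring here is assumed, not part of this diff):

	engine := &EngineTest{T: t}
	engine.Default(true) // set every Cant* flag; unexpected calls now fail the test
	engine.ChitsF = func(validatorID ids.ShortID, requestID uint32, votes ids.Set) error {
		return nil // the one call this test expects
	}
	if err := engine.Chits(peerID, 1, votes); err != nil {
		t.Fatal(err)
	}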
@ -15,7 +15,7 @@ type SenderTest struct {
	CantGetAcceptedFrontier, CantAcceptedFrontier,
	CantGetAccepted, CantAccepted,
-	CantGet, CantPut,
+	CantGet, CantGetAncestors, CantPut, CantMultiPut,
	CantPullQuery, CantPushQuery, CantChits,
	CantGossip bool

@ -24,7 +24,9 @@ type SenderTest struct {
	GetAcceptedF func(ids.ShortSet, uint32, ids.Set)
	AcceptedF    func(ids.ShortID, uint32, ids.Set)
	GetF         func(ids.ShortID, uint32, ids.ID)
+	GetAncestorsF func(ids.ShortID, uint32, ids.ID)
	PutF         func(ids.ShortID, uint32, ids.ID, []byte)
+	MultiPutF    func(ids.ShortID, uint32, [][]byte)
	PushQueryF   func(ids.ShortSet, uint32, ids.ID, []byte)
	PullQueryF   func(ids.ShortSet, uint32, ids.ID)
	ChitsF       func(ids.ShortID, uint32, ids.Set)

@ -38,7 +40,9 @@ func (s *SenderTest) Default(cant bool) {
	s.CantGetAccepted = cant
	s.CantAccepted = cant
	s.CantGet = cant
+	s.CantGetAncestors = cant
	s.CantPut = cant
+	s.CantMultiPut = cant
	s.CantPullQuery = cant
	s.CantPushQuery = cant
	s.CantChits = cant

@ -100,6 +104,17 @@ func (s *SenderTest) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
	}
}

+// GetAncestors calls GetAncestorsF if it was initialized. If it
+// wasn't initialized and this function shouldn't be called and testing was
+// initialized, then testing will fail.
+func (s *SenderTest) GetAncestors(validatorID ids.ShortID, requestID uint32, vtxID ids.ID) {
+	if s.GetAncestorsF != nil {
+		s.GetAncestorsF(validatorID, requestID, vtxID)
+	} else if s.CantGetAncestors && s.T != nil {
+		s.T.Fatalf("Unexpectedly called CantGetAncestors")
+	}
+}
+
// Put calls PutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.

@ -111,6 +126,17 @@ func (s *SenderTest) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtx []
	}
}

+// MultiPut calls MultiPutF if it was initialized. If it wasn't initialized and this
+// function shouldn't be called and testing was initialized, then testing will
+// fail.
+func (s *SenderTest) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte) {
+	if s.MultiPutF != nil {
+		s.MultiPutF(vdr, requestID, vtxs)
+	} else if s.CantMultiPut && s.T != nil {
+		s.T.Fatalf("Unexpectedly called MultiPut")
+	}
+}
+
// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
// and this function shouldn't be called and testing was initialized, then
// testing will fail.
@ -19,19 +19,22 @@ var (
type VMTest struct {
	T *testing.T

-	CantInitialize, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool
+	CantInitialize, CantBootstrapping, CantBootstrapped, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool

-	InitializeF           func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error
-	ShutdownF             func()
-	CreateHandlersF       func() map[string]*HTTPHandler
-	CreateStaticHandlersF func() map[string]*HTTPHandler
+	InitializeF                             func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error
+	BootstrappingF, BootstrappedF, ShutdownF func() error
+	CreateHandlersF                         func() map[string]*HTTPHandler
+	CreateStaticHandlersF                   func() map[string]*HTTPHandler
}

// Default ...
func (vm *VMTest) Default(cant bool) {
	vm.CantInitialize = cant
+	vm.CantBootstrapping = cant
+	vm.CantBootstrapped = cant
	vm.CantShutdown = cant
	vm.CantCreateHandlers = cant
	vm.CantCreateStaticHandlers = cant
}

// Initialize ...

@ -45,13 +48,43 @@ func (vm *VMTest) Initialize(ctx *snow.Context, db database.Database, initState
	return errInitialize
}

-// Shutdown ...
-func (vm *VMTest) Shutdown() {
-	if vm.ShutdownF != nil {
-		vm.ShutdownF()
-	} else if vm.CantShutdown && vm.T != nil {
-		vm.T.Fatalf("Unexpectedly called Shutdown")
+// Bootstrapping ...
+func (vm *VMTest) Bootstrapping() error {
+	if vm.BootstrappingF != nil {
+		return vm.BootstrappingF()
+	} else if vm.CantBootstrapping {
+		if vm.T != nil {
+			vm.T.Fatalf("Unexpectedly called Bootstrapping")
+		}
+		return errors.New("Unexpectedly called Bootstrapping")
+	}
+	return nil
+}
+
+// Bootstrapped ...
+func (vm *VMTest) Bootstrapped() error {
+	if vm.BootstrappedF != nil {
+		return vm.BootstrappedF()
+	} else if vm.CantBootstrapped {
+		if vm.T != nil {
+			vm.T.Fatalf("Unexpectedly called Bootstrapped")
+		}
+		return errors.New("Unexpectedly called Bootstrapped")
+	}
+	return nil
+}
+
+// Shutdown ...
+func (vm *VMTest) Shutdown() error {
+	if vm.ShutdownF != nil {
+		return vm.ShutdownF()
+	} else if vm.CantShutdown {
+		if vm.T != nil {
+			vm.T.Fatalf("Unexpectedly called Shutdown")
+		}
+		return errors.New("Unexpectedly called Shutdown")
	}
+	return nil
}

// CreateHandlers ...
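Tests that drive the engine through bootstrapping now have to account for the two new lifecycle calls; the bootstrapper tests later in this diff do exactly this:

	vm := &VMTest{T: t}
	vm.Default(true)             // any unexpected call fails the test
	vm.CantBootstrapping = false // allow the engine to call VM.Bootstrapping()
	vm.CantBootstrapped = false  // ...and VM.Bootstrapped() once it finishes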
@ -12,8 +12,7 @@ import (
type VM interface {
	// Initialize this VM.
	// [ctx]: Metadata about this VM.
-	// [ctx.networkID]: The ID of the network this VM's chain is running
-	// on.
+	// [ctx.networkID]: The ID of the network this VM's chain is running on.
	// [ctx.chainID]: The unique ID of the chain this VM is running on.
	// [ctx.Log]: Used to log messages
	// [ctx.NodeID]: The unique staker ID of this node.

@ -37,8 +36,14 @@ type VM interface {
		fxs []*Fx,
	) error

+	// Bootstrapping is called when the node is starting to bootstrap this chain.
+	Bootstrapping() error
+
+	// Bootstrapped is called when the node is done bootstrapping this chain.
+	Bootstrapped() error
+
	// Shutdown is called when the node is shutting down.
-	Shutdown()
+	Shutdown() error

	// Creates the HTTP handlers for custom chain network calls.
	//
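A minimal sketch (the type name is invented for illustration) of how a VM could satisfy the new lifecycle hooks, e.g. to defer expensive work until bootstrapping is over:

	type toyVM struct {
		bootstrapping bool
		// ...plus the rest of the VM interface
	}

	func (vm *toyVM) Bootstrapping() error { vm.bootstrapping = true; return nil }
	func (vm *toyVM) Bootstrapped() error  { vm.bootstrapping = false; return nil }
	func (vm *toyVM) Shutdown() error      { return nil } // can now report close errors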
@ -4,15 +4,20 @@
package snowman

import (
+	"errors"
+	"fmt"
+
	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/snowman"
	"github.com/ava-labs/gecko/snow/engine/common/queue"
+	"github.com/ava-labs/gecko/utils/logging"
)

type parser struct {
+	log                     logging.Logger
	numAccepted, numDropped prometheus.Counter
	vm                      ChainVM
}

@ -23,6 +28,7 @@ func (p *parser) Parse(blkBytes []byte) (queue.Job, error) {
		return nil, err
	}
	return &blockJob{
+		log:         p.log,
		numAccepted: p.numAccepted,
		numDropped:  p.numDropped,
		blk:         blk,

@ -30,6 +36,7 @@ func (p *parser) Parse(blkBytes []byte) (queue.Job, error) {
}

type blockJob struct {
+	log                     logging.Logger
	numAccepted, numDropped prometheus.Counter
	blk                     snowman.Block
}

@ -42,18 +49,29 @@ func (b *blockJob) MissingDependencies() ids.Set {
	}
	return missing
}
-func (b *blockJob) Execute() {
+func (b *blockJob) Execute() error {
	if b.MissingDependencies().Len() != 0 {
		b.numDropped.Inc()
-		return
+		return errors.New("attempting to accept a block with missing dependencies")
	}
-	switch b.blk.Status() {
+	status := b.blk.Status()
+	switch status {
	case choices.Unknown, choices.Rejected:
		b.numDropped.Inc()
+		return fmt.Errorf("attempting to execute block with status %s", status)
	case choices.Processing:
-		b.blk.Verify()
-		b.blk.Accept()
+		if err := b.blk.Verify(); err != nil {
+			b.log.Debug("block %s failed verification during bootstrapping due to %s",
+				b.blk.ID(), err)
+		}
+
		b.numAccepted.Inc()
+		if err := b.blk.Accept(); err != nil {
+			b.log.Debug("block %s failed to accept during bootstrapping due to %s",
+				b.blk.ID(), err)
+			return fmt.Errorf("failed to accept block in bootstrapping: %w", err)
+		}
	}
+	return nil
}
func (b *blockJob) Bytes() []byte { return b.blk.Bytes() }
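Note the asymmetry Execute introduces: a failed Verify during bootstrapping is only logged and the block is still accepted, while a failed Accept aborts with an error. A caller draining the queue therefore stops at the first failed accept; a sketch using the queue.Jobs calls that appear later in this diff:

	for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() {
		if err := jobs.Execute(job); err != nil {
			return err // the first failed accept aborts bootstrapping
		}
		if err := jobs.Commit(); err != nil { // persist progress between jobs
			return err
		}
	}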
@ -4,6 +4,8 @@
package snowman

import (
+	"fmt"
+
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/choices"
	"github.com/ava-labs/gecko/snow/consensus/snowman"

@ -20,9 +22,6 @@ type BootstrapConfig struct {
	// Blocked tracks operations that are blocked on blocks
	Blocked *queue.Jobs

-	// blocks that have outstanding get requests
-	blkReqs common.Requests
-
	VM ChainVM

	Bootstrapped func()

@ -33,16 +32,28 @@ type bootstrapper struct {
	metrics
	common.Bootstrapper

-	pending    ids.Set
-	finished   bool
-	onFinished func()
+	// true if all of the vertices in the original accepted frontier have been processed
+	processedStartingAcceptedFrontier bool
+
+	// Number of blocks fetched
+	numFetched uint32
+
+	// tracks which validators were asked for which containers in which requests
+	outstandingRequests common.Requests
+
+	// true if bootstrapping is done
+	finished bool
+
+	// Called when bootstrapping is done
+	onFinished func() error
}

// Initialize this engine.
-func (b *bootstrapper) Initialize(config BootstrapConfig) {
+func (b *bootstrapper) Initialize(config BootstrapConfig) error {
	b.BootstrapConfig = config

	b.Blocked.SetParser(&parser{
+		log:         config.Context.Log,
		numAccepted: b.numBootstrapped,
		numDropped:  b.numDropped,
		vm:          b.VM,

@ -50,16 +61,17 @@ func (b *bootstrapper) Initialize(config BootstrapConfig) {

	config.Bootstrapable = b
	b.Bootstrapper.Initialize(config.Config)
+	return nil
}

-// CurrentAcceptedFrontier ...
+// CurrentAcceptedFrontier returns the last accepted block
func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set {
	acceptedFrontier := ids.Set{}
	acceptedFrontier.Add(b.VM.LastAccepted())
	return acceptedFrontier
}

-// FilterAccepted ...
+// FilterAccepted returns the blocks in [containerIDs] that we have accepted
func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
	acceptedIDs := ids.Set{}
	for _, blkID := range containerIDs.List() {

@ -71,108 +83,123 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
}

// ForceAccepted ...
-func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) {
+func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
+	if err := b.VM.Bootstrapping(); err != nil {
+		return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
+			err)
+	}
+
	for _, blkID := range acceptedContainerIDs.List() {
-		b.fetch(blkID)
+		if blk, err := b.VM.GetBlock(blkID); err == nil {
+			if err := b.process(blk); err != nil {
+				return err
+			}
+		} else if err := b.fetch(blkID); err != nil {
+			return err
+		}
	}

-	if numPending := b.pending.Len(); numPending == 0 {
-		// TODO: This typically indicates bootstrapping has failed, so this
-		// should be handled appropriately
-		b.finish()
+	b.processedStartingAcceptedFrontier = true
+	if numPending := b.outstandingRequests.Len(); numPending == 0 {
+		return b.finish()
	}
+	return nil
}

-// Put ...
-func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
-	b.BootstrapConfig.Context.Log.Verbo("Put called for blkID %s", blkID)
-
-	blk, err := b.VM.ParseBlock(blkBytes)
-	if err != nil {
-		b.BootstrapConfig.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
-			err,
-			formatting.DumpBytes{Bytes: blkBytes})
-
-		b.GetFailed(vdr, requestID)
-		return
-	}
-
-	if !b.pending.Contains(blk.ID()) {
-		b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested block:\n%s",
-			vdr,
-			formatting.DumpBytes{Bytes: blkBytes})
-
-		b.GetFailed(vdr, requestID)
-		return
-	}
-
-	b.addBlock(blk)
-}
-
-// GetFailed ...
-func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) {
-	blkID, ok := b.blkReqs.Remove(vdr, requestID)
-	if !ok {
-		b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
-			vdr)
-		return
-	}
-	b.sendRequest(blkID)
-}
-
-func (b *bootstrapper) fetch(blkID ids.ID) {
-	if b.pending.Contains(blkID) {
-		return
-	}
-
-	blk, err := b.VM.GetBlock(blkID)
-	if err != nil {
-		b.sendRequest(blkID)
-		return
-	}
-	b.storeBlock(blk)
-}
-
-func (b *bootstrapper) sendRequest(blkID ids.ID) {
-	validators := b.BootstrapConfig.Validators.Sample(1)
+// Get block [blkID] and its ancestors from a validator
+func (b *bootstrapper) fetch(blkID ids.ID) error {
+	// Make sure we haven't already requested this block
+	if b.outstandingRequests.Contains(blkID) {
+		return nil
+	}
+
+	// Make sure we don't already have this block
+	if _, err := b.VM.GetBlock(blkID); err == nil {
+		return nil
+	}
+
+	validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
	if len(validators) == 0 {
-		b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", blkID)
-		return
+		return fmt.Errorf("Dropping request for %s as there are no validators", blkID)
	}
	validatorID := validators[0].ID()
	b.RequestID++

-	b.blkReqs.RemoveAny(blkID)
-	b.blkReqs.Add(validatorID, b.RequestID, blkID)
-
-	b.pending.Add(blkID)
-	b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, blkID)
-
-	b.numPendingRequests.Set(float64(b.pending.Len()))
+	b.outstandingRequests.Add(validatorID, b.RequestID, blkID)
+	b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, blkID) // request block and ancestors
+	return nil
}

-func (b *bootstrapper) addBlock(blk snowman.Block) {
-	b.storeBlock(blk)
-
-	if numPending := b.pending.Len(); numPending == 0 {
-		b.finish()
-	}
-}
-
+// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
+// with request ID [requestID]
+func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, blks [][]byte) error {
+	if lenBlks := len(blks); lenBlks > common.MaxContainersPerMultiPut {
+		b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains more than maximum number of blocks", vdr, requestID)
+		return b.GetAncestorsFailed(vdr, requestID)
+	} else if lenBlks == 0 {
+		b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains no blocks", vdr, requestID)
+		return b.GetAncestorsFailed(vdr, requestID)
+	}
+
+	// Make sure this is in response to a request we made
+	wantedBlkID, ok := b.outstandingRequests.Remove(vdr, requestID)
+	if !ok { // this message isn't in response to a request we made
+		b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
+		return nil
+	}
+
+	wantedBlk, err := b.VM.ParseBlock(blks[0]) // the block we requested
+	if err != nil {
+		b.BootstrapConfig.Context.Log.Debug("Failed to parse requested block %s: %w", wantedBlkID, err)
+		return b.fetch(wantedBlkID)
+	} else if actualID := wantedBlk.ID(); !actualID.Equals(wantedBlkID) {
+		b.BootstrapConfig.Context.Log.Debug("expected the first block to be the requested block, %s, but is %s", wantedBlk, actualID)
+		return b.fetch(wantedBlkID)
+	}
+
+	for _, blkBytes := range blks {
+		if _, err := b.VM.ParseBlock(blkBytes); err != nil { // persists the block
+			b.BootstrapConfig.Context.Log.Debug("Failed to parse block: %w", err)
+			b.BootstrapConfig.Context.Log.Verbo("block: %s", formatting.DumpBytes{Bytes: blkBytes})
+		}
+	}
+
+	return b.process(wantedBlk)
+}

-func (b *bootstrapper) storeBlock(blk snowman.Block) {
+// GetAncestorsFailed is called when a GetAncestors message we sent fails
+func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) error {
+	blkID, ok := b.outstandingRequests.Remove(vdr, requestID)
+	if !ok {
+		b.BootstrapConfig.Context.Log.Debug("GetAncestorsFailed(%s, %d) called but there was no outstanding request to this validator with this ID", vdr, requestID)
+		return nil
+	}
+	// Send another request for this
+	return b.fetch(blkID)
+}
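fetch, MultiPut, and GetAncestorsFailed all pivot on outstandingRequests. The round trip, sketched with the common.Requests methods exactly as they are used above:

	// on send: remember who was asked, under which request ID, for which block
	b.outstandingRequests.Add(validatorID, requestID, blkID)

	// on response or timeout: only act if it matches something we sent
	if wantedBlkID, ok := b.outstandingRequests.Remove(validatorID, requestID); ok {
		// safe to process; unsolicited messages fall through and are dropped
		_ = wantedBlkID
	}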
+// process a block
+func (b *bootstrapper) process(blk snowman.Block) error {
	status := blk.Status()
	blkID := blk.ID()
	for status == choices.Processing {
-		b.pending.Remove(blkID)
-
		if err := b.Blocked.Push(&blockJob{
			numAccepted: b.numBootstrapped,
			numDropped:  b.numDropped,
			blk:         blk,
		}); err == nil {
			b.numBlocked.Inc()
+			b.numFetched++ // Progress tracker
+			if b.numFetched%common.StatusUpdateFrequency == 0 { // Periodically print progress
+				b.BootstrapConfig.Context.Log.Info("fetched %d blocks", b.numFetched)
+			}
		}

+		if err := b.Blocked.Commit(); err != nil {
+			return err
+		}
+
+		// Process this block's parent
		blk = blk.Parent()
		status = blk.Status()
		blkID = blk.ID()

@ -180,38 +207,60 @@ func (b *bootstrapper) storeBlock(blk snowman.Block) {

	switch status := blk.Status(); status {
	case choices.Unknown:
-		b.sendRequest(blkID)
+		if err := b.fetch(blkID); err != nil {
+			return err
+		}
	case choices.Accepted:
		b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", blkID)
-	case choices.Rejected:
-		b.BootstrapConfig.Context.Log.Error("Bootstrapping wants to accept %s, however it was previously rejected", blkID)
+	case choices.Rejected: // Should never happen
+		return fmt.Errorf("bootstrapping wants to accept %s, however it was previously rejected", blkID)
	}

-	numPending := b.pending.Len()
-	b.numPendingRequests.Set(float64(numPending))
+	if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
+		return b.finish()
+	}
+	return nil
}

-func (b *bootstrapper) finish() {
+func (b *bootstrapper) finish() error {
	if b.finished {
-		return
+		return nil
	}
+	b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching blocks. executing state transitions...")

-	b.executeAll(b.Blocked, b.numBlocked)
+	if err := b.executeAll(b.Blocked, b.numBlocked); err != nil {
+		return err
+	}
+
+	if err := b.VM.Bootstrapped(); err != nil {
+		return fmt.Errorf("failed to notify VM that bootstrapping has finished: %w",
+			err)
+	}

	// Start consensus
-	b.onFinished()
+	if err := b.onFinished(); err != nil {
+		return err
+	}
	b.finished = true

	if b.Bootstrapped != nil {
		b.Bootstrapped()
	}
+	return nil
}

-func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) {
+func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) error {
+	numExecuted := 0
	for job, err := jobs.Pop(); err == nil; job, err = jobs.Pop() {
		numBlocked.Dec()
		if err := jobs.Execute(job); err != nil {
-			b.BootstrapConfig.Context.Log.Warn("Error executing: %s", err)
+			return err
		}
+		if err := jobs.Commit(); err != nil {
+			return err
+		}
+		numExecuted++
+		if numExecuted%common.StatusUpdateFrequency == 0 { // Periodically print progress
+			b.BootstrapConfig.Context.Log.Info("executed %d blocks", numExecuted)
+		}
	}
+	return nil
}
@ -19,7 +19,6 @@ import (
	"github.com/ava-labs/gecko/snow/consensus/snowman"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/engine/common/queue"
-	"github.com/ava-labs/gecko/snow/networking/handler"
	"github.com/ava-labs/gecko/snow/networking/router"
	"github.com/ava-labs/gecko/snow/networking/timeout"
	"github.com/ava-labs/gecko/snow/validators"

@ -37,7 +36,7 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
	sender := &common.SenderTest{}
	vm := &VMTest{}
	engine := &Transitive{}
-	handler := &handler.Handler{}
+	handler := &router.Handler{}
	router := &router.ChainRouter{}
	timeouts := &timeout.Manager{}

@ -53,9 +52,15 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
	peerID := peer.ID()
	peers.Add(peer)

-	handler.Initialize(engine, make(chan common.Message), 1)
+	handler.Initialize(
+		engine,
+		make(chan common.Message),
+		1,
+		"",
+		prometheus.NewRegistry(),
+	)
	timeouts.Initialize(0)
-	router.Initialize(ctx.Log, timeouts, time.Hour)
+	router.Initialize(ctx.Log, timeouts, time.Hour, time.Second)

	blocker, _ := queue.New(db)

@ -73,8 +78,9 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
	}, peerID, sender, vm
}

+// Single node in the accepted frontier; no need to fetch parent
func TestBootstrapperSingleFrontier(t *testing.T) {
-	config, peerID, sender, vm := newConfig(t)
+	config, _, _, vm := newConfig(t)

	blkID0 := ids.Empty.Prefix(0)
	blkID1 := ids.Empty.Prefix(1)

@ -99,6 +105,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) {
	bs := bootstrapper{}
	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
	bs.Initialize(config)
+	finished := new(bool)
+	bs.onFinished = func() error { *finished = true; return nil }

	acceptedIDs := ids.Set{}
	acceptedIDs.Add(blkID1)

@ -106,57 +114,41 @@ func TestBootstrapperSingleFrontier(t *testing.T) {
	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
		switch {
		case blkID.Equals(blkID1):
-			return nil, errUnknownBlock
+			return blk1, nil
		case blkID.Equals(blkID0):
			return blk0, nil
		default:
			t.Fatal(errUnknownBlock)
			panic(errUnknownBlock)
		}
	}

-	reqID := new(uint32)
-	sender.GetF = func(vdr ids.ShortID, innerReqID uint32, blkID ids.ID) {
-		if !vdr.Equals(peerID) {
-			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
-		}
-		switch {
-		case blkID.Equals(blkID1):
-		default:
-			t.Fatalf("Requested unknown vertex")
-		}
-
-		*reqID = innerReqID
-	}
-
-	bs.ForceAccepted(acceptedIDs)
-
-	vm.GetBlockF = nil
-	sender.GetF = nil
-
	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
		switch {
		case bytes.Equal(blkBytes, blkBytes1):
			return blk1, nil
		case bytes.Equal(blkBytes, blkBytes0):
			return blk0, nil
		}
		t.Fatal(errUnknownBlock)
		return nil, errUnknownBlock
	}

-	finished := new(bool)
-	bs.onFinished = func() { *finished = true }
+	vm.CantBootstrapping = false
+	vm.CantBootstrapped = false

-	bs.Put(peerID, *reqID, blkID1, blkBytes1)
-
-	vm.ParseBlockF = nil
-	bs.onFinished = nil
-
-	if !*finished {
+	if err := bs.ForceAccepted(acceptedIDs); err != nil { // should finish
+		t.Fatal(err)
+	} else if !*finished {
		t.Fatalf("Bootstrapping should have finished")
-	}
-	if blk1.Status() != choices.Accepted {
+	} else if blk1.Status() != choices.Accepted {
		t.Fatalf("Block should be accepted")
	}
}

+// Requests the unknown block and gets back a MultiPut with unexpected request ID.
+// Requests again and gets response from unexpected peer.
+// Requests again and gets an unexpected block.
+// Requests again and gets the expected block.
func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
	config, peerID, sender, vm := newConfig(t)

@ -168,103 +160,6 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
	blkBytes1 := []byte{1}
	blkBytes2 := []byte{2}

-	blk0 := &Blk{
-		id:     blkID0,
-		height: 0,
-		status: choices.Accepted,
-		bytes:  blkBytes0,
-	}
-	blk1 := &Blk{
-		parent: blk0,
-		id:     blkID1,
-		height: 1,
-		status: choices.Processing,
-		bytes:  blkBytes1,
-	}
-	blk2 := &Blk{
-		parent: blk1,
-		id:     blkID2,
-		height: 2,
-		status: choices.Processing,
-		bytes:  blkBytes2,
-	}
-
-	bs := bootstrapper{}
-	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
-	bs.Initialize(config)
-
-	acceptedIDs := ids.Set{}
-	acceptedIDs.Add(blkID1)
-
-	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
-		switch {
-		case blkID.Equals(blkID1):
-			return nil, errUnknownBlock
-		default:
-			t.Fatal(errUnknownBlock)
-			panic(errUnknownBlock)
-		}
-	}
-
-	requestID := new(uint32)
-	sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
-		if !vdr.Equals(peerID) {
-			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
-		}
-		switch {
-		case vtxID.Equals(blkID1):
-		default:
-			t.Fatalf("Requested unknown block")
-		}
-
-		*requestID = reqID
-	}
-
-	bs.ForceAccepted(acceptedIDs)
-
-	vm.GetBlockF = nil
-
-	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
-		switch {
-		case bytes.Equal(blkBytes, blkBytes1):
-			return blk1, nil
-		case bytes.Equal(blkBytes, blkBytes2):
-			return blk2, nil
-		}
-		t.Fatal(errUnknownBlock)
-		return nil, errUnknownBlock
-	}
-
-	finished := new(bool)
-	bs.onFinished = func() { *finished = true }
-
-	bs.Put(peerID, *requestID, blkID2, blkBytes2)
-	bs.Put(peerID, *requestID, blkID1, blkBytes1)
-
-	vm.ParseBlockF = nil
-
-	if !*finished {
-		t.Fatalf("Bootstrapping should have finished")
-	}
-	if blk1.Status() != choices.Accepted {
-		t.Fatalf("Block should be accepted")
-	}
-	if blk2.Status() != choices.Processing {
-		t.Fatalf("Block should be processing")
-	}
-}
-
func TestBootstrapperDependency(t *testing.T) {
	config, peerID, sender, vm := newConfig(t)

	blkID0 := ids.Empty.Prefix(0)
	blkID1 := ids.Empty.Prefix(1)
	blkID2 := ids.Empty.Prefix(2)

	blkBytes0 := []byte{0}
	blkBytes1 := []byte{1}
	blkBytes2 := []byte{2}

	blk0 := &Blk{
		id:     blkID0,
		height: 0,

@ -289,42 +184,36 @@ func TestBootstrapperDependency(t *testing.T) {
	bs := bootstrapper{}
	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
	bs.Initialize(config)
+	finished := new(bool)
+	bs.onFinished = func() error { *finished = true; return nil }

	acceptedIDs := ids.Set{}
	acceptedIDs.Add(blkID2)

+	parsedBlk1 := false
	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
		switch {
		case blkID.Equals(blkID0):
			return blk0, nil
+		case blkID.Equals(blkID1):
+			if parsedBlk1 {
+				return blk1, nil
+			}
+			return nil, errUnknownBlock
		case blkID.Equals(blkID2):
			return blk2, nil
		default:
-			t.Fatalf("Requested unknown block")
-			panic("Requested unknown block")
+			t.Fatal(errUnknownBlock)
+			panic(errUnknownBlock)
		}
	}

-	requestID := new(uint32)
-	sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
-		if !vdr.Equals(peerID) {
-			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
-		}
-		switch {
-		case vtxID.Equals(blkID1):
-		default:
-			t.Fatalf("Requested unknown block")
-		}
-
-		*requestID = reqID
-	}
-
-	bs.ForceAccepted(acceptedIDs)
-
-	vm.GetBlockF = nil
-	sender.GetF = nil
-
	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
		switch {
		case bytes.Equal(blkBytes, blkBytes0):
			return blk0, nil
		case bytes.Equal(blkBytes, blkBytes1):
+			blk1.status = choices.Processing
+			parsedBlk1 = true
			return blk1, nil
		case bytes.Equal(blkBytes, blkBytes2):
			return blk2, nil

@ -333,20 +222,325 @@ func TestBootstrapperDependency(t *testing.T) {
		return nil, errUnknownBlock
	}

-	blk1.status = choices.Processing
+	requestID := new(uint32)
+	sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
+		if !vdr.Equals(peerID) {
+			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
+		}
+		switch {
+		case vtxID.Equals(blkID1):
+		default:
+			t.Fatalf("should have requested blk1")
+		}
+		*requestID = reqID
+	}
+	vm.CantBootstrapping = false
+
+	if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk1
+		t.Fatal(err)
+	}
+
+	oldReqID := *requestID
+	if err := bs.MultiPut(peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID
+		t.Fatal(err)
+	} else if oldReqID != *requestID {
+		t.Fatal("should not have sent new request")
+	}
+
+	if err := bs.MultiPut(ids.NewShortID([20]byte{1, 2, 3}), *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer
+		t.Fatal(err)
+	} else if oldReqID != *requestID {
+		t.Fatal("should not have sent new request")
+	}
+
+	if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block
+		t.Fatal(err)
+	} else if oldReqID == *requestID {
+		t.Fatal("should have sent new request")
+	}
+
+	vm.CantBootstrapped = false
+
+	if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with right block
+		t.Fatal(err)
+	} else if !*finished {
+		t.Fatalf("Bootstrapping should have finished")
+	} else if blk0.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	} else if blk1.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	} else if blk2.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	}
+}
+
+// There are multiple needed blocks and MultiPut returns one at a time
+func TestBootstrapperPartialFetch(t *testing.T) {
+	config, peerID, sender, vm := newConfig(t)
+
+	blkID0 := ids.Empty.Prefix(0)
+	blkID1 := ids.Empty.Prefix(1)
+	blkID2 := ids.Empty.Prefix(2)
+	blkID3 := ids.Empty.Prefix(3)
+
+	blkBytes0 := []byte{0}
+	blkBytes1 := []byte{1}
+	blkBytes2 := []byte{2}
+	blkBytes3 := []byte{3}
+
+	blk0 := &Blk{
+		id:     blkID0,
+		height: 0,
+		status: choices.Accepted,
+		bytes:  blkBytes0,
+	}
+	blk1 := &Blk{
+		parent: blk0,
+		id:     blkID1,
+		height: 1,
+		status: choices.Unknown,
+		bytes:  blkBytes1,
+	}
+	blk2 := &Blk{
+		parent: blk1,
+		id:     blkID2,
+		height: 2,
+		status: choices.Unknown,
+		bytes:  blkBytes2,
+	}
+	blk3 := &Blk{
+		parent: blk2,
+		id:     blkID3,
+		height: 3,
+		status: choices.Processing,
+		bytes:  blkBytes3,
+	}
+
+	bs := bootstrapper{}
+	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
+	bs.Initialize(config)
+	finished := new(bool)
-	bs.onFinished = func() { *finished = true }
+	bs.onFinished = func() error { *finished = true; return nil }

-	bs.Put(peerID, *requestID, blkID1, blkBytes1)
+	acceptedIDs := ids.Set{}
+	acceptedIDs.Add(blkID3)
+
+	parsedBlk1 := false
+	parsedBlk2 := false
+	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
+		switch {
+		case blkID.Equals(blkID0):
+			return blk0, nil
+		case blkID.Equals(blkID1):
+			if parsedBlk1 {
+				return blk1, nil
+			}
+			return nil, errUnknownBlock
+		case blkID.Equals(blkID2):
+			if parsedBlk2 {
+				return blk2, nil
+			}
+			return nil, errUnknownBlock
+		case blkID.Equals(blkID3):
+			return blk3, nil
+		default:
+			t.Fatal(errUnknownBlock)
+			panic(errUnknownBlock)
+		}
+	}
+	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
+		switch {
+		case bytes.Equal(blkBytes, blkBytes0):
+			return blk0, nil
+		case bytes.Equal(blkBytes, blkBytes1):
+			blk1.status = choices.Processing
+			parsedBlk1 = true
+			return blk1, nil
+		case bytes.Equal(blkBytes, blkBytes2):
+			blk2.status = choices.Processing
+			parsedBlk2 = true
+			return blk2, nil
+		case bytes.Equal(blkBytes, blkBytes3):
+			return blk3, nil
+		}
+		t.Fatal(errUnknownBlock)
+		return nil, errUnknownBlock
+	}
+
+	requestID := new(uint32)
+	requested := ids.Empty
+	sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
+		if !vdr.Equals(peerID) {
+			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
+		}
+		switch {
+		case vtxID.Equals(blkID1), vtxID.Equals(blkID2):
+		default:
+			t.Fatalf("should have requested blk1 or blk2")
+		}
+		*requestID = reqID
+		requested = vtxID
+	}
+
+	vm.CantBootstrapping = false
+
+	if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2
+		t.Fatal(err)
+	}
+
+	if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2
+		t.Fatal(err)
+	} else if !requested.Equals(blkID1) {
+		t.Fatal("should have requested blk1")
+	}
+
+	vm.CantBootstrapped = false
+
+	if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1
+		t.Fatal(err)
+	} else if !requested.Equals(blkID1) {
+		t.Fatal("should not have requested another block")
+	}

	if !*finished {
		t.Fatalf("Bootstrapping should have finished")
-	}
-	if blk1.Status() != choices.Accepted {
+	} else if blk0.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	} else if blk1.Status() != choices.Accepted {
		t.Fatalf("Block should be accepted")
+	} else if blk2.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
	}
-	if blk2.Status() != choices.Accepted {
-	}
}

+// There are multiple needed blocks and MultiPut returns all at once
+func TestBootstrapperMultiPut(t *testing.T) {
+	config, peerID, sender, vm := newConfig(t)
+
+	blkID0 := ids.Empty.Prefix(0)
+	blkID1 := ids.Empty.Prefix(1)
+	blkID2 := ids.Empty.Prefix(2)
+	blkID3 := ids.Empty.Prefix(3)
+
+	blkBytes0 := []byte{0}
+	blkBytes1 := []byte{1}
+	blkBytes2 := []byte{2}
+	blkBytes3 := []byte{3}
+
+	blk0 := &Blk{
+		id:     blkID0,
+		height: 0,
+		status: choices.Accepted,
+		bytes:  blkBytes0,
+	}
+	blk1 := &Blk{
+		parent: blk0,
+		id:     blkID1,
+		height: 1,
+		status: choices.Unknown,
+		bytes:  blkBytes1,
+	}
+	blk2 := &Blk{
+		parent: blk1,
+		id:     blkID2,
+		height: 2,
+		status: choices.Unknown,
+		bytes:  blkBytes2,
+	}
+	blk3 := &Blk{
+		parent: blk2,
+		id:     blkID3,
+		height: 3,
+		status: choices.Processing,
+		bytes:  blkBytes3,
+	}
+	vm.CantBootstrapping = false
+
+	bs := bootstrapper{}
+	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
+	bs.Initialize(config)
+	finished := new(bool)
+	bs.onFinished = func() error { *finished = true; return nil }
+
+	acceptedIDs := ids.Set{}
+	acceptedIDs.Add(blkID3)
+
+	parsedBlk1 := false
+	parsedBlk2 := false
+	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
+		switch {
+		case blkID.Equals(blkID0):
+			return blk0, nil
+		case blkID.Equals(blkID1):
+			if parsedBlk1 {
+				return blk1, nil
+			}
+			return nil, errUnknownBlock
+		case blkID.Equals(blkID2):
+			if parsedBlk2 {
+				return blk2, nil
+			}
+			return nil, errUnknownBlock
+		case blkID.Equals(blkID3):
+			return blk3, nil
+		default:
+			t.Fatal(errUnknownBlock)
+			panic(errUnknownBlock)
+		}
+	}
+	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
+		switch {
+		case bytes.Equal(blkBytes, blkBytes0):
+			return blk0, nil
+		case bytes.Equal(blkBytes, blkBytes1):
+			blk1.status = choices.Processing
+			parsedBlk1 = true
+			return blk1, nil
+		case bytes.Equal(blkBytes, blkBytes2):
+			blk2.status = choices.Processing
+			parsedBlk2 = true
+			return blk2, nil
+		case bytes.Equal(blkBytes, blkBytes3):
+			return blk3, nil
+		}
+		t.Fatal(errUnknownBlock)
+		return nil, errUnknownBlock
+	}
+
+	requestID := new(uint32)
+	requested := ids.Empty
+	sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
+		if !vdr.Equals(peerID) {
+			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
+		}
+		switch {
+		case vtxID.Equals(blkID1), vtxID.Equals(blkID2):
+		default:
+			t.Fatalf("should have requested blk1 or blk2")
+		}
+		*requestID = reqID
+		requested = vtxID
+	}
+
+	if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2
+		t.Fatal(err)
+	}
+
+	vm.CantBootstrapped = false
+
+	if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1
+		t.Fatal(err)
+	} else if !requested.Equals(blkID2) {
+		t.Fatal("should not have requested another block")
+	}
+
+	if !*finished {
+		t.Fatalf("Bootstrapping should have finished")
+	} else if blk0.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	} else if blk1.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	} else if blk2.Status() != choices.Accepted {
+		t.Fatalf("Block should be accepted")
+	}
+}

@ -411,6 +605,7 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
		t.Fatal(errUnknownBlock)
		return nil, errUnknownBlock
	}
+	vm.CantBootstrapping = false

	accepted := bs.FilterAccepted(blkIDs)

@ -427,164 +622,3 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
		t.Fatalf("Blk shouldn't be accepted")
	}
}
-
-func TestBootstrapperPartialFetch(t *testing.T) {
-	config, _, sender, vm := newConfig(t)
-
-	blkID0 := ids.Empty.Prefix(0)
-	blkID1 := ids.Empty.Prefix(1)
-
-	blkBytes0 := []byte{0}
-
-	blk0 := &Blk{
-		id:     blkID0,
-		height: 0,
-		status: choices.Accepted,
-		bytes:  blkBytes0,
-	}
-
-	bs := bootstrapper{}
-	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
-	bs.Initialize(config)
-
-	acceptedIDs := ids.Set{}
-	acceptedIDs.Add(
-		blkID0,
-		blkID1,
-	)
-
-	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
-		switch {
-		case blkID.Equals(blkID0):
-			return blk0, nil
-		case blkID.Equals(blkID1):
-			return nil, errUnknownBlock
-		default:
-			t.Fatal(errUnknownBlock)
-			panic(errUnknownBlock)
-		}
-	}
-
-	sender.CantGet = false
-	bs.onFinished = func() {}
-
-	bs.ForceAccepted(acceptedIDs)
-
-	if bs.finished {
-		t.Fatalf("should have requested a block")
-	}
-
-	if bs.pending.Len() != 1 {
-		t.Fatalf("wrong number pending")
-	}
-}
-
-func TestBootstrapperWrongIDByzantineResponse(t *testing.T) {
-	config, peerID, sender, vm := newConfig(t)
-
-	blkID0 := ids.Empty.Prefix(0)
-	blkID1 := ids.Empty.Prefix(1)
-	blkID2 := ids.Empty.Prefix(2)
-
-	blkBytes0 := []byte{0}
-	blkBytes1 := []byte{1}
-	blkBytes2 := []byte{2}
-
-	blk0 := &Blk{
-		id:     blkID0,
-		height: 0,
-		status: choices.Accepted,
-		bytes:  blkBytes0,
-	}
-	blk1 := &Blk{
-		parent: blk0,
-		id:     blkID1,
-		height: 1,
-		status: choices.Processing,
-		bytes:  blkBytes1,
-	}
-	blk2 := &Blk{
-		parent: blk1,
-		id:     blkID2,
-		height: 2,
-		status: choices.Processing,
-		bytes:  blkBytes2,
-	}
-
-	bs := bootstrapper{}
-	bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
-	bs.Initialize(config)
-
-	acceptedIDs := ids.Set{}
-	acceptedIDs.Add(blkID1)
-
-	vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
-		switch {
-		case blkID.Equals(blkID1):
-			return nil, errUnknownBlock
-		default:
-			t.Fatal(errUnknownBlock)
-			panic(errUnknownBlock)
-		}
-	}
-
-	requestID := new(uint32)
-	sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
-		if !vdr.Equals(peerID) {
-			t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
-		}
-		switch {
-		case vtxID.Equals(blkID1):
-		default:
-			t.Fatalf("Requested unknown block")
-		}
-
-		*requestID = reqID
-	}
-
-	bs.ForceAccepted(acceptedIDs)
-
-	vm.GetBlockF = nil
-	sender.GetF = nil
-
-	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
-		switch {
-		case bytes.Equal(blkBytes, blkBytes2):
-			return blk2, nil
-		}
-		t.Fatal(errUnknownBlock)
-		return nil, errUnknownBlock
-	}
-
-	sender.CantGet = false
-
-	bs.Put(peerID, *requestID, blkID1, blkBytes2)
-
-	sender.CantGet = true
-
-	vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
-		switch {
-		case bytes.Equal(blkBytes, blkBytes1):
-			return blk1, nil
-		}
-		t.Fatal(errUnknownBlock)
-		return nil, errUnknownBlock
-	}
-
-	finished := new(bool)
-	bs.onFinished = func() { *finished = true }
-
-	bs.Put(peerID, *requestID, blkID1, blkBytes1)
-
-	vm.ParseBlockF = nil
-
-	if !*finished {
-		t.Fatalf("Bootstrapping should have finished")
-	}
-	if blk1.Status() != choices.Accepted {
-		t.Fatalf("Block should be accepted")
-	}
-	if blk2.Status() != choices.Processing {
-		t.Fatalf("Block should be processing")
-	}
-}
@ -7,6 +7,7 @@ import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/consensus/snowman"
	"github.com/ava-labs/gecko/snow/engine/common"
+	"github.com/ava-labs/gecko/utils/wrappers"
)

type convincer struct {

@ -16,6 +17,7 @@ type convincer struct {
	requestID uint32
	abandoned bool
	deps      ids.Set
+	errs      *wrappers.Errs
}

func (c *convincer) Dependencies() ids.Set { return c.deps }

@ -28,7 +30,7 @@ func (c *convincer) Fulfill(id ids.ID) {
func (c *convincer) Abandon(ids.ID) { c.abandoned = true }

func (c *convincer) Update() {
-	if c.abandoned || c.deps.Len() != 0 {
+	if c.abandoned || c.deps.Len() != 0 || c.errs.Errored() {
		return
	}

@ -34,8 +34,8 @@ type Blk struct {

func (b *Blk) ID() ids.ID            { return b.id }
func (b *Blk) Parent() snowman.Block { return b.parent }
-func (b *Blk) Accept()               { b.status = choices.Accepted }
-func (b *Blk) Reject()               { b.status = choices.Rejected }
+func (b *Blk) Accept() error         { b.status = choices.Accepted; return nil }
+func (b *Blk) Reject() error         { b.status = choices.Rejected; return nil }
func (b *Blk) Status() choices.Status { return b.status }
func (b *Blk) Verify() error         { return b.validity }
func (b *Blk) Bytes() []byte         { return b.bytes }
@ -36,9 +36,9 @@ func (i *issuer) Abandon(ids.ID) {
}

func (i *issuer) Update() {
-	if i.abandoned || i.deps.Len() != 0 {
+	if i.abandoned || i.deps.Len() != 0 || i.t.errs.Errored() {
		return
	}

-	i.t.deliver(i.blk)
+	i.t.errs.Add(i.t.deliver(i.blk))
}
@ -4,13 +4,23 @@
|
|||
package snowman
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"github.com/ava-labs/gecko/ids"
|
||||
"github.com/ava-labs/gecko/network"
|
||||
"github.com/ava-labs/gecko/snow"
|
||||
"github.com/ava-labs/gecko/snow/choices"
|
||||
"github.com/ava-labs/gecko/snow/consensus/snowman"
|
||||
"github.com/ava-labs/gecko/snow/engine/common"
|
||||
"github.com/ava-labs/gecko/snow/events"
|
||||
"github.com/ava-labs/gecko/utils/formatting"
|
||||
"github.com/ava-labs/gecko/utils/wrappers"
|
||||
)
|
||||
|
||||
const (
|
||||
// TODO define this constant in one place rather than here and in snowman
|
||||
// Max containers size in a MultiPut message
|
||||
maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
|
||||
)
|
||||
|
||||
// Transitive implements the Engine interface by attempting to fetch all
|
||||
|
@ -36,11 +46,14 @@ type Transitive struct {
|
|||
|
||||
// mark for if the engine has been bootstrapped or not
|
||||
bootstrapped bool
|
||||
|
||||
// errs tracks if an error has occurred in a callback
|
||||
errs wrappers.Errs
|
||||
}
|
||||
|
||||
// Initialize implements the Engine interface
|
||||
func (t *Transitive) Initialize(config Config) {
|
||||
config.Context.Log.Info("Initializing Snowman consensus")
|
||||
func (t *Transitive) Initialize(config Config) error {
|
||||
config.Context.Log.Info("initializing consensus engine")
|
||||
|
||||
t.Config = config
|
||||
t.metrics.Initialize(
|
||||
|
@ -50,17 +63,18 @@ func (t *Transitive) Initialize(config Config) {
|
|||
)
|
||||
|
||||
t.onFinished = t.finishBootstrapping
|
||||
t.bootstrapper.Initialize(config.BootstrapConfig)
|
||||
|
||||
t.polls.log = config.Context.Log
|
||||
t.polls.numPolls = t.numPolls
|
||||
t.polls.alpha = t.Params.Alpha
|
||||
t.polls.m = make(map[uint32]poll)
|
||||
|
||||
return t.bootstrapper.Initialize(config.BootstrapConfig)
|
||||
}
|
||||
|
||||
// when bootstrapping is finished, this will be called. This initializes the
|
||||
// consensus engine with the last accepted block.
|
||||
func (t *Transitive) finishBootstrapping() {
|
||||
func (t *Transitive) finishBootstrapping() error {
|
||||
// set the bootstrapped mark to switch consensus modes
|
||||
t.bootstrapped = true
|
||||
|
||||
|
@ -73,15 +87,17 @@ func (t *Transitive) finishBootstrapping() {
|
|||
// oracle block
|
||||
tail, err := t.Config.VM.GetBlock(tailID)
|
||||
if err != nil {
|
||||
t.Config.Context.Log.Error("Failed to get last accepted block due to: %s", err)
|
||||
return
|
||||
t.Config.Context.Log.Error("failed to get last accepted block due to: %s", err)
|
||||
return err
|
||||
}
|
||||
|
||||
switch blk := tail.(type) {
|
||||
case OracleBlock:
|
||||
for _, blk := range blk.Options() {
|
||||
// note that deliver will set the VM's preference
|
||||
t.deliver(blk)
|
||||
if err := t.deliver(blk); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
default:
|
||||
// if there aren't blocks we need to deliver on startup, we need to set
|
||||
|
@ -89,70 +105,98 @@ func (t *Transitive) finishBootstrapping() {
|
|||
t.Config.VM.SetPreference(tailID)
|
||||
}
|
||||
|
||||
t.Config.Context.Log.Info("Bootstrapping finished with %s as the last accepted block", tailID)
|
||||
t.Config.Context.Log.Info("bootstrapping finished with %s as the last accepted block", tailID)
|
||||
return nil
|
||||
}

// Gossip implements the Engine interface
func (t *Transitive) Gossip() {
func (t *Transitive) Gossip() error {
blkID := t.Config.VM.LastAccepted()
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
return
t.Config.Context.Log.Warn("dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
return nil
}

t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", blkID)
t.Config.Context.Log.Verbo("gossiping %s as accepted to the network", blkID)
t.Config.Sender.Gossip(blkID, blk.Bytes())
return nil
}

// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() {
t.Config.Context.Log.Info("Shutting down Snowman consensus")
t.Config.VM.Shutdown()
func (t *Transitive) Shutdown() error {
t.Config.Context.Log.Info("shutting down consensus engine")
return t.Config.VM.Shutdown()
}

// Context implements the Engine interface
func (t *Transitive) Context() *snow.Context { return t.Config.Context }

// Get implements the Engine interface
func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
// If we failed to get the block, that means either an unexpected error
// has occurred, the validator is not following the protocol, or the
// block has been pruned.
t.Config.Context.Log.Warn("Get called for blockID %s errored with %s",
blkID,
err)
return
t.Config.Context.Log.Debug("Get(%s, %d, %s) failed with: %s", vdr, requestID, blkID, err)
return nil
}

// Respond to the validator with the fetched block and the same requestID.
t.Config.Sender.Put(vdr, requestID, blkID, blk.Bytes())
return nil
}

// GetAncestors implements the Engine interface
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
startTime := time.Now()
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil { // Don't have the block. Drop this request.
t.Config.Context.Log.Verbo("couldn't get block %s. dropping GetAncestors(%s, %d, %s)", blkID, vdr, requestID, blkID)
return nil
}

ancestorsBytes := make([][]byte, 1, common.MaxContainersPerMultiPut) // First elt is byte repr. of blk, then its parents, then grandparent, etc.
ancestorsBytes[0] = blk.Bytes()
ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors

for numFetched := 1; numFetched < common.MaxContainersPerMultiPut && time.Since(startTime) < common.MaxTimeFetchingAncestors; numFetched++ {
blk = blk.Parent()
if blk.Status() == choices.Unknown {
break
}
blkBytes := blk.Bytes()
// Ensure response size isn't too large. Include wrappers.IntLen because the size of the message
// is included with each container, and the size is repr. by an int.
if newLen := wrappers.IntLen + ancestorsBytesLen + len(blkBytes); newLen < maxContainersLen {
ancestorsBytes = append(ancestorsBytes, blkBytes)
ancestorsBytesLen = newLen
} else { // reached maximum response size
break
}
}

t.Config.Sender.MultiPut(vdr, requestID, ancestorsBytes)
return nil
}
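GetAncestors above bounds the MultiPut response three ways: a container-count cap, a total-byte cap that charges each container an int-sized length prefix, and a wall-clock budget. A self-contained sketch of the same loop; the constants are assumed stand-ins, not the repo's real values:

	package main

	import "time"

	const (
		maxContainers   = 2000            // stand-in for common.MaxContainersPerMultiPut (assumed value)
		maxBytesLen     = 2 * 1024 * 1024 // stand-in for maxContainersLen (assumed value)
		intLen          = 4               // per-container length prefix, as with wrappers.IntLen
		maxFetchingTime = 50 * time.Millisecond
	)

	// collectAncestors takes block bytes already ordered child-to-parent and
	// keeps appending until the count cap, byte cap, or time budget is hit.
	func collectAncestors(ordered [][]byte) [][]byte {
		startTime := time.Now()
		out := make([][]byte, 0, maxContainers)
		totalLen := 0
		for _, b := range ordered {
			if len(out) >= maxContainers || time.Since(startTime) >= maxFetchingTime {
				break
			}
			newLen := totalLen + intLen + len(b)
			if newLen >= maxBytesLen {
				break // reached maximum response size
			}
			out = append(out, b)
			totalLen = newLen
		}
		return out
	}

	func main() {
		_ = collectAncestors([][]byte{{0x01}, {0x02, 0x03}})
	}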

// Put implements the Engine interface
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
t.Config.Context.Log.Verbo("Put called for blockID %s", blkID)

// if the engine hasn't been bootstrapped, forward the request to the
// bootstrapper
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
// bootstrapping isn't done --> we didn't send any gets --> this put is invalid
if !t.bootstrapped {
t.bootstrapper.Put(vdr, requestID, blkID, blkBytes)
return
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
return nil
}

blk, err := t.Config.VM.ParseBlock(blkBytes)
if err != nil {
t.Config.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})

t.Config.Context.Log.Debug("failed to parse block %s: %s", blkID, err)
t.Config.Context.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
// because GetFailed doesn't utilize the assumption that we actually
// sent a Get message, we can safely call GetFailed here to potentially
// abandon the request.
t.GetFailed(vdr, requestID)
return
return t.GetFailed(vdr, requestID)
}

// insert the block into consensus. If the block has already been issued,

@@ -160,16 +204,16 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkByt
// receive requests to fill the ancestry. dependencies that have already
// been fetched, but with missing dependencies themselves won't be requested
// from the vdr.
t.insertFrom(vdr, blk)
_, err = t.insertFrom(vdr, blk)
return err
}

// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) {
// if the engine hasn't been bootstrapped, forward the request to the
// bootstrapper
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
// not done bootstrapping --> didn't send a get --> this message is invalid
if !t.bootstrapped {
t.bootstrapper.GetFailed(vdr, requestID)
return
t.Config.Context.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}

// we don't use the assumption that this function is called after a failed

@@ -177,24 +221,23 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) {
// and also get what the request was for if it exists
blkID, ok := t.blkReqs.Remove(vdr, requestID)
if !ok {
t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
vdr)
return
t.Config.Context.Log.Debug("getFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
return nil
}

// because the get request was dropped, we no longer expect blkID to
// be issued.
t.blocked.Abandon(blkID)
return t.errs.Err
}
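GetFailed works because t.blkReqs remembers every outstanding request keyed by (validator, requestID): Remove simultaneously verifies that a Get was actually sent and recovers which block it was for, so the engine can abandon exactly that block. A toy version of that bookkeeping (hypothetical types, not the repo's):

	package main

	import "fmt"

	type reqKey struct {
		vdr       string // validator ID
		requestID uint32
	}

	// requests maps an outstanding (validator, requestID) pair to the block
	// ID it was issued for, so a failure can be matched back to its request.
	type requests map[reqKey]string

	func (r requests) Add(vdr string, requestID uint32, blkID string) {
		r[reqKey{vdr, requestID}] = blkID
	}

	func (r requests) Remove(vdr string, requestID uint32) (string, bool) {
		k := reqKey{vdr, requestID}
		blkID, ok := r[k]
		delete(r, k)
		return blkID, ok
	}

	func main() {
		r := requests{}
		r.Add("vdr1", 7, "blkA")
		if blkID, ok := r.Remove("vdr1", 7); ok {
			fmt.Println("request 7 was for", blkID) // abandon blkID here
		}
	}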

// PullQuery implements the Engine interface
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
// if the engine hasn't been bootstrapped, we aren't ready to respond to
// queries
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping",
blkID)
return
t.Config.Context.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
return nil
}

c := &convincer{

@@ -202,33 +245,39 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID)
sender: t.Config.Sender,
vdr: vdr,
requestID: requestID,
errs: &t.errs,
}

added, err := t.reinsertFrom(vdr, blkID)
if err != nil {
return err
}

// if we aren't able to have issued this block, then it is a dependency for
// this reply
if !t.reinsertFrom(vdr, blkID) {
if !added {
c.deps.Add(blkID)
}

t.blocked.Register(c)
return t.errs.Err
}

// PushQuery implements the Engine interface
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
// if the engine hasn't been bootstrapped, we aren't ready to respond to
// queries
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", blkID)
return
t.Config.Context.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
return nil
}

blk, err := t.Config.VM.ParseBlock(blkBytes)
// If the parsing fails, we just drop the request, as we didn't ask for it
if err != nil {
t.Config.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
return
t.Config.Context.Log.Debug("failed to parse block %s: %s", blkID, err)
t.Config.Context.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
return nil
}

// insert the block into consensus. If the block has already been issued,

@@ -236,36 +285,33 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID,
// receive requests to fill the ancestry. dependencies that have already
// been fetched, but with missing dependencies themselves won't be requested
// from the vdr.
t.insertFrom(vdr, blk)
if _, err := t.insertFrom(vdr, blk); err != nil {
return err
}

// register the chit request
t.PullQuery(vdr, requestID, blk.ID())
return t.PullQuery(vdr, requestID, blk.ID())
}

// Chits implements the Engine interface
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) error {
// if the engine hasn't been bootstrapped, we shouldn't be receiving chits
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
return
t.Config.Context.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}

// Since this is snowman, there should only be one ID in the vote set
if votes.Len() != 1 {
t.Config.Context.Log.Debug("Chits was called with the wrong number of votes %d. ValidatorID: %s, RequestID: %d",
votes.Len(),
vdr,
requestID)

t.Config.Context.Log.Debug("Chits(%s, %d) was called with %d votes (expected 1)", vdr, requestID, votes.Len())
// because QueryFailed doesn't utilize the assumption that we actually
// sent a Query message, we can safely call QueryFailed here to
// potentially abandon the request.
t.QueryFailed(vdr, requestID)
return
return t.QueryFailed(vdr, requestID)
}
vote := votes.List()[0]

t.Config.Context.Log.Verbo("Chit was called. RequestID: %v. Vote: %s", requestID, vote)
t.Config.Context.Log.Verbo("Chits(%s, %d) contains vote for %s", vdr, requestID, vote)

v := &voter{
t: t,

@@ -274,21 +320,27 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
response: vote,
}

added, err := t.reinsertFrom(vdr, vote)
if err != nil {
return err
}

// if we aren't able to have issued the vote's block, then it is a
// dependency for applying the vote
if !t.reinsertFrom(vdr, vote) {
if !added {
v.deps.Add(vote)
}

t.blocked.Register(v)
return t.errs.Err
}
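The convincer built in PullQuery and the voter built here are both dependency-blocked jobs: registered with t.blocked, they fire only once every block ID in their deps set has been fulfilled or abandoned. A stripped-down model of the mechanism (the real engine uses an events-style blocker; this is only a sketch):

	package main

	import "fmt"

	// job runs once all of its dependencies have been resolved.
	type job struct {
		deps map[string]bool
		run  func()
	}

	// fulfill marks one dependency resolved and fires the job when none remain.
	func (j *job) fulfill(id string) {
		delete(j.deps, id)
		if len(j.deps) == 0 {
			j.run()
		}
	}

	func main() {
		v := &job{
			deps: map[string]bool{"blkA": true},
			run:  func() { fmt.Println("vote applied") },
		}
		v.fulfill("blkA") // prints "vote applied"
	}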

// QueryFailed implements the Engine interface
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) {
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
// if the engine hasn't been bootstrapped, we won't have sent a query
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping QueryFailed due to bootstrapping")
return
t.Config.Context.Log.Warn("dropping QueryFailed(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}

t.blocked.Register(&voter{

@@ -296,30 +348,31 @@ func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) {
vdr: vdr,
requestID: requestID,
})
return t.errs.Err
}

// Notify implements the Engine interface
func (t *Transitive) Notify(msg common.Message) {
func (t *Transitive) Notify(msg common.Message) error {
// if the engine hasn't been bootstrapped, we shouldn't be issuing blocks
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
return
t.Config.Context.Log.Debug("dropping Notify due to bootstrapping")
return nil
}

t.Config.Context.Log.Verbo("Snowman engine notified of %s from the vm", msg)
t.Config.Context.Log.Verbo("snowman engine notified of %s from the vm", msg)
switch msg {
case common.PendingTxs:
// the pending txs message means we should attempt to build a block.
blk, err := t.Config.VM.BuildBlock()
if err != nil {
t.Config.Context.Log.Verbo("VM.BuildBlock errored with %s", err)
return
t.Config.Context.Log.Debug("VM.BuildBlock errored with: %s", err)
return nil
}

// a newly created block is expected to be processing. If this check
// fails, there is potentially an error in the VM this engine is running
if status := blk.Status(); status != choices.Processing {
t.Config.Context.Log.Warn("Attempting to issue a block with status: %s, expected Processing", status)
t.Config.Context.Log.Warn("attempting to issue a block with status: %s, expected Processing", status)
}

// the newly created block should be built on top of the preferred

@@ -327,18 +380,24 @@ func (t *Transitive) Notify(msg common.Message) {
// confirmed.
parentID := blk.Parent().ID()
if pref := t.Consensus.Preference(); !parentID.Equals(pref) {
t.Config.Context.Log.Warn("Built block with parent: %s, expected %s", parentID, pref)
t.Config.Context.Log.Warn("built block with parent: %s, expected %s", parentID, pref)
}

added, err := t.insertAll(blk)
if err != nil {
return err
}

// inserting the block shouldn't have any missing dependencies
if t.insertAll(blk) {
t.Config.Context.Log.Verbo("Successfully issued new block from the VM")
if added {
t.Config.Context.Log.Verbo("successfully issued new block from the VM")
} else {
t.Config.Context.Log.Warn("VM.BuildBlock returned a block that is pending for ancestors")
}
default:
t.Config.Context.Log.Warn("Unexpected message from the VM: %s", msg)
t.Config.Context.Log.Warn("unexpected message from the VM: %s", msg)
}
return nil
}

func (t *Transitive) repoll() {

@@ -356,11 +415,11 @@ func (t *Transitive) repoll() {
// added, to consensus. This is useful to check the local DB before requesting a
// block in case we have the block for some reason. If the block or a dependency
// is missing, the validator will be sent a Get message.
func (t *Transitive) reinsertFrom(vdr ids.ShortID, blkID ids.ID) bool {
func (t *Transitive) reinsertFrom(vdr ids.ShortID, blkID ids.ID) (bool, error) {
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
t.sendRequest(vdr, blkID)
return false
return false, nil
}
return t.insertFrom(vdr, blk)
}

@@ -370,12 +429,14 @@ func (t *Transitive) reinsertFrom(vdr ids.ShortID, blkID ids.ID) bool {
// This is useful to check the local DB before requesting a block in case we
// have the block for some reason. If a dependency is missing, the validator
// will be sent a Get message.
func (t *Transitive) insertFrom(vdr ids.ShortID, blk snowman.Block) bool {
func (t *Transitive) insertFrom(vdr ids.ShortID, blk snowman.Block) (bool, error) {
blkID := blk.ID()
// if the block has been issued, we don't need to insert it. if the block is
// already pending, we shouldn't attempt to insert it again yet
for !t.Consensus.Issued(blk) && !t.pending.Contains(blkID) {
t.insert(blk)
if err := t.insert(blk); err != nil {
return false, err
}

blk = blk.Parent()
blkID = blk.ID()

@@ -384,10 +445,10 @@ func (t *Transitive) insertFrom(vdr ids.ShortID, blk snowman.Block) bool {
// newly inserted block
if !blk.Status().Fetched() {
t.sendRequest(vdr, blkID)
return false
return false, nil
}
}
return t.Consensus.Issued(blk)
return t.Consensus.Issued(blk), nil
}

// insertAll attempts to issue the branch ending with a block to consensus.

@@ -395,10 +456,12 @@ func (t *Transitive) insertFrom(vdr ids.ShortID, blk snowman.Block) bool {
// This is useful to check the local DB before requesting a block in case we
// have the block for some reason. If a dependency is missing and the dependency
// hasn't been requested, the issuance will be abandoned.
func (t *Transitive) insertAll(blk snowman.Block) bool {
func (t *Transitive) insertAll(blk snowman.Block) (bool, error) {
blkID := blk.ID()
for blk.Status().Fetched() && !t.Consensus.Issued(blk) && !t.pending.Contains(blkID) {
t.insert(blk)
if err := t.insert(blk); err != nil {
return false, err
}

blk = blk.Parent()
blkID = blk.ID()

@@ -406,25 +469,25 @@ func (t *Transitive) insertAll(blk snowman.Block) bool {

// if issuance of the block was successful, this is the happy path
if t.Consensus.Issued(blk) {
return true
return true, nil
}

// if this branch is waiting on a block that we supposedly have a source of,
// we can just wait for that request to succeed or fail
if t.blkReqs.Contains(blkID) {
return false
return false, nil
}

// if we have no reason to expect that this block will be inserted, we
// should abandon the block to avoid a memory leak
t.blocked.Abandon(blkID)
return false
return false, t.errs.Err
}
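insertFrom and insertAll walk the same parent chain but apply different policies at the first missing ancestor: insertFrom can ask the validator it heard from (sendRequest), while insertAll, used for locally built blocks with no known source, abandons the branch so blocked jobs don't leak. A compact sketch of the two policies under that framing (illustrative only):

	package main

	// onMissing is what distinguishes the two traversals: request the block
	// from a peer (insertFrom) or abandon the branch outright (insertAll).
	type onMissing func(blkID string)

	// issueBranch walks parent IDs oldest-last, stopping at the first block
	// that isn't available locally and applying the chosen policy to it.
	func issueBranch(chain []string, have map[string]bool, missing onMissing) bool {
		for i := len(chain) - 1; i >= 0; i-- {
			if !have[chain[i]] {
				missing(chain[i])
				return false // branch not fully issued yet
			}
		}
		return true
	}

	func main() {
		have := map[string]bool{"genesis": true}
		// insertFrom-style: request the gap from the validator we heard from.
		issueBranch([]string{"blkB", "blkA", "genesis"}, have, func(id string) { /* sendRequest(vdr, id) */ })
		// insertAll-style: no known source, so abandon to avoid a memory leak.
		issueBranch([]string{"blkB", "blkA", "genesis"}, have, func(id string) { /* blocked.Abandon(id) */ })
	}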

// attempt to insert the block to consensus. If the block's parent hasn't been
// issued, the insertion will block until the parent's issuance is abandoned or
// fulfilled
func (t *Transitive) insert(blk snowman.Block) {
func (t *Transitive) insert(blk snowman.Block) error {
blkID := blk.ID()

// mark that the block has been fetched but is pending

@@ -442,7 +505,7 @@ func (t *Transitive) insert(blk snowman.Block) {
// block on the parent if needed
if parent := blk.Parent(); !t.Consensus.Issued(parent) {
parentID := parent.ID()
t.Config.Context.Log.Verbo("Block waiting for parent %s", parentID)
t.Config.Context.Log.Verbo("block %s waiting for parent %s", blkID, parentID)
i.deps.Add(parentID)
}

@@ -451,6 +514,7 @@ func (t *Transitive) insert(blk snowman.Block) {
// Tracks performance statistics
t.numBlkRequests.Set(float64(t.blkReqs.Len()))
t.numBlockedBlk.Set(float64(t.pending.Len()))
return t.errs.Err
}

func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {

@@ -459,10 +523,9 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
return
}

t.Config.Context.Log.Verbo("Sending Get message for %s", blkID)

t.RequestID++
t.blkReqs.Add(vdr, t.RequestID, blkID)
t.Config.Context.Log.Verbo("sending Get(%s, %d, %s)", vdr, t.RequestID, blkID)
t.Config.Sender.Get(vdr, t.RequestID, blkID)

// Tracks performance statistics

@@ -471,7 +534,7 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
// send a pull request for this block ID
func (t *Transitive) pullSample(blkID ids.ID) {
t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
t.Config.Context.Log.Verbo("about to sample from: %s", t.Config.Validators)
p := t.Consensus.Parameters()
vdrs := t.Config.Validators.Sample(p.K)
vdrSet := ids.ShortSet{}

@@ -480,13 +543,13 @@ func (t *Transitive) pullSample(blkID ids.ID) {
}

if numVdrs := len(vdrs); numVdrs != p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
return
}

t.RequestID++
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
return
}

@@ -495,7 +558,7 @@ func (t *Transitive) pullSample(blkID ids.ID) {
// send a push request for this block
func (t *Transitive) pushSample(blk snowman.Block) {
t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
t.Config.Context.Log.Verbo("about to sample from: %s", t.Config.Validators)
p := t.Consensus.Parameters()
vdrs := t.Config.Validators.Sample(p.K)
vdrSet := ids.ShortSet{}

@@ -505,13 +568,13 @@ func (t *Transitive) pushSample(blk snowman.Block) {

blkID := blk.ID()
if numVdrs := len(vdrs); numVdrs != p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
return
}

t.RequestID++
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
return
}

@@ -519,9 +582,9 @@ func (t *Transitive) pushSample(blk snowman.Block) {
return
}

func (t *Transitive) deliver(blk snowman.Block) {
func (t *Transitive) deliver(blk snowman.Block) error {
if t.Consensus.Issued(blk) {
return
return nil
}

// we are adding the block to consensus, so it is no longer pending

@@ -529,15 +592,15 @@ func (t *Transitive) deliver(blk snowman.Block) {
t.pending.Remove(blkID)

if err := blk.Verify(); err != nil {
t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
t.Config.Context.Log.Debug("block failed verification due to %s, dropping block", err)

// if verify fails, then all descendants are also invalid
t.blocked.Abandon(blkID)
t.numBlockedBlk.Set(float64(t.pending.Len())) // Tracks performance statistics
return
return t.errs.Err
}

t.Config.Context.Log.Verbo("Adding block to consensus: %s", blkID)
t.Config.Context.Log.Verbo("adding block to consensus: %s", blkID)
t.Consensus.Add(blk)

// Add all the oracle blocks if they exist. We call verify on all the blocks

@@ -549,7 +612,7 @@ func (t *Transitive) deliver(blk snowman.Block) {
case OracleBlock:
for _, blk := range blk.Options() {
if err := blk.Verify(); err != nil {
t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
t.Config.Context.Log.Debug("block failed verification due to %s, dropping block", err)
dropped = append(dropped, blk)
} else {
t.Consensus.Add(blk)

@@ -583,4 +646,5 @@ func (t *Transitive) deliver(blk snowman.Block) {
// Tracks performance statistics
t.numBlkRequests.Set(float64(t.blkReqs.Len()))
t.numBlockedBlk.Set(float64(t.pending.Len()))
return t.errs.Err
}

@@ -76,7 +76,7 @@ func setup(t *testing.T) (validators.Validator, validators.Set, *common.SenderTe
func TestEngineShutdown(t *testing.T) {
_, _, _, vm, transitive, _ := setup(t)
vmShutdownCalled := false
vm.ShutdownF = func() { vmShutdownCalled = true }
vm.ShutdownF = func() error { vmShutdownCalled = true; return nil }
vm.CantShutdown = false
transitive.Shutdown()
if !vmShutdownCalled {
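The test above adjusts a stubbed VM to the new error-returning Shutdown. The test doubles in this codebase follow a function-field style: each method is a swappable func plus a Cant flag that turns an unstubbed call into a test failure. A minimal sketch of the pattern (assumed shape, not the repo's full TestVM):

	package main

	import "testing"

	// TestVM is a hand-rolled test double: each method is a swappable func,
	// and CantShutdown makes an unstubbed call an immediate test failure.
	type TestVM struct {
		T            *testing.T
		CantShutdown bool
		ShutdownF    func() error
	}

	func (vm *TestVM) Shutdown() error {
		if vm.ShutdownF != nil {
			return vm.ShutdownF()
		}
		if vm.CantShutdown && vm.T != nil {
			vm.T.Fatal("unexpected call to Shutdown")
		}
		return nil
	}

	func TestShutdownPropagates(t *testing.T) {
		called := false
		vm := &TestVM{T: t, ShutdownF: func() error { called = true; return nil }}
		if err := vm.Shutdown(); err != nil || !called {
			t.Fatal("Shutdown stub not invoked")
		}
	}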
@@ -25,7 +25,7 @@ func (v *voter) Fulfill(id ids.ID) {
func (v *voter) Abandon(id ids.ID) { v.Fulfill(id) }

func (v *voter) Update() {
if v.deps.Len() != 0 {
if v.deps.Len() != 0 || v.t.errs.Errored() {
return
}

@@ -45,17 +45,20 @@ func (v *voter) Update() {
// must be bubbled to the nearest valid block
results = v.bubbleVotes(results)

v.t.Config.Context.Log.Verbo("Finishing poll [%d] with:\n%s", v.requestID, &results)
v.t.Consensus.RecordPoll(results)
v.t.Config.Context.Log.Debug("Finishing poll [%d] with:\n%s", v.requestID, &results)
if err := v.t.Consensus.RecordPoll(results); err != nil {
v.t.errs.Add(err)
return
}

v.t.Config.VM.SetPreference(v.t.Consensus.Preference())

if v.t.Consensus.Finalized() {
v.t.Config.Context.Log.Verbo("Snowman engine can quiesce")
v.t.Config.Context.Log.Debug("Snowman engine can quiesce")
return
}

v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce")
v.t.Config.Context.Log.Debug("Snowman engine can't quiesce")
v.t.repoll()
}
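The errs value threaded through voter.Update, and returned as t.errs.Err from the engine methods earlier in this diff, is an error accumulator: a deeply nested callback records a failure once, later work short-circuits on Errored(), and the outermost engine call reports it. A simplified version keeping only first-error semantics (the repo's version lives in utils/wrappers):

	package main

	import "errors"

	// Errs keeps the first error recorded and ignores the rest, so callback
	// chains can fail once and have every caller observe the same error.
	type Errs struct{ Err error }

	func (e *Errs) Errored() bool { return e.Err != nil }

	func (e *Errs) Add(errs ...error) {
		if e.Err == nil {
			for _, err := range errs {
				if err != nil {
					e.Err = err
					break
				}
			}
		}
	}

	func main() {
		var errs Errs
		errs.Add(nil, errors.New("RecordPoll failed"))
		if errs.Errored() {
			// the engine entry point would now return errs.Err
			_ = errs.Err
		}
	}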

@@ -1,248 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package handler

import (
"sync"

"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/engine/common"
)

// Handler passes incoming messages from the network to the consensus engine
// (Actually, it receives the incoming messages from a ChainRouter, but same difference)
type Handler struct {
msgs chan message
wg sync.WaitGroup
engine common.Engine
msgChan <-chan common.Message
}

// Initialize this consensus handler
func (h *Handler) Initialize(engine common.Engine, msgChan <-chan common.Message, bufferSize int) {
h.msgs = make(chan message, bufferSize)
h.engine = engine
h.msgChan = msgChan

h.wg.Add(1)
}

// Context of this Handler
func (h *Handler) Context() *snow.Context { return h.engine.Context() }

// Dispatch waits for incoming messages from the network
// and, when they arrive, sends them to the consensus engine
func (h *Handler) Dispatch() {
defer h.wg.Done()

for {
select {
case msg := <-h.msgs:
if !h.dispatchMsg(msg) {
return
}
case msg := <-h.msgChan:
if !h.dispatchMsg(message{messageType: notifyMsg, notification: msg}) {
return
}
}
}
}

// Dispatch a message to the consensus engine.
// Returns false iff this consensus handler (and its associated engine) should shutdown
// (due to receipt of a shutdown message)
func (h *Handler) dispatchMsg(msg message) bool {
ctx := h.engine.Context()

ctx.Lock.Lock()
defer ctx.Lock.Unlock()

ctx.Log.Verbo("Forwarding message to consensus: %s", msg)

switch msg.messageType {
case getAcceptedFrontierMsg:
h.engine.GetAcceptedFrontier(msg.validatorID, msg.requestID)
case acceptedFrontierMsg:
h.engine.AcceptedFrontier(msg.validatorID, msg.requestID, msg.containerIDs)
case getAcceptedFrontierFailedMsg:
h.engine.GetAcceptedFrontierFailed(msg.validatorID, msg.requestID)
case getAcceptedMsg:
h.engine.GetAccepted(msg.validatorID, msg.requestID, msg.containerIDs)
case acceptedMsg:
h.engine.Accepted(msg.validatorID, msg.requestID, msg.containerIDs)
case getAcceptedFailedMsg:
h.engine.GetAcceptedFailed(msg.validatorID, msg.requestID)
case getMsg:
h.engine.Get(msg.validatorID, msg.requestID, msg.containerID)
case getFailedMsg:
h.engine.GetFailed(msg.validatorID, msg.requestID)
case putMsg:
h.engine.Put(msg.validatorID, msg.requestID, msg.containerID, msg.container)
case pushQueryMsg:
h.engine.PushQuery(msg.validatorID, msg.requestID, msg.containerID, msg.container)
case pullQueryMsg:
h.engine.PullQuery(msg.validatorID, msg.requestID, msg.containerID)
case queryFailedMsg:
h.engine.QueryFailed(msg.validatorID, msg.requestID)
case chitsMsg:
h.engine.Chits(msg.validatorID, msg.requestID, msg.containerIDs)
case notifyMsg:
h.engine.Notify(msg.notification)
case gossipMsg:
h.engine.Gossip()
case shutdownMsg:
h.engine.Shutdown()
return false
}
return true
}

// GetAcceptedFrontier passes a GetAcceptedFrontier message received from the
// network to the consensus engine.
func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) {
h.msgs <- message{
messageType: getAcceptedFrontierMsg,
validatorID: validatorID,
requestID: requestID,
}
}

// AcceptedFrontier passes an AcceptedFrontier message received from the network
// to the consensus engine.
func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
h.msgs <- message{
messageType: acceptedFrontierMsg,
validatorID: validatorID,
requestID: requestID,
containerIDs: containerIDs,
}
}

// GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received
// from the network to the consensus engine.
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) {
h.msgs <- message{
messageType: getAcceptedFrontierFailedMsg,
validatorID: validatorID,
requestID: requestID,
}
}

// GetAccepted passes a GetAccepted message received from the
// network to the consensus engine.
func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
h.msgs <- message{
messageType: getAcceptedMsg,
validatorID: validatorID,
requestID: requestID,
containerIDs: containerIDs,
}
}

// Accepted passes an Accepted message received from the network to the consensus
// engine.
func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
h.msgs <- message{
messageType: acceptedMsg,
validatorID: validatorID,
requestID: requestID,
containerIDs: containerIDs,
}
}

// GetAcceptedFailed passes a GetAcceptedFailed message received from the
// network to the consensus engine.
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
h.msgs <- message{
messageType: getAcceptedFailedMsg,
validatorID: validatorID,
requestID: requestID,
}
}

// Get passes a Get message received from the network to the consensus engine.
func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
h.msgs <- message{
messageType: getMsg,
validatorID: validatorID,
requestID: requestID,
containerID: containerID,
}
}

// Put passes a Put message received from the network to the consensus engine.
func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) {
h.msgs <- message{
messageType: putMsg,
validatorID: validatorID,
requestID: requestID,
containerID: containerID,
container: container,
}
}

// GetFailed passes a GetFailed message to the consensus engine.
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) {
h.msgs <- message{
messageType: getFailedMsg,
validatorID: validatorID,
requestID: requestID,
}
}

// PushQuery passes a PushQuery message received from the network to the consensus engine.
func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) {
h.msgs <- message{
messageType: pushQueryMsg,
validatorID: validatorID,
requestID: requestID,
containerID: blockID,
container: block,
}
}

// PullQuery passes a PullQuery message received from the network to the consensus engine.
func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID) {
h.msgs <- message{
messageType: pullQueryMsg,
validatorID: validatorID,
requestID: requestID,
containerID: blockID,
}
}

// Chits passes a Chits message received from the network to the consensus engine.
func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) {
h.msgs <- message{
messageType: chitsMsg,
validatorID: validatorID,
requestID: requestID,
containerIDs: votes,
}
}

// QueryFailed passes a QueryFailed message received from the network to the consensus engine.
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) {
h.msgs <- message{
messageType: queryFailedMsg,
validatorID: validatorID,
requestID: requestID,
}
}

// Gossip passes a gossip request to the consensus engine
func (h *Handler) Gossip() { h.msgs <- message{messageType: gossipMsg} }

// Shutdown shuts down the dispatcher
func (h *Handler) Shutdown() { h.msgs <- message{messageType: shutdownMsg}; h.wg.Wait() }

// Notify ...
func (h *Handler) Notify(msg common.Message) {
h.msgs <- message{
messageType: notifyMsg,
notification: msg,
}
}
@@ -7,8 +7,9 @@ import (
"sync"
"time"

"github.com/ava-labs/gecko/utils/formatting"

"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking/handler"
"github.com/ava-labs/gecko/snow/networking/timeout"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/timer"

@@ -19,11 +20,12 @@ import (
// Note that consensus engines are uniquely identified by the ID of the chain
// that they are working on.
type ChainRouter struct {
log logging.Logger
lock sync.RWMutex
chains map[[32]byte]*handler.Handler
timeouts *timeout.Manager
gossiper *timer.Repeater
log logging.Logger
lock sync.RWMutex
chains map[[32]byte]*Handler
timeouts *timeout.Manager
gossiper *timer.Repeater
closeTimeout time.Duration
}

// Initialize the router.

@@ -34,38 +36,55 @@ type ChainRouter struct {
//
// This router also fires a gossip event every [gossipFrequency] to the engine,
// notifying the engine it should gossip its accepted set.
func (sr *ChainRouter) Initialize(log logging.Logger, timeouts *timeout.Manager, gossipFrequency time.Duration) {
func (sr *ChainRouter) Initialize(
log logging.Logger,
timeouts *timeout.Manager,
gossipFrequency time.Duration,
closeTimeout time.Duration,
) {
sr.log = log
sr.chains = make(map[[32]byte]*handler.Handler)
sr.chains = make(map[[32]byte]*Handler)
sr.timeouts = timeouts
sr.gossiper = timer.NewRepeater(sr.Gossip, gossipFrequency)
sr.closeTimeout = closeTimeout

go log.RecoverAndPanic(sr.gossiper.Dispatch)
}

// AddChain registers the specified chain so that incoming
// messages can be routed to it
func (sr *ChainRouter) AddChain(chain *handler.Handler) {
func (sr *ChainRouter) AddChain(chain *Handler) {
sr.lock.Lock()
defer sr.lock.Unlock()

chainID := chain.Context().ChainID
sr.log.Debug("registering chain %s with chain router", chainID)
chain.toClose = func() { sr.RemoveChain(chainID) }
sr.chains[chainID.Key()] = chain
}

// RemoveChain removes the specified chain so that incoming
// messages can't be routed to it
func (sr *ChainRouter) RemoveChain(chainID ids.ID) {
sr.lock.Lock()
defer sr.lock.Unlock()

if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Shutdown()
delete(sr.chains, chainID.Key())
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.lock.RLock()
chain, exists := sr.chains[chainID.Key()]
if !exists {
sr.log.Debug("can't remove unknown chain %s", chainID)
sr.lock.RUnlock()
return
}
chain.Shutdown()
close(chain.msgs)
delete(sr.chains, chainID.Key())
sr.lock.RUnlock()

ticker := time.NewTicker(sr.closeTimeout)
select {
case _, _ = <-chain.closed:
case <-ticker.C:
chain.Context().Log.Warn("timed out while shutting down")
}
ticker.Stop()
}
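RemoveChain now waits, bounded by closeTimeout, for the handler's dispatch goroutine to drain and close its closed channel. The wait-or-give-up shape is a select between the close notification and a timer; a generic sketch:

	package main

	import (
		"fmt"
		"time"
	)

	// waitClosed blocks until closed is closed or the timeout elapses.
	// It reports whether the shutdown completed in time.
	func waitClosed(closed <-chan struct{}, timeout time.Duration) bool {
		timer := time.NewTimer(timeout)
		defer timer.Stop()
		select {
		case <-closed:
			return true
		case <-timer.C:
			return false
		}
	}

	func main() {
		done := make(chan struct{})
		go func() { close(done) }()
		if !waitClosed(done, time.Second) {
			fmt.Println("timed out while shutting down")
		}
	}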

// GetAcceptedFrontier routes an incoming GetAcceptedFrontier request from the

@@ -78,7 +97,7 @@ func (sr *ChainRouter) GetAcceptedFrontier(validatorID ids.ShortID, chainID ids.
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFrontier(validatorID, requestID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("GetAcceptedFrontier(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
}

@@ -89,11 +108,12 @@ func (sr *ChainRouter) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID,
sr.lock.RLock()
defer sr.lock.RUnlock()

sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.AcceptedFrontier(validatorID, requestID, containerIDs)
if chain.AcceptedFrontier(validatorID, requestID, containerIDs) {
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("AcceptedFrontier(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
}
}

@@ -104,12 +124,19 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI
sr.lock.RLock()
defer sr.lock.RUnlock()

sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFrontierFailed(validatorID, requestID)
if !chain.GetAcceptedFrontierFailed(validatorID, requestID) {
sr.log.Debug("deferring GetAcceptedFrontier timeout due to a full queue on %s", chainID)
// Defer this call to later
sr.timeouts.Register(validatorID, chainID, requestID, func() {
sr.GetAcceptedFrontierFailed(validatorID, chainID, requestID)
})
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetAcceptedFrontierFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
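The deferral above matters because a failure notification can't simply be dropped when the handler's queue is full: the engine must eventually observe it or the request leaks. Re-registering the same call as the timeout action makes the timeout manager retry it later. The shape in isolation, with a hypothetical register callback standing in for timeouts.Register:

	package main

	import "fmt"

	// deliverOrDefer tries to hand a failure notice to the chain's handler.
	// If the handler's queue is full (deliver returns false), the same
	// attempt is re-registered to run again when the timeout next fires.
	func deliverOrDefer(deliver func() bool, register func(retry func())) {
		if !deliver() {
			register(func() { deliverOrDefer(deliver, register) })
		}
	}

	func main() {
		attempts := 0
		deliver := func() bool { attempts++; return attempts >= 3 } // full twice, then accepted
		var pending []func()
		register := func(retry func()) { pending = append(pending, retry) }

		deliverOrDefer(deliver, register)
		for len(pending) > 0 { // stand-in for the timeout manager firing
			retry := pending[0]
			pending = pending[1:]
			retry()
		}
		fmt.Println("delivered after", attempts, "attempts")
	}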

// GetAccepted routes an incoming GetAccepted request from the

@@ -122,7 +149,7 @@ func (sr *ChainRouter) GetAccepted(validatorID ids.ShortID, chainID ids.ID, requ
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAccepted(validatorID, requestID, containerIDs)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("GetAccepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
}
}

@@ -133,11 +160,12 @@ func (sr *ChainRouter) Accepted(validatorID ids.ShortID, chainID ids.ID, request
sr.lock.RLock()
defer sr.lock.RUnlock()

sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Accepted(validatorID, requestID, containerIDs)
if chain.Accepted(validatorID, requestID, containerIDs) {
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Accepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
}
}

@@ -148,12 +176,69 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID
sr.lock.RLock()
defer sr.lock.RUnlock()

sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFailed(validatorID, requestID)
if !chain.GetAcceptedFailed(validatorID, requestID) {
sr.timeouts.Register(validatorID, chainID, requestID, func() {
sr.log.Debug("deferring GetAccepted timeout due to a full queue on %s", chainID)
sr.GetAcceptedFailed(validatorID, chainID, requestID)
})
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// GetAncestors routes an incoming GetAncestors message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
// The maximum number of ancestors to respond with is defined in snow/engine/common/bootstrapper.go
func (sr *ChainRouter) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
sr.lock.RLock()
defer sr.lock.RUnlock()

if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAncestors(validatorID, requestID, containerID)
} else {
sr.log.Debug("GetAncestors(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
}

// MultiPut routes an incoming MultiPut message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
sr.lock.RLock()
defer sr.lock.RUnlock()

// This message came in response to a GetAncestors message from this node, and when we sent that
// message we set a timeout. Since we got a response, cancel the timeout.
if chain, exists := sr.chains[chainID.Key()]; exists {
if chain.MultiPut(validatorID, requestID, containers) {
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("MultiPut(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID, len(containers))
}
}

// GetAncestorsFailed routes an incoming GetAncestorsFailed message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) {
sr.lock.RLock()
defer sr.lock.RUnlock()

if chain, exists := sr.chains[chainID.Key()]; exists {
if !chain.GetAncestorsFailed(validatorID, requestID) {
sr.timeouts.Register(validatorID, chainID, requestID, func() {
sr.log.Debug("deferring GetAncestors timeout due to a full queue on %s", chainID)
sr.GetAncestorsFailed(validatorID, chainID, requestID)
})
return
}
} else {
sr.log.Error("GetAncestorsFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// Get routes an incoming Get request from the validator with ID [validatorID]

@@ -165,7 +250,7 @@ func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID ui
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Get(validatorID, requestID, containerID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Get(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
}
}

@@ -177,11 +262,13 @@ func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID ui

// This message came in response to a Get message from this node, and when we sent that Get
// message we set a timeout. Since we got a response, cancel the timeout.
sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Put(validatorID, requestID, containerID, container)
if chain.Put(validatorID, requestID, containerID, container) {
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Put(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}

@@ -191,12 +278,18 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques
sr.lock.RLock()
defer sr.lock.RUnlock()

sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetFailed(validatorID, requestID)
if !chain.GetFailed(validatorID, requestID) {
sr.timeouts.Register(validatorID, chainID, requestID, func() {
sr.log.Debug("deferring Get timeout due to a full queue on %s", chainID)
sr.GetFailed(validatorID, chainID, requestID)
})
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// PushQuery routes an incoming PushQuery request from the validator with ID [validatorID]

@@ -208,7 +301,8 @@ func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, reques
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.PushQuery(validatorID, requestID, containerID, container)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("PushQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}

@@ -221,7 +315,7 @@ func (sr *ChainRouter) PullQuery(validatorID ids.ShortID, chainID ids.ID, reques
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.PullQuery(validatorID, requestID, containerID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("PullQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
}
}

@@ -232,11 +326,12 @@ func (sr *ChainRouter) Chits(validatorID ids.ShortID, chainID ids.ID, requestID
defer sr.lock.RUnlock()

// Cancel the timeout we set when we sent the message asking for these Chits
sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Chits(validatorID, requestID, votes)
if chain.Chits(validatorID, requestID, votes) {
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Chits(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, votes)
}
}

@@ -246,38 +341,53 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ
sr.lock.RLock()
defer sr.lock.RUnlock()

sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.QueryFailed(validatorID, requestID)
if !chain.QueryFailed(validatorID, requestID) {
sr.timeouts.Register(validatorID, chainID, requestID, func() {
sr.log.Debug("deferring Query timeout due to a full queue on %s", chainID)
sr.QueryFailed(validatorID, chainID, requestID)
})
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("QueryFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}

// Shutdown shuts down this router
func (sr *ChainRouter) Shutdown() {
sr.lock.RLock()
defer sr.lock.RUnlock()
sr.lock.Lock()
prevChains := sr.chains
sr.chains = map[[32]byte]*Handler{}
sr.lock.Unlock()

sr.shutdown()
}

func (sr *ChainRouter) shutdown() {
for _, chain := range sr.chains {
for _, chain := range prevChains {
chain.Shutdown()
close(chain.msgs)
}

ticker := time.NewTicker(sr.closeTimeout)
timedout := false
for _, chain := range prevChains {
select {
case _, _ = <-chain.closed:
case <-ticker.C:
timedout = true
}
}
if timedout {
sr.log.Warn("timed out while shutting down the chains")
}
ticker.Stop()
sr.gossiper.Stop()
}

// Gossip accepted containers
func (sr *ChainRouter) Gossip() {
sr.lock.RLock()
defer sr.lock.RUnlock()
sr.lock.Lock()
defer sr.lock.Unlock()

sr.gossip()
}

func (sr *ChainRouter) gossip() {
for _, chain := range sr.chains {
chain.Gossip()
}
@@ -0,0 +1,357 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package router

import (
"time"

"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/prometheus/client_golang/prometheus"
)

// Handler passes incoming messages from the network to the consensus engine
// (Actually, it receives the incoming messages from a ChainRouter, but same difference)
type Handler struct {
metrics

msgs chan message
closed chan struct{}
engine common.Engine
msgChan <-chan common.Message

toClose func()
}

// Initialize this consensus handler
func (h *Handler) Initialize(
engine common.Engine,
msgChan <-chan common.Message,
bufferSize int,
namespace string,
metrics prometheus.Registerer,
) {
h.metrics.Initialize(namespace, metrics)
h.msgs = make(chan message, bufferSize)
h.closed = make(chan struct{})
h.engine = engine
h.msgChan = msgChan
}

// Context of this Handler
func (h *Handler) Context() *snow.Context { return h.engine.Context() }

// Dispatch waits for incoming messages from the network
// and, when they arrive, sends them to the consensus engine
func (h *Handler) Dispatch() {
log := h.Context().Log
defer func() {
log.Info("finished shutting down chain")
close(h.closed)
}()

closing := false
for {
select {
case msg, ok := <-h.msgs:
if !ok {
return
}
h.metrics.pending.Dec()
if closing {
log.Debug("dropping message due to closing:\n%s", msg)
continue
}
if h.dispatchMsg(msg) {
closing = true
}
case msg := <-h.msgChan:
if closing {
log.Debug("dropping internal message due to closing:\n%s", msg)
continue
}
if h.dispatchMsg(message{messageType: notifyMsg, notification: msg}) {
closing = true
}
}
if closing && h.toClose != nil {
go h.toClose()
}
}
}
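Dispatch is the chain's single consumer goroutine. Once a shutdown message, or a fatal engine error, flips closing, later messages are still drained and counted but dropped, and toClose runs on its own goroutine so the router's RemoveChain can proceed without deadlocking against the handler. A reduced model of the loop (illustrative; it collapses the two input channels into one):

	package main

	import "fmt"

	func dispatch(msgs <-chan string, onClose func()) {
		closing := false
		for msg := range msgs {
			if closing {
				fmt.Println("dropping message due to closing:", msg)
				continue
			}
			if msg == "shutdown" { // dispatchMsg returning true
				closing = true
				go onClose() // off-thread, like go h.toClose()
			}
		}
	}

	func main() {
		msgs := make(chan string, 4)
		msgs <- "put"
		msgs <- "shutdown"
		msgs <- "chits" // arrives late; will be dropped
		close(msgs)
		dispatch(msgs, func() {})
	}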

// Dispatch a message to the consensus engine.
// Returns true iff this consensus handler (and its associated engine) should shutdown
// (due to receipt of a shutdown message)
func (h *Handler) dispatchMsg(msg message) bool {
startTime := time.Now()
ctx := h.engine.Context()

ctx.Lock.Lock()
defer ctx.Lock.Unlock()

ctx.Log.Verbo("Forwarding message to consensus: %s", msg)
var (
err error
done bool
)
switch msg.messageType {
case getAcceptedFrontierMsg:
err = h.engine.GetAcceptedFrontier(msg.validatorID, msg.requestID)
h.getAcceptedFrontier.Observe(float64(time.Now().Sub(startTime)))
case acceptedFrontierMsg:
err = h.engine.AcceptedFrontier(msg.validatorID, msg.requestID, msg.containerIDs)
h.acceptedFrontier.Observe(float64(time.Now().Sub(startTime)))
case getAcceptedFrontierFailedMsg:
err = h.engine.GetAcceptedFrontierFailed(msg.validatorID, msg.requestID)
h.getAcceptedFrontierFailed.Observe(float64(time.Now().Sub(startTime)))
case getAcceptedMsg:
err = h.engine.GetAccepted(msg.validatorID, msg.requestID, msg.containerIDs)
h.getAccepted.Observe(float64(time.Now().Sub(startTime)))
case acceptedMsg:
err = h.engine.Accepted(msg.validatorID, msg.requestID, msg.containerIDs)
h.accepted.Observe(float64(time.Now().Sub(startTime)))
case getAcceptedFailedMsg:
err = h.engine.GetAcceptedFailed(msg.validatorID, msg.requestID)
h.getAcceptedFailed.Observe(float64(time.Now().Sub(startTime)))
case getAncestorsMsg:
err = h.engine.GetAncestors(msg.validatorID, msg.requestID, msg.containerID)
h.getAncestors.Observe(float64(time.Now().Sub(startTime)))
case getAncestorsFailedMsg:
err = h.engine.GetAncestorsFailed(msg.validatorID, msg.requestID)
h.getAncestorsFailed.Observe(float64(time.Now().Sub(startTime)))
case multiPutMsg:
err = h.engine.MultiPut(msg.validatorID, msg.requestID, msg.containers)
h.multiPut.Observe(float64(time.Now().Sub(startTime)))
case getMsg:
err = h.engine.Get(msg.validatorID, msg.requestID, msg.containerID)
h.get.Observe(float64(time.Now().Sub(startTime)))
case getFailedMsg:
err = h.engine.GetFailed(msg.validatorID, msg.requestID)
h.getFailed.Observe(float64(time.Now().Sub(startTime)))
case putMsg:
err = h.engine.Put(msg.validatorID, msg.requestID, msg.containerID, msg.container)
h.put.Observe(float64(time.Now().Sub(startTime)))
case pushQueryMsg:
err = h.engine.PushQuery(msg.validatorID, msg.requestID, msg.containerID, msg.container)
h.pushQuery.Observe(float64(time.Now().Sub(startTime)))
case pullQueryMsg:
err = h.engine.PullQuery(msg.validatorID, msg.requestID, msg.containerID)
h.pullQuery.Observe(float64(time.Now().Sub(startTime)))
case queryFailedMsg:
err = h.engine.QueryFailed(msg.validatorID, msg.requestID)
h.queryFailed.Observe(float64(time.Now().Sub(startTime)))
case chitsMsg:
err = h.engine.Chits(msg.validatorID, msg.requestID, msg.containerIDs)
h.chits.Observe(float64(time.Now().Sub(startTime)))
case notifyMsg:
err = h.engine.Notify(msg.notification)
h.notify.Observe(float64(time.Now().Sub(startTime)))
case gossipMsg:
err = h.engine.Gossip()
h.gossip.Observe(float64(time.Now().Sub(startTime)))
case shutdownMsg:
err = h.engine.Shutdown()
h.shutdown.Observe(float64(time.Now().Sub(startTime)))
done = true
}

if err != nil {
ctx.Log.Fatal("forcing chain to shutdown due to %s", err)
}
return done || err != nil
}
|
||||
|
||||
// GetAcceptedFrontier passes a GetAcceptedFrontier message received from the
|
||||
// network to the consensus engine.
|
||||
func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAcceptedFrontierMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// AcceptedFrontier passes a AcceptedFrontier message received from the network
|
||||
// to the consensus engine.
|
||||
func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: acceptedFrontierMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerIDs: containerIDs,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received
|
||||
// from the network to the consensus engine.
|
||||
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAcceptedFrontierFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAccepted passes a GetAccepted message received from the
|
||||
// network to the consensus engine.
|
||||
func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAcceptedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerIDs: containerIDs,
|
||||
})
|
||||
}
|
||||
|
||||
// Accepted passes a Accepted message received from the network to the consensus
|
||||
// engine.
|
||||
func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: acceptedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerIDs: containerIDs,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAcceptedFailed passes a GetAcceptedFailed message received from the
|
||||
// network to the consensus engine.
|
||||
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAcceptedFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// Get passes a Get message received from the network to the consensus engine.
|
||||
func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: containerID,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAncestors passes a GetAncestors message received from the network to the consensus engine.
|
||||
func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAncestorsMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: containerID,
|
||||
})
|
||||
}
|
||||
|
||||
// Put passes a Put message received from the network to the consensus engine.
|
||||
func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: putMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: containerID,
|
||||
container: container,
|
||||
})
|
||||
}
|
||||
|
||||
// MultiPut passes a MultiPut message received from the network to the consensus engine.
|
||||
func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: multiPutMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containers: containers,
|
||||
})
|
||||
}
|
||||
|
||||
// GetFailed passes a GetFailed message to the consensus engine.
|
||||
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine.
|
||||
func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAncestorsFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// PushQuery passes a PushQuery message received from the network to the consensus engine.
|
||||
func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: pushQueryMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: blockID,
|
||||
container: block,
|
||||
})
|
||||
}
|
||||
|
||||
// PullQuery passes a PullQuery message received from the network to the consensus engine.
|
||||
func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: pullQueryMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: blockID,
|
||||
})
|
||||
}
|
||||
|
||||
// Chits passes a Chits message received from the network to the consensus engine.
|
||||
func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: chitsMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerIDs: votes,
|
||||
})
|
||||
}
|
||||
|
||||
// QueryFailed passes a QueryFailed message received from the network to the consensus engine.
|
||||
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: queryFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// Gossip passes a gossip request to the consensus engine
|
||||
func (h *Handler) Gossip() bool {
|
||||
return h.sendMsg(message{messageType: gossipMsg})
|
||||
}
|
||||
|
||||
// Notify ...
|
||||
func (h *Handler) Notify(msg common.Message) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: notifyMsg,
|
||||
notification: msg,
|
||||
})
|
||||
}
|
||||
|
||||
// Shutdown shuts down the dispatcher
|
||||
func (h *Handler) Shutdown() {
|
||||
h.metrics.pending.Inc()
|
||||
h.msgs <- message{messageType: shutdownMsg}
|
||||
}
|
||||
|
||||
func (h *Handler) sendMsg(msg message) bool {
|
||||
select {
|
||||
case h.msgs <- msg:
|
||||
h.metrics.pending.Inc()
|
||||
return true
|
||||
default:
|
||||
h.metrics.dropped.Inc()
|
||||
return false
|
||||
}
|
||||
}
|
|
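An editor's note on the pattern above: sendMsg uses a select with a default branch, which is Go's idiom for a non-blocking channel send. When the handler's buffered queue is full, the message is dropped and counted rather than stalling the shared network thread, and the pending gauge is only bumped when the enqueue actually succeeds. A minimal standalone sketch of the same idiom (the names here are illustrative, not part of the diff):

package main

import "fmt"

// trySend attempts a non-blocking send and reports whether the message
// was enqueued or dropped because the buffer was full.
func trySend(queue chan string, msg string) bool {
	select {
	case queue <- msg:
		return true
	default: // queue is full; drop rather than block the caller
		return false
	}
}

func main() {
	queue := make(chan string, 1)
	fmt.Println(trySend(queue, "a")) // true: buffer had room
	fmt.Println(trySend(queue, "b")) // false: buffer full, message dropped
}

Dropping under backpressure keeps one slow chain from blocking every peer connection; the dropped counter registered in metrics.go below is what makes those drops observable.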
@@ -1,7 +1,7 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package handler
package router

import (
	"fmt"

@@ -31,6 +31,9 @@ const (
	notifyMsg
	gossipMsg
	shutdownMsg
	getAncestorsMsg
	multiPutMsg
	getAncestorsFailedMsg
)

type message struct {

@@ -39,6 +42,7 @@ type message struct {
	requestID    uint32
	containerID  ids.ID
	container    []byte
	containers   [][]byte
	containerIDs ids.Set
	notification common.Message
}

@@ -74,8 +78,12 @@ func (t msgType) String() string {
		return "Get Accepted Failed Message"
	case getMsg:
		return "Get Message"
	case getAncestorsMsg:
		return "Get Ancestors Message"
	case putMsg:
		return "Put Message"
	case multiPutMsg:
		return "MultiPut Message"
	case getFailedMsg:
		return "Get Failed Message"
	case pushQueryMsg:
@@ -0,0 +1,90 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package router

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"

	"github.com/ava-labs/gecko/utils/timer"
	"github.com/ava-labs/gecko/utils/wrappers"
)

func initHistogram(namespace, name string, registerer prometheus.Registerer, errs *wrappers.Errs) prometheus.Histogram {
	histogram := prometheus.NewHistogram(
		prometheus.HistogramOpts{
			Namespace: namespace,
			Name:      name,
			Help:      "Time spent processing this request in nanoseconds",
			Buckets:   timer.NanosecondsBuckets,
		})

	if err := registerer.Register(histogram); err != nil {
		errs.Add(fmt.Errorf("failed to register %s statistics due to %s", name, err))
	}
	return histogram
}

type metrics struct {
	pending prometheus.Gauge
	dropped prometheus.Counter
	getAcceptedFrontier, acceptedFrontier, getAcceptedFrontierFailed,
	getAccepted, accepted, getAcceptedFailed,
	getAncestors, multiPut, getAncestorsFailed,
	get, put, getFailed,
	pushQuery, pullQuery, chits, queryFailed,
	notify,
	gossip,
	shutdown prometheus.Histogram
}

// Initialize implements the Engine interface
func (m *metrics) Initialize(namespace string, registerer prometheus.Registerer) error {
	errs := wrappers.Errs{}

	m.pending = prometheus.NewGauge(
		prometheus.GaugeOpts{
			Namespace: namespace,
			Name:      "pending",
			Help:      "Number of pending events",
		})

	if err := registerer.Register(m.pending); err != nil {
		errs.Add(fmt.Errorf("failed to register pending statistics due to %s", err))
	}

	m.dropped = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: namespace,
			Name:      "dropped",
			Help:      "Number of dropped events",
		})

	if err := registerer.Register(m.dropped); err != nil {
		errs.Add(fmt.Errorf("failed to register dropped statistics due to %s", err))
	}

	m.getAcceptedFrontier = initHistogram(namespace, "get_accepted_frontier", registerer, &errs)
	m.acceptedFrontier = initHistogram(namespace, "accepted_frontier", registerer, &errs)
	m.getAcceptedFrontierFailed = initHistogram(namespace, "get_accepted_frontier_failed", registerer, &errs)
	m.getAccepted = initHistogram(namespace, "get_accepted", registerer, &errs)
	m.accepted = initHistogram(namespace, "accepted", registerer, &errs)
	m.getAcceptedFailed = initHistogram(namespace, "get_accepted_failed", registerer, &errs)
	m.getAncestors = initHistogram(namespace, "get_ancestors", registerer, &errs)
	m.multiPut = initHistogram(namespace, "multi_put", registerer, &errs)
	m.getAncestorsFailed = initHistogram(namespace, "get_ancestors_failed", registerer, &errs)
	m.get = initHistogram(namespace, "get", registerer, &errs)
	m.put = initHistogram(namespace, "put", registerer, &errs)
	m.getFailed = initHistogram(namespace, "get_failed", registerer, &errs)
	m.pushQuery = initHistogram(namespace, "push_query", registerer, &errs)
	m.pullQuery = initHistogram(namespace, "pull_query", registerer, &errs)
	m.chits = initHistogram(namespace, "chits", registerer, &errs)
	m.queryFailed = initHistogram(namespace, "query_failed", registerer, &errs)
	m.notify = initHistogram(namespace, "notify", registerer, &errs)
	m.gossip = initHistogram(namespace, "gossip", registerer, &errs)
	m.shutdown = initHistogram(namespace, "shutdown", registerer, &errs)

	return errs.Err
}
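For readers who haven't used client_golang, here is a rough sketch of how one of these histograms is created, registered, and observed. The namespace and bucket bounds below are stand-ins; the real code above passes timer.NanosecondsBuckets and the per-chain registerer that Handler.Initialize receives:

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	// A registry scoped to one chain, mirroring how Initialize takes a
	// prometheus.Registerer instead of using the global default registry.
	registry := prometheus.NewRegistry()

	put := prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "gecko", // illustrative namespace
		Name:      "put",
		Help:      "Time spent processing this request in nanoseconds",
		// Stand-in bounds (100ns up to 1s); the diff uses timer.NanosecondsBuckets.
		Buckets: prometheus.ExponentialBuckets(100, 10, 8),
	})
	if err := registry.Register(put); err != nil {
		panic(err)
	}

	// Observe a duration the way dispatchMsg does: elapsed time as
	// float64 nanoseconds.
	start := time.Now()
	time.Sleep(2 * time.Millisecond)
	put.Observe(float64(time.Since(start)))
}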
@@ -7,7 +7,6 @@ import (
	"time"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/networking/handler"
	"github.com/ava-labs/gecko/snow/networking/timeout"
	"github.com/ava-labs/gecko/utils/logging"
)

@@ -18,10 +17,15 @@ type Router interface {
	ExternalRouter
	InternalRouter

	AddChain(chain *handler.Handler)
	AddChain(chain *Handler)
	RemoveChain(chainID ids.ID)
	Shutdown()
	Initialize(log logging.Logger, timeouts *timeout.Manager, gossipFrequency time.Duration)
	Initialize(
		log logging.Logger,
		timeouts *timeout.Manager,
		gossipFrequency,
		shutdownTimeout time.Duration,
	)
}

// ExternalRouter routes messages from the network to the

@@ -32,7 +36,9 @@ type ExternalRouter interface {
	GetAccepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
	PushQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	PullQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)

@@ -43,5 +49,6 @@ type InternalRouter interface {
	GetAcceptedFrontierFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
	GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
	GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
	GetAncestorsFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
	QueryFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
}
@@ -15,7 +15,10 @@ type ExternalSender interface {
	Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)

	Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)

	Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)

	PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)

@@ -93,6 +93,20 @@ func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.
	s.sender.Get(validatorID, s.ctx.ChainID, requestID, containerID)
}

// GetAncestors sends a GetAncestors message
func (s *Sender) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
	s.ctx.Log.Verbo("Sending GetAncestors to validator %s. RequestID: %d. ContainerID: %s", validatorID, requestID, containerID)
	// Sending a GetAncestors to myself will always fail
	if validatorID.Equals(s.ctx.NodeID) {
		go s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
		return
	}
	s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
		s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
	})
	s.sender.GetAncestors(validatorID, s.ctx.ChainID, requestID, containerID)
}

// Put sends a Put message to the consensus engine running on the specified chain
// on the specified validator.
// The Put message signifies that this consensus engine is giving to the recipient

@@ -102,6 +116,14 @@ func (s *Sender) Put(validatorID ids.ShortID, requestID uint32, containerID ids.
	s.sender.Put(validatorID, s.ctx.ChainID, requestID, containerID, container)
}

// MultiPut sends a MultiPut message to the consensus engine running on the specified chain
// on the specified validator.
// The MultiPut message gives the recipient the contents of several containers.
func (s *Sender) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) {
	s.ctx.Log.Verbo("Sending MultiPut to validator %s. RequestID: %d. NumContainers: %d", validatorID, requestID, len(containers))
	s.sender.MultiPut(validatorID, s.ctx.ChainID, requestID, containers)
}

// PushQuery sends a PushQuery message to the consensus engines running on the specified chains
// on the specified validators.
// The PushQuery message signifies that this consensus engine would like each validator to send
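The ordering inside GetAncestors above is the load-bearing detail: requests to the local node are failed immediately, and for everyone else the timeout is registered before the message is handed to the network, so the engine is guaranteed to eventually see either a MultiPut or a GetAncestorsFailed. A single-goroutine toy of that register-then-send shape (all names invented for illustration; the real bookkeeping, including locking, lives in timeout.Manager):

package main

import (
	"fmt"
	"time"
)

// requester pairs each outbound request with a timer that fires a
// failure callback if no response arrives in time.
type requester struct {
	timeout time.Duration
	pending map[uint32]*time.Timer
}

func (r *requester) send(requestID uint32, onFail func()) {
	// Arm the failure path first: even if the send itself is lost,
	// the timeout still fires.
	r.pending[requestID] = time.AfterFunc(r.timeout, onFail)
	fmt.Println("sent request", requestID)
}

func (r *requester) onResponse(requestID uint32) {
	if t, ok := r.pending[requestID]; ok {
		t.Stop() // the response beat the deadline; cancel the failure path
		delete(r.pending, requestID)
	}
}

func main() {
	r := &requester{timeout: 50 * time.Millisecond, pending: map[uint32]*time.Timer{}}
	r.send(1, func() { fmt.Println("request 1 timed out") })
	time.Sleep(100 * time.Millisecond) // no response arrives, so the timeout fires
}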
@@ -12,10 +12,10 @@ import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/snow/networking/handler"
	"github.com/ava-labs/gecko/snow/networking/router"
	"github.com/ava-labs/gecko/snow/networking/timeout"
	"github.com/ava-labs/gecko/utils/logging"
	"github.com/prometheus/client_golang/prometheus"
)

func TestSenderContext(t *testing.T) {

@@ -37,11 +37,11 @@ func TestTimeout(t *testing.T) {
	tm.Initialize(time.Millisecond)
	go tm.Dispatch()

	router := router.ChainRouter{}
	router.Initialize(logging.NoLog{}, &tm, time.Hour)
	chainRouter := router.ChainRouter{}
	chainRouter.Initialize(logging.NoLog{}, &tm, time.Hour, time.Second)

	sender := Sender{}
	sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &router, &tm)
	sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &chainRouter, &tm)

	engine := common.EngineTest{T: t}
	engine.Default(true)

@@ -52,16 +52,23 @@ func TestTimeout(t *testing.T) {
	wg.Add(2)

	failedVDRs := ids.ShortSet{}
	engine.QueryFailedF = func(validatorID ids.ShortID, _ uint32) {
	engine.QueryFailedF = func(validatorID ids.ShortID, _ uint32) error {
		failedVDRs.Add(validatorID)
		wg.Done()
		return nil
	}

	handler := handler.Handler{}
	handler.Initialize(&engine, nil, 1)
	handler := router.Handler{}
	handler.Initialize(
		&engine,
		nil,
		1,
		"",
		prometheus.NewRegistry(),
	)
	go handler.Dispatch()

	router.AddChain(&handler)
	chainRouter.AddChain(&handler)

	vdrIDs := ids.ShortSet{}
	vdrIDs.Add(ids.NewShortID([20]byte{255}))
@@ -16,7 +16,7 @@ type ExternalSenderTest struct {

	CantGetAcceptedFrontier, CantAcceptedFrontier,
	CantGetAccepted, CantAccepted,
	CantGet, CantPut,
	CantGet, CantGetAncestors, CantPut, CantMultiPut,
	CantPullQuery, CantPushQuery, CantChits,
	CantGossip bool

@@ -24,8 +24,9 @@ type ExternalSenderTest struct {
	AcceptedFrontierF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	GetAcceptedF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	AcceptedF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
	GetF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	GetF, GetAncestorsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
	PutF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	MultiPutF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
	PushQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
	PullQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)
	ChitsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)

@@ -39,7 +40,9 @@ func (s *ExternalSenderTest) Default(cant bool) {
	s.CantGetAccepted = cant
	s.CantAccepted = cant
	s.CantGet = cant
	s.CantGetAncestors = cant
	s.CantPut = cant
	s.CantMultiPut = cant
	s.CantPullQuery = cant
	s.CantPushQuery = cant
	s.CantChits = cant

@@ -111,6 +114,19 @@ func (s *ExternalSenderTest) Get(vdr ids.ShortID, chainID ids.ID, requestID uint
	}
}

// GetAncestors calls GetAncestorsF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *ExternalSenderTest) GetAncestors(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxID ids.ID) {
	if s.GetAncestorsF != nil {
		s.GetAncestorsF(vdr, chainID, requestID, vtxID)
	} else if s.CantGetAncestors && s.T != nil {
		s.T.Fatalf("Unexpectedly called GetAncestors")
	} else if s.CantGetAncestors && s.B != nil {
		s.B.Fatalf("Unexpectedly called GetAncestors")
	}
}

// Put calls PutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.

@@ -124,6 +140,19 @@ func (s *ExternalSenderTest) Put(vdr ids.ShortID, chainID ids.ID, requestID uint
	}
}

// MultiPut calls MultiPutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *ExternalSenderTest) MultiPut(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxs [][]byte) {
	if s.MultiPutF != nil {
		s.MultiPutF(vdr, chainID, requestID, vtxs)
	} else if s.CantMultiPut && s.T != nil {
		s.T.Fatalf("Unexpectedly called MultiPut")
	} else if s.CantMultiPut && s.B != nil {
		s.B.Fatalf("Unexpectedly called MultiPut")
	}
}

// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
// and this function shouldn't be called and testing was initialized, then
// testing will fail.
@@ -5,14 +5,12 @@ package logging

import (
	"path"

	"github.com/ava-labs/gecko/ids"
)

// Factory ...
type Factory interface {
	Make() (Logger, error)
	MakeChain(chainID ids.ID, subdir string) (Logger, error)
	MakeChain(chainID string, subdir string) (Logger, error)
	MakeSubdir(subdir string) (Logger, error)
	Close()
}

@@ -41,10 +39,10 @@ func (f *factory) Make() (Logger, error) {
}

// MakeChain ...
func (f *factory) MakeChain(chainID ids.ID, subdir string) (Logger, error) {
func (f *factory) MakeChain(chainID string, subdir string) (Logger, error) {
	config := f.config
	config.MsgPrefix = "chain " + chainID.String()
	config.Directory = path.Join(config.Directory, "chain", chainID.String(), subdir)
	config.MsgPrefix = chainID + " Chain"
	config.Directory = path.Join(config.Directory, "chain", chainID, subdir)

	log, err := New(config)
	if err == nil {

@@ -3,10 +3,6 @@

package logging

import (
	"github.com/ava-labs/gecko/ids"
)

// NoFactory ...
type NoFactory struct{}

@@ -14,7 +10,7 @@ type NoFactory struct{}
func (NoFactory) Make() (Logger, error) { return NoLog{}, nil }

// MakeChain ...
func (NoFactory) MakeChain(ids.ID, string) (Logger, error) { return NoLog{}, nil }
func (NoFactory) MakeChain(string, string) (Logger, error) { return NoLog{}, nil }

// MakeSubdir ...
func (NoFactory) MakeSubdir(string) (Logger, error) { return NoLog{}, nil }
@@ -3,9 +3,13 @@

package timer

import (
	"time"
)

// Useful latency buckets
var (
	Buckets = []float64{
	MillisecondsBuckets = []float64{
		10,  // 10 ms is ~ instant
		100, // 100 ms
		250, // 250 ms

@@ -18,4 +22,15 @@ var (
		10000, // 10 seconds
		// anything larger than 10 seconds will be bucketed together
	}
	NanosecondsBuckets = []float64{
		float64(100 * time.Nanosecond),
		float64(time.Microsecond),
		float64(10 * time.Microsecond),
		float64(100 * time.Microsecond),
		float64(time.Millisecond),
		float64(10 * time.Millisecond),
		float64(100 * time.Millisecond),
		float64(time.Second),
		// anything larger than a second will be bucketed together
	}
)
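Since time.Duration is an int64 count of nanoseconds, these bounds are ordinary durations cast to float64, which is exactly the unit dispatchMsg observes. A quick check of the numbers involved:

package main

import (
	"fmt"
	"time"
)

func main() {
	// Casting a time.Duration to float64 yields the same values
	// NanosecondsBuckets stores.
	fmt.Println(float64(time.Microsecond))      // 1000
	fmt.Println(float64(10 * time.Millisecond)) // 1e+07
	fmt.Println(float64(time.Second))           // 1e+09

	// An observation is counted in every bucket whose bound is >= the value.
	elapsed := 3 * time.Millisecond
	fmt.Println(float64(elapsed) <= float64(10*time.Millisecond)) // true
}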
@@ -256,6 +256,24 @@ func (p *Packer) UnpackFixedByteSlices(size int) [][]byte {
	return bytes
}

// Pack2DByteSlice appends a 2D byte slice to the byte array
func (p *Packer) Pack2DByteSlice(byteSlices [][]byte) {
	p.PackInt(uint32(len(byteSlices)))
	for _, bytes := range byteSlices {
		p.PackBytes(bytes)
	}
}

// Unpack2DByteSlice returns a 2D byte slice from the byte array.
func (p *Packer) Unpack2DByteSlice() [][]byte {
	sliceSize := p.UnpackInt()
	bytes := [][]byte(nil)
	for i := uint32(0); i < sliceSize && !p.Errored(); i++ {
		bytes = append(bytes, p.UnpackBytes())
	}
	return bytes
}
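As a reading aid: assuming PackInt writes a big-endian uint32 and PackBytes writes a uint32 length prefix followed by the raw bytes (which is how the unpack side reads them back), Pack2DByteSlice produces an outer count followed by one length-prefixed record per inner slice. The concrete bytes below are an illustration, not taken from the repository:

package main

import "fmt"

func main() {
	// Expected layout for Pack2DByteSlice([][]byte{{0xAA}, {0xBB, 0xCC}}):
	//   00 00 00 02            outer count   = 2
	//   00 00 00 01 AA         first slice:  len = 1, then the byte
	//   00 00 00 02 BB CC      second slice: len = 2, then the bytes
	encoded := []byte{
		0x00, 0x00, 0x00, 0x02,
		0x00, 0x00, 0x00, 0x01, 0xAA,
		0x00, 0x00, 0x00, 0x02, 0xBB, 0xCC,
	}
	fmt.Printf("% x\n", encoded)
}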
// PackStr appends a string to the byte array
func (p *Packer) PackStr(str string) {
	strSize := len(str)

@@ -432,6 +450,20 @@ func TryUnpackBytes(packer *Packer) interface{} {
	return packer.UnpackBytes()
}

// TryPack2DBytes attempts to pack the value as a 2D byte slice
func TryPack2DBytes(packer *Packer, valIntf interface{}) {
	if val, ok := valIntf.([][]byte); ok {
		packer.Pack2DByteSlice(val)
	} else {
		packer.Add(errBadType)
	}
}

// TryUnpack2DBytes attempts to unpack the value as a 2D byte slice
func TryUnpack2DBytes(packer *Packer) interface{} {
	return packer.Unpack2DByteSlice()
}

// TryPackStr attempts to pack the value as a string
func TryPackStr(packer *Packer, valIntf interface{}) {
	if val, ok := valIntf.(string); ok {
@@ -506,3 +506,63 @@ func TestPackerUnpackBool(t *testing.T) {
		t.Fatalf("Packer.UnpackBool returned %t, expected sentinel value %t", actual, BoolSentinal)
	}
}

func TestPacker2DByteSlice(t *testing.T) {
	// Case: empty array
	p := Packer{MaxSize: 1024}
	arr := [][]byte{}
	p.Pack2DByteSlice(arr)
	if p.Errored() {
		t.Fatal(p.Err)
	}
	arrUnpacked := p.Unpack2DByteSlice()
	if len(arrUnpacked) != 0 {
		t.Fatal("should be empty")
	}

	// Case: Array has one element
	p = Packer{MaxSize: 1024}
	arr = [][]byte{
		[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
	}
	p.Pack2DByteSlice(arr)
	if p.Errored() {
		t.Fatal(p.Err)
	}
	p = Packer{MaxSize: 1024, Bytes: p.Bytes}
	arrUnpacked = p.Unpack2DByteSlice()
	if p.Errored() {
		t.Fatal(p.Err)
	}
	if l := len(arrUnpacked); l != 1 {
		t.Fatalf("should be length 1 but is length %d", l)
	}
	if !bytes.Equal(arrUnpacked[0], arr[0]) {
		t.Fatal("should match")
	}

	// Case: Array has multiple elements
	p = Packer{MaxSize: 1024}
	arr = [][]byte{
		[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
		[]byte{11, 12, 3, 4, 5, 6, 7, 8, 9, 10},
	}
	p.Pack2DByteSlice(arr)
	if p.Errored() {
		t.Fatal(p.Err)
	}
	p = Packer{MaxSize: 1024, Bytes: p.Bytes}
	arrUnpacked = p.Unpack2DByteSlice()
	if p.Errored() {
		t.Fatal(p.Err)
	}
	if l := len(arrUnpacked); l != 2 {
		t.Fatalf("should be length 2 but is length %d", l)
	}
	if !bytes.Equal(arrUnpacked[0], arr[0]) {
		t.Fatal("should match")
	}
	if !bytes.Equal(arrUnpacked[1], arr[1]) {
		t.Fatal("should match")
	}
}
@@ -840,6 +840,16 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	cr := codecRegistry{
		index:         1,
		typeToFxIndex: vm.typeToFxIndex,

@@ -1386,6 +1396,16 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	cr := codecRegistry{
		index:         1,
		typeToFxIndex: vm.typeToFxIndex,

@@ -1538,6 +1558,16 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	cr := codecRegistry{
		index:         1,
		typeToFxIndex: vm.typeToFxIndex,
@@ -13,6 +13,7 @@ import (
	"github.com/ava-labs/gecko/snow"
	"github.com/ava-labs/gecko/snow/engine/common"
	"github.com/ava-labs/gecko/utils/crypto"
	"github.com/ava-labs/gecko/utils/hashing"
	"github.com/ava-labs/gecko/utils/logging"
	"github.com/ava-labs/gecko/vms/components/ava"
	"github.com/ava-labs/gecko/vms/components/codec"

@@ -150,6 +151,16 @@ func TestIssueExportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	tx := &Tx{UnsignedTx: &ExportTx{

@@ -245,6 +256,16 @@ func TestIssueExportTx(t *testing.T) {
	if _, err := state.AVMUTXO(utxoID); err != nil {
		t.Fatal(err)
	}

	addrID := ids.NewID(hashing.ComputeHash256Array(key.PublicKey().Address().Bytes()))

	utxoIDs, err := state.AVMFunds(addrID)
	if err != nil {
		t.Fatal(err)
	}
	if len(utxoIDs) != 1 {
		t.Fatalf("wrong number of utxoIDs %d", len(utxoIDs))
	}
}

// Test force accepting an import transaction.

@@ -286,6 +307,16 @@ func TestClearForceAcceptedExportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	tx := &Tx{UnsignedTx: &ExportTx{
@@ -5,6 +5,7 @@ package avm

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow"
)

// ID that this VM uses when labeled

@@ -19,7 +20,7 @@ type Factory struct {
}

// New ...
func (f *Factory) New() (interface{}, error) {
func (f *Factory) New(*snow.Context) (interface{}, error) {
	return &VM{
		ava:      f.AVA,
		platform: f.Platform,
@@ -19,6 +19,12 @@ type Fx interface {
	// return an error if the VM is incompatible.
	Initialize(vm interface{}) error

	// Notify this Fx that the VM is in bootstrapping
	Bootstrapping() error

	// Notify this Fx that the VM is bootstrapped
	Bootstrapped() error

	// VerifyTransfer verifies that the specified transaction can spend the
	// provided utxo with no restrictions on the destination. If the transaction
	// can't spend the output based on the input and credential, a non-nil error

@@ -4,10 +4,12 @@

package avm

type testFx struct {
	initialize, verifyTransfer, verifyOperation error
	initialize, bootstrapping, bootstrapped, verifyTransfer, verifyOperation error
}

func (fx *testFx) Initialize(_ interface{}) error              { return fx.initialize }
func (fx *testFx) Bootstrapping() error                        { return fx.bootstrapping }
func (fx *testFx) Bootstrapped() error                         { return fx.bootstrapped }
func (fx *testFx) VerifyTransfer(_, _, _, _ interface{}) error { return fx.verifyTransfer }
func (fx *testFx) VerifyOperation(_, _, _ interface{}, _ []interface{}) error {
	return fx.verifyOperation
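The AVM tests elsewhere in this commit call vm.Bootstrapping() and vm.Bootstrapped() before issuing transactions, and the natural reading is that the VM fans each lifecycle notification out to every registered fx, stopping at the first error. That fan-out is not shown in this diff, so the following is only a sketch of the presumed shape (the vm struct and loggingFx are invented):

package main

import "fmt"

// Fx mirrors the two lifecycle hooks added in this diff.
type Fx interface {
	Bootstrapping() error
	Bootstrapped() error
}

type loggingFx struct{ name string }

func (fx *loggingFx) Bootstrapping() error { fmt.Println(fx.name, "bootstrapping"); return nil }
func (fx *loggingFx) Bootstrapped() error  { fmt.Println(fx.name, "bootstrapped"); return nil }

// vm stands in for the AVM: forward each notification to every fx and
// stop at the first error.
type vm struct{ fxs []Fx }

func (v *vm) Bootstrapping() error {
	for _, fx := range v.fxs {
		if err := fx.Bootstrapping(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	v := &vm{fxs: []Fx{&loggingFx{name: "secp256k1fx"}}}
	if err := v.Bootstrapping(); err != nil {
		panic(err)
	}
}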
@@ -140,6 +140,16 @@ func TestIssueImportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	utxoID := ava.UTXOID{

@@ -288,6 +298,16 @@ func TestForceAcceptImportTx(t *testing.T) {
	}
	vm.batchTimeout = 0

	err = vm.Bootstrapping()
	if err != nil {
		t.Fatal(err)
	}

	err = vm.Bootstrapped()
	if err != nil {
		t.Fatal(err)
	}

	key := keys[0]

	genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@@ -15,7 +15,6 @@ const (
	txID uint64 = iota
	utxoID
	txStatusID
	fundsID
	dbInitializedID
)

@@ -28,8 +27,8 @@ var (
type prefixedState struct {
	state *state

	tx, utxo, txStatus, funds cache.Cacher
	uniqueTx                  cache.Deduplicator
	tx, utxo, txStatus cache.Cacher
	uniqueTx           cache.Deduplicator
}

// UniqueTx de-duplicates the transaction.

@@ -76,14 +75,7 @@ func (s *prefixedState) SetDBInitialized(status choices.Status) error {

// Funds returns the mapping from the 32 byte representation of an address to a
// list of utxo IDs that reference the address.
func (s *prefixedState) Funds(id ids.ID) ([]ids.ID, error) {
	return s.state.IDs(uniqueID(id, fundsID, s.funds))
}

// SetFunds saves the mapping from address to utxo IDs to storage.
func (s *prefixedState) SetFunds(id ids.ID, idSlice []ids.ID) error {
	return s.state.SetIDs(uniqueID(id, fundsID, s.funds), idSlice)
}
func (s *prefixedState) Funds(id ids.ID) ([]ids.ID, error) { return s.state.IDs(id) }

// SpendUTXO consumes the provided utxo.
func (s *prefixedState) SpendUTXO(utxoID ids.ID) error {

@@ -106,11 +98,7 @@ func (s *prefixedState) SpendUTXO(utxoID ids.ID) error {
func (s *prefixedState) removeUTXO(addrs [][]byte, utxoID ids.ID) error {
	for _, addr := range addrs {
		addrID := ids.NewID(hashing.ComputeHash256Array(addr))
		utxos := ids.Set{}
		funds, _ := s.Funds(addrID)
		utxos.Add(funds...)
		utxos.Remove(utxoID)
		if err := s.SetFunds(addrID, utxos.List()); err != nil {
		if err := s.state.RemoveID(addrID, utxoID); err != nil {
			return err
		}
	}

@@ -135,11 +123,7 @@ func (s *prefixedState) FundUTXO(utxo *ava.UTXO) error {
func (s *prefixedState) addUTXO(addrs [][]byte, utxoID ids.ID) error {
	for _, addr := range addrs {
		addrID := ids.NewID(hashing.ComputeHash256Array(addr))
		utxos := ids.Set{}
		funds, _ := s.Funds(addrID)
		utxos.Add(funds...)
		utxos.Add(utxoID)
		if err := s.SetFunds(addrID, utxos.List()); err != nil {
		if err := s.state.AddID(addrID, utxoID); err != nil {
			return err
		}
	}
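What changed in removeUTXO and addUTXO above: the old code did a read-modify-write of the entire address-to-UTXO list (Funds, edit an ids.Set, SetFunds), while the new code asks the state layer to add or remove one ID. The implementation behind state.AddID and state.RemoveID is not part of this diff; the toy index below only illustrates the difference in contract, not the on-disk layout:

package main

import "fmt"

// index is a toy address -> UTXO-ID index supporting incremental updates,
// the shape the new AddID/RemoveID calls rely on.
type index map[string]map[string]bool

func (ix index) AddID(addr, utxoID string) {
	if ix[addr] == nil {
		ix[addr] = map[string]bool{}
	}
	ix[addr][utxoID] = true
}

func (ix index) RemoveID(addr, utxoID string) {
	delete(ix[addr], utxoID)
}

func main() {
	ix := index{}
	ix.AddID("addr1", "utxo1")
	ix.AddID("addr1", "utxo2")
	ix.RemoveID("addr1", "utxo1")
	fmt.Println(len(ix["addr1"])) // 1: only utxo2 remains
}

Incremental updates avoid deserializing and reserializing a potentially large ID list on every spend, which is presumably the motivation for dropping the funds cache and the fundsID prefix.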
@@ -151,7 +151,7 @@ func TestPrefixedFundingAddresses(t *testing.T) {

	state := vm.state

	vm.codec.RegisterType(&testAddressable{})
	vm.codec.RegisterType(&ava.TestAddressable{})

	utxo := &ava.UTXO{
		UTXOID: ava.UTXOID{

@@ -159,7 +159,7 @@ func TestPrefixedFundingAddresses(t *testing.T) {
			OutputIndex: 1,
		},
		Asset: ava.Asset{ID: ids.Empty},
		Out: &testAddressable{
		Out: &ava.TestAddressable{
			Addrs: [][]byte{
				[]byte{0},
			},

@@ -182,8 +182,11 @@ func TestPrefixedFundingAddresses(t *testing.T) {
	if err := state.SpendUTXO(utxo.InputID()); err != nil {
		t.Fatal(err)
	}
	_, err = state.Funds(ids.NewID(hashing.ComputeHash256Array([]byte{0})))
	if err == nil {
		t.Fatalf("Should have returned no utxoIDs")
	funds, err = state.Funds(ids.NewID(hashing.ComputeHash256Array([]byte{0})))
	if err != nil {
		t.Fatal(err)
	}
	if len(funds) != 0 {
		t.Fatalf("Should have returned 0 utxoIDs")
	}
}
Some files were not shown because too many files have changed in this diff.