mirror of https://github.com/poanetwork/gecko.git
Commit 8f009cfa4a: Merge branch 'master' into upnp
@@ -10,129 +10,35 @@ import (
"github.com/ava-labs/gecko/api"
"github.com/ava-labs/gecko/chains"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/version"

cjson "github.com/ava-labs/gecko/utils/json"
)

// Admin is the API service for node admin management
type Admin struct {
version version.Version
nodeID ids.ShortID
networkID uint32
log logging.Logger
networking network.Network
performance Performance
chainManager chains.Manager
httpServer *api.Server
}

// NewService returns a new admin API service
func NewService(version version.Version, nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler {
func NewService(log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler {
newServer := rpc.NewServer()
codec := cjson.NewCodec()
newServer.RegisterCodec(codec, "application/json")
newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
newServer.RegisterService(&Admin{
version: version,
nodeID: nodeID,
networkID: networkID,
log: log,
chainManager: chainManager,
networking: peers,
httpServer: httpServer,
}, "admin")
return &common.HTTPHandler{Handler: newServer}
}

// GetNodeVersionReply are the results from calling GetNodeVersion
type GetNodeVersionReply struct {
Version string `json:"version"`
}

// GetNodeVersion returns the version this node is running
func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
service.log.Info("Admin: GetNodeVersion called")

reply.Version = service.version.String()
return nil
}

// GetNodeIDReply are the results from calling GetNodeID
type GetNodeIDReply struct {
NodeID ids.ShortID `json:"nodeID"`
}

// GetNodeID returns the node ID of this node
func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Info("Admin: GetNodeID called")

reply.NodeID = service.nodeID
return nil
}

// GetNetworkIDReply are the results from calling GetNetworkID
type GetNetworkIDReply struct {
NetworkID cjson.Uint32 `json:"networkID"`
}

// GetNetworkID returns the network ID this node is running on
func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Info("Admin: GetNetworkID called")

reply.NetworkID = cjson.Uint32(service.networkID)
return nil
}

// GetNetworkNameReply is the result from calling GetNetworkName
type GetNetworkNameReply struct {
NetworkName string `json:"networkName"`
}

// GetNetworkName returns the network name this node is running on
func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Info("Admin: GetNetworkName called")

reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
}

// GetBlockchainIDArgs are the arguments for calling GetBlockchainID
type GetBlockchainIDArgs struct {
Alias string `json:"alias"`
}

// GetBlockchainIDReply are the results from calling GetBlockchainID
type GetBlockchainIDReply struct {
BlockchainID string `json:"blockchainID"`
}

// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Info("Admin: GetBlockchainID called")

bID, err := service.chainManager.Lookup(args.Alias)
reply.BlockchainID = bID.String()
return err
}

// PeersReply are the results from calling Peers
type PeersReply struct {
Peers []network.PeerID `json:"peers"`
}

// Peers returns the list of current validators
func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Info("Admin: Peers called")
reply.Peers = service.networking.Peers()
return nil
}

// StartCPUProfilerArgs are the arguments for calling StartCPUProfiler
type StartCPUProfilerArgs struct {
Filename string `json:"filename"`
@@ -0,0 +1,131 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package info

import (
"net/http"

"github.com/gorilla/rpc/v2"

"github.com/ava-labs/gecko/chains"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/version"

cjson "github.com/ava-labs/gecko/utils/json"
)

// Info is the API service for unprivileged info on a node
type Info struct {
version version.Version
nodeID ids.ShortID
networkID uint32
log logging.Logger
networking network.Network
chainManager chains.Manager
}

// NewService returns a new admin API service
func NewService(log logging.Logger, version version.Version, nodeID ids.ShortID, networkID uint32, chainManager chains.Manager, peers network.Network) *common.HTTPHandler {
newServer := rpc.NewServer()
codec := cjson.NewCodec()
newServer.RegisterCodec(codec, "application/json")
newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
newServer.RegisterService(&Info{
version: version,
nodeID: nodeID,
networkID: networkID,
log: log,
chainManager: chainManager,
networking: peers,
}, "info")
return &common.HTTPHandler{Handler: newServer}
}

// GetNodeVersionReply are the results from calling GetNodeVersion
type GetNodeVersionReply struct {
Version string `json:"version"`
}

// GetNodeVersion returns the version this node is running
func (service *Info) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
service.log.Info("Info: GetNodeVersion called")

reply.Version = service.version.String()
return nil
}

// GetNodeIDReply are the results from calling GetNodeID
type GetNodeIDReply struct {
NodeID ids.ShortID `json:"nodeID"`
}

// GetNodeID returns the node ID of this node
func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Info("Info: GetNodeID called")

reply.NodeID = service.nodeID
return nil
}

// GetNetworkIDReply are the results from calling GetNetworkID
type GetNetworkIDReply struct {
NetworkID cjson.Uint32 `json:"networkID"`
}

// GetNetworkID returns the network ID this node is running on
func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Info("Info: GetNetworkID called")

reply.NetworkID = cjson.Uint32(service.networkID)
return nil
}

// GetNetworkNameReply is the result from calling GetNetworkName
type GetNetworkNameReply struct {
NetworkName string `json:"networkName"`
}

// GetNetworkName returns the network name this node is running on
func (service *Info) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Info("Info: GetNetworkName called")

reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
}

// GetBlockchainIDArgs are the arguments for calling GetBlockchainID
type GetBlockchainIDArgs struct {
Alias string `json:"alias"`
}

// GetBlockchainIDReply are the results from calling GetBlockchainID
type GetBlockchainIDReply struct {
BlockchainID string `json:"blockchainID"`
}

// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Info("Info: GetBlockchainID called")

bID, err := service.chainManager.Lookup(args.Alias)
reply.BlockchainID = bID.String()
return err
}

// PeersReply are the results from calling Peers
type PeersReply struct {
Peers []network.PeerID `json:"peers"`
}

// Peers returns the list of current validators
func (service *Info) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Info("Info: Peers called")

reply.Peers = service.networking.Peers()
return nil
}
@@ -10,6 +10,10 @@ import (
"github.com/ava-labs/gecko/ids"
)

const (
minCacheSize = 32
)

type entry struct {
Key ids.ID
Value interface{}
@@ -59,7 +63,7 @@ func (c *LRU) Flush() {

func (c *LRU) init() {
if c.entryMap == nil {
c.entryMap = make(map[[32]byte]*list.Element)
c.entryMap = make(map[[32]byte]*list.Element, minCacheSize)
}
if c.entryList == nil {
c.entryList = list.New()
@@ -134,6 +138,6 @@ func (c *LRU) evict(key ids.ID) {
func (c *LRU) flush() {
c.init()

c.entryMap = make(map[[32]byte]*list.Element)
c.entryMap = make(map[[32]byte]*list.Element, minCacheSize)
c.entryList = list.New()
}
@@ -0,0 +1,53 @@
package cache

import (
"crypto/rand"
"testing"

"github.com/ava-labs/gecko/ids"
)

func BenchmarkLRUCachePutSmall(b *testing.B) {
smallLen := 5
cache := &LRU{Size: smallLen}
for n := 0; n < b.N; n++ {
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}

func BenchmarkLRUCachePutMedium(b *testing.B) {
mediumLen := 250
cache := &LRU{Size: mediumLen}
for n := 0; n < b.N; n++ {
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}

func BenchmarkLRUCachePutLarge(b *testing.B) {
largeLen := 10000
cache := &LRU{Size: largeLen}
for n := 0; n < b.N; n++ {
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}
@@ -50,6 +50,122 @@ func (c *Config) init() error {

// Hard coded genesis constants
var (
EverestConfig = Config{
MintAddresses: []string{
"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
},
FundedAddresses: []string{
"9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17",
"JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn",
"7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM",
"77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6",
"4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt",
"CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi",
"4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa",
"DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9",
"ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h",
"6cesTteH62Y5mLoDBUASaBvCXuL2AthL",
},
StakerIDs: []string{
"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
},
EVMBytes: []byte{
0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69,
0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31,
0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65,
0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64,
0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53,
0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a,
0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69,
0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63,
0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69,
0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68,
0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38,
0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62,
0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32,
0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31,
0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35,
0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34,
0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66,
0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36,
0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22,
0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75,
0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c,
0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72,
0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22,
0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22,
0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74,
0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30,
0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69,
0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78,
0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22,
0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63,
0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78,
0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x22, 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e,
0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f,
0x63, 0x22, 0x3a, 0x7b, 0x22, 0x35, 0x37, 0x32,
0x66, 0x34, 0x64, 0x38, 0x30, 0x66, 0x31, 0x30,
0x66, 0x36, 0x36, 0x33, 0x62, 0x35, 0x30, 0x34,
0x39, 0x66, 0x37, 0x38, 0x39, 0x35, 0x34, 0x36,
0x66, 0x32, 0x35, 0x66, 0x37, 0x30, 0x62, 0x62,
0x36, 0x32, 0x61, 0x37, 0x66, 0x22, 0x3a, 0x7b,
0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62,
0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30,
0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c,
0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61,
0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22,
0x7d,
},
}
DenaliConfig = Config{
MintAddresses: []string{
"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
@@ -393,6 +509,8 @@ var (
// GetConfig ...
func GetConfig(networkID uint32) *Config {
switch networkID {
case EverestID:
return &EverestConfig
case DenaliID:
return &DenaliConfig
case CascadeID:
@@ -23,8 +23,11 @@ func TestNetworkName(t *testing.T) {
if name := NetworkName(DenaliID); name != DenaliName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
}
if name := NetworkName(TestnetID); name != DenaliName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
if name := NetworkName(EverestID); name != EverestName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, EverestName)
}
if name := NetworkName(TestnetID); name != EverestName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, EverestName)
}
if name := NetworkName(4294967295); name != "network-4294967295" {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, "network-4294967295")
@@ -16,13 +16,15 @@ var (
MainnetID uint32 = 1
CascadeID uint32 = 2
DenaliID uint32 = 3
EverestID uint32 = 4

TestnetID uint32 = 3
TestnetID uint32 = 4
LocalID uint32 = 12345

MainnetName = "mainnet"
CascadeName = "cascade"
DenaliName = "denali"
EverestName = "everest"

TestnetName = "testnet"
LocalName = "local"
@@ -31,6 +33,7 @@ var (
MainnetID: MainnetName,
CascadeID: CascadeName,
DenaliID: DenaliName,
EverestID: EverestName,

LocalID: LocalName,
}
@@ -38,6 +41,7 @@ var (
MainnetName: MainnetID,
CascadeName: CascadeID,
DenaliName: DenaliID,
EverestName: EverestID,

TestnetName: TestnetID,
LocalName: LocalID,
go.mod
@@ -33,6 +33,7 @@ require (
github.com/olekukonko/tablewriter v0.0.4 // indirect
github.com/pborman/uuid v1.2.0 // indirect
github.com/prometheus/client_golang v1.6.0
github.com/prometheus/common v0.9.1
github.com/prometheus/tsdb v0.10.0 // indirect
github.com/rjeczalik/notify v0.9.2 // indirect
github.com/rs/cors v1.7.0
go.sum
@@ -8,8 +8,10 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@@ -220,6 +222,7 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70KE0+IUJSidPj/BFS/RXNHmKIJOdckzml2E=
@@ -339,6 +342,7 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
ids/bag.go
@@ -8,6 +8,10 @@ import (
"strings"
)

const (
minBagSize = 16
)

// Bag is a multiset of IDs.
//
// A bag has the ability to split and filter on it's bits for ease of use for
@@ -25,7 +29,7 @@ type Bag struct {

func (b *Bag) init() {
if b.counts == nil {
b.counts = make(map[[32]byte]int)
b.counts = make(map[[32]byte]int, minBagSize)
}
}

@@ -72,16 +76,21 @@ func (b *Bag) AddCount(id ID, count int) {
}

// Count returns the number of times the id has been added.
func (b *Bag) Count(id ID) int { return b.counts[*id.ID] }
func (b *Bag) Count(id ID) int {
b.init()
return b.counts[*id.ID]
}

// Len returns the number of times an id has been added.
func (b *Bag) Len() int { return b.size }

// List returns a list of all ids that have been added.
func (b *Bag) List() []ID {
idList := []ID(nil)
idList := make([]ID, len(b.counts), len(b.counts))
i := 0
for id := range b.counts {
idList = append(idList, NewID(id))
idList[i] = NewID(id)
i++
}
return idList
}
@@ -0,0 +1,53 @@
package ids

import (
"crypto/rand"
"testing"
)

//
func BenchmarkBagListSmall(b *testing.B) {
smallLen := 5
bag := Bag{}
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}

func BenchmarkBagListMedium(b *testing.B) {
mediumLen := 25
bag := Bag{}
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
bag.Add(NewID(idBytes))
}
b.ResetTimer()

for n := 0; n < b.N; n++ {
bag.List()
}
}

func BenchmarkBagListLarge(b *testing.B) {
largeLen := 100000
bag := Bag{}
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}
@@ -18,8 +18,8 @@ func TestBagAdd(t *testing.T) {
} else if count := bag.Count(id1); count != 0 {
t.Fatalf("Bag.Count returned %d expected %d", count, 0)
} else if size := bag.Len(); size != 0 {
t.Fatalf("Bag.Len returned %d expected %d", count, 0)
} else if list := bag.List(); list != nil {
t.Fatalf("Bag.Len returned %d elements expected %d", count, 0)
} else if list := bag.List(); len(list) != 0 {
t.Fatalf("Bag.List returned %v expected %v", list, nil)
} else if mode, freq := bag.Mode(); !mode.IsZero() {
t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, ID{})
ids/set.go
@@ -7,11 +7,19 @@ import (
"strings"
)

const (
// The minimum capacity of a set
minSetSize = 16
)

// Set is a set of IDs
type Set map[[32]byte]bool

func (ids *Set) init(size int) {
if *ids == nil {
if minSetSize > size {
size = minSetSize
}
*ids = make(map[[32]byte]bool, size)
}
}
@@ -70,9 +78,11 @@ func (ids *Set) Clear() { *ids = nil }

// List converts this set into a list
func (ids Set) List() []ID {
idList := []ID(nil)
idList := make([]ID, ids.Len(), ids.Len())
i := 0
for id := range ids {
idList = append(idList, NewID(id))
idList[i] = NewID(id)
i++
}
return idList
}
@@ -0,0 +1,53 @@
package ids

import (
"crypto/rand"
"testing"
)

//
func BenchmarkSetListSmall(b *testing.B) {
smallLen := 5
set := Set{}
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}

func BenchmarkSetListMedium(b *testing.B) {
mediumLen := 25
set := Set{}
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
set.Add(NewID(idBytes))
}
b.ResetTimer()

for n := 0; n < b.N; n++ {
set.List()
}
}

func BenchmarkSetListLarge(b *testing.B) {
largeLen := 100000
set := Set{}
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
NewID(idBytes)
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}
@@ -5,11 +5,18 @@ package ids

import "strings"

const (
minShortSetSize = 16
)

// ShortSet is a set of ShortIDs
type ShortSet map[[20]byte]bool

func (ids *ShortSet) init(size int) {
if *ids == nil {
if minShortSetSize > size {
size = minShortSetSize
}
*ids = make(map[[20]byte]bool, size)
}
}
@@ -65,9 +72,11 @@ func (ids ShortSet) CappedList(size int) []ShortID {

// List converts this set into a list
func (ids ShortSet) List() []ShortID {
idList := make([]ShortID, len(ids))[:0]
idList := make([]ShortID, len(ids), len(ids))
i := 0
for id := range ids {
idList = append(idList, NewShortID(id))
idList[i] = NewShortID(id)
i++
}
return idList
}
@@ -8,12 +8,16 @@ import (
"strings"
)

const (
minUniqueBagSize = 16
)

// UniqueBag ...
type UniqueBag map[[32]byte]BitSet

func (b *UniqueBag) init() {
if *b == nil {
*b = make(map[[32]byte]BitSet)
*b = make(map[[32]byte]BitSet, minUniqueBagSize)
}
}
@@ -30,7 +30,7 @@ import (
)

const (
dbVersion = "v0.5.0"
dbVersion = "v0.6.0"
)

// Results of parsing the CLI
@@ -226,6 +226,7 @@ func init() {

// Enable/Disable APIs:
fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API")
fs.BoolVar(&Config.InfoAPIEnabled, "api-info-enabled", true, "If true, this node exposes the Info API")
fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API")
fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API")
fs.BoolVar(&Config.HealthAPIEnabled, "api-health-enabled", true, "If true, this node exposes the Health API")
@@ -33,6 +33,12 @@ func (m Builder) PeerList(ipDescs []utils.IPDesc) (Msg, error) {
return m.Pack(PeerList, map[Field]interface{}{Peers: ipDescs})
}

// Ping message
func (m Builder) Ping() (Msg, error) { return m.Pack(Ping, nil) }

// Pong message
func (m Builder) Pong() (Msg, error) { return m.Pack(Pong, nil) }

// GetAcceptedFrontier message
func (m Builder) GetAcceptedFrontier(chainID ids.ID, requestID uint32) (Msg, error) {
return m.Pack(GetAcceptedFrontier, map[Field]interface{}{
@@ -132,6 +132,10 @@ func (op Op) String() string {
return "get_peerlist"
case PeerList:
return "peerlist"
case Ping:
return "ping"
case Pong:
return "pong"
case GetAcceptedFrontier:
return "get_accepted_frontier"
case AcceptedFrontier:
@@ -166,22 +170,21 @@ const (
Version
GetPeerList
PeerList
Ping
Pong
// Bootstrapping:
GetAcceptedFrontier
AcceptedFrontier
GetAccepted
Accepted
GetAncestors
MultiPut
// Consensus:
Get
Put
PushQuery
PullQuery
Chits
// Bootstrapping:
// TODO: Move GetAncestors and MultiPut with the rest of the bootstrapping
// commands when we do non-backwards compatible upgrade
GetAncestors
MultiPut
)

// Defines the messages that can be sent/received with this network
@@ -192,6 +195,8 @@ var (
Version: {NetworkID, NodeID, MyTime, IP, VersionStr},
GetPeerList: {},
PeerList: {Peers},
Ping: {},
Pong: {},
// Bootstrapping:
GetAcceptedFrontier: {ChainID, RequestID},
AcceptedFrontier: {ChainID, RequestID, ContainerIDs},
@@ -54,6 +54,7 @@ type metrics struct {

getVersion, version,
getPeerlist, peerlist,
ping, pong,
getAcceptedFrontier, acceptedFrontier,
getAccepted, accepted,
get, getAncestors, put, multiPut,
@@ -78,6 +79,8 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error {
errs.Add(m.version.initialize(Version, registerer))
errs.Add(m.getPeerlist.initialize(GetPeerList, registerer))
errs.Add(m.peerlist.initialize(PeerList, registerer))
errs.Add(m.ping.initialize(Ping, registerer))
errs.Add(m.pong.initialize(Pong, registerer))
errs.Add(m.getAcceptedFrontier.initialize(GetAcceptedFrontier, registerer))
errs.Add(m.acceptedFrontier.initialize(AcceptedFrontier, registerer))
errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
@@ -103,6 +106,10 @@ func (m *metrics) message(msgType Op) *messageMetrics {
return &m.getPeerlist
case PeerList:
return &m.peerlist
case Ping:
return &m.ping
case Pong:
return &m.pong
case GetAcceptedFrontier:
return &m.getAcceptedFrontier
case AcceptedFrontier:
@@ -43,6 +43,12 @@ const (
defaultGetVersionTimeout = 2 * time.Second
defaultAllowPrivateIPs = true
defaultGossipSize = 50
defaultPingPongTimeout = time.Minute
defaultPingFrequency = 3 * defaultPingPongTimeout / 4

// Request ID used when sending a Put message to gossip an accepted container
// (ie not sent in response to a Get)
GossipMsgRequestID = math.MaxUint32
)

// Network defines the functionality of the networking library.
@@ -119,6 +125,8 @@ type network struct {
getVersionTimeout time.Duration
allowPrivateIPs bool
gossipSize int
pingPongTimeout time.Duration
pingFrequency time.Duration

executor timer.Executor

@@ -180,6 +188,8 @@ func NewDefaultNetwork(
defaultGetVersionTimeout,
defaultAllowPrivateIPs,
defaultGossipSize,
defaultPingPongTimeout,
defaultPingFrequency,
)
}

@@ -211,6 +221,8 @@ func NewNetwork(
getVersionTimeout time.Duration,
allowPrivateIPs bool,
gossipSize int,
pingPongTimeout time.Duration,
pingFrequency time.Duration,
) Network {
net := &network{
log: log,
@@ -239,6 +251,8 @@ func NewNetwork(
getVersionTimeout: getVersionTimeout,
allowPrivateIPs: allowPrivateIPs,
gossipSize: gossipSize,
pingPongTimeout: pingPongTimeout,
pingFrequency: pingFrequency,

disconnectedIPs: make(map[string]struct{}),
connectedIPs: make(map[string]struct{}),
@@ -705,7 +719,7 @@ func (n *network) Track(ip utils.IPDesc) {

// assumes the stateLock is not held.
func (n *network) gossipContainer(chainID, containerID ids.ID, container []byte) error {
msg, err := n.b.Put(chainID, math.MaxUint32, containerID, container)
msg, err := n.b.Put(chainID, GossipMsgRequestID, containerID, container)
if err != nil {
return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
}
@@ -64,6 +64,24 @@ func (p *peer) Start() {
// Initially send the version to the peer
go p.Version()
go p.requestVersion()
go p.sendPings()
}

func (p *peer) sendPings() {
t := time.NewTicker(p.net.pingFrequency)
defer t.Stop()

for range t.C {
p.net.stateLock.Lock()
closed := p.closed
p.net.stateLock.Unlock()

if closed {
return
}

p.Ping()
}
}

// request the version from the peer until we get the version from them
@@ -80,6 +98,7 @@ func (p *peer) requestVersion() {
if connected || closed {
return
}

p.GetVersion()
}
}
@@ -88,6 +107,11 @@ func (p *peer) requestVersion() {
func (p *peer) ReadMessages() {
defer p.Close()

if err := p.conn.SetReadDeadline(p.net.clock.Time().Add(p.net.pingPongTimeout)); err != nil {
p.net.log.Verbo("error on setting the connection read timeout %s", err)
return
}

pendingBuffer := wrappers.Packer{}
readBuffer := make([]byte, 1<<10)
for {
@@ -218,7 +242,15 @@ func (p *peer) send(msg Msg) bool {
// assumes the stateLock is not held
func (p *peer) handle(msg Msg) {
p.net.heartbeat()
atomic.StoreInt64(&p.lastReceived, p.net.clock.Time().Unix())

currentTime := p.net.clock.Time()
atomic.StoreInt64(&p.lastReceived, currentTime.Unix())

if err := p.conn.SetReadDeadline(currentTime.Add(p.net.pingPongTimeout)); err != nil {
p.net.log.Verbo("error on setting the connection read timeout %s, closing the connection", err)
p.Close()
return
}

op := msg.Op()
msgMetrics := p.net.message(op)
@@ -235,6 +267,12 @@ func (p *peer) handle(msg Msg) {
case GetVersion:
p.getVersion(msg)
return
case Ping:
p.ping(msg)
return
case Pong:
p.pong(msg)
return
}
if !p.connected {
p.net.log.Debug("dropping message from %s because the connection hasn't been established yet", p.id)
@@ -318,6 +356,12 @@ func (p *peer) GetPeerList() {
p.Send(msg)
}

// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
p.PeerList(ips)
}

// assumes the stateLock is not held
func (p *peer) PeerList(peers []utils.IPDesc) {
msg, err := p.net.b.PeerList(peers)
@@ -326,7 +370,28 @@ func (p *peer) PeerList(peers []utils.IPDesc) {
return
}
p.Send(msg)
return
}

// assumes the stateLock is not held
func (p *peer) Ping() {
msg, err := p.net.b.Ping()
p.net.log.AssertNoError(err)
if p.Send(msg) {
p.net.ping.numSent.Inc()
} else {
p.net.ping.numFailed.Inc()
}
}

// assumes the stateLock is not held
func (p *peer) Pong() {
msg, err := p.net.b.Pong()
p.net.log.AssertNoError(err)
if p.Send(msg) {
p.net.pong.numSent.Inc()
} else {
p.net.pong.numFailed.Inc()
}
}

// assumes the stateLock is not held
@@ -458,17 +523,6 @@ func (p *peer) version(msg Msg) {
p.net.connected(p)
}

// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
reply, err := p.net.b.PeerList(ips)
if err != nil {
p.net.log.Warn("failed to send PeerList message due to %s", err)
return
}
p.Send(reply)
}

// assumes the stateLock is not held
func (p *peer) getPeerList(_ Msg) { p.SendPeerList() }

@@ -488,6 +542,12 @@ func (p *peer) peerList(msg Msg) {
p.net.stateLock.Unlock()
}

// assumes the stateLock is not held
func (p *peer) ping(_ Msg) { p.Pong() }

// assumes the stateLock is not held
func (p *peer) pong(_ Msg) {}

// assumes the stateLock is not held
func (p *peer) getAcceptedFrontier(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
@@ -52,6 +52,7 @@ type Config struct {

// Enable/Disable APIs
AdminAPIEnabled bool
InfoAPIEnabled bool
KeystoreAPIEnabled bool
MetricsAPIEnabled bool
HealthAPIEnabled bool
node/node.go
@@ -18,6 +18,7 @@ import (
"github.com/ava-labs/gecko/api"
"github.com/ava-labs/gecko/api/admin"
"github.com/ava-labs/gecko/api/health"
"github.com/ava-labs/gecko/api/info"
"github.com/ava-labs/gecko/api/ipcs"
"github.com/ava-labs/gecko/api/keystore"
"github.com/ava-labs/gecko/api/metrics"
@@ -56,7 +57,7 @@ var (
genesisHashKey = []byte("genesisID")

// Version is the version of this code
Version = version.NewDefaultVersion("avalanche", 0, 5, 5)
Version = version.NewDefaultVersion("avalanche", 0, 6, 0)
versionParser = version.NewDefaultParser()
)

@@ -461,11 +462,19 @@ func (n *Node) initMetricsAPI() {
func (n *Node) initAdminAPI() {
if n.Config.AdminAPIEnabled {
n.Log.Info("initializing Admin API")
service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
service := admin.NewService(n.Log, n.chainManager, n.Net, &n.APIServer)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
}
}

func (n *Node) initInfoAPI() {
if n.Config.InfoAPIEnabled {
n.Log.Info("initializing Info API")
service := info.NewService(n.Log, Version, n.ID, n.Config.NetworkID, n.chainManager, n.Net)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "", n.HTTPLog)
}
}

// initHealthAPI initializes the Health API service
// Assumes n.Log, n.ConsensusAPI, and n.ValidatorAPI already initialized
func (n *Node) initHealthAPI() {
@@ -562,6 +571,7 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
n.initChainManager() // Set up the chain manager

n.initAdminAPI() // Start the Admin API
n.initInfoAPI() // Start the Info API
n.initHealthAPI() // Start the Health API
n.initIPCAPI() // Start the IPC API
@@ -231,6 +231,7 @@ func (ta *Topological) pushVotes(
kahnNodes map[[32]byte]kahnNode,
leaves []ids.ID) ids.Bag {
votes := make(ids.UniqueBag)
txConflicts := make(map[[32]byte]ids.Set)

for len(leaves) > 0 {
newLeavesSize := len(leaves) - 1
@@ -245,6 +246,12 @@ func (ta *Topological) pushVotes(
// Give the votes to the consumer
txID := tx.ID()
votes.UnionSet(txID, kahn.votes)

// Map txID to set of Conflicts
txKey := txID.Key()
if _, exists := txConflicts[txKey]; !exists {
txConflicts[txKey] = ta.cg.Conflicts(tx)
}
}

for _, dep := range vtx.Parents() {
@@ -265,6 +272,18 @@ func (ta *Topological) pushVotes(
}
}

// Create bag of votes for conflicting transactions
conflictingVotes := make(ids.UniqueBag)
for txHash, conflicts := range txConflicts {
txID := ids.NewID(txHash)
for conflictTxHash := range conflicts {
conflictTxID := ids.NewID(conflictTxHash)
conflictingVotes.UnionSet(txID, votes.GetSet(conflictTxID))
}
}

votes.Difference(&conflictingVotes)

return votes.Bag(ta.params.Alpha)
}
|
@ -104,6 +104,78 @@ func TestAvalancheVoting(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestAvalancheIgnoreInvalidVoting(t *testing.T) {
|
||||
params := Parameters{
|
||||
Parameters: snowball.Parameters{
|
||||
Metrics: prometheus.NewRegistry(),
|
||||
K: 3,
|
||||
Alpha: 2,
|
||||
BetaVirtuous: 1,
|
||||
BetaRogue: 1,
|
||||
},
|
||||
Parents: 2,
|
||||
BatchSize: 1,
|
||||
}
|
||||
|
||||
vts := []Vertex{&Vtx{
|
||||
id: GenerateID(),
|
||||
status: choices.Accepted,
|
||||
}, &Vtx{
|
||||
id: GenerateID(),
|
||||
status: choices.Accepted,
|
||||
}}
|
||||
utxos := []ids.ID{GenerateID()}
|
||||
|
||||
ta := Topological{}
|
||||
ta.Initialize(snow.DefaultContextTest(), params, vts)
|
||||
|
||||
tx0 := &snowstorm.TestTx{
|
||||
Identifier: GenerateID(),
|
||||
Stat: choices.Processing,
|
||||
}
|
||||
tx0.Ins.Add(utxos[0])
|
||||
|
||||
vtx0 := &Vtx{
|
||||
dependencies: vts,
|
||||
id: GenerateID(),
|
||||
txs: []snowstorm.Tx{tx0},
|
||||
height: 1,
|
||||
status: choices.Processing,
|
||||
}
|
||||
|
||||
tx1 := &snowstorm.TestTx{
|
||||
Identifier: GenerateID(),
|
||||
Stat: choices.Processing,
|
||||
}
|
||||
tx1.Ins.Add(utxos[0])
|
||||
|
||||
vtx1 := &Vtx{
|
||||
dependencies: vts,
|
||||
id: GenerateID(),
|
||||
txs: []snowstorm.Tx{tx1},
|
||||
height: 1,
|
||||
status: choices.Processing,
|
||||
}
|
||||
|
||||
ta.Add(vtx0)
|
||||
ta.Add(vtx1)
|
||||
|
||||
sm := make(ids.UniqueBag)
|
||||
|
||||
sm.Add(0, vtx0.id)
|
||||
sm.Add(1, vtx1.id)
|
||||
|
||||
// Add Illegal Vote cast by Response 2
|
||||
sm.Add(2, vtx0.id)
|
||||
sm.Add(2, vtx1.id)
|
||||
|
||||
ta.RecordPoll(sm)
|
||||
|
||||
if ta.Finalized() {
|
||||
t.Fatalf("An avalanche instance finalized too early")
|
||||
}
|
||||
}
|
||||
|
||||
func TestAvalancheTransitiveVoting(t *testing.T) {
|
||||
params := Parameters{
|
||||
Parameters: snowball.Parameters{
|
||||
|
|
|
@@ -27,11 +27,13 @@ func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball {
bs := &binarySnowball{
binarySnowflake: binarySnowflake{
binarySlush: binarySlush{preference: choice},
confidence: sb.confidence,
beta: beta,
finalized: sb.Finalized(),
},
preference: choice,
}
bs.numSuccessfulPolls[choice] = sb.numSuccessfulPolls
return bs
}
@@ -42,11 +42,32 @@ func TestUnarySnowball(t *testing.T) {

binarySnowball := sbClone.Extend(beta, 0)

expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0)))"
if result := binarySnowball.String(); result != expected {
t.Fatalf("Expected:\n%s\nReturned:\n%s", expected, result)
}

binarySnowball.RecordUnsuccessfulPoll()
for i := 0; i < 3; i++ {
if binarySnowball.Preference() != 0 {
t.Fatalf("Wrong preference")
} else if binarySnowball.Finalized() {
t.Fatalf("Should not have finalized")
}
binarySnowball.RecordSuccessfulPoll(1)
binarySnowball.RecordUnsuccessfulPoll()
}

if binarySnowball.Preference() != 1 {
t.Fatalf("Wrong preference")
} else if binarySnowball.Finalized() {
t.Fatalf("Should not have finalized")
}

binarySnowball.RecordSuccessfulPoll(1)

if binarySnowball.Finalized() {
if binarySnowball.Preference() != 1 {
t.Fatalf("Wrong preference")
} else if binarySnowball.Finalized() {
t.Fatalf("Should not have finalized")
}
@@ -57,4 +78,9 @@ func TestUnarySnowball(t *testing.T) {
} else if !binarySnowball.Finalized() {
t.Fatalf("Should have finalized")
}

expected = "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false))"
if str := sb.String(); str != expected {
t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str)
}
}
@@ -107,6 +107,9 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error {

// Make sure we don't already have this vertex
if _, err := b.State.GetVertex(vtxID); err == nil {
if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
return b.finish()
}
return nil
}

@@ -124,15 +127,12 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error {

// Process vertices
func (b *bootstrapper) process(vtx avalanche.Vertex) error {
toProcess := []avalanche.Vertex{vtx}
for len(toProcess) > 0 {
newLen := len(toProcess) - 1
vtx := toProcess[newLen]
toProcess = toProcess[:newLen]
if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this
continue
}

toProcess := newMaxVertexHeap()
if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process if we haven't already
toProcess.Push(vtx)
}
for toProcess.Len() > 0 {
vtx := toProcess.Pop()
switch vtx.Status() {
case choices.Unknown:
if err := b.fetch(vtx.ID()); err != nil {
@@ -168,7 +168,9 @@ func (b *bootstrapper) process(vtx avalanche.Vertex) error {
}
}
for _, parent := range vtx.Parents() {
toProcess = append(toProcess, parent)
if _, ok := b.processedCache.Get(parent.ID()); !ok { // already processed this
toProcess.Push(parent)
}
}
b.processedCache.Put(vtx.ID(), nil)
}
@@ -805,3 +805,113 @@ func TestBootstrapperIncompleteMultiPut(t *testing.T) {
t.Fatal("should be accepted")
}
}

func TestBootstrapperFinalized(t *testing.T) {
config, peerID, sender, state, vm := newConfig(t)

vtxID0 := ids.Empty.Prefix(0)
vtxID1 := ids.Empty.Prefix(1)

vtxBytes0 := []byte{0}
vtxBytes1 := []byte{1}

vtx0 := &Vtx{
id: vtxID0,
height: 0,
status: choices.Unknown,
bytes: vtxBytes0,
}
vtx1 := &Vtx{
id: vtxID1,
height: 1,
parents: []avalanche.Vertex{vtx0},
status: choices.Unknown,
bytes: vtxBytes1,
}

bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }

acceptedIDs := ids.Set{}
acceptedIDs.Add(vtxID0)
acceptedIDs.Add(vtxID1)

parsedVtx0 := false
parsedVtx1 := false
state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
switch {
case vtxID.Equals(vtxID0):
if parsedVtx0 {
return vtx0, nil
}
return nil, errUnknownVertex
case vtxID.Equals(vtxID1):
if parsedVtx1 {
return vtx1, nil
}
return nil, errUnknownVertex
default:
t.Fatal(errUnknownVertex)
panic(errUnknownVertex)
}
}
state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(vtxBytes, vtxBytes0):
vtx0.status = choices.Processing
parsedVtx0 = true
return vtx0, nil
case bytes.Equal(vtxBytes, vtxBytes1):
vtx1.status = choices.Processing
parsedVtx1 = true
return vtx1, nil
}
t.Fatal(errUnknownVertex)
return nil, errUnknownVertex
}

requestIDs := map[[32]byte]uint32{}
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
requestIDs[vtxID.Key()] = reqID
}

vm.CantBootstrapping = false

if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx0 and vtx1
t.Fatal(err)
}

reqID, ok := requestIDs[vtxID1.Key()]
if !ok {
t.Fatalf("should have requested vtx1")
}

vm.CantBootstrapped = false

if err := bs.MultiPut(peerID, reqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil {
t.Fatal(err)
}

reqID, ok = requestIDs[vtxID0.Key()]
if !ok {
t.Fatalf("should have requested vtx0")
}

if err := bs.GetAncestorsFailed(peerID, reqID); err != nil {
t.Fatal(err)
}

if !*finished {
t.Fatalf("Bootstrapping should have finished")
} else if vtx0.Status() != choices.Accepted {
t.Fatalf("Vertex should be accepted")
} else if vtx1.Status() != choices.Accepted {
t.Fatalf("Vertex should be accepted")
}
}
@@ -78,9 +78,12 @@ func (i *issuer) Update() {
vdrSet.Add(vdr.ID())
}

toSample := ids.ShortSet{} // Copy to a new variable because we may remove an element in sender.Sender
toSample.Union(vdrSet) // and we don't want that to affect the set of validators we wait for [ie vdrSet]

i.t.RequestID++
if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) {
i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes())
if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet) {
i.t.Config.Sender.PushQuery(toSample, i.t.RequestID, vtxID, i.vtx.Bytes())
} else if numVdrs < p.K {
i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID)
}
@ -32,16 +32,27 @@ import (
|
|||
type polls struct {
|
||||
log logging.Logger
|
||||
numPolls prometheus.Gauge
|
||||
alpha int
|
||||
m map[uint32]poll
|
||||
}
|
||||
|
||||
func newPolls(alpha int, log logging.Logger, numPolls prometheus.Gauge) polls {
|
||||
return polls{
|
||||
log: log,
|
||||
numPolls: numPolls,
|
||||
alpha: alpha,
|
||||
m: make(map[uint32]poll),
|
||||
}
|
||||
}
|
||||
|
||||
// Add to the current set of polls
|
||||
// Returns true if the poll was registered correctly and the network sample
|
||||
// should be made.
|
||||
func (p *polls) Add(requestID uint32, numPolled int) bool {
|
||||
func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool {
|
||||
poll, exists := p.m[requestID]
|
||||
if !exists {
|
||||
poll.numPending = numPolled
|
||||
poll.polled = vdrs
|
||||
poll.alpha = p.alpha
|
||||
p.m[requestID] = poll
|
||||
|
||||
p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
|
||||
|
@ -59,7 +70,7 @@ func (p *polls) Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.Uni
|
|||
return nil, false
|
||||
}
|
||||
|
||||
poll.Vote(votes)
|
||||
poll.Vote(votes, vdr)
|
||||
if poll.Finished() {
|
||||
p.log.Verbo("Poll is finished")
|
||||
delete(p.m, requestID)
|
||||
|
@ -83,19 +94,44 @@ func (p *polls) String() string {
|
|||
|
||||
// poll represents the current state of a network poll for a vertex
|
||||
type poll struct {
|
||||
votes ids.UniqueBag
|
||||
numPending int
|
||||
votes ids.UniqueBag
|
||||
polled ids.ShortSet
|
||||
alpha int
|
||||
}
|
||||
|
||||
// Vote registers a vote for this poll
|
||||
func (p *poll) Vote(votes []ids.ID) {
|
||||
if p.numPending > 0 {
|
||||
p.numPending--
|
||||
p.votes.Add(uint(p.numPending), votes...)
|
||||
func (p *poll) Vote(votes []ids.ID, vdr ids.ShortID) {
|
||||
if p.polled.Contains(vdr) {
|
||||
p.polled.Remove(vdr)
|
||||
p.votes.Add(uint(p.polled.Len()), votes...)
|
||||
}
|
||||
}
|
||||
|
||||
// Finished returns true if the poll has completed, with no more required
|
||||
// responses
|
||||
func (p poll) Finished() bool { return p.numPending <= 0 }
|
||||
func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.numPending) }
|
||||
func (p poll) Finished() bool {
|
||||
// If there are no outstanding queries, the poll is finished
|
||||
numPending := p.polled.Len()
|
||||
if numPending == 0 {
|
||||
return true
|
||||
}
|
||||
// If there are still enough pending responses to include another vertex,
|
||||
// then the poll must wait for more responses
|
||||
if numPending > p.alpha {
|
||||
return false
|
||||
}
|
||||
|
||||
// Ignore any vertex that has already received alpha votes. To safely skip
|
||||
// DAG traversal, assume that all votes for vertices with less than alpha
|
||||
// votes will be applied to a single shared ancestor. In this case, the poll
|
||||
// can terminate early, iff there are not enough pending votes for this
|
||||
// ancestor to receive alpha votes.
|
||||
partialVotes := ids.BitSet(0)
|
||||
for _, vote := range p.votes.List() {
|
||||
if voters := p.votes.GetSet(vote); voters.Len() < p.alpha {
|
||||
partialVotes.Union(voters)
|
||||
}
|
||||
}
|
||||
return partialVotes.Len()+numPending < p.alpha
|
||||
}
|
||||
func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.polled.Len()) }
|
||||
|
|
|
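The early-termination rule above is deliberately conservative: every validator whose chosen vertex has not yet reached alpha is treated as if its vote could still be combined, together with the pending voters, behind one shared ancestor. A self-contained sketch of that arithmetic (editorial illustration, not from this commit; it assumes each validator voted for a single vertex, so plain per-vertex counts stand in for ids.UniqueBag and the voter-set union):

package main

import "fmt"

// finished reports whether an Avalanche-style poll can stop early.
// votes maps each vertex to the number of validators backing it and
// pending is the number of validators that have not responded yet.
func finished(votes map[string]int, pending, alpha int) bool {
	if pending == 0 {
		return true // every polled validator responded
	}
	if pending > alpha {
		return false // the pending voters alone could still push a vertex to alpha
	}
	// Voters whose vertex is below alpha might all be counted toward one
	// shared ancestor, so add them to the pending voters before comparing.
	partial := 0
	for _, n := range votes {
		if n < alpha {
			partial += n
		}
	}
	return partial+pending < alpha // no ancestor can reach alpha anymore
}

func main() {
	// alpha = 3, k = 5: one vertex already has 3 votes, 2 validators pending.
	fmt.Println(finished(map[string]int{"A": 3}, 2, 3)) // true (matches the virtuous-case test below)
	// alpha = k = 4: three frontier vertices with 1 vote each, 1 pending.
	fmt.Println(finished(map[string]int{"B": 1, "C": 1, "D": 1}, 1, 4)) // false (a shared ancestor could still reach 4)
}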
@@ -0,0 +1,99 @@
|
|||
package avalanche
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ava-labs/gecko/ids"
|
||||
)
|
||||
|
||||
func TestPollTerminatesEarlyVirtuousCase(t *testing.T) {
|
||||
alpha := 3
|
||||
|
||||
vtxID := GenerateID()
|
||||
votes := []ids.ID{vtxID}
|
||||
|
||||
vdr1 := ids.NewShortID([20]byte{1})
|
||||
vdr2 := ids.NewShortID([20]byte{2})
|
||||
vdr3 := ids.NewShortID([20]byte{3})
|
||||
vdr4 := ids.NewShortID([20]byte{4})
|
||||
vdr5 := ids.NewShortID([20]byte{5}) // k = 5
|
||||
|
||||
vdrs := ids.ShortSet{}
|
||||
vdrs.Add(vdr1)
|
||||
vdrs.Add(vdr2)
|
||||
vdrs.Add(vdr3)
|
||||
vdrs.Add(vdr4)
|
||||
vdrs.Add(vdr5)
|
||||
|
||||
poll := poll{
|
||||
votes: make(ids.UniqueBag),
|
||||
polled: vdrs,
|
||||
alpha: alpha,
|
||||
}
|
||||
|
||||
poll.Vote(votes, vdr1)
|
||||
if poll.Finished() {
|
||||
t.Fatalf("Poll finished after less than alpha votes")
|
||||
}
|
||||
poll.Vote(votes, vdr2)
|
||||
if poll.Finished() {
|
||||
t.Fatalf("Poll finished after less than alpha votes")
|
||||
}
|
||||
poll.Vote(votes, vdr3)
|
||||
if !poll.Finished() {
|
||||
t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices")
|
||||
}
|
||||
}
|
||||
|
||||
func TestPollAccountsForSharedAncestor(t *testing.T) {
|
||||
alpha := 4
|
||||
|
||||
vtxA := GenerateID()
|
||||
vtxB := GenerateID()
|
||||
vtxC := GenerateID()
|
||||
vtxD := GenerateID()
|
||||
|
||||
// If validators 1-3 vote for frontier vertices
|
||||
// B, C, and D respectively, which all share the common ancestor
|
||||
// A, then we cannot terminate early with alpha = k = 4
|
||||
// If the final vote is cast for any of A, B, C, or D, then
|
||||
// vertex A will have transitively received alpha = 4 votes
|
||||
vdr1 := ids.NewShortID([20]byte{1})
|
||||
vdr2 := ids.NewShortID([20]byte{2})
|
||||
vdr3 := ids.NewShortID([20]byte{3})
|
||||
vdr4 := ids.NewShortID([20]byte{4})
|
||||
|
||||
vdrs := ids.ShortSet{}
|
||||
vdrs.Add(vdr1)
|
||||
vdrs.Add(vdr2)
|
||||
vdrs.Add(vdr3)
|
||||
vdrs.Add(vdr4)
|
||||
|
||||
poll := poll{
|
||||
votes: make(ids.UniqueBag),
|
||||
polled: vdrs,
|
||||
alpha: alpha,
|
||||
}
|
||||
|
||||
votes1 := []ids.ID{vtxB}
|
||||
poll.Vote(votes1, vdr1)
|
||||
if poll.Finished() {
|
||||
t.Fatalf("Poll finished early after receiving one vote")
|
||||
}
|
||||
votes2 := []ids.ID{vtxC}
|
||||
poll.Vote(votes2, vdr2)
|
||||
if poll.Finished() {
|
||||
t.Fatalf("Poll finished early after receiving two votes")
|
||||
}
|
||||
votes3 := []ids.ID{vtxD}
|
||||
poll.Vote(votes3, vdr3)
|
||||
if poll.Finished() {
|
||||
t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes")
|
||||
}
|
||||
|
||||
votes4 := []ids.ID{vtxA}
|
||||
poll.Vote(votes4, vdr4)
|
||||
if !poll.Finished() {
|
||||
t.Fatalf("Poll did not terminate after receiving all outstanding votes")
|
||||
}
|
||||
}
|
|
@@ -57,9 +57,7 @@ func (t *Transitive) Initialize(config Config) error {
|
|||
|
||||
t.onFinished = t.finishBootstrapping
|
||||
|
||||
t.polls.log = config.Context.Log
|
||||
t.polls.numPolls = t.numPolls
|
||||
t.polls.m = make(map[uint32]poll)
|
||||
t.polls = newPolls(int(config.Alpha), config.Context.Log, t.numPolls)
|
||||
|
||||
return t.bootstrapper.Initialize(config.BootstrapConfig)
|
||||
}
|
||||
|
@@ -169,7 +167,11 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxByt
|
|||
t.Config.Context.Log.Verbo("Put(%s, %d, %s) called", vdr, requestID, vtxID)
|
||||
|
||||
if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
|
||||
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
|
||||
if requestID == network.GossipMsgRequestID {
|
||||
t.Config.Context.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
|
||||
} else {
|
||||
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -335,10 +337,10 @@ func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) (bool, error) {
|
|||
|
||||
func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, error) {
|
||||
issued := true
|
||||
vts := []avalanche.Vertex{vtx}
|
||||
for len(vts) > 0 {
|
||||
vtx := vts[0]
|
||||
vts = vts[1:]
|
||||
vertexHeap := newMaxVertexHeap()
|
||||
vertexHeap.Push(vtx)
|
||||
for vertexHeap.Len() > 0 {
|
||||
vtx := vertexHeap.Pop()
|
||||
|
||||
if t.Consensus.VertexIssued(vtx) {
|
||||
continue
|
||||
|
@@ -353,7 +355,7 @@ func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, er
|
|||
t.sendRequest(vdr, parent.ID())
|
||||
issued = false
|
||||
} else {
|
||||
vts = append(vts, parent)
|
||||
vertexHeap.Push(parent)
|
||||
}
|
||||
}
|
||||
|
||||
|
@@ -471,8 +473,11 @@ func (t *Transitive) issueRepoll() {
|
|||
vdrSet.Add(vdr.ID())
|
||||
}
|
||||
|
||||
vdrCopy := ids.ShortSet{}
|
||||
vdrCopy.Union(vdrSet)
|
||||
|
||||
t.RequestID++
|
||||
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
|
||||
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrCopy) {
|
||||
t.Config.Sender.PullQuery(vdrSet, t.RequestID, vtxID)
|
||||
} else if numVdrs < p.K {
|
||||
t.Config.Context.Log.Error("re-query for %s was dropped due to an insufficient number of validators", vtxID)
|
||||
|
|
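vdrCopy is presumably a defensive copy: ids.ShortSet has map semantics, and the set registered with polls.Add must stay stable even though the set handed to the sender can be mutated later (the Sender changes further down in this diff remove the local node ID from the set before sending PullQuery/PushQuery). A tiny illustration of the aliasing hazard with a map-backed set (editorial sketch, hypothetical stand-in type):

package main

import "fmt"

type shortSet map[string]struct{} // stand-in for ids.ShortSet

func (s shortSet) add(v string) { s[v] = struct{}{} }

func (s shortSet) union(o shortSet) {
	for v := range o {
		s.add(v)
	}
}

func main() {
	vdrSet := shortSet{}
	vdrSet.add("self")
	vdrSet.add("peer")

	// Without a copy, the poll and the sender share one underlying map.
	polled := vdrSet
	delete(vdrSet, "self") // the sender strips the local node before sending
	fmt.Println(len(polled)) // 1 -- the poll silently lost a validator it must wait on

	// With an explicit copy, the poll's view is unaffected.
	vdrSet.add("self")
	polledCopy := shortSet{}
	polledCopy.union(vdrSet)
	delete(vdrSet, "self")
	fmt.Println(len(polledCopy)) // 2
}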
|
@@ -3085,3 +3085,120 @@ func TestEngineDuplicatedIssuance(t *testing.T) {
|
|||
|
||||
te.Notify(common.PendingTxs)
|
||||
}
|
||||
|
||||
func TestEngineDoubleChit(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
|
||||
config.Params.Alpha = 2
|
||||
config.Params.K = 2
|
||||
|
||||
vdr0 := validators.GenerateRandomValidator(1)
|
||||
vdr1 := validators.GenerateRandomValidator(1)
|
||||
vals := validators.NewSet()
|
||||
vals.Add(vdr0)
|
||||
vals.Add(vdr1)
|
||||
config.Validators = vals
|
||||
|
||||
sender := &common.SenderTest{}
|
||||
sender.T = t
|
||||
config.Sender = sender
|
||||
|
||||
sender.Default(true)
|
||||
sender.CantGetAcceptedFrontier = false
|
||||
|
||||
st := &stateTest{t: t}
|
||||
config.State = st
|
||||
|
||||
st.Default(true)
|
||||
|
||||
gVtx := &Vtx{
|
||||
id: GenerateID(),
|
||||
status: choices.Accepted,
|
||||
}
|
||||
mVtx := &Vtx{
|
||||
id: GenerateID(),
|
||||
status: choices.Accepted,
|
||||
}
|
||||
|
||||
vts := []avalanche.Vertex{gVtx, mVtx}
|
||||
utxos := []ids.ID{GenerateID()}
|
||||
|
||||
tx := &TestTx{
|
||||
TestTx: snowstorm.TestTx{
|
||||
Identifier: GenerateID(),
|
||||
Stat: choices.Processing,
|
||||
},
|
||||
}
|
||||
tx.Ins.Add(utxos[0])
|
||||
|
||||
vtx := &Vtx{
|
||||
parents: vts,
|
||||
id: GenerateID(),
|
||||
txs: []snowstorm.Tx{tx},
|
||||
height: 1,
|
||||
status: choices.Processing,
|
||||
bytes: []byte{1, 1, 2, 3},
|
||||
}
|
||||
|
||||
st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} }
|
||||
st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
|
||||
switch {
|
||||
case id.Equals(gVtx.ID()):
|
||||
return gVtx, nil
|
||||
case id.Equals(mVtx.ID()):
|
||||
return mVtx, nil
|
||||
}
|
||||
t.Fatalf("Unknown vertex")
|
||||
panic("Should have errored")
|
||||
}
|
||||
|
||||
te := &Transitive{}
|
||||
te.Initialize(config)
|
||||
te.finishBootstrapping()
|
||||
|
||||
reqID := new(uint32)
|
||||
sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, _ []byte) {
|
||||
*reqID = requestID
|
||||
if inVdrs.Len() != 2 {
|
||||
t.Fatalf("Wrong number of validators")
|
||||
}
|
||||
if !vtxID.Equals(vtx.ID()) {
|
||||
t.Fatalf("Wrong vertex requested")
|
||||
}
|
||||
}
|
||||
st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
|
||||
switch {
|
||||
case id.Equals(vtx.ID()):
|
||||
return vtx, nil
|
||||
}
|
||||
t.Fatalf("Unknown vertex")
|
||||
panic("Should have errored")
|
||||
}
|
||||
|
||||
te.insert(vtx)
|
||||
|
||||
votes := ids.Set{}
|
||||
votes.Add(vtx.ID())
|
||||
|
||||
if status := tx.Status(); status != choices.Processing {
|
||||
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing)
|
||||
}
|
||||
|
||||
te.Chits(vdr0.ID(), *reqID, votes)
|
||||
|
||||
if status := tx.Status(); status != choices.Processing {
|
||||
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing)
|
||||
}
|
||||
|
||||
te.Chits(vdr0.ID(), *reqID, votes)
|
||||
|
||||
if status := tx.Status(); status != choices.Processing {
|
||||
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing)
|
||||
}
|
||||
|
||||
te.Chits(vdr1.ID(), *reqID, votes)
|
||||
|
||||
if status := tx.Status(); status != choices.Accepted {
|
||||
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Accepted)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -7,6 +7,10 @@ import (
|
|||
"github.com/ava-labs/gecko/ids"
|
||||
)
|
||||
|
||||
const (
|
||||
minRequestsSize = 32
|
||||
)
|
||||
|
||||
type req struct {
|
||||
vdr ids.ShortID
|
||||
id uint32
|
||||
|
@@ -22,7 +26,7 @@ type Requests struct {
|
|||
// are only in one request at a time.
|
||||
func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) {
|
||||
if r.reqsToID == nil {
|
||||
r.reqsToID = make(map[[20]byte]map[uint32]ids.ID)
|
||||
r.reqsToID = make(map[[20]byte]map[uint32]ids.ID, minRequestsSize)
|
||||
}
|
||||
vdrKey := vdr.Key()
|
||||
vdrReqs, ok := r.reqsToID[vdrKey]
|
||||
|
@@ -33,7 +37,7 @@ func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) {
|
|||
vdrReqs[requestID] = containerID
|
||||
|
||||
if r.idToReq == nil {
|
||||
r.idToReq = make(map[[32]byte]req)
|
||||
r.idToReq = make(map[[32]byte]req, minRequestsSize)
|
||||
}
|
||||
r.idToReq[containerID.Key()] = req{
|
||||
vdr: vdr,
|
||||
|
|
|
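minRequestsSize is only an initial capacity hint: make(map, n) pre-allocates room for roughly n entries so the first inserts avoid a few rehash-and-grow steps, but the map still grows past it freely. A quick self-contained illustration (editorial sketch):

package main

import "fmt"

func main() {
	const minRequestsSize = 32

	// Pre-sized map: starts with space for ~32 entries, grows as needed.
	reqs := make(map[uint32]string, minRequestsSize)
	for i := uint32(0); i < 100; i++ {
		reqs[i] = "containerID"
	}
	fmt.Println(len(reqs)) // 100 -- the hint never limits the map
}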
@@ -115,6 +115,9 @@ func (b *bootstrapper) fetch(blkID ids.ID) error {
|
|||
|
||||
// Make sure we don't already have this block
|
||||
if _, err := b.VM.GetBlock(blkID); err == nil {
|
||||
if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
|
||||
return b.finish()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
|
@@ -622,3 +622,128 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
|
|||
t.Fatalf("Blk shouldn't be accepted")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBootstrapperFinalized(t *testing.T) {
|
||||
config, peerID, sender, vm := newConfig(t)
|
||||
|
||||
blkID0 := ids.Empty.Prefix(0)
|
||||
blkID1 := ids.Empty.Prefix(1)
|
||||
blkID2 := ids.Empty.Prefix(2)
|
||||
|
||||
blkBytes0 := []byte{0}
|
||||
blkBytes1 := []byte{1}
|
||||
blkBytes2 := []byte{2}
|
||||
|
||||
blk0 := &Blk{
|
||||
id: blkID0,
|
||||
height: 0,
|
||||
status: choices.Accepted,
|
||||
bytes: blkBytes0,
|
||||
}
|
||||
blk1 := &Blk{
|
||||
parent: blk0,
|
||||
id: blkID1,
|
||||
height: 1,
|
||||
status: choices.Unknown,
|
||||
bytes: blkBytes1,
|
||||
}
|
||||
blk2 := &Blk{
|
||||
parent: blk1,
|
||||
id: blkID2,
|
||||
height: 2,
|
||||
status: choices.Unknown,
|
||||
bytes: blkBytes2,
|
||||
}
|
||||
|
||||
bs := bootstrapper{}
|
||||
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
|
||||
bs.Initialize(config)
|
||||
finished := new(bool)
|
||||
bs.onFinished = func() error { *finished = true; return nil }
|
||||
|
||||
acceptedIDs := ids.Set{}
|
||||
acceptedIDs.Add(blkID1)
|
||||
acceptedIDs.Add(blkID2)
|
||||
|
||||
parsedBlk1 := false
|
||||
parsedBlk2 := false
|
||||
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
|
||||
switch {
|
||||
case blkID.Equals(blkID0):
|
||||
return blk0, nil
|
||||
case blkID.Equals(blkID1):
|
||||
if parsedBlk1 {
|
||||
return blk1, nil
|
||||
}
|
||||
return nil, errUnknownBlock
|
||||
case blkID.Equals(blkID2):
|
||||
if parsedBlk2 {
|
||||
return blk2, nil
|
||||
}
|
||||
return nil, errUnknownBlock
|
||||
default:
|
||||
t.Fatal(errUnknownBlock)
|
||||
panic(errUnknownBlock)
|
||||
}
|
||||
}
|
||||
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
|
||||
switch {
|
||||
case bytes.Equal(blkBytes, blkBytes0):
|
||||
return blk0, nil
|
||||
case bytes.Equal(blkBytes, blkBytes1):
|
||||
blk1.status = choices.Processing
|
||||
parsedBlk1 = true
|
||||
return blk1, nil
|
||||
case bytes.Equal(blkBytes, blkBytes2):
|
||||
blk2.status = choices.Processing
|
||||
parsedBlk2 = true
|
||||
return blk2, nil
|
||||
}
|
||||
t.Fatal(errUnknownBlock)
|
||||
return nil, errUnknownBlock
|
||||
}
|
||||
|
||||
requestIDs := map[[32]byte]uint32{}
|
||||
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
|
||||
if !vdr.Equals(peerID) {
|
||||
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
|
||||
}
|
||||
requestIDs[vtxID.Key()] = reqID
|
||||
}
|
||||
|
||||
vm.CantBootstrapping = false
|
||||
|
||||
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk0 and blk1
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
reqID, ok := requestIDs[blkID2.Key()]
|
||||
if !ok {
|
||||
t.Fatalf("should have requested blk2")
|
||||
}
|
||||
|
||||
vm.CantBootstrapped = false
|
||||
|
||||
if err := bs.MultiPut(peerID, reqID, [][]byte{blkBytes2, blkBytes1}); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
reqID, ok = requestIDs[blkID1.Key()]
|
||||
if !ok {
|
||||
t.Fatalf("should have requested blk1")
|
||||
}
|
||||
|
||||
if err := bs.GetAncestorsFailed(peerID, reqID); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
if !*finished {
|
||||
t.Fatalf("Bootstrapping should have finished")
|
||||
} else if blk0.Status() != choices.Accepted {
|
||||
t.Fatalf("Block should be accepted")
|
||||
} else if blk1.Status() != choices.Accepted {
|
||||
t.Fatalf("Block should be accepted")
|
||||
} else if blk2.Status() != choices.Accepted {
|
||||
t.Fatalf("Block should be accepted")
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -22,11 +22,11 @@ type polls struct {
|
|||
// Add to the current set of polls
|
||||
// Returns true if the poll was registered correctly and the network sample
|
||||
// should be made.
|
||||
func (p *polls) Add(requestID uint32, numPolled int) bool {
|
||||
func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool {
|
||||
poll, exists := p.m[requestID]
|
||||
if !exists {
|
||||
poll.alpha = p.alpha
|
||||
poll.numPolled = numPolled
|
||||
poll.polled = vdrs
|
||||
p.m[requestID] = poll
|
||||
|
||||
p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
|
||||
|
@@ -42,7 +42,7 @@ func (p *polls) Vote(requestID uint32, vdr ids.ShortID, vote ids.ID) (ids.Bag, b
|
|||
if !exists {
|
||||
return ids.Bag{}, false
|
||||
}
|
||||
poll.Vote(vote)
|
||||
poll.Vote(vote, vdr)
|
||||
if poll.Finished() {
|
||||
delete(p.m, requestID)
|
||||
p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
|
||||
|
@@ -60,7 +60,7 @@ func (p *polls) CancelVote(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) {
|
|||
return ids.Bag{}, false
|
||||
}
|
||||
|
||||
poll.CancelVote()
|
||||
poll.CancelVote(vdr)
|
||||
if poll.Finished() {
|
||||
delete(p.m, requestID)
|
||||
p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
|
||||
|
@@ -83,22 +83,18 @@ func (p *polls) String() string {
|
|||
|
||||
// poll represents the current state of a network poll for a block
|
||||
type poll struct {
|
||||
alpha int
|
||||
votes ids.Bag
|
||||
numPolled int
|
||||
alpha int
|
||||
votes ids.Bag
|
||||
polled ids.ShortSet
|
||||
}
|
||||
|
||||
// CancelVote removes a pending vote from this poll
|
||||
func (p *poll) CancelVote() {
|
||||
if p.numPolled > 0 {
|
||||
p.numPolled--
|
||||
}
|
||||
}
|
||||
func (p *poll) CancelVote(vdr ids.ShortID) { p.polled.Remove(vdr) }
|
||||
|
||||
// Vote registers a vote for this poll
|
||||
func (p *poll) Vote(vote ids.ID) {
|
||||
if p.numPolled > 0 {
|
||||
p.numPolled--
|
||||
func (p *poll) Vote(vote ids.ID, vdr ids.ShortID) {
|
||||
if p.polled.Contains(vdr) {
|
||||
p.polled.Remove(vdr)
|
||||
p.votes.Add(vote)
|
||||
}
|
||||
}
|
||||
|
@@ -106,13 +102,14 @@ func (p *poll) Vote(vote ids.ID) {
|
|||
// Finished returns true if the poll has completed, with no more required
|
||||
// responses
|
||||
func (p poll) Finished() bool {
|
||||
remaining := p.polled.Len()
|
||||
received := p.votes.Len()
|
||||
_, freq := p.votes.Mode()
|
||||
return p.numPolled == 0 || // All k nodes responded
|
||||
return remaining == 0 || // All k nodes responded
|
||||
freq >= p.alpha || // An alpha majority has returned
|
||||
received+p.numPolled < p.alpha // An alpha majority can never return
|
||||
received+remaining < p.alpha // An alpha majority can never return
|
||||
}
|
||||
|
||||
func (p poll) String() string {
|
||||
return fmt.Sprintf("Waiting on %d chits", p.numPolled)
|
||||
return fmt.Sprintf("Waiting on %d chits from %s", p.polled.Len(), p.polled)
|
||||
}
|
||||
|
|
|
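The rewritten Finished has three exits: all polled validators answered, some block already holds an alpha majority, or an alpha majority has become arithmetically impossible. A small self-contained check of those conditions (editorial illustration, using an int tally per block in place of ids.Bag):

package main

import "fmt"

// finished mirrors the three-way condition above with simplified types:
// remaining is the number of validators still being waited on and votes
// holds the per-block tallies received so far.
func finished(votes map[string]int, remaining, alpha int) bool {
	received, freq := 0, 0
	for _, n := range votes {
		received += n
		if n > freq {
			freq = n // the Mode() frequency
		}
	}
	return remaining == 0 || // all k nodes responded
		freq >= alpha || // an alpha majority has returned
		received+remaining < alpha // an alpha majority can never return
}

func main() {
	// alpha = 4, k = 5:
	fmt.Println(finished(map[string]int{"A": 4}, 1, 4))         // true: A reached alpha
	fmt.Println(finished(map[string]int{"A": 1}, 1, 4))         // true: at most 2 votes can ever arrive
	fmt.Println(finished(map[string]int{"A": 2, "B": 1}, 2, 4)) // false: A could still reach alpha
}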
@@ -185,7 +185,11 @@ func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.I
|
|||
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
|
||||
// bootstrapping isn't done --> we didn't send any gets --> this put is invalid
|
||||
if !t.bootstrapped {
|
||||
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
|
||||
if requestID == network.GossipMsgRequestID {
|
||||
t.Config.Context.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
|
||||
} else {
|
||||
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
@@ -542,18 +546,15 @@ func (t *Transitive) pullSample(blkID ids.ID) {
|
|||
vdrSet.Add(vdr.ID())
|
||||
}
|
||||
|
||||
if numVdrs := len(vdrs); numVdrs != p.K {
|
||||
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
|
||||
return
|
||||
}
|
||||
toSample := ids.ShortSet{}
|
||||
toSample.Union(vdrSet)
|
||||
|
||||
t.RequestID++
|
||||
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
|
||||
t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
|
||||
return
|
||||
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet) {
|
||||
t.Config.Sender.PullQuery(toSample, t.RequestID, blkID)
|
||||
} else if numVdrs < p.K {
|
||||
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
|
||||
}
|
||||
|
||||
t.Config.Sender.PullQuery(vdrSet, t.RequestID, blkID)
|
||||
}
|
||||
|
||||
// send a push request for this block
|
||||
|
@@ -566,20 +567,15 @@ func (t *Transitive) pushSample(blk snowman.Block) {
|
|||
vdrSet.Add(vdr.ID())
|
||||
}
|
||||
|
||||
blkID := blk.ID()
|
||||
if numVdrs := len(vdrs); numVdrs != p.K {
|
||||
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
|
||||
return
|
||||
}
|
||||
toSample := ids.ShortSet{}
|
||||
toSample.Union(vdrSet)
|
||||
|
||||
t.RequestID++
|
||||
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
|
||||
t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
|
||||
return
|
||||
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet) {
|
||||
t.Config.Sender.PushQuery(toSample, t.RequestID, blk.ID(), blk.Bytes())
|
||||
} else if numVdrs < p.K {
|
||||
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blk.ID())
|
||||
}
|
||||
|
||||
t.Config.Sender.PushQuery(vdrSet, t.RequestID, blkID, blk.Bytes())
|
||||
return
|
||||
}
|
||||
|
||||
func (t *Transitive) deliver(blk snowman.Block) error {
|
||||
|
|
|
@@ -1522,3 +1522,124 @@ func TestEngineAggressivePolling(t *testing.T) {
|
|||
t.Fatalf("Should have sent an additional pull query")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEngineDoubleChit(t *testing.T) {
|
||||
config := DefaultConfig()
|
||||
|
||||
config.Params = snowball.Parameters{
|
||||
Metrics: prometheus.NewRegistry(),
|
||||
K: 2,
|
||||
Alpha: 2,
|
||||
BetaVirtuous: 1,
|
||||
BetaRogue: 2,
|
||||
}
|
||||
|
||||
vdr0 := validators.GenerateRandomValidator(1)
|
||||
vdr1 := validators.GenerateRandomValidator(1)
|
||||
|
||||
vals := validators.NewSet()
|
||||
config.Validators = vals
|
||||
|
||||
vals.Add(vdr0)
|
||||
vals.Add(vdr1)
|
||||
|
||||
sender := &common.SenderTest{}
|
||||
sender.T = t
|
||||
config.Sender = sender
|
||||
|
||||
sender.Default(true)
|
||||
|
||||
vm := &VMTest{}
|
||||
vm.T = t
|
||||
config.VM = vm
|
||||
|
||||
vm.Default(true)
|
||||
vm.CantSetPreference = false
|
||||
|
||||
gBlk := &Blk{
|
||||
id: GenerateID(),
|
||||
status: choices.Accepted,
|
||||
}
|
||||
|
||||
vm.LastAcceptedF = func() ids.ID { return gBlk.ID() }
|
||||
sender.CantGetAcceptedFrontier = false
|
||||
|
||||
vm.GetBlockF = func(id ids.ID) (snowman.Block, error) {
|
||||
switch {
|
||||
case id.Equals(gBlk.ID()):
|
||||
return gBlk, nil
|
||||
}
|
||||
t.Fatalf("Unknown block")
|
||||
panic("Should have errored")
|
||||
}
|
||||
|
||||
te := &Transitive{}
|
||||
te.Initialize(config)
|
||||
te.finishBootstrapping()
|
||||
|
||||
vm.LastAcceptedF = nil
|
||||
sender.CantGetAcceptedFrontier = true
|
||||
|
||||
blk := &Blk{
|
||||
parent: gBlk,
|
||||
id: GenerateID(),
|
||||
status: choices.Processing,
|
||||
bytes: []byte{1},
|
||||
}
|
||||
|
||||
queried := new(bool)
|
||||
queryRequestID := new(uint32)
|
||||
sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) {
|
||||
if *queried {
|
||||
t.Fatalf("Asked multiple times")
|
||||
}
|
||||
*queried = true
|
||||
*queryRequestID = requestID
|
||||
vdrSet := ids.ShortSet{}
|
||||
vdrSet.Add(vdr0.ID(), vdr1.ID())
|
||||
if !inVdrs.Equals(vdrSet) {
|
||||
t.Fatalf("Asking wrong validator for preference")
|
||||
}
|
||||
if !blk.ID().Equals(blkID) {
|
||||
t.Fatalf("Asking for wrong block")
|
||||
}
|
||||
}
|
||||
|
||||
te.insert(blk)
|
||||
|
||||
vm.GetBlockF = func(id ids.ID) (snowman.Block, error) {
|
||||
switch {
|
||||
case id.Equals(gBlk.ID()):
|
||||
return gBlk, nil
|
||||
case id.Equals(blk.ID()):
|
||||
return blk, nil
|
||||
}
|
||||
t.Fatalf("Unknown block")
|
||||
panic("Should have errored")
|
||||
}
|
||||
|
||||
blkSet := ids.Set{}
|
||||
blkSet.Add(blk.ID())
|
||||
|
||||
if status := blk.Status(); status != choices.Processing {
|
||||
t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing)
|
||||
}
|
||||
|
||||
te.Chits(vdr0.ID(), *queryRequestID, blkSet)
|
||||
|
||||
if status := blk.Status(); status != choices.Processing {
|
||||
t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing)
|
||||
}
|
||||
|
||||
te.Chits(vdr0.ID(), *queryRequestID, blkSet)
|
||||
|
||||
if status := blk.Status(); status != choices.Processing {
|
||||
t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing)
|
||||
}
|
||||
|
||||
te.Chits(vdr1.ID(), *queryRequestID, blkSet)
|
||||
|
||||
if status := blk.Status(); status != choices.Accepted {
|
||||
t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Accepted)
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -10,12 +10,16 @@ import (
|
|||
"github.com/ava-labs/gecko/ids"
|
||||
)
|
||||
|
||||
const (
|
||||
minBlockerSize = 16
|
||||
)
|
||||
|
||||
// Blocker tracks objects that are blocked
|
||||
type Blocker map[[32]byte][]Blockable
|
||||
|
||||
func (b *Blocker) init() {
|
||||
if *b == nil {
|
||||
*b = make(map[[32]byte][]Blockable)
|
||||
*b = make(map[[32]byte][]Blockable, minBlockerSize)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -124,19 +124,12 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI
|
|||
sr.lock.RLock()
|
||||
defer sr.lock.RUnlock()
|
||||
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
if chain, exists := sr.chains[chainID.Key()]; exists {
|
||||
if !chain.GetAcceptedFrontierFailed(validatorID, requestID) {
|
||||
sr.log.Debug("deferring GetAcceptedFrontier timeout due to a full queue on %s", chainID)
|
||||
// Defer this call to later
|
||||
sr.timeouts.Register(validatorID, chainID, requestID, func() {
|
||||
sr.GetAcceptedFrontierFailed(validatorID, chainID, requestID)
|
||||
})
|
||||
return
|
||||
}
|
||||
chain.GetAcceptedFrontierFailed(validatorID, requestID)
|
||||
} else {
|
||||
sr.log.Error("GetAcceptedFrontierFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
|
||||
}
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
}
|
||||
|
||||
// GetAccepted routes an incoming GetAccepted request from the
|
||||
|
@@ -176,18 +169,12 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID
|
|||
sr.lock.RLock()
|
||||
defer sr.lock.RUnlock()
|
||||
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
if chain, exists := sr.chains[chainID.Key()]; exists {
|
||||
if !chain.GetAcceptedFailed(validatorID, requestID) {
|
||||
sr.timeouts.Register(validatorID, chainID, requestID, func() {
|
||||
sr.log.Debug("deferring GetAccepted timeout due to a full queue on %s", chainID)
|
||||
sr.GetAcceptedFailed(validatorID, chainID, requestID)
|
||||
})
|
||||
return
|
||||
}
|
||||
chain.GetAcceptedFailed(validatorID, requestID)
|
||||
} else {
|
||||
sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
|
||||
}
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
}
|
||||
|
||||
// GetAncestors routes an incoming GetAncestors message from the validator with ID [validatorID]
|
||||
|
@@ -227,18 +214,12 @@ func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.I
|
|||
sr.lock.RLock()
|
||||
defer sr.lock.RUnlock()
|
||||
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
if chain, exists := sr.chains[chainID.Key()]; exists {
|
||||
if !chain.GetAncestorsFailed(validatorID, requestID) {
|
||||
sr.timeouts.Register(validatorID, chainID, requestID, func() {
|
||||
sr.log.Debug("deferring GetAncestors timeout due to a full queue on %s", chainID)
|
||||
sr.GetAncestorsFailed(validatorID, chainID, requestID)
|
||||
})
|
||||
return
|
||||
}
|
||||
chain.GetAncestorsFailed(validatorID, requestID)
|
||||
} else {
|
||||
sr.log.Error("GetAncestorsFailed(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID)
|
||||
}
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
}
|
||||
|
||||
// Get routes an incoming Get request from the validator with ID [validatorID]
|
||||
|
@@ -278,18 +259,12 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques
|
|||
sr.lock.RLock()
|
||||
defer sr.lock.RUnlock()
|
||||
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
if chain, exists := sr.chains[chainID.Key()]; exists {
|
||||
if !chain.GetFailed(validatorID, requestID) {
|
||||
sr.timeouts.Register(validatorID, chainID, requestID, func() {
|
||||
sr.log.Debug("deferring Get timeout due to a full queue on %s", chainID)
|
||||
sr.GetFailed(validatorID, chainID, requestID)
|
||||
})
|
||||
return
|
||||
}
|
||||
chain.GetFailed(validatorID, requestID)
|
||||
} else {
|
||||
sr.log.Error("GetFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
|
||||
}
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
}
|
||||
|
||||
// PushQuery routes an incoming PushQuery request from the validator with ID [validatorID]
|
||||
|
@@ -341,18 +316,12 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ
|
|||
sr.lock.RLock()
|
||||
defer sr.lock.RUnlock()
|
||||
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
if chain, exists := sr.chains[chainID.Key()]; exists {
|
||||
if !chain.QueryFailed(validatorID, requestID) {
|
||||
sr.timeouts.Register(validatorID, chainID, requestID, func() {
|
||||
sr.log.Debug("deferring Query timeout due to a full queue on %s", chainID)
|
||||
sr.QueryFailed(validatorID, chainID, requestID)
|
||||
})
|
||||
return
|
||||
}
|
||||
chain.QueryFailed(validatorID, requestID)
|
||||
} else {
|
||||
sr.log.Error("QueryFailed(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID)
|
||||
}
|
||||
sr.timeouts.Cancel(validatorID, chainID, requestID)
|
||||
}
|
||||
|
||||
// Shutdown shuts down this router
|
||||
|
|
|
@@ -4,12 +4,14 @@
|
|||
package router
|
||||
|
||||
import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/ava-labs/gecko/ids"
|
||||
"github.com/ava-labs/gecko/snow"
|
||||
"github.com/ava-labs/gecko/snow/engine/common"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
// Handler passes incoming messages from the network to the consensus engine
|
||||
|
@@ -17,12 +19,18 @@ import (
|
|||
type Handler struct {
|
||||
metrics
|
||||
|
||||
msgs chan message
|
||||
closed chan struct{}
|
||||
engine common.Engine
|
||||
msgChan <-chan common.Message
|
||||
msgs chan message
|
||||
reliableMsgsSema chan struct{}
|
||||
reliableMsgsLock sync.Mutex
|
||||
reliableMsgs []message
|
||||
closed chan struct{}
|
||||
msgChan <-chan common.Message
|
||||
|
||||
ctx *snow.Context
|
||||
engine common.Engine
|
||||
|
||||
toClose func()
|
||||
closing bool
|
||||
}
|
||||
|
||||
// Initialize this consensus handler
|
||||
|
@@ -35,9 +43,12 @@ func (h *Handler) Initialize(
|
|||
) {
|
||||
h.metrics.Initialize(namespace, metrics)
|
||||
h.msgs = make(chan message, bufferSize)
|
||||
h.reliableMsgsSema = make(chan struct{}, 1)
|
||||
h.closed = make(chan struct{})
|
||||
h.engine = engine
|
||||
h.msgChan = msgChan
|
||||
|
||||
h.ctx = engine.Context()
|
||||
h.engine = engine
|
||||
}
|
||||
|
||||
// Context of this Handler
|
||||
|
@@ -46,37 +57,38 @@ func (h *Handler) Context() *snow.Context { return h.engine.Context() }
|
|||
// Dispatch waits for incoming messages from the network
|
||||
// and, when they arrive, sends them to the consensus engine
|
||||
func (h *Handler) Dispatch() {
|
||||
log := h.Context().Log
|
||||
defer func() {
|
||||
log.Info("finished shutting down chain")
|
||||
h.ctx.Log.Info("finished shutting down chain")
|
||||
close(h.closed)
|
||||
}()
|
||||
|
||||
closing := false
|
||||
for {
|
||||
select {
|
||||
case msg, ok := <-h.msgs:
|
||||
if !ok {
|
||||
// the msgs channel has been closed, so this dispatcher should exit
|
||||
return
|
||||
}
|
||||
|
||||
h.metrics.pending.Dec()
|
||||
if closing {
|
||||
log.Debug("dropping message due to closing:\n%s", msg)
|
||||
continue
|
||||
}
|
||||
if h.dispatchMsg(msg) {
|
||||
closing = true
|
||||
h.dispatchMsg(msg)
|
||||
case <-h.reliableMsgsSema:
|
||||
// get all the reliable messages
|
||||
h.reliableMsgsLock.Lock()
|
||||
msgs := h.reliableMsgs
|
||||
h.reliableMsgs = nil
|
||||
h.reliableMsgsLock.Unlock()
|
||||
|
||||
// fire all the reliable messages
|
||||
for _, msg := range msgs {
|
||||
h.metrics.pending.Dec()
|
||||
h.dispatchMsg(msg)
|
||||
}
|
||||
case msg := <-h.msgChan:
|
||||
if closing {
|
||||
log.Debug("dropping internal message due to closing:\n%s", msg)
|
||||
continue
|
||||
}
|
||||
if h.dispatchMsg(message{messageType: notifyMsg, notification: msg}) {
|
||||
closing = true
|
||||
}
|
||||
// handle a message from the VM
|
||||
h.dispatchMsg(message{messageType: notifyMsg, notification: msg})
|
||||
}
|
||||
if closing && h.toClose != nil {
|
||||
if h.closing && h.toClose != nil {
|
||||
go h.toClose()
|
||||
}
|
||||
}
|
||||
|
@@ -85,14 +97,19 @@ func (h *Handler) Dispatch() {
|
|||
// Dispatch a message to the consensus engine.
|
||||
// Marks this consensus handler (and its associated engine) as closing if it should shutdown
|
||||
// (due to receipt of a shutdown message or a fatal error)
|
||||
func (h *Handler) dispatchMsg(msg message) bool {
|
||||
func (h *Handler) dispatchMsg(msg message) {
|
||||
if h.closing {
|
||||
h.ctx.Log.Debug("dropping message due to closing:\n%s", msg)
|
||||
h.metrics.dropped.Inc()
|
||||
return
|
||||
}
|
||||
|
||||
startTime := time.Now()
|
||||
ctx := h.engine.Context()
|
||||
|
||||
ctx.Lock.Lock()
|
||||
defer ctx.Lock.Unlock()
|
||||
h.ctx.Lock.Lock()
|
||||
defer h.ctx.Lock.Unlock()
|
||||
|
||||
ctx.Log.Verbo("Forwarding message to consensus: %s", msg)
|
||||
h.ctx.Log.Verbo("Forwarding message to consensus: %s", msg)
|
||||
var (
|
||||
err error
|
||||
done bool
|
||||
|
@@ -159,9 +176,10 @@ func (h *Handler) dispatchMsg(msg message) bool {
|
|||
}
|
||||
|
||||
if err != nil {
|
||||
ctx.Log.Fatal("forcing chain to shutdown due to %s", err)
|
||||
h.ctx.Log.Fatal("forcing chain to shutdown due to %s", err)
|
||||
}
|
||||
return done || err != nil
|
||||
|
||||
h.closing = done || err != nil
|
||||
}
|
||||
|
||||
// GetAcceptedFrontier passes a GetAcceptedFrontier message received from the
|
||||
|
@@ -187,8 +205,8 @@ func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, co
|
|||
|
||||
// GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received
|
||||
// from the network to the consensus engine.
|
||||
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) {
|
||||
h.sendReliableMsg(message{
|
||||
messageType: getAcceptedFrontierFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
|
@@ -219,14 +237,43 @@ func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerI
|
|||
|
||||
// GetAcceptedFailed passes a GetAcceptedFailed message received from the
|
||||
// network to the consensus engine.
|
||||
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
|
||||
h.sendReliableMsg(message{
|
||||
messageType: getAcceptedFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAncestors passes a GetAncestors message received from the network to the consensus engine.
|
||||
func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAncestorsMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: containerID,
|
||||
})
|
||||
}
|
||||
|
||||
// MultiPut passes a MultiPut message received from the network to the consensus engine.
|
||||
func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: multiPutMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containers: containers,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine.
|
||||
func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) {
|
||||
h.sendReliableMsg(message{
|
||||
messageType: getAncestorsFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// Get passes a Get message received from the network to the consensus engine.
|
||||
func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
|
||||
return h.sendMsg(message{
|
||||
|
@@ -237,16 +284,6 @@ func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids
|
|||
})
|
||||
}
|
||||
|
||||
// GetAncestors passes a GetAncestors message received from the network to the consensus engine.
|
||||
func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAncestorsMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containerID: containerID,
|
||||
})
|
||||
}
|
||||
|
||||
// Put passes a Put message received from the network to the consensus engine.
|
||||
func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) bool {
|
||||
return h.sendMsg(message{
|
||||
|
@@ -258,34 +295,15 @@ func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids
|
|||
})
|
||||
}
|
||||
|
||||
// MultiPut passes a MultiPut message received from the network to the consensus engine.
|
||||
func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: multiPutMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
containers: containers,
|
||||
})
|
||||
}
|
||||
|
||||
// GetFailed passes a GetFailed message to the consensus engine.
|
||||
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) {
|
||||
h.sendReliableMsg(message{
|
||||
messageType: getFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine.
|
||||
func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
messageType: getAncestorsFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
})
|
||||
}
|
||||
|
||||
// PushQuery passes a PushQuery message received from the network to the consensus engine.
|
||||
func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) bool {
|
||||
return h.sendMsg(message{
|
||||
|
@@ -318,8 +336,8 @@ func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set
|
|||
}
|
||||
|
||||
// QueryFailed passes a QueryFailed message received from the network to the consensus engine.
|
||||
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) bool {
|
||||
return h.sendMsg(message{
|
||||
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) {
|
||||
h.sendReliableMsg(message{
|
||||
messageType: queryFailedMsg,
|
||||
validatorID: validatorID,
|
||||
requestID: requestID,
|
||||
|
@@ -341,8 +359,9 @@ func (h *Handler) Notify(msg common.Message) bool {
|
|||
|
||||
// Shutdown shuts down the dispatcher
|
||||
func (h *Handler) Shutdown() {
|
||||
h.metrics.pending.Inc()
|
||||
h.msgs <- message{messageType: shutdownMsg}
|
||||
h.sendReliableMsg(message{
|
||||
messageType: shutdownMsg,
|
||||
})
|
||||
}
|
||||
|
||||
func (h *Handler) sendMsg(msg message) bool {
|
||||
|
@@ -355,3 +374,15 @@ func (h *Handler) sendMsg(msg message) bool {
|
|||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func (h *Handler) sendReliableMsg(msg message) {
|
||||
h.reliableMsgsLock.Lock()
|
||||
defer h.reliableMsgsLock.Unlock()
|
||||
|
||||
h.metrics.pending.Inc()
|
||||
h.reliableMsgs = append(h.reliableMsgs, msg)
|
||||
select {
|
||||
case h.reliableMsgsSema <- struct{}{}:
|
||||
default:
|
||||
}
|
||||
}
|
||||
|
|
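sendReliableMsg plus the reliableMsgsSema branch in Dispatch form a standard unbounded-queue-with-wakeup pattern: producers append to a mutex-guarded slice and do a non-blocking send on a one-slot channel, and the dispatcher drains the whole slice whenever that channel fires. Unlike the bounded msgs channel, this path can never drop a *Failed or Shutdown message. A self-contained reduction of the pattern (editorial sketch, not the actual Handler types):

package main

import (
	"fmt"
	"sync"
)

type reliableQueue struct {
	mu   sync.Mutex
	msgs []string
	sema chan struct{} // capacity 1: a wakeup signal, not a buffer
}

// push never blocks and never drops: the slice grows as needed, and the
// non-blocking send only guarantees the consumer is woken at least once.
func (q *reliableQueue) push(msg string) {
	q.mu.Lock()
	q.msgs = append(q.msgs, msg)
	q.mu.Unlock()
	select {
	case q.sema <- struct{}{}:
	default: // a wakeup is already pending, which is enough
	}
}

func main() {
	q := &reliableQueue{sema: make(chan struct{}, 1)}
	for i := 0; i < 5; i++ {
		q.push(fmt.Sprintf("queryFailed-%d", i))
	}

	<-q.sema // consumer side: one wakeup drains everything queued so far
	q.mu.Lock()
	batch := q.msgs
	q.msgs = nil
	q.mu.Unlock()
	fmt.Println(len(batch)) // 5
}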
|
@@ -31,17 +31,16 @@ func (s *Sender) Context() *snow.Context { return s.ctx }
|
|||
|
||||
// GetAcceptedFrontier ...
|
||||
func (s *Sender) GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32) {
|
||||
if validatorIDs.Contains(s.ctx.NodeID) {
|
||||
validatorIDs.Remove(s.ctx.NodeID)
|
||||
go s.router.GetAcceptedFrontier(s.ctx.NodeID, s.ctx.ChainID, requestID)
|
||||
}
|
||||
validatorList := validatorIDs.List()
|
||||
for _, validatorID := range validatorList {
|
||||
for _, validatorID := range validatorIDs.List() {
|
||||
vID := validatorID
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.GetAcceptedFrontierFailed(vID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
}
|
||||
if validatorIDs.Contains(s.ctx.NodeID) {
|
||||
validatorIDs.Remove(s.ctx.NodeID)
|
||||
go s.router.GetAcceptedFrontier(s.ctx.NodeID, s.ctx.ChainID, requestID)
|
||||
}
|
||||
s.sender.GetAcceptedFrontier(validatorIDs, s.ctx.ChainID, requestID)
|
||||
}
|
||||
|
||||
|
@@ -49,24 +48,23 @@ func (s *Sender) GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32
|
|||
func (s *Sender) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
|
||||
if validatorID.Equals(s.ctx.NodeID) {
|
||||
go s.router.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs)
|
||||
return
|
||||
} else {
|
||||
s.sender.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
s.sender.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
|
||||
// GetAccepted ...
|
||||
func (s *Sender) GetAccepted(validatorIDs ids.ShortSet, requestID uint32, containerIDs ids.Set) {
|
||||
if validatorIDs.Contains(s.ctx.NodeID) {
|
||||
validatorIDs.Remove(s.ctx.NodeID)
|
||||
go s.router.GetAccepted(s.ctx.NodeID, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
validatorList := validatorIDs.List()
|
||||
for _, validatorID := range validatorList {
|
||||
for _, validatorID := range validatorIDs.List() {
|
||||
vID := validatorID
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.GetAcceptedFailed(vID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
}
|
||||
if validatorIDs.Contains(s.ctx.NodeID) {
|
||||
validatorIDs.Remove(s.ctx.NodeID)
|
||||
go s.router.GetAccepted(s.ctx.NodeID, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
s.sender.GetAccepted(validatorIDs, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
|
||||
|
@@ -74,9 +72,9 @@ func (s *Sender) Accepted(validatorID ids.ShortID, requestID uint32, containerID
|
|||
func (s *Sender) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
|
||||
if validatorID.Equals(s.ctx.NodeID) {
|
||||
go s.router.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs)
|
||||
return
|
||||
} else {
|
||||
s.sender.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
s.sender.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs)
|
||||
}
|
||||
|
||||
// Get sends a Get message to the consensus engine running on the specified
|
||||
|
@@ -85,6 +83,13 @@ func (s *Sender) Accepted(validatorID ids.ShortID, requestID uint32, containerID
|
|||
// specified container.
|
||||
func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
|
||||
s.ctx.Log.Verbo("Sending Get to validator %s. RequestID: %d. ContainerID: %s", validatorID, requestID, containerID)
|
||||
|
||||
// Sending a Get to myself will always fail
|
||||
if validatorID.Equals(s.ctx.NodeID) {
|
||||
go s.router.GetFailed(validatorID, s.ctx.ChainID, requestID)
|
||||
return
|
||||
}
|
||||
|
||||
// Add a timeout -- if we don't get a response before the timeout expires,
|
||||
// send this consensus engine a GetFailed message
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
|
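A Get addressed to the local node can never be answered over the network, so it is turned directly into a GetFailed for this chain. Delivering that failure on a new goroutine matters: as the PushQuery/PullQuery comments later in this diff point out, an inline delivery could deadlock while the handler's message queue is full. A minimal illustration of why the goroutine hand-off keeps the caller from blocking (editorial sketch, a toy channel standing in for the handler queue):

package main

import "fmt"

func main() {
	// A full queue (capacity 1, already occupied) stands in for handler.msgs.
	queue := make(chan string, 1)
	queue <- "pending message"

	// Handing the loopback failure to a fresh goroutine lets Get return
	// immediately even though the queue cannot accept another entry yet;
	// sending it inline here would block the caller.
	go func() { queue <- "getFailed(self)" }()
	fmt.Println("Get returned without blocking")

	<-queue              // the dispatcher drains the pending message ...
	fmt.Println(<-queue) // ... and then sees the loopback failure
}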
@@ -101,6 +106,7 @@ func (s *Sender) GetAncestors(validatorID ids.ShortID, requestID uint32, contain
|
|||
go s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
|
||||
return
|
||||
}
|
||||
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
|
@@ -130,6 +136,13 @@ func (s *Sender) MultiPut(validatorID ids.ShortID, requestID uint32, containers
|
|||
// their preferred frontier given the existence of the specified container.
|
||||
func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID, container []byte) {
|
||||
s.ctx.Log.Verbo("Sending PushQuery to validators %v. RequestID: %d. ContainerID: %s", validatorIDs, requestID, containerID)
|
||||
for _, validatorID := range validatorIDs.List() {
|
||||
vID := validatorID
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.QueryFailed(vID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
}
|
||||
|
||||
// If one of the validators in [validatorIDs] is myself, send this message directly
|
||||
// to my own router rather than sending it over the network
|
||||
if validatorIDs.Contains(s.ctx.NodeID) { // One of the validators in [validatorIDs] was myself
|
||||
|
@@ -139,13 +152,7 @@ func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containe
|
|||
// If this were not a goroutine, then we would deadlock here when [handler].msgs is full
|
||||
go s.router.PushQuery(s.ctx.NodeID, s.ctx.ChainID, requestID, containerID, container)
|
||||
}
|
||||
validatorList := validatorIDs.List() // Convert set to list for easier iteration
|
||||
for _, validatorID := range validatorList {
|
||||
vID := validatorID
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.QueryFailed(vID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
}
|
||||
|
||||
s.sender.PushQuery(validatorIDs, s.ctx.ChainID, requestID, containerID, container)
|
||||
}
|
||||
|
||||
|
@@ -155,6 +162,14 @@ func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containe
|
|||
// their preferred frontier.
|
||||
func (s *Sender) PullQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID) {
|
||||
s.ctx.Log.Verbo("Sending PullQuery. RequestID: %d. ContainerID: %s", requestID, containerID)
|
||||
|
||||
for _, validatorID := range validatorIDs.List() {
|
||||
vID := validatorID
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.QueryFailed(vID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
}
|
||||
|
||||
// If one of the validators in [validatorIDs] is myself, send this message directly
|
||||
// to my own router rather than sending it over the network
|
||||
if validatorIDs.Contains(s.ctx.NodeID) { // One of the validators in [validatorIDs] was myself
|
||||
|
@@ -164,13 +179,7 @@ func (s *Sender) PullQuery(validatorIDs ids.ShortSet, requestID uint32, containe
|
|||
// If this were not a goroutine, then we would deadlock when [handler].msgs is full
|
||||
go s.router.PullQuery(s.ctx.NodeID, s.ctx.ChainID, requestID, containerID)
|
||||
}
|
||||
validatorList := validatorIDs.List() // Convert set to list for easier iteration
|
||||
for _, validatorID := range validatorList {
|
||||
vID := validatorID
|
||||
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
|
||||
s.router.QueryFailed(vID, s.ctx.ChainID, requestID)
|
||||
})
|
||||
}
|
||||
|
||||
s.sender.PullQuery(validatorIDs, s.ctx.ChainID, requestID, containerID)
|
||||
}
|
||||
|
||||
|
@@ -181,9 +190,9 @@ func (s *Sender) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set)
|
|||
// to my own router rather than sending it over the network
|
||||
if validatorID.Equals(s.ctx.NodeID) {
|
||||
go s.router.Chits(validatorID, s.ctx.ChainID, requestID, votes)
|
||||
return
|
||||
} else {
|
||||
s.sender.Chits(validatorID, s.ctx.ChainID, requestID, votes)
|
||||
}
|
||||
s.sender.Chits(validatorID, s.ctx.ChainID, requestID, votes)
|
||||
}
|
||||
|
||||
// Gossip the provided container
|
||||
|
|
|
@@ -4,18 +4,20 @@
|
|||
package sender
|
||||
|
||||
import (
|
||||
"math/rand"
|
||||
"reflect"
|
||||
"sync"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
|
||||
"github.com/ava-labs/gecko/ids"
|
||||
"github.com/ava-labs/gecko/snow"
|
||||
"github.com/ava-labs/gecko/snow/engine/common"
|
||||
"github.com/ava-labs/gecko/snow/networking/router"
|
||||
"github.com/ava-labs/gecko/snow/networking/timeout"
|
||||
"github.com/ava-labs/gecko/utils/logging"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
)
|
||||
|
||||
func TestSenderContext(t *testing.T) {
|
||||
|
@@ -82,3 +84,128 @@ func TestTimeout(t *testing.T) {
|
|||
t.Fatalf("Timeouts should have fired")
|
||||
}
|
||||
}
|
||||
|
||||
func TestReliableMessages(t *testing.T) {
|
||||
tm := timeout.Manager{}
|
||||
tm.Initialize(50 * time.Millisecond)
|
||||
go tm.Dispatch()
|
||||
|
||||
chainRouter := router.ChainRouter{}
|
||||
chainRouter.Initialize(logging.NoLog{}, &tm, time.Hour, time.Second)
|
||||
|
||||
sender := Sender{}
|
||||
sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &chainRouter, &tm)
|
||||
|
||||
engine := common.EngineTest{T: t}
|
||||
engine.Default(true)
|
||||
|
||||
engine.ContextF = snow.DefaultContextTest
|
||||
engine.GossipF = func() error { return nil }
|
||||
|
||||
queriesToSend := 1000
|
||||
awaiting := make([]chan struct{}, queriesToSend)
|
||||
for i := 0; i < queriesToSend; i++ {
|
||||
awaiting[i] = make(chan struct{}, 1)
|
||||
}
|
||||
|
||||
engine.QueryFailedF = func(validatorID ids.ShortID, reqID uint32) error {
|
||||
close(awaiting[int(reqID)])
|
||||
return nil
|
||||
}
|
||||
|
||||
handler := router.Handler{}
|
||||
handler.Initialize(
|
||||
&engine,
|
||||
nil,
|
||||
1,
|
||||
"",
|
||||
prometheus.NewRegistry(),
|
||||
)
|
||||
go handler.Dispatch()
|
||||
|
||||
chainRouter.AddChain(&handler)
|
||||
|
||||
go func() {
|
||||
for i := 0; i < queriesToSend; i++ {
|
||||
vdrIDs := ids.ShortSet{}
|
||||
vdrIDs.Add(ids.NewShortID([20]byte{1}))
|
||||
|
||||
sender.PullQuery(vdrIDs, uint32(i), ids.Empty)
|
||||
time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond)))
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
chainRouter.Gossip()
|
||||
time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond)))
|
||||
}
|
||||
}()
|
||||
|
||||
for _, await := range awaiting {
|
||||
<-await
|
||||
}
|
||||
}
|
||||
|
||||
func TestReliableMessagesToMyself(t *testing.T) {
|
||||
tm := timeout.Manager{}
|
||||
tm.Initialize(50 * time.Millisecond)
|
||||
go tm.Dispatch()
|
||||
|
||||
chainRouter := router.ChainRouter{}
|
||||
chainRouter.Initialize(logging.NoLog{}, &tm, time.Hour, time.Second)
|
||||
|
||||
sender := Sender{}
|
||||
sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &chainRouter, &tm)
|
||||
|
||||
engine := common.EngineTest{T: t}
|
||||
engine.Default(false)
|
||||
|
||||
engine.ContextF = snow.DefaultContextTest
|
||||
engine.GossipF = func() error { return nil }
|
||||
engine.CantPullQuery = false
|
||||
|
||||
queriesToSend := 2
|
||||
awaiting := make([]chan struct{}, queriesToSend)
|
||||
for i := 0; i < queriesToSend; i++ {
|
||||
awaiting[i] = make(chan struct{}, 1)
|
||||
}
|
||||
|
||||
engine.QueryFailedF = func(validatorID ids.ShortID, reqID uint32) error {
|
||||
close(awaiting[int(reqID)])
|
||||
return nil
|
||||
}
|
||||
|
||||
handler := router.Handler{}
|
||||
handler.Initialize(
|
||||
&engine,
|
||||
nil,
|
||||
1,
|
||||
"",
|
||||
prometheus.NewRegistry(),
|
||||
)
|
||||
go handler.Dispatch()
|
||||
|
||||
chainRouter.AddChain(&handler)
|
||||
|
||||
go func() {
|
||||
for i := 0; i < queriesToSend; i++ {
|
||||
vdrIDs := ids.ShortSet{}
|
||||
vdrIDs.Add(engine.Context().NodeID)
|
||||
|
||||
sender.PullQuery(vdrIDs, uint32(i), ids.Empty)
|
||||
time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond)))
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
for {
|
||||
chainRouter.Gossip()
|
||||
time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond)))
|
||||
}
|
||||
}()
|
||||
|
||||
for _, await := range awaiting {
|
||||
<-await
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -34,8 +34,7 @@ type Block struct {
|
|||
func (b *Block) Initialize(bytes []byte, vm *SnowmanVM) {
|
||||
b.VM = vm
|
||||
b.Metadata.Initialize(bytes)
|
||||
status := b.VM.State.GetStatus(vm.DB, b.ID())
|
||||
b.SetStatus(status)
|
||||
b.SetStatus(choices.Unknown) // don't set status until it is queried
|
||||
}
|
||||
|
||||
// ParentID returns [b]'s parent's ID
|
||||
|
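Initialize no longer reads the stored status up front: a freshly parsed block starts as Unknown, and the new core test later in this diff expects the first Status() query to resolve it (to Processing for a block that is not yet decided). The Status implementation itself is not shown in this diff, so the following is only a sketch of such lazy resolution, with a hypothetical lookup callback standing in for the VM state read:

package main

import "fmt"

type status int

const (
	unknown status = iota
	processing
	accepted
)

// lazyBlock resolves its status only when first asked, so constructing a
// block stays cheap and never touches the database.
type lazyBlock struct {
	st     status
	lookup func() status // hypothetical stand-in for reading VM state
}

func (b *lazyBlock) Status() status {
	if b.st == unknown {
		b.st = b.lookup()
	}
	return b.st
}

func (b *lazyBlock) Accept() { b.st = accepted }

func main() {
	b := &lazyBlock{lookup: func() status { return processing }}
	fmt.Println(b.st == unknown)          // true: nothing read yet
	fmt.Println(b.Status() == processing) // true: resolved on first query
	b.Accept()
	fmt.Println(b.Status() == accepted) // true
}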
@@ -55,7 +54,6 @@ func (b *Block) Parent() snowman.Block {
|
|||
// Recall that b.vm.DB.Commit() must be called to persist to the DB
|
||||
func (b *Block) Accept() error {
|
||||
b.SetStatus(choices.Accepted) // Change state of this block
|
||||
|
||||
blkID := b.ID()
|
||||
|
||||
// Persist data
|
||||
|
|
|
@@ -0,0 +1,48 @@
|
|||
package core
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
"github.com/ava-labs/gecko/snow/choices"
|
||||
"github.com/ava-labs/gecko/snow/consensus/snowman"
|
||||
|
||||
"github.com/ava-labs/gecko/ids"
|
||||
|
||||
"github.com/ava-labs/gecko/database/memdb"
|
||||
"github.com/ava-labs/gecko/database/versiondb"
|
||||
)
|
||||
|
||||
func TestBlock(t *testing.T) {
|
||||
parentID := ids.NewID([32]byte{1, 2, 3, 4, 5})
|
||||
db := versiondb.New(memdb.New())
|
||||
state, err := NewSnowmanState(func([]byte) (snowman.Block, error) { return nil, nil })
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
b := NewBlock(parentID)
|
||||
|
||||
b.Initialize([]byte{1, 2, 3}, &SnowmanVM{
|
||||
DB: db,
|
||||
State: state,
|
||||
})
|
||||
|
||||
// should be unknown until someone queries for it
|
||||
if status := b.Metadata.status; status != choices.Unknown {
|
||||
t.Fatalf("status should be unknown but is %s", status)
|
||||
}
|
||||
|
||||
// querying should change status to processing
|
||||
if status := b.Status(); status != choices.Processing {
|
||||
t.Fatalf("status should be processing but is %s", status)
|
||||
}
|
||||
|
||||
b.Accept()
|
||||
if status := b.Status(); status != choices.Accepted {
|
||||
t.Fatalf("status should be accepted but is %s", status)
|
||||
}
|
||||
|
||||
b.Reject()
|
||||
if status := b.Status(); status != choices.Rejected {
|
||||
t.Fatalf("status should be rejected but is %s", status)
|
||||
}
|
||||
}
|
|
@@ -128,19 +128,10 @@ func (s *state) Get(db database.Database, typeID uint64, key ids.ID) (interface{
|
|||
// The unique ID of this key/typeID pair
|
||||
uID := s.uniqueID(key, typeID)
|
||||
|
||||
// See if exists in database
|
||||
exists, err := db.Has(uID.Bytes())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !exists {
|
||||
return nil, database.ErrNotFound
|
||||
}
|
||||
|
||||
// Get the value from the database
|
||||
valueBytes, err := db.Get(uID.Bytes())
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("problem getting value from database: %w", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Unmarshal the value from bytes and return it
|
||||
|
|
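Dropping the Has probe leaves a single database read, presumably because Get already reports database.ErrNotFound for a missing key, so the existence check only doubled the lookups without adding information. A minimal sketch of the resulting shape against a toy in-memory store (editorial illustration, hypothetical store type):

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("not found") // stand-in for database.ErrNotFound

type store map[string][]byte

func (s store) Get(k string) ([]byte, error) {
	v, ok := s[k]
	if !ok {
		return nil, errNotFound
	}
	return v, nil
}

// getValue is the post-change shape: one read, with a miss surfacing as
// errNotFound directly instead of being detected by a separate Has call.
func getValue(db store, key string) ([]byte, error) {
	return db.Get(key)
}

func main() {
	db := store{"uID": []byte{1, 2, 3}}
	v, _ := getValue(db, "uID")
	_, err := getValue(db, "missing")
	fmt.Println(v, errors.Is(err, errNotFound)) // [1 2 3] true
}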
|
@@ -128,7 +128,7 @@ func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*ve
|
|||
// The account if this block's proposal is committed and the validator is
|
||||
// added to the pending validator set. (Increase the account's nonce;
|
||||
// decrease its balance.)
|
||||
newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee
|
||||
newAccount, err := account.Remove(tx.Wght, tx.Nonce) // Remove also removes the fee
|
||||
if err != nil {
|
||||
return nil, nil, nil, nil, permError{err}
|
||||
}
|
||||
|
|
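The fix charges the delegated weight as well as the transaction fee, which is exactly what the new Case 9 below asserts: the post-commit balance should be defaultBalance - (defaultStakeAmount + txFee). A worked arithmetic sketch with hypothetical numbers (editorial illustration):

package main

import "fmt"

func main() {
	const (
		balance = 1000 // hypothetical starting balance
		weight  = 100  // amount delegated (tx.Wght)
		txFee   = 1    // network transaction fee
	)

	// Before the fix the account was charged Remove(0, nonce): the fee only.
	buggy := balance - txFee
	// After the fix it is charged Remove(tx.Wght, nonce): stake plus fee.
	fixed := balance - (weight + txFee)

	fmt.Println(buggy, fixed) // 999 899
}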
|
@@ -386,4 +386,52 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) {
|
|||
t.Fatal("should have failed verification because payer account has no $AVA to pay fee")
|
||||
}
|
||||
txFee = txFeeSaved // Reset tx fee
|
||||
|
||||
// Case 8: fail verification for spending more funds than it has
|
||||
tx, err = vm.newAddDefaultSubnetDelegatorTx(
|
||||
defaultNonce+1,
|
||||
defaultBalance*2, // weight
|
||||
uint64(defaultValidateStartTime.Unix()), // start time
|
||||
uint64(defaultValidateEndTime.Unix()), // end time
|
||||
defaultKey.PublicKey().Address(), // node ID
|
||||
defaultKey.PublicKey().Address(), // destination
|
||||
testNetworkID, // network ID
|
||||
defaultKey, // tx fee payer
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
_, _, _, _, err = tx.SemanticVerify(vm.DB)
|
||||
if err == nil {
|
||||
t.Fatal("should have failed verification because payer account spent twice the account's balance")
|
||||
}
|
||||
|
||||
// Case 9: Confirm balance is correct
|
||||
tx, err = vm.newAddDefaultSubnetDelegatorTx(
|
||||
defaultNonce+1,
|
||||
defaultStakeAmount, // weight
|
||||
uint64(defaultValidateStartTime.Unix()), // start time
|
||||
uint64(defaultValidateEndTime.Unix()), // end time
|
||||
defaultKey.PublicKey().Address(), // node ID
|
||||
defaultKey.PublicKey().Address(), // destination
|
||||
testNetworkID, // network ID
|
||||
defaultKey, // tx fee payer
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
onCommitDB, _, _, _, err := tx.SemanticVerify(vm.DB)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
account, err := tx.vm.getAccount(onCommitDB, defaultKey.PublicKey().Address())
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
balance := account.Balance
|
||||
|
||||
if balance != defaultBalance-(defaultStakeAmount+txFee) {
|
||||
t.Fatalf("balance was not updated correctly after subnet delegator tx")
|
||||
}
|
||||
}
|
||||
|
|