Merge branch 'network-upgrade' into c-chain-local-rpc

StephenButtolph 2020-06-07 18:13:32 -04:00
commit 9cae34dcfc
74 changed files with 2782 additions and 1344 deletions

View File

@ -10,6 +10,7 @@ import (
"github.com/ava-labs/gecko/api"
"github.com/ava-labs/gecko/chains"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow/engine/common"
@ -46,38 +47,45 @@ func NewService(nodeID ids.ShortID, networkID uint32, log logging.Logger, chainM
return &common.HTTPHandler{Handler: newServer}
}
// GetNodeIDArgs are the arguments for calling GetNodeID
type GetNodeIDArgs struct{}
// GetNodeIDReply are the results from calling GetNodeID
type GetNodeIDReply struct {
NodeID ids.ShortID `json:"nodeID"`
}
// GetNodeID returns the node ID of this node
func (service *Admin) GetNodeID(r *http.Request, args *GetNodeIDArgs, reply *GetNodeIDReply) error {
func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Debug("Admin: GetNodeID called")
reply.NodeID = service.nodeID
return nil
}
// GetNetworkIDArgs are the arguments for calling GetNetworkID
type GetNetworkIDArgs struct{}
// GetNetworkIDReply are the results from calling GetNetworkID
type GetNetworkIDReply struct {
NetworkID cjson.Uint32 `json:"networkID"`
}
// GetNetworkID returns the network ID this node is running on
func (service *Admin) GetNetworkID(r *http.Request, args *GetNetworkIDArgs, reply *GetNetworkIDReply) error {
func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Debug("Admin: GetNetworkID called")
reply.NetworkID = cjson.Uint32(service.networkID)
return nil
}
// GetNetworkNameReply is the result from calling GetNetworkName
type GetNetworkNameReply struct {
NetworkName string `json:"networkName"`
}
// GetNetworkName returns the network name this node is running on
func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Debug("Admin: GetNetworkName called")
reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
}
// GetBlockchainIDArgs are the arguments for calling GetBlockchainID
type GetBlockchainIDArgs struct {
Alias string `json:"alias"`
@ -89,7 +97,7 @@ type GetBlockchainIDReply struct {
}
// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Debug("Admin: GetBlockchainID called")
bID, err := service.chainManager.Lookup(args.Alias)
@ -97,16 +105,13 @@ func (service *Admin) GetBlockchainID(r *http.Request, args *GetBlockchainIDArgs
return err
}
// PeersArgs are the arguments for calling Peers
type PeersArgs struct{}
// PeersReply are the results from calling Peers
type PeersReply struct {
Peers []network.PeerID `json:"peers"`
}
// Peers returns the list of current validators
func (service *Admin) Peers(r *http.Request, args *PeersArgs, reply *PeersReply) error {
func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Debug("Admin: Peers called")
reply.Peers = service.networking.Peers()
return nil
@ -123,22 +128,19 @@ type StartCPUProfilerReply struct {
}
// StartCPUProfiler starts a cpu profile writing to the specified file
func (service *Admin) StartCPUProfiler(r *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename)
reply.Success = true
return service.performance.StartCPUProfiler(args.Filename)
}
// StopCPUProfilerArgs are the arguments for calling StopCPUProfiler
type StopCPUProfilerArgs struct{}
// StopCPUProfilerReply are the results from calling StopCPUProfiler
type StopCPUProfilerReply struct {
Success bool `json:"success"`
}
// StopCPUProfiler stops the cpu profile
func (service *Admin) StopCPUProfiler(r *http.Request, args *StopCPUProfilerArgs, reply *StopCPUProfilerReply) error {
func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopCPUProfilerReply) error {
service.log.Debug("Admin: StopCPUProfiler called")
reply.Success = true
return service.performance.StopCPUProfiler()
@ -155,7 +157,7 @@ type MemoryProfileReply struct {
}
// MemoryProfile runs a memory profile writing to the specified file
func (service *Admin) MemoryProfile(r *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
service.log.Debug("Admin: MemoryProfile called with %s", args.Filename)
reply.Success = true
return service.performance.MemoryProfile(args.Filename)
@ -172,7 +174,7 @@ type LockProfileReply struct {
}
// LockProfile runs a mutex profile writing to the specified file
func (service *Admin) LockProfile(r *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
service.log.Debug("Admin: LockProfile called with %s", args.Filename)
reply.Success = true
return service.performance.LockProfile(args.Filename)
@ -190,7 +192,7 @@ type AliasReply struct {
}
// Alias attempts to alias an HTTP endpoint to a new name
func (service *Admin) Alias(r *http.Request, args *AliasArgs, reply *AliasReply) error {
func (service *Admin) Alias(_ *http.Request, args *AliasArgs, reply *AliasReply) error {
service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
reply.Success = true
return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias)
@ -233,7 +235,7 @@ type StacktraceReply struct {
}
// Stacktrace returns the current global stacktrace
func (service *Admin) Stacktrace(_ *http.Request, _ *StacktraceArgs, reply *StacktraceReply) error {
func (service *Admin) Stacktrace(_ *http.Request, _ *struct{}, reply *StacktraceReply) error {
reply.Stacktrace = logging.Stacktrace{Global: true}.String()
return nil
}
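The admin handlers above follow the Gorilla-RPC style gecko uses for its HTTP APIs: an *http.Request, an args struct (replaced by _ *struct{} when unused), and a reply struct. Below is a minimal sketch of invoking one of them from a client, assuming a JSON-RPC 2.0 envelope and an /ext/admin endpoint on 127.0.0.1:9650 (both assumptions, not shown in this diff):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// rpcRequest is a minimal JSON-RPC 2.0 envelope (assumed wire format).
type rpcRequest struct {
	JSONRPC string      `json:"jsonrpc"`
	ID      int         `json:"id"`
	Method  string      `json:"method"`
	Params  interface{} `json:"params"`
}

func main() {
	// Hypothetical endpoint; the actual URL depends on node configuration.
	const url = "http://127.0.0.1:9650/ext/admin"

	body, err := json.Marshal(rpcRequest{
		JSONRPC: "2.0",
		ID:      1,
		Method:  "admin.getNodeID", // mirrors GetNodeID above
		Params:  struct{}{},        // no arguments, like _ *struct{}
	})
	if err != nil {
		fmt.Println("marshal failed:", err)
		return
	}

	resp, err := http.Post(url, "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	var reply struct {
		Result struct {
			NodeID string `json:"nodeID"`
		} `json:"result"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println("node ID:", reply.Result.NodeID)
}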

View File

@ -75,8 +75,9 @@ func (s *Server) RegisterChain(ctx *snow.Context, vmIntf interface{}) {
}
// all subroutes to a chain begin with "bc/<the chain's ID>"
defaultEndpoint := "bc/" + ctx.ChainID.String()
httpLogger, err := s.factory.MakeChain(ctx.ChainID, "http")
chainID := ctx.ChainID.String()
defaultEndpoint := "bc/" + chainID
httpLogger, err := s.factory.MakeChain(chainID, "http")
if err != nil {
s.log.Error("Failed to create new http logger: %s", err)
return

View File

@ -38,7 +38,7 @@ import (
const (
defaultChannelSize = 1000
requestTimeout = 2 * time.Second
requestTimeout = 4 * time.Second
gossipFrequency = 10 * time.Second
shutdownTimeout = 1 * time.Second
)
@ -247,8 +247,13 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
}
}
primaryAlias, err := m.PrimaryAlias(chain.ID)
if err != nil {
primaryAlias = chain.ID.String()
}
// Create the log and context of the chain
chainLog, err := m.logFactory.MakeChain(chain.ID, "")
chainLog, err := m.logFactory.MakeChain(primaryAlias, "")
if err != nil {
m.log.Error("error while creating chain's log %s", err)
return
@ -266,12 +271,9 @@ func (m *manager) ForceCreateChain(chain ChainParameters) {
SharedMemory: m.sharedMemory.NewBlockchainSharedMemory(chain.ID),
BCLookup: m,
}
consensusParams := m.consensusParams
if alias, err := m.PrimaryAlias(ctx.ChainID); err == nil {
consensusParams.Namespace = fmt.Sprintf("gecko_%s", alias)
} else {
consensusParams.Namespace = fmt.Sprintf("gecko_%s", ctx.ChainID)
}
consensusParams.Namespace = fmt.Sprintf("gecko_%s", primaryAlias)
// The validators of this blockchain
var validators validators.Set // Validators validating this blockchain
@ -429,7 +431,13 @@ func (m *manager) createAvalancheChain(
// Asynchronously passes messages from the network to the consensus engine
handler := &router.Handler{}
handler.Initialize(&engine, msgChan, defaultChannelSize)
handler.Initialize(
&engine,
msgChan,
defaultChannelSize,
fmt.Sprintf("%s_handler", consensusParams.Namespace),
consensusParams.Metrics,
)
// Allows messages to be routed to the new chain
m.chainRouter.AddChain(handler)
@ -515,7 +523,13 @@ func (m *manager) createSnowmanChain(
// Asynchronously passes messages from the network to the consensus engine
handler := &router.Handler{}
handler.Initialize(&engine, msgChan, defaultChannelSize)
handler.Initialize(
&engine,
msgChan,
defaultChannelSize,
fmt.Sprintf("%s_handler", consensusParams.Namespace),
consensusParams.Metrics,
)
// Allow incoming messages to be routed to the new chain
m.chainRouter.AddChain(handler)

View File

@ -62,6 +62,9 @@ func (b *UniqueBag) Difference(diff *UniqueBag) {
// GetSet ...
func (b *UniqueBag) GetSet(id ID) BitSet { return (*b)[*id.ID] }
// RemoveSet ...
func (b *UniqueBag) RemoveSet(id ID) { delete(*b, id.Key()) }
// List ...
func (b *UniqueBag) List() []ID {
idList := []ID(nil)
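RemoveSet joins GetSet on UniqueBag, which maps an ID to the bit set of voters behind it; the voter changes later in this commit use it to drop votes for unissued vertices. A usage sketch, assuming ids.NewID exists and that BitSet is an integer-backed bitmask (consistent with GetSet's map lookup above):

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/ids"
)

func main() {
	votes := ids.UniqueBag{}
	vtxID := ids.NewID([32]byte{1})

	// Record that validators 0 and 1 (bits 0 and 1) voted for vtxID.
	votes.UnionSet(vtxID, ids.BitSet(3))
	fmt.Println("voters:", votes.GetSet(vtxID))

	// Drop every vote for vtxID, e.g. because the vertex wasn't issued.
	votes.RemoveSet(vtxID)
	fmt.Println("IDs with votes:", votes.List()) // now empty
}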

View File

@ -41,7 +41,7 @@ func main() {
defer Config.DB.Close()
if Config.StakingIP.IsZero() {
log.Warn("NAT traversal has failed. If this node becomes a staker, it may lose its reward due to being unreachable.")
log.Warn("NAT traversal has failed. It will be able to connect to less nodes.")
}
// Track if sybil control is enforced
@ -62,7 +62,7 @@ func main() {
// Track if assertions should be executed
if Config.LoggingConfig.Assertions {
log.Warn("assertions are enabled. This may slow down execution")
log.Debug("assertions are enabled. This may slow down execution")
}
mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko")
@ -83,5 +83,5 @@ func main() {
log.Debug("dispatching node handlers")
err = node.Dispatch()
log.Debug("dispatch returned with: %s", err)
log.Debug("node dispatching returned with %s", err)
}

View File

@ -25,6 +25,7 @@ import (
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/wrappers"
)
@ -56,11 +57,26 @@ func GetIPs(networkID uint32) []string {
switch networkID {
case genesis.DenaliID:
return []string{
"3.20.56.211:21001",
"18.224.140.156:21001",
"3.133.83.66:21001",
"3.133.131.39:21001",
"18.188.121.35:21001",
"3.133.83.66:21001",
"3.15.206.239:21001",
"18.224.140.156:21001",
"3.133.131.39:21001",
"18.191.29.54:21001",
"18.224.172.110:21001",
"18.223.211.203:21001",
"18.216.130.143:21001",
"18.223.184.147:21001",
"52.15.48.84:21001",
"18.189.194.220:21001",
"18.223.119.104:21001",
"3.133.155.41:21001",
"13.58.170.174:21001",
"3.21.245.246:21001",
"52.15.190.149:21001",
"18.188.95.241:21001",
"3.12.197.248:21001",
"3.17.39.236:21001",
}
case genesis.CascadeID:
return []string{
@ -75,6 +91,68 @@ func GetIPs(networkID uint32) []string {
}
}
// GetIDs returns the default IDs for each network
func GetIDs(networkID uint32) []string {
switch networkID {
case genesis.DenaliID:
return []string{
"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
"HGZ8ae74J3odT8ESreAdCtdnvWG1J4X5n",
"4KXitMCoE9p2BHA6VzXtaTxLoEjNDo2Pt",
"JyE4P8f4cTryNV8DCz2M81bMtGhFFHexG",
"EzGaipqomyK9UKx9DBHV6Ky3y68hoknrF",
"CYKruAjwH1BmV3m37sXNuprbr7dGQuJwG",
"LegbVf6qaMKcsXPnLStkdc1JVktmmiDxy",
"FesGqwKq7z5nPFHa5iwZctHE5EZV9Lpdq",
"BFa1padLXBj7VHa2JYvYGzcTBPQGjPhUy",
"4B4rc5vdD1758JSBYL1xyvE5NHGzz6xzH",
"EDESh4DfZFC15i613pMtWniQ9arbBZRnL",
"CZmZ9xpCzkWqjAyS7L4htzh5Lg6kf1k18",
"CTtkcXvVdhpNp6f97LEUXPwsRD3A2ZHqP",
"84KbQHSDnojroCVY7vQ7u9Tx7pUonPaS",
"JjvzhxnLHLUQ5HjVRkvG827ivbLXPwA9u",
"4CWTbdvgXHY1CLXqQNAp22nJDo5nAmts6",
}
case genesis.CascadeID:
return []string{
"NX4zVkuiRJZYe6Nzzav7GXN3TakUet3Co",
"CMsa8cMw4eib1Hb8GG4xiUKAq5eE1BwUX",
"DsMP6jLhi1MkDVc3qx9xx9AAZWx8e87Jd",
"N86eodVZja3GEyZJTo3DFUPGpxEEvjGHs",
"EkKeGSLUbHrrtuayBtbwgWDRUiAziC3ao",
}
default:
return nil
}
}
// GetDefaultBootstraps returns the default bootstraps this node should connect
// to
func GetDefaultBootstraps(networkID uint32, count int) ([]string, []string) {
ips := GetIPs(networkID)
ids := GetIDs(networkID)
if numIPs := len(ips); numIPs < count {
count = numIPs
}
sampledIPs := make([]string, 0, count)
sampledIDs := make([]string, 0, count)
sampler := random.Uniform{N: len(ips)}
for i := 0; i < count; i++ {
i := sampler.Sample()
sampledIPs = append(sampledIPs, ips[i])
sampledIDs = append(sampledIDs, ids[i])
}
return sampledIPs, sampledIDs
}
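GetDefaultBootstraps samples count IP/ID pairs at the same indices, so position k of the returned IP slice is the address of the node whose ID is at position k; assuming random.Uniform draws without replacement, the sample is also duplicate-free. An illustrative helper, not part of this commit (and assuming fmt is imported in this file):

// printDefaultBootstraps shows how the sampled slices stay paired: the ID at
// index k authenticates the peer listening at the IP at index k.
func printDefaultBootstraps() {
	ips, nodeIDs := GetDefaultBootstraps(genesis.DenaliID, 5)
	for k := range ips {
		fmt.Printf("bootstrap peer %s at %s\n", nodeIDs[k], ips[k])
	}
}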
// Parse the CLI arguments
func init() {
errs := &wrappers.Errs{}
@ -206,9 +284,11 @@ func init() {
Port: uint16(*consensusPort),
}
defaultBootstrapIPs, defaultBootstrapIDs := GetDefaultBootstraps(networkID, 5)
// Bootstrapping:
if *bootstrapIPs == "default" {
*bootstrapIPs = strings.Join(GetIPs(networkID), ",")
*bootstrapIPs = strings.Join(defaultBootstrapIPs, ",")
}
for _, ip := range strings.Split(*bootstrapIPs, ",") {
if ip != "" {
@ -227,7 +307,7 @@ func init() {
if *bootstrapIPs == "" {
*bootstrapIDs = ""
} else {
*bootstrapIDs = strings.Join(genesis.GetConfig(networkID).StakerIDs, ",")
*bootstrapIDs = strings.Join(defaultBootstrapIDs, ",")
}
}
if Config.EnableStaking {

View File

@ -89,6 +89,15 @@ func (m Builder) Get(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg,
})
}
// GetAncestors message
func (m Builder) GetAncestors(chainID ids.ID, requestID uint32, containerID ids.ID) (Msg, error) {
return m.Pack(GetAncestors, map[Field]interface{}{
ChainID: chainID.Bytes(),
RequestID: requestID,
ContainerID: containerID.Bytes(),
})
}
// Put message
func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) {
return m.Pack(Put, map[Field]interface{}{
@ -99,6 +108,15 @@ func (m Builder) Put(chainID ids.ID, requestID uint32, containerID ids.ID, conta
})
}
// MultiPut message
func (m Builder) MultiPut(chainID ids.ID, requestID uint32, containers [][]byte) (Msg, error) {
return m.Pack(MultiPut, map[Field]interface{}{
ChainID: chainID.Bytes(),
RequestID: requestID,
MultiContainerBytes: containers,
})
}
// PushQuery message
func (m Builder) PushQuery(chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) (Msg, error) {
return m.Pack(PushQuery, map[Field]interface{}{

View File

@ -12,17 +12,18 @@ type Field uint32
// Fields that may be packed. These values are not sent over the wire.
const (
VersionStr Field = iota // Used in handshake
NetworkID // Used in handshake
NodeID // Used in handshake
MyTime // Used in handshake
IP // Used in handshake
Peers // Used in handshake
ChainID // Used for dispatching
RequestID // Used for all messages
ContainerID // Used for querying
ContainerBytes // Used for gossiping
ContainerIDs // Used for querying
VersionStr Field = iota // Used in handshake
NetworkID // Used in handshake
NodeID // Used in handshake
MyTime // Used in handshake
IP // Used in handshake
Peers // Used in handshake
ChainID // Used for dispatching
RequestID // Used for all messages
ContainerID // Used for querying
ContainerBytes // Used for gossiping
ContainerIDs // Used for querying
MultiContainerBytes // Used in MultiPut
)
// Packer returns the packer function that can be used to pack this field.
@ -50,6 +51,8 @@ func (f Field) Packer() func(*wrappers.Packer, interface{}) {
return wrappers.TryPackBytes
case ContainerIDs:
return wrappers.TryPackHashes
case MultiContainerBytes:
return wrappers.TryPack2DBytes
default:
return nil
}
@ -80,6 +83,8 @@ func (f Field) Unpacker() func(*wrappers.Packer) interface{} {
return wrappers.TryUnpackBytes
case ContainerIDs:
return wrappers.TryUnpackHashes
case MultiContainerBytes:
return wrappers.TryUnpack2DBytes
default:
return nil
}
@ -107,6 +112,8 @@ func (f Field) String() string {
return "Container Bytes"
case ContainerIDs:
return "Container IDs"
case MultiContainerBytes:
return "MultiContainerBytes"
default:
return "Unknown Field"
}
@ -135,8 +142,12 @@ func (op Op) String() string {
return "accepted"
case Get:
return "get"
case GetAncestors:
return "get_ancestors"
case Put:
return "put"
case MultiPut:
return "multi_put"
case PushQuery:
return "push_query"
case PullQuery:
@ -166,6 +177,11 @@ const (
PushQuery
PullQuery
Chits
// Bootstrapping:
// TODO: Move GetAncestors and MultiPut with the rest of the bootstrapping
// commands when we do non-backwards compatible upgrade
GetAncestors
MultiPut
)
// Defines the messages that can be sent/received with this network
@ -181,6 +197,8 @@ var (
AcceptedFrontier: []Field{ChainID, RequestID, ContainerIDs},
GetAccepted: []Field{ChainID, RequestID, ContainerIDs},
Accepted: []Field{ChainID, RequestID, ContainerIDs},
GetAncestors: []Field{ChainID, RequestID, ContainerID},
MultiPut: []Field{ChainID, RequestID, MultiContainerBytes},
// Consensus:
Get: []Field{ChainID, RequestID, ContainerID},
Put: []Field{ChainID, RequestID, ContainerID, ContainerBytes},
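Because every op maps to an ordered field list, and every Field carries its own Packer/Unpacker, serialization can stay table-driven: packing a message is just walking the op's field list. A simplified, self-contained sketch of that pattern (string ops and two toy fields; gecko's real Builder/Codec is not shown in this diff):

package main

import (
	"encoding/binary"
	"fmt"
)

type Field uint32

const (
	RequestID Field = iota
	ContainerBytes
)

// packers maps each field to the routine that serializes it, mirroring
// Field.Packer() above (simplified: one uint32 field, one byte-slice field).
var packers = map[Field]func(buf []byte, v interface{}) []byte{
	RequestID: func(buf []byte, v interface{}) []byte {
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], v.(uint32))
		return append(buf, b[:]...)
	},
	ContainerBytes: func(buf []byte, v interface{}) []byte {
		var b [4]byte
		binary.BigEndian.PutUint32(b[:], uint32(len(v.([]byte))))
		return append(append(buf, b[:]...), v.([]byte)...)
	},
}

// messages maps an op to its ordered field list, like the Messages var above.
var messages = map[string][]Field{
	"put": {RequestID, ContainerBytes},
}

// pack serializes the field values in the op's declared field order.
func pack(op string, values map[Field]interface{}) []byte {
	var buf []byte
	for _, f := range messages[op] {
		buf = packers[f](buf, values[f])
	}
	return buf
}

func main() {
	msg := pack("put", map[Field]interface{}{
		RequestID:      uint32(7),
		ContainerBytes: []byte("container"),
	})
	fmt.Printf("%d bytes: %x\n", len(msg), msg)
}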

View File

@ -56,7 +56,7 @@ type metrics struct {
getPeerlist, peerlist,
getAcceptedFrontier, acceptedFrontier,
getAccepted, accepted,
get, put,
get, getAncestors, put, multiPut,
pushQuery, pullQuery, chits messageMetrics
}
@ -83,7 +83,9 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error {
errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
errs.Add(m.accepted.initialize(Accepted, registerer))
errs.Add(m.get.initialize(Get, registerer))
errs.Add(m.getAncestors.initialize(GetAncestors, registerer))
errs.Add(m.put.initialize(Put, registerer))
errs.Add(m.multiPut.initialize(MultiPut, registerer))
errs.Add(m.pushQuery.initialize(PushQuery, registerer))
errs.Add(m.pullQuery.initialize(PullQuery, registerer))
errs.Add(m.chits.initialize(Chits, registerer))
@ -111,8 +113,12 @@ func (m *metrics) message(msgType Op) *messageMetrics {
return &m.accepted
case Get:
return &m.get
case GetAncestors:
return &m.getAncestors
case Put:
return &m.put
case MultiPut:
return &m.multiPut
case PushQuery:
return &m.pushQuery
case PullQuery:
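The per-op messageMetrics initializer isn't shown in this hunk. Given the numSent/numFailed/numReceived counters this commit bumps elsewhere (e.g. n.getAncestors.numSent.Inc()), a plausible sketch of what each initialize(op, registerer) call registers, with the counter names and help strings as assumptions:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// messageMetrics mirrors the per-op counters this commit uses:
// numSent/numFailed on the send path, numReceived in peer.handle.
type messageMetrics struct {
	numSent, numFailed, numReceived prometheus.Counter
}

func (mm *messageMetrics) initialize(op string, registerer prometheus.Registerer) error {
	newCounter := func(suffix, help string) prometheus.Counter {
		return prometheus.NewCounter(prometheus.CounterOpts{
			Namespace: "gecko_network", // illustrative namespace
			Name:      fmt.Sprintf("%s_%s", op, suffix),
			Help:      fmt.Sprintf("Number of %s messages %s", op, help),
		})
	}
	mm.numSent = newCounter("sent", "sent")
	mm.numFailed = newCounter("failed", "that failed to be sent")
	mm.numReceived = newCounter("received", "received")
	for _, c := range []prometheus.Collector{mm.numSent, mm.numFailed, mm.numReceived} {
		if err := registerer.Register(c); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	mm := &messageMetrics{}
	if err := mm.initialize("multi_put", prometheus.NewRegistry()); err != nil {
		fmt.Println(err)
	}
	mm.numSent.Inc()
}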

View File

@ -30,7 +30,7 @@ import (
const (
defaultInitialReconnectDelay = time.Second
defaultMaxReconnectDelay = time.Hour
defaultMaxMessageSize uint32 = 1 << 21
DefaultMaxMessageSize uint32 = 1 << 21
defaultSendQueueSize = 1 << 10
defaultMaxClockDifference = time.Minute
defaultPeerListGossipSpacing = time.Minute
@ -162,7 +162,7 @@ func NewDefaultNetwork(
router,
defaultInitialReconnectDelay,
defaultMaxReconnectDelay,
defaultMaxMessageSize,
DefaultMaxMessageSize,
defaultSendQueueSize,
defaultMaxClockDifference,
defaultPeerListGossipSpacing,
@ -359,6 +359,29 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
}
}
// GetAncestors implements the Sender interface.
func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.GetAncestors(chainID, requestID, containerID)
if err != nil {
n.log.Error("failed to build GetAncestors message: %w", err)
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.getAncestors.numFailed.Inc()
n.log.Debug("failed to send a GetAncestors message to: %s", validatorID)
} else {
n.getAncestors.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
@ -382,6 +405,29 @@ func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
}
}
// MultiPut implements the Sender interface.
func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
msg, err := n.b.MultiPut(chainID, requestID, containers)
if err != nil {
n.log.Error("failed to build MultiPut message because of container of size %d", len(containers))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a MultiPut message to: %s", validatorID)
n.multiPut.numFailed.Inc()
} else {
n.multiPut.numSent.Inc()
}
}
// PushQuery implements the Sender interface.
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.PushQuery(chainID, requestID, containerID, container)

View File

@ -201,7 +201,7 @@ func (p *peer) handle(msg Msg) {
op := msg.Op()
msgMetrics := p.net.message(op)
if msgMetrics == nil {
p.net.log.Debug("dropping an unknown message from %s with op %d", p.id, op)
p.net.log.Debug("dropping an unknown message from %s with op %s", p.id, op.String())
return
}
msgMetrics.numReceived.Inc()
@ -236,14 +236,20 @@ func (p *peer) handle(msg Msg) {
p.accepted(msg)
case Get:
p.get(msg)
case GetAncestors:
p.getAncestors(msg)
case Put:
p.put(msg)
case MultiPut:
p.multiPut(msg)
case PushQuery:
p.pushQuery(msg)
case PullQuery:
p.pullQuery(msg)
case Chits:
p.chits(msg)
default:
p.net.log.Debug("dropping an unknown message from %s with op %s", p.id, op.String())
}
}
@ -537,6 +543,16 @@ func (p *peer) get(msg Msg) {
p.net.router.Get(p.id, chainID, requestID, containerID)
}
func (p *peer) getAncestors(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containerID, err := ids.ToID(msg.Get(ContainerID).([]byte))
p.net.log.AssertNoError(err)
p.net.router.GetAncestors(p.id, chainID, requestID, containerID)
}
// assumes the stateLock is not held
func (p *peer) put(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
@ -549,6 +565,16 @@ func (p *peer) put(msg Msg) {
p.net.router.Put(p.id, chainID, requestID, containerID, container)
}
// assumes the stateLock is not held
func (p *peer) multiPut(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))
p.net.log.AssertNoError(err)
requestID := msg.Get(RequestID).(uint32)
containers := msg.Get(MultiContainerBytes).([][]byte)
p.net.router.MultiPut(p.id, chainID, requestID, containers)
}
// assumes the stateLock is not held
func (p *peer) pushQuery(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))

View File

@ -55,7 +55,7 @@ const (
var (
genesisHashKey = []byte("genesisID")
nodeVersion = version.NewDefaultVersion("avalanche", 0, 5, 1)
nodeVersion = version.NewDefaultVersion("avalanche", 0, 5, 2)
versionParser = version.NewDefaultParser()
)

View File

@ -77,6 +77,10 @@ type Vertex interface {
// Returns the vertices this vertex depends on
Parents() []Vertex
// Returns the height of this vertex. A vertex's height is defined by one
// greater than the maximum height of the parents.
Height() uint64
// Returns a series of state transitions to be performed on acceptance
Txs() []snowstorm.Tx
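The new Height contract is recursive: one greater than the maximum height of the parents. The parentless (genesis) case is not pinned down by the comment; the sketch below assumes it is height 0.

package main

import "fmt"

// vertex is a minimal stand-in for avalanche.Vertex, keeping only what the
// Height contract needs.
type vertex struct {
	parents []*vertex
}

// height is one greater than the maximum height of the parents, per the
// interface comment above. Treating a parentless (genesis) vertex as height 0
// is this sketch's assumption.
func (v *vertex) height() uint64 {
	if len(v.parents) == 0 {
		return 0
	}
	max := uint64(0)
	for _, p := range v.parents {
		if h := p.height(); h > max {
			max = h
		}
	}
	return max + 1
}

func main() {
	genesis := &vertex{}
	a := &vertex{parents: []*vertex{genesis}}
	b := &vertex{parents: []*vertex{genesis, a}}
	fmt.Println(genesis.height(), a.height(), b.height()) // 0 1 2
}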

View File

@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
Namespace: namespace,
Name: "vtx_accepted",
Help: "Latency of accepting from the time the vertex was issued in milliseconds",
Buckets: timer.Buckets,
Buckets: timer.MillisecondsBuckets,
})
m.latRejected = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: namespace,
Name: "vtx_rejected",
Help: "Latency of rejecting from the time the vertex was issued in milliseconds",
Buckets: timer.Buckets,
Buckets: timer.MillisecondsBuckets,
})
if err := registerer.Register(m.numProcessing); err != nil {
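timer.Buckets becomes timer.MillisecondsBuckets here and in the snowman and snowstorm metrics below. Its definition is not part of this diff; a plausible sketch, with the boundary values as assumptions:

package timer

// MillisecondsBuckets sketches millisecond-scale latency buckets for
// histograms like vtx_accepted above. The exact boundaries gecko uses are
// not shown in this diff; these values are illustrative.
var MillisecondsBuckets = []float64{
	10,    // 10 ms
	100,   // 100 ms
	250,
	500,
	1000,  // 1 s
	2500,
	5000,
	10000, // 10 s
}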

View File

@ -16,7 +16,7 @@ type Vtx struct {
id ids.ID
txs []snowstorm.Tx
height int
height uint64
status choices.Status
bytes []byte
@ -25,6 +25,7 @@ type Vtx struct {
func (v *Vtx) ID() ids.ID { return v.id }
func (v *Vtx) ParentIDs() []ids.ID { return nil }
func (v *Vtx) Parents() []Vertex { return v.dependencies }
func (v *Vtx) Height() uint64 { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx { return v.txs }
func (v *Vtx) Status() choices.Status { return v.status }
func (v *Vtx) Live() {}

View File

@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
Namespace: namespace,
Name: "accepted",
Help: "Latency of accepting from the time the block was issued in milliseconds",
Buckets: timer.Buckets,
Buckets: timer.MillisecondsBuckets,
})
m.latRejected = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: namespace,
Name: "rejected",
Help: "Latency of rejecting from the time the block was issued in milliseconds",
Buckets: timer.Buckets,
Buckets: timer.MillisecondsBuckets,
})
if err := registerer.Register(m.numProcessing); err != nil {

View File

@ -37,14 +37,14 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
Namespace: namespace,
Name: "tx_accepted",
Help: "Latency of accepting from the time the transaction was issued in milliseconds",
Buckets: timer.Buckets,
Buckets: timer.MillisecondsBuckets,
})
m.latRejected = prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: namespace,
Name: "tx_rejected",
Help: "Latency of rejecting from the time the transaction was issued in milliseconds",
Buckets: timer.Buckets,
Buckets: timer.MillisecondsBuckets,
})
if err := registerer.Register(m.numProcessing); err != nil {

View File

@ -6,6 +6,7 @@ package avalanche
import (
"fmt"
"github.com/ava-labs/gecko/cache"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/avalanche"
@ -15,6 +16,10 @@ import (
"github.com/prometheus/client_golang/prometheus"
)
const (
cacheSize = 3000
)
// BootstrapConfig ...
type BootstrapConfig struct {
common.Config
@ -32,24 +37,29 @@ type bootstrapper struct {
metrics
common.Bootstrapper
// IDs of vertices that we're already in the process of getting
// TODO: Find a better way to track; this keeps every single vertex's ID in memory when bootstrapping from nothing
seen ids.Set
// true if all of the vertices in the original accepted frontier have been processed
processedStartingAcceptedFrontier bool
numFetched uint64 // number of vertices that have been fetched from validators
// number of vertices processed so far
numProcessed uint32
// vtxReqs prevents asking validators for the same vertex
vtxReqs common.Requests
// tracks which validators were asked for which containers in which requests
outstandingRequests common.Requests
// IDs of vertices that we have requested from other validators but haven't received
pending ids.Set
finished bool
// Contains IDs of vertices that have recently been processed
processedCache *cache.LRU
// true if bootstrapping is done
finished bool
// Called when bootstrapping is done
onFinished func() error
}
// Initialize this engine.
func (b *bootstrapper) Initialize(config BootstrapConfig) error {
b.BootstrapConfig = config
b.processedCache = &cache.LRU{Size: cacheSize}
b.VtxBlocked.SetParser(&vtxParser{
log: config.Context.Log,
@ -88,118 +98,53 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
return acceptedVtxIDs
}
// ForceAccepted ...
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
for _, vtxID := range acceptedContainerIDs.List() {
if err := b.fetch(vtxID); err != nil {
return err
}
}
if numPending := b.pending.Len(); numPending == 0 {
// TODO: This typically indicates bootstrapping has failed, so this
// should be handled appropriately
return b.finish()
}
return nil
}
// Put ...
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
vtx, err := b.State.ParseVertex(vtxBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: vtxBytes})
return b.GetFailed(vdr, requestID)
}
if !b.pending.Contains(vtx.ID()) {
b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested vertex:\n%s",
vdr,
formatting.DumpBytes{Bytes: vtxBytes})
return b.GetFailed(vdr, requestID)
}
return b.addVertex(vtx)
}
// GetFailed ...
func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) error {
vtxID, ok := b.vtxReqs.Remove(vdr, requestID)
if !ok {
b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
vdr)
return nil
}
b.sendRequest(vtxID)
return nil
}
// Get vertex [vtxID] and its ancestors
func (b *bootstrapper) fetch(vtxID ids.ID) error {
if b.pending.Contains(vtxID) {
// Make sure we haven't already requested this vertex
if b.outstandingRequests.Contains(vtxID) {
return nil
}
vtx, err := b.State.GetVertex(vtxID)
if err != nil {
b.sendRequest(vtxID)
// Make sure we don't already have this vertex
if _, err := b.State.GetVertex(vtxID); err == nil {
return nil
}
return b.storeVertex(vtx)
}
func (b *bootstrapper) sendRequest(vtxID ids.ID) {
validators := b.BootstrapConfig.Validators.Sample(1)
validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
if len(validators) == 0 {
b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", vtxID)
return
return fmt.Errorf("Dropping request for %s as there are no validators", vtxID)
}
validatorID := validators[0].ID()
b.RequestID++
b.vtxReqs.RemoveAny(vtxID)
b.vtxReqs.Add(validatorID, b.RequestID, vtxID)
b.pending.Add(vtxID)
b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, vtxID)
b.numBSPendingRequests.Set(float64(b.pending.Len()))
}
func (b *bootstrapper) addVertex(vtx avalanche.Vertex) error {
if err := b.storeVertex(vtx); err != nil {
return err
}
if numPending := b.pending.Len(); numPending == 0 {
return b.finish()
}
b.outstandingRequests.Add(validatorID, b.RequestID, vtxID)
b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors
return nil
}
func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) error {
vts := []avalanche.Vertex{vtx}
b.numFetched++
if b.numFetched%2500 == 0 { // periodically inform user of progress
b.BootstrapConfig.Context.Log.Info("bootstrapping has fetched %d vertices", b.numFetched)
}
// Process vertices
func (b *bootstrapper) process(vtx avalanche.Vertex) error {
toProcess := []avalanche.Vertex{vtx}
for len(toProcess) > 0 {
newLen := len(toProcess) - 1
vtx := toProcess[newLen]
toProcess = toProcess[:newLen]
if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this
continue
}
b.numProcessed++ // Progress tracker
if b.numProcessed%common.StatusUpdateFrequency == 0 {
b.BootstrapConfig.Context.Log.Info("processed %d vertices", b.numProcessed)
}
for len(vts) > 0 {
newLen := len(vts) - 1
vtx := vts[newLen]
vts = vts[:newLen]
vtxID := vtx.ID()
switch status := vtx.Status(); status {
switch vtx.Status() {
case choices.Unknown:
b.sendRequest(vtxID)
if err := b.fetch(vtx.ID()); err != nil {
return err
}
case choices.Rejected:
return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID())
case choices.Processing:
b.pending.Remove(vtxID)
if err := b.VtxBlocked.Push(&vertexJob{
log: b.BootstrapConfig.Context.Log,
numAccepted: b.numBSVtx,
@ -208,7 +153,7 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) error {
}); err == nil {
b.numBSBlockedVtx.Inc()
} else {
b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked")
b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked: %s", err)
}
if err := b.VtxBlocked.Commit(); err != nil {
return err
@ -222,35 +167,103 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) error {
}); err == nil {
b.numBSBlockedTx.Inc()
} else {
b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked")
b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked: %s", err)
}
}
if err := b.TxBlocked.Commit(); err != nil {
return err
}
for _, parent := range vtx.Parents() {
if parentID := parent.ID(); !b.seen.Contains(parentID) {
b.seen.Add(parentID)
vts = append(vts, parent)
}
toProcess = append(toProcess, parent)
}
case choices.Accepted:
b.BootstrapConfig.Context.Log.Verbo("bootstrapping confirmed %s", vtxID)
case choices.Rejected:
return fmt.Errorf("bootstrapping wants to accept %s, however it was previously rejected", vtxID)
b.processedCache.Put(vtx.ID(), nil)
}
}
numPending := b.pending.Len()
b.numBSPendingRequests.Set(float64(numPending))
if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
return b.finish()
}
return nil
}
// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
// with request ID [requestID]. Expects vtxs[0] to be the vertex requested in the corresponding GetAncestors.
func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte) error {
if lenVtxs := len(vtxs); lenVtxs > common.MaxContainersPerMultiPut {
b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains more than maximum number of vertices", vdr, requestID)
return b.GetAncestorsFailed(vdr, requestID)
} else if lenVtxs == 0 {
b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains no vertices", vdr, requestID)
return b.GetAncestorsFailed(vdr, requestID)
}
// Make sure this is in response to a request we made
neededVtxID, needed := b.outstandingRequests.Remove(vdr, requestID)
if !needed { // this message isn't in response to a request we made
b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
return nil
}
neededVtx, err := b.State.ParseVertex(vtxs[0]) // the vertex we requested
if err != nil {
b.BootstrapConfig.Context.Log.Debug("Failed to parse requested vertex %s: %w", neededVtxID, err)
b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxs[0]})
return b.fetch(neededVtxID)
} else if actualID := neededVtx.ID(); !actualID.Equals(neededVtxID) {
b.BootstrapConfig.Context.Log.Debug("expected the first block to be the requested block, %s, but is %s", neededVtxID, actualID)
return b.fetch(neededVtxID)
}
for _, vtxBytes := range vtxs { // Parse/persist all the vertices
if _, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx
b.BootstrapConfig.Context.Log.Debug("Failed to parse vertex: %w", err)
b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes})
}
}
return b.process(neededVtx)
}
// GetAncestorsFailed is called when a GetAncestors message we sent fails
func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) error {
vtxID, ok := b.outstandingRequests.Remove(vdr, requestID)
if !ok {
b.BootstrapConfig.Context.Log.Debug("GetAncestorsFailed(%s, %d) called but there was no outstanding request to this validator with this ID", vdr, requestID)
return nil
}
// Send another request for the vertex
return b.fetch(vtxID)
}
// ForceAccepted ...
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
if err := b.VM.Bootstrapping(); err != nil {
return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
err)
}
for _, vtxID := range acceptedContainerIDs.List() {
if vtx, err := b.State.GetVertex(vtxID); err == nil {
if err := b.process(vtx); err != nil {
return err
}
} else if err := b.fetch(vtxID); err != nil {
return err
}
}
b.processedStartingAcceptedFrontier = true
if numPending := b.outstandingRequests.Len(); numPending == 0 {
return b.finish()
}
return nil
}
// Finish bootstrapping
func (b *bootstrapper) finish() error {
if b.finished {
return nil
}
b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching vertices. executing state transitions...")
b.BootstrapConfig.Context.Log.Info("finished fetching vertices. executing state transitions...")
if err := b.executeAll(b.TxBlocked, b.numBSBlockedTx); err != nil {
return err
@ -259,11 +272,15 @@ func (b *bootstrapper) finish() error {
return err
}
if err := b.VM.Bootstrapped(); err != nil {
return fmt.Errorf("failed to notify VM that bootstrapping has finished: %w",
err)
}
// Start consensus
if err := b.onFinished(); err != nil {
return err
}
b.seen = ids.Set{}
b.finished = true
return nil
}
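The rewritten bootstrapper leans on common.Requests for its bookkeeping: Add ties a (validator, requestID) pair to the vertex asked for, Remove resolves a response or failure, and Contains is what lets fetch skip vertices that are already in flight. Its implementation is not part of this diff; a minimal self-contained sketch consistent with how it is used here (string IDs stand in for ids.ShortID/ids.ID; RemoveAny is omitted for brevity):

package main

import "fmt"

// requestKey identifies an outstanding request by requester and request ID.
type requestKey struct {
	vdr       string
	requestID uint32
}

// requests sketches common.Requests as the bootstrapper uses it: a map from
// outstanding (validator, requestID) pairs to the requested container, plus a
// reverse index so Contains can answer "is anyone already fetching this?".
type requests struct {
	byRequest   map[requestKey]string
	byContainer map[string]requestKey
}

func newRequests() *requests {
	return &requests{
		byRequest:   map[requestKey]string{},
		byContainer: map[string]requestKey{},
	}
}

// Add records that we asked [vdr] for [containerID] in request [requestID].
func (r *requests) Add(vdr string, requestID uint32, containerID string) {
	k := requestKey{vdr, requestID}
	r.byRequest[k] = containerID
	r.byContainer[containerID] = k
}

// Remove resolves a response or failure, returning what the request was for.
func (r *requests) Remove(vdr string, requestID uint32) (string, bool) {
	k := requestKey{vdr, requestID}
	containerID, ok := r.byRequest[k]
	if ok {
		delete(r.byRequest, k)
		delete(r.byContainer, containerID)
	}
	return containerID, ok
}

// Contains is what lets fetch avoid requesting the same vertex twice.
func (r *requests) Contains(containerID string) bool {
	_, ok := r.byContainer[containerID]
	return ok
}

func (r *requests) Len() int { return len(r.byRequest) }

func main() {
	r := newRequests()
	r.Add("vdr1", 7, "vtxA")
	fmt.Println(r.Contains("vtxA")) // true
	id, ok := r.Remove("vdr1", 7)
	fmt.Println(id, ok, r.Len()) // vtxA true 0
}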

File diff suppressed because it is too large

View File

@ -27,7 +27,7 @@ type Vtx struct {
id ids.ID
txs []snowstorm.Tx
height int
height uint64
status choices.Status
bytes []byte
@ -36,6 +36,7 @@ type Vtx struct {
func (v *Vtx) ID() ids.ID { return v.id }
func (v *Vtx) DependencyIDs() []ids.ID { return nil }
func (v *Vtx) Parents() []avalanche.Vertex { return v.parents }
func (v *Vtx) Height() uint64 { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx { return v.txs }
func (v *Vtx) Status() choices.Status { return v.status }
func (v *Vtx) Accept() error { v.status = choices.Accepted; return nil }

View File

@ -121,6 +121,12 @@ func (vtx *uniqueVertex) Parents() []avalanche.Vertex {
return vtx.v.parents
}
func (vtx *uniqueVertex) Height() uint64 {
vtx.refresh()
return vtx.v.vtx.height
}
func (vtx *uniqueVertex) Txs() []snowstorm.Tx {
vtx.refresh()

View File

@ -4,8 +4,12 @@
package avalanche
import (
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/avalanche"
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
"github.com/ava-labs/gecko/snow/engine/common"
@ -15,6 +19,12 @@ import (
"github.com/ava-labs/gecko/utils/wrappers"
)
const (
// TODO define this constant in one place rather than here and in snowman
// Maximum total size, in bytes, of the containers in a MultiPut message
maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
)
// Transitive implements the Engine interface by attempting to fetch all
// transitive dependencies.
type Transitive struct {
@ -40,7 +50,7 @@ type Transitive struct {
// Initialize implements the Engine interface
func (t *Transitive) Initialize(config Config) error {
config.Context.Log.Info("Initializing Avalanche consensus")
config.Context.Log.Info("Initializing consensus engine")
t.Config = config
t.metrics.Initialize(config.Context.Log, config.Params.Namespace, config.Params.Metrics)
@ -61,13 +71,13 @@ func (t *Transitive) finishBootstrapping() error {
if vtx, err := t.Config.State.GetVertex(vtxID); err == nil {
frontier = append(frontier, vtx)
} else {
t.Config.Context.Log.Error("Vertex %s failed to be loaded from the frontier with %s", vtxID, err)
t.Config.Context.Log.Error("vertex %s failed to be loaded from the frontier with %s", vtxID, err)
}
}
t.Consensus.Initialize(t.Config.Context, t.Params, frontier)
t.bootstrapped = true
t.Config.Context.Log.Info("Bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
t.Config.Context.Log.Info("bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
return nil
}
@ -75,7 +85,7 @@ func (t *Transitive) finishBootstrapping() error {
func (t *Transitive) Gossip() error {
edge := t.Config.State.Edge()
if len(edge) == 0 {
t.Config.Context.Log.Debug("Dropping gossip request as no vertices have been accepted")
t.Config.Context.Log.Verbo("dropping gossip request as no vertices have been accepted")
return nil
}
@ -83,18 +93,18 @@ func (t *Transitive) Gossip() error {
vtxID := edge[sampler.Sample()]
vtx, err := t.Config.State.GetVertex(vtxID)
if err != nil {
t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", vtxID, err)
t.Config.Context.Log.Warn("dropping gossip request as %s couldn't be loaded due to: %s", vtxID, err)
return nil
}
t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", vtxID)
t.Config.Context.Log.Verbo("gossiping %s as accepted to the network", vtxID)
t.Config.Sender.Gossip(vtxID, vtx.Bytes())
return nil
}
// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() error {
t.Config.Context.Log.Info("Shutting down Avalanche consensus")
t.Config.Context.Log.Info("shutting down consensus engine")
return t.Config.VM.Shutdown()
}
@ -110,19 +120,63 @@ func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error
return nil
}
// GetAncestors implements the Engine interface
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
startTime := time.Now()
t.Config.Context.Log.Verbo("GetAncestors(%s, %d, %s) called", vdr, requestID, vtxID)
vertex, err := t.Config.State.GetVertex(vtxID)
if err != nil || vertex.Status() == choices.Unknown {
t.Config.Context.Log.Verbo("dropping getAncestors")
return nil // Don't have the requested vertex. Drop message.
}
queue := make([]avalanche.Vertex, 1, common.MaxContainersPerMultiPut) // for BFS
queue[0] = vertex
ancestorsBytesLen := 0 // length, in bytes, of vertex and its ancestors
ancestorsBytes := make([][]byte, 0, common.MaxContainersPerMultiPut) // vertex and its ancestors in BFS order
visited := ids.Set{} // IDs of vertices that have been in queue before
visited.Add(vertex.ID())
for len(ancestorsBytes) < common.MaxContainersPerMultiPut && len(queue) > 0 && time.Since(startTime) < common.MaxTimeFetchingAncestors {
var vtx avalanche.Vertex
vtx, queue = queue[0], queue[1:] // pop
vtxBytes := vtx.Bytes()
// Ensure the response size isn't too large. Include wrappers.IntLen because
// each container is prepended with its size, which is represented by an int.
if newLen := wrappers.IntLen + ancestorsBytesLen + len(vtxBytes); newLen < maxContainersLen {
ancestorsBytes = append(ancestorsBytes, vtxBytes)
ancestorsBytesLen = newLen
} else { // reached maximum response size
break
}
for _, parent := range vtx.Parents() {
if parent.Status() == choices.Unknown { // Don't have this vertex; ignore
continue
}
if parentID := parent.ID(); !visited.Contains(parentID) { // If already visited, ignore
queue = append(queue, parent)
visited.Add(parentID)
}
}
}
t.Config.Sender.MultiPut(vdr, requestID, ancestorsBytes)
return nil
}
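With DefaultMaxMessageSize = 1 << 21 (2 MiB), maxContainersLen works out to 4 * 2,097,152 / 5 = 1,677,721 bytes, and each appended vertex is additionally charged wrappers.IntLen bytes for its length prefix. A worked check of that budget arithmetic, assuming wrappers.IntLen is 4 (a 32-bit length) and using hypothetical vertex sizes:

package main

import "fmt"

func main() {
	const defaultMaxMessageSize = 1 << 21             // 2,097,152 bytes
	const intLen = 4                                  // assumed wrappers.IntLen: 32-bit length prefix
	maxContainersLen := 4 * defaultMaxMessageSize / 5 // 1,677,721 bytes of payload budget

	ancestorsBytesLen := 0
	vtxSizes := []int{500_000, 700_000, 600_000} // hypothetical vertex sizes
	for _, size := range vtxSizes {
		// Same check as GetAncestors: charge the length prefix with each vertex.
		if newLen := intLen + ancestorsBytesLen + size; newLen < maxContainersLen {
			ancestorsBytesLen = newLen
		} else {
			fmt.Printf("stopping: %d + %d + %d >= %d\n", intLen, ancestorsBytesLen, size, maxContainersLen)
			break
		}
	}
	fmt.Println("packed bytes:", ancestorsBytesLen) // 1200008: the third vertex would exceed the cap
}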
// Put implements the Engine interface
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
t.Config.Context.Log.Verbo("Put called for vertexID %s", vtxID)
t.Config.Context.Log.Verbo("Put(%s, %d, %s) called", vdr, requestID, vtxID)
if !t.bootstrapped {
return t.bootstrapper.Put(vdr, requestID, vtxID, vtxBytes)
if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
return nil
}
vtx, err := t.Config.State.ParseVertex(vtxBytes)
if err != nil {
t.Config.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: vtxBytes})
t.Config.Context.Log.Debug("failed to parse vertex %s due to: %s", vtxID, err)
t.Config.Context.Log.Verbo("vertex:\n%s", formatting.DumpBytes{Bytes: vtxBytes})
return t.GetFailed(vdr, requestID)
}
_, err = t.insertFrom(vdr, vtx)
@ -131,14 +185,14 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxByt
// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
if !t.bootstrapped {
return t.bootstrapper.GetFailed(vdr, requestID)
if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
t.Config.Context.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}
vtxID, ok := t.vtxReqs.Remove(vdr, requestID)
if !ok {
t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
vdr)
t.Config.Context.Log.Debug("GetFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
return nil
}
@ -160,7 +214,7 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
// PullQuery implements the Engine interface
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID) error {
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping", vtxID)
t.Config.Context.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
return nil
}
@ -188,15 +242,14 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID)
// PushQuery implements the Engine interface
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) error {
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", vtxID)
t.Config.Context.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
return nil
}
vtx, err := t.Config.State.ParseVertex(vtxBytes)
if err != nil {
t.Config.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: vtxBytes})
t.Config.Context.Log.Debug("failed to parse vertex %s due to: %s", vtxID, err)
t.Config.Context.Log.Verbo("vertex:\n%s", formatting.DumpBytes{Bytes: vtxBytes})
return nil
}
@ -210,7 +263,7 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID,
// Chits implements the Engine interface
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) error {
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
t.Config.Context.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}
@ -241,7 +294,7 @@ func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
// Notify implements the Engine interface
func (t *Transitive) Notify(msg common.Message) error {
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
t.Config.Context.Log.Debug("dropping Notify due to bootstrapping")
return nil
}
@ -345,7 +398,7 @@ func (t *Transitive) insert(vtx avalanche.Vertex) error {
}
}
t.Config.Context.Log.Verbo("Vertex: %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len())
t.Config.Context.Log.Verbo("vertex %s is blocking on %d vertices and %d transactions", vtxID, i.vtxDeps.Len(), i.txDeps.Len())
t.vtxBlocked.Register(&vtxIssuer{i: i})
t.txBlocked.Register(&txIssuer{i: i})
@ -403,7 +456,7 @@ func (t *Transitive) issueRepoll() {
preferredIDs := t.Consensus.Preferences().List()
numPreferredIDs := len(preferredIDs)
if numPreferredIDs == 0 {
t.Config.Context.Log.Error("Re-query attempt was dropped due to no pending vertices")
t.Config.Context.Log.Error("re-query attempt was dropped due to no pending vertices")
return
}
@ -422,12 +475,12 @@ func (t *Transitive) issueRepoll() {
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Sender.PullQuery(vdrSet, t.RequestID, vtxID)
} else if numVdrs < p.K {
t.Config.Context.Log.Error("Re-query for %s was dropped due to an insufficient number of validators", vtxID)
t.Config.Context.Log.Error("re-query for %s was dropped due to an insufficient number of validators", vtxID)
}
}
func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {
t.Config.Context.Log.Verbo("Batching %d transactions into a new vertex", len(txs))
t.Config.Context.Log.Verbo("batching %d transactions into a new vertex", len(txs))
virtuousIDs := t.Consensus.Virtuous().List()
sampler := random.Uniform{N: len(virtuousIDs)}
@ -438,7 +491,7 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {
vtx, err := t.Config.State.BuildVertex(parentIDs, txs)
if err != nil {
t.Config.Context.Log.Warn("Error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs))
t.Config.Context.Log.Warn("error building new vertex with %d parents and %d transactions", len(parentIDs), len(txs))
return nil
}
return t.insert(vtx)
@ -446,7 +499,7 @@ func (t *Transitive) issueBatch(txs []snowstorm.Tx) error {
func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) {
if t.vtxReqs.Contains(vtxID) {
t.Config.Context.Log.Debug("Not requesting a vertex because we have recently sent a request")
t.Config.Context.Log.Debug("not requesting a vertex because we have recently sent a request")
return
}

View File

@ -2167,6 +2167,9 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
vm.Default(true)
vm.CantBootstrapping = false
vm.CantBootstrapped = false
utxos := []ids.ID{GenerateID(), GenerateID()}
txID0 := GenerateID()
@ -2272,7 +2275,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
panic("Unknown vertex requested")
}
sender.GetF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) {
sender.GetAncestorsF = func(inVdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdrID.Equals(inVdr) {
t.Fatalf("Asking wrong validator for vertex")
}
@ -2315,7 +2318,7 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) {
panic("Unknown bytes provided")
}
te.Put(vdrID, *requestID, vtxID0, vtxBytes0)
te.MultiPut(vdrID, *requestID, [][]byte{vtxBytes0})
vm.ParseTxF = nil
st.parseVertex = nil

View File

@ -0,0 +1,117 @@
package avalanche
import (
"container/heap"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/consensus/avalanche"
)
// A vertexItem is a Vertex managed by the priority queue.
type vertexItem struct {
vertex avalanche.Vertex
index int // The index of the item in the heap.
}
// A priorityQueue implements heap.Interface and holds vertexItems.
type priorityQueue []*vertexItem
func (pq priorityQueue) Len() int { return len(pq) }
// Returns true if the vertex at index i has greater height than the vertex at
// index j.
func (pq priorityQueue) Less(i, j int) bool {
statusI := pq[i].vertex.Status()
statusJ := pq[j].vertex.Status()
// Put unknown vertices at the front of the heap to ensure once we have made
// it below a certain height in DAG traversal we do not need to reset
if !statusI.Fetched() {
return true
}
if !statusJ.Fetched() {
return false
}
return pq[i].vertex.Height() > pq[j].vertex.Height()
}
func (pq priorityQueue) Swap(i, j int) {
pq[i], pq[j] = pq[j], pq[i]
pq[i].index = i
pq[j].index = j
}
// Push adds an item to this priority queue. x must have type *vertexItem
func (pq *priorityQueue) Push(x interface{}) {
n := len(*pq)
item := x.(*vertexItem)
item.index = n
*pq = append(*pq, item)
}
// Pop returns the last item in this priorityQueue
func (pq *priorityQueue) Pop() interface{} {
old := *pq
n := len(old)
item := old[n-1]
old[n-1] = nil
item.index = -1
*pq = old[0 : n-1]
return item
}
// vertexHeap defines the functionality of a heap of vertices
// with unique VertexIDs ordered by height
type vertexHeap interface {
Clear()
Push(avalanche.Vertex)
Pop() avalanche.Vertex // Requires that there be at least one element
Contains(avalanche.Vertex) bool
Len() int
}
type maxHeightVertexHeap struct {
heap *priorityQueue
elementIDs ids.Set
}
func newMaxVertexHeap() *maxHeightVertexHeap {
return &maxHeightVertexHeap{
heap: &priorityQueue{},
elementIDs: ids.Set{},
}
}
func (vh *maxHeightVertexHeap) Clear() {
vh.heap = &priorityQueue{}
vh.elementIDs.Clear()
}
// Push adds an element to this heap. Returns true if the element was added.
// Returns false if it was already in the heap.
func (vh *maxHeightVertexHeap) Push(vtx avalanche.Vertex) bool {
vtxID := vtx.ID()
if vh.elementIDs.Contains(vtxID) {
return false
}
vh.elementIDs.Add(vtxID)
item := &vertexItem{
vertex: vtx,
}
heap.Push(vh.heap, item)
return true
}
// If there are any vertices in this heap with status Unknown, removes one such
// vertex and returns it. Otherwise, removes and returns the vertex in this heap
// with the greatest height.
func (vh *maxHeightVertexHeap) Pop() avalanche.Vertex {
vtx := heap.Pop(vh.heap).(*vertexItem).vertex
vh.elementIDs.Remove(vtx.ID())
return vtx
}
func (vh *maxHeightVertexHeap) Len() int { return vh.heap.Len() }
func (vh *maxHeightVertexHeap) Contains(vtxID ids.ID) bool { return vh.elementIDs.Contains(vtxID) }

View File

@ -0,0 +1,130 @@
package avalanche
import (
"testing"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/avalanche"
)
// This test inserts several vertices into a max-height vertex heap and
// removes them, checking that they come out in priority order: unknown
// vertices first, then by decreasing height.
func TestUniqueVertexHeapReturnsOrdered(t *testing.T) {
h := newMaxVertexHeap()
vtx0 := &Vtx{
id: GenerateID(),
height: 0,
status: choices.Processing,
}
vtx1 := &Vtx{
id: GenerateID(),
height: 1,
status: choices.Processing,
}
vtx2 := &Vtx{
id: GenerateID(),
height: 1,
status: choices.Processing,
}
vtx3 := &Vtx{
id: GenerateID(),
height: 3,
status: choices.Processing,
}
vtx4 := &Vtx{
id: GenerateID(),
status: choices.Unknown,
}
vts := []avalanche.Vertex{vtx0, vtx1, vtx2, vtx3, vtx4}
for _, vtx := range vts {
h.Push(vtx)
}
vtxZ := h.Pop()
if !vtxZ.ID().Equals(vtx4.ID()) {
t.Fatalf("Heap did not pop unknown element first")
}
vtxA := h.Pop()
if vtxA.Height() != 3 {
t.Fatalf("First height from heap was incorrect")
} else if !vtxA.ID().Equals(vtx3.ID()) {
t.Fatalf("Incorrect ID on vertex popped from heap")
}
vtxB := h.Pop()
if vtxB.Height() != 1 {
t.Fatalf("First height from heap was incorrect")
} else if !vtxB.ID().Equals(vtx1.ID()) && !vtxB.ID().Equals(vtx2.ID()) {
t.Fatalf("Incorrect ID on vertex popped from heap")
}
vtxC := h.Pop()
if vtxC.Height() != 1 {
t.Fatalf("First height from heap was incorrect")
} else if !vtxC.ID().Equals(vtx1.ID()) && !vtxC.ID().Equals(vtx2.ID()) {
t.Fatalf("Incorrect ID on vertex popped from heap")
}
if vtxB.ID().Equals(vtxC.ID()) {
t.Fatalf("Heap returned same element more than once")
}
vtxD := h.Pop()
if vtxD.Height() != 0 {
t.Fatalf("Last height returned was incorrect")
} else if !vtxD.ID().Equals(vtx0.ID()) {
t.Fatalf("Last item from heap had incorrect ID")
}
if h.Len() != 0 {
t.Fatalf("Heap was not empty after popping all of its elements")
}
}
func TestUniqueVertexHeapRemainsUnique(t *testing.T) {
h := newMaxVertexHeap()
vtx0 := &Vtx{
height: 0,
id: GenerateID(),
status: choices.Processing,
}
vtx1 := &Vtx{
height: 1,
id: GenerateID(),
status: choices.Processing,
}
sharedID := GenerateID()
vtx2 := &Vtx{
height: 1,
id: sharedID,
status: choices.Processing,
}
vtx3 := &Vtx{
height: 2,
id: sharedID,
status: choices.Processing,
}
pushed1 := h.Push(vtx0)
pushed2 := h.Push(vtx1)
pushed3 := h.Push(vtx2)
pushed4 := h.Push(vtx3)
if h.Len() != 3 {
t.Fatalf("Unique Vertex Heap has incorrect length: %d", h.Len())
} else if !(pushed1 && pushed2 && pushed3) {
t.Fatalf("Failed to push a new unique element")
} else if pushed4 {
t.Fatalf("Pushed non-unique element to the unique vertex heap")
}
}

View File

@ -5,7 +5,6 @@ package avalanche
import (
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/consensus/avalanche"
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)
@ -60,47 +59,56 @@ func (v *voter) Update() {
}
if v.t.Consensus.Quiesce() {
v.t.Config.Context.Log.Verbo("Avalanche engine can quiesce")
v.t.Config.Context.Log.Debug("Avalanche engine can quiesce")
return
}
v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce")
v.t.Config.Context.Log.Debug("Avalanche engine can't quiesce")
v.t.errs.Add(v.t.repoll())
}
func (v *voter) bubbleVotes(votes ids.UniqueBag) ids.UniqueBag {
bubbledVotes := ids.UniqueBag{}
vertexHeap := newMaxVertexHeap()
for _, vote := range votes.List() {
set := votes.GetSet(vote)
vtx, err := v.t.Config.State.GetVertex(vote)
if err != nil {
continue
}
vts := []avalanche.Vertex{vtx}
for len(vts) > 0 {
vtx := vts[0]
vts = vts[1:]
vertexHeap.Push(vtx)
}
status := vtx.Status()
if !status.Fetched() {
v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is unknown", set.Len(), vtx.ID())
continue
}
for vertexHeap.Len() > 0 {
vtx := vertexHeap.Pop()
vtxID := vtx.ID()
set := votes.GetSet(vtxID)
status := vtx.Status()
if status.Decided() {
v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is decided", set.Len(), vtx.ID())
continue
}
if !status.Fetched() {
v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is unknown", set.Len(), vtxID)
bubbledVotes.RemoveSet(vtx.ID())
continue
}
if v.t.Consensus.VertexIssued(vtx) {
v.t.Config.Context.Log.Verbo("Applying %d vote(s) for %s", set.Len(), vtx.ID())
bubbledVotes.UnionSet(vtx.ID(), set)
} else {
v.t.Config.Context.Log.Verbo("Bubbling %d vote(s) for %s because the vertex isn't issued", set.Len(), vtx.ID())
vts = append(vts, vtx.Parents()...)
if status.Decided() {
v.t.Config.Context.Log.Verbo("Dropping %d vote(s) for %s because the vertex is decided", set.Len(), vtxID)
bubbledVotes.RemoveSet(vtx.ID())
continue
}
if v.t.Consensus.VertexIssued(vtx) {
v.t.Config.Context.Log.Verbo("Applying %d vote(s) for %s", set.Len(), vtx.ID())
bubbledVotes.UnionSet(vtx.ID(), set)
} else {
v.t.Config.Context.Log.Verbo("Bubbling %d vote(s) for %s because the vertex isn't issued", set.Len(), vtx.ID())
bubbledVotes.RemoveSet(vtx.ID()) // Remove votes for this vertex because it hasn't been issued
for _, parentVtx := range vtx.Parents() {
bubbledVotes.UnionSet(parentVtx.ID(), set)
vertexHeap.Push(parentVtx)
}
}
}
return bubbledVotes
}

View File

@ -5,15 +5,31 @@ package common
import (
stdmath "math"
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/math"
)
const (
// MaxContainersPerMultiPut is the maximum number of containers that can be sent in a MultiPut
MaxContainersPerMultiPut = 2000
// StatusUpdateFrequency is how often progress is logged: the bootstrapper logs "processed X blocks/vertices" every [StatusUpdateFrequency] blocks/vertices
StatusUpdateFrequency = 2500
)
var (
// MaxTimeFetchingAncestors is the maximum amount of time to spend fetching vertices during a call to GetAncestors
MaxTimeFetchingAncestors = 100 * time.Millisecond
)
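Taken together, these bounds cap a MultiPut response by container count and by wall-clock time (the engines additionally cap total byte size). A minimal sketch of how a responder might enforce the first two, where [next] is a hypothetical iterator over ancestor bytes and not part of this package; the snowman engine's GetAncestors later in this diff is the real implementation:
func collectAncestors(next func() ([]byte, bool)) [][]byte {
	startTime := time.Now()
	ancestors := make([][]byte, 0, MaxContainersPerMultiPut)
	// Stop when either the count bound or the time bound trips
	for len(ancestors) < MaxContainersPerMultiPut && time.Since(startTime) < MaxTimeFetchingAncestors {
		containerBytes, ok := next()
		if !ok { // no more known ancestors
			break
		}
		ancestors = append(ancestors, containerBytes)
	}
	return ancestors
}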
// Bootstrapper implements the Engine interface.
type Bootstrapper struct {
Config
// IDs of validators we have requested the accepted frontier from but haven't
// received a reply from
pendingAcceptedFrontier ids.ShortSet
acceptedFrontier ids.Set
@ -43,6 +59,7 @@ func (b *Bootstrapper) Startup() error {
return b.Bootstrapable.ForceAccepted(ids.Set{})
}
// Ask each of the bootstrap validators to send their accepted frontier
vdrs := ids.ShortSet{}
vdrs.Union(b.pendingAcceptedFrontier)
@ -59,6 +76,7 @@ func (b *Bootstrapper) GetAcceptedFrontier(validatorID ids.ShortID, requestID ui
// GetAcceptedFrontierFailed implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) error {
// If we can't get a response from [validatorID], act as though they said their accepted frontier is empty
b.AcceptedFrontier(validatorID, requestID, ids.Set{})
return nil
}
@ -69,10 +87,16 @@ func (b *Bootstrapper) AcceptedFrontier(validatorID ids.ShortID, requestID uint3
b.Context.Log.Debug("Received an AcceptedFrontier message from %s unexpectedly", validatorID)
return nil
}
// Mark that we received a response from [validatorID]
b.pendingAcceptedFrontier.Remove(validatorID)
// Union the reported accepted frontier from [validatorID] with the accepted frontier we got from others
b.acceptedFrontier.Union(containerIDs)
// We've received the accepted frontier from every bootstrap validator
// Ask each bootstrap validator to filter the list of containers that we were
// told are on the accepted frontier such that the list only contains containers
// they think are accepted
if b.pendingAcceptedFrontier.Len() == 0 {
vdrs := ids.ShortSet{}
vdrs.Union(b.pendingAccepted)
@ -91,6 +115,8 @@ func (b *Bootstrapper) GetAccepted(validatorID ids.ShortID, requestID uint32, co
// GetAcceptedFailed implements the Engine interface.
func (b *Bootstrapper) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) error {
// If we can't get a response from [validatorID], act as though they said
// that they think none of the containers we sent them in GetAccepted are accepted
return b.Accepted(validatorID, requestID, ids.Set{})
}
@ -100,6 +126,7 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta
b.Context.Log.Debug("Received an Accepted message from %s unexpectedly", validatorID)
return nil
}
// Mark that we received a response from [validatorID]
b.pendingAccepted.Remove(validatorID)
weight := uint64(0)
@ -121,6 +148,8 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta
return nil
}
// We've received the filtered accepted frontier from every bootstrap validator
// Accept all containers that have a sufficient weight behind them
accepted := ids.Set{}
for key, weight := range b.acceptedVotes {
if weight >= b.Config.Alpha {

View File

@ -135,6 +135,21 @@ type FetchHandler interface {
// dropped.
Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
// Notify this engine of a request for a container and its ancestors.
// The request is from validator [validatorID]. The requested container is [containerID].
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID. It is also not safe to
// assume the requested containerID exists. However, the validatorID is
// assumed to be authenticated.
//
// This engine should respond with a MultiPut message with the same requestID,
// which contains [containerID] as well as its ancestors. See MultiPut's documentation.
//
// If this engine doesn't have some of the requested ancestors, it should reply with its best-effort attempt at providing them.
// If this engine doesn't have [containerID] it can ignore this message.
GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
// Notify this engine of a container.
//
// This function can be called by any validator. It is not safe to assume
@ -152,6 +167,24 @@ type FetchHandler interface {
container []byte,
) error
// Notify this engine of multiple containers.
// Each element of [containers] is the byte representation of a container.
//
// This should only be called during bootstrapping, and in response to a GetAncestors message to
// [validatorID] with request ID [requestID]. This call should contain the container requested in
// that message, along with ancestors.
// The containers should be in BFS order (i.e. the first container must be the container
// requested in the GetAncestors message, and more distant ancestors appear later in [containers]).
//
// It is not safe to assume this message is in response to a GetAncestors message, that this
// message has a unique requestID, or that any of the containers in [containers] are valid.
// However, the validatorID is assumed to be authenticated.
MultiPut(
validatorID ids.ShortID,
requestID uint32,
containers [][]byte,
) error
// Notify this engine that a get request it issued has failed.
//
// This function will be called if the engine sent a Get message that is not
@ -161,6 +194,16 @@ type FetchHandler interface {
// The validatorID and requestID are assumed to be the same as those sent in
// the Get message.
GetFailed(validatorID ids.ShortID, requestID uint32) error
// Notify this engine that a GetAncestors request it issued has failed.
//
// This function will be called if the engine sent a GetAncestors message that is not
// anticipated to be responded to. This could be because the recipient of
// the message is unknown or if the message request has timed out.
//
// The validatorID and requestID are assumed to be the same as those sent in
// the GetAncestors message.
GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) error
}
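Because none of these guarantees hold, a receiving engine must validate a MultiPut before using it. A hedged sketch of the minimal checks, where [parseContainer] is a hypothetical helper that returns a container's ID; the snowman bootstrapper later in this diff performs these same checks for real:
// Sketch only: verify a MultiPut plausibly answers the request for [wantedID].
func validateMultiPut(wantedID ids.ID, containers [][]byte, parseContainer func([]byte) (ids.ID, error)) error {
	if len(containers) == 0 {
		return errors.New("MultiPut contained no containers")
	}
	// BFS order means the first container must be the one that was requested
	gotID, err := parseContainer(containers[0])
	if err != nil {
		return fmt.Errorf("failed to parse first container: %s", err)
	}
	if !gotID.Equals(wantedID) {
		return fmt.Errorf("requested %s but first container is %s", wantedID, gotID)
	}
	return nil
}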
// QueryHandler defines how a consensus engine reacts to query messages from

View File

@ -50,9 +50,17 @@ type FetchSender interface {
// to this validator
Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
// GetAncestors requests that the validator with ID [validatorID] send container [containerID] and its
// ancestors. The maximum number of ancestors to send in response is defined in snow/engine/common/bootstrapper.go
GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
// Tell the specified validator that the container whose ID is <containerID>
// has body <container>
Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)
// Give the specified validator several containers at once
// Should be in response to a GetAncestors message with request ID [requestID] from the validator
MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte)
}
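The request ID is what ties the exchange together: a MultiPut must echo the ID of the GetAncestors it answers, or the requester will discard it. A trivial hedged sketch of the responder-side pairing (the function and its [ancestors] argument are illustrative, not part of this package):
// Sketch: reply to a GetAncestors by echoing its requestID in the MultiPut
func replyToGetAncestors(sender FetchSender, vdr ids.ShortID, requestID uint32, ancestors [][]byte) {
	// Same requestID as the GetAncestors, so the requester can match the reply
	sender.MultiPut(vdr, requestID, ancestors)
}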
// QuerySender defines how a consensus engine sends query messages to other

View File

@ -32,21 +32,26 @@ type EngineTest struct {
CantAccepted,
CantGet,
CantGetAncestors,
CantGetFailed,
CantGetAncestorsFailed,
CantPut,
CantMultiPut,
CantPushQuery,
CantPullQuery,
CantQueryFailed,
CantChits bool
ContextF func() *snow.Context
StartupF, GossipF, ShutdownF func() error
NotifyF func(Message) error
GetF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
PutF, PushQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error
AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error
GetAcceptedFrontierF, GetFailedF, QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(validatorID ids.ShortID, requestID uint32) error
ContextF func() *snow.Context
StartupF, GossipF, ShutdownF func() error
NotifyF func(Message) error
GetF, GetAncestorsF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error
PutF, PushQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error
MultiPutF func(validatorID ids.ShortID, requestID uint32, containers [][]byte) error
AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) error
GetAcceptedFrontierF, GetFailedF, GetAncestorsFailedF,
QueryFailedF, GetAcceptedFrontierFailedF, GetAcceptedFailedF func(validatorID ids.ShortID, requestID uint32) error
}
var _ Engine = &EngineTest{}
@ -70,8 +75,11 @@ func (e *EngineTest) Default(cant bool) {
e.CantAccepted = cant
e.CantGet = cant
e.CantGetAncestors = cant
e.CantGetAncestorsFailed = cant
e.CantGetFailed = cant
e.CantPut = cant
e.CantMultiPut = cant
e.CantPushQuery = cant
e.CantPullQuery = cant
@ -233,6 +241,16 @@ func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID
return nil
}
// GetAncestors ...
func (e *EngineTest) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) error {
if e.GetAncestorsF != nil {
return e.GetAncestorsF(validatorID, requestID, containerID)
} else if e.CantGetAncestors && e.T != nil {
e.T.Fatalf("Unexpectedly called GetAncestors")
}
return nil
}
// GetFailed ...
func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error {
if e.GetFailedF != nil {
@ -246,6 +264,19 @@ func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) error
return nil
}
// GetAncestorsFailed ...
func (e *EngineTest) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) error {
if e.GetAncestorsFailedF != nil {
return e.GetAncestorsFailedF(validatorID, requestID)
} else if e.CantGetAncestorsFailed {
if e.T != nil {
e.T.Fatalf("Unexpectedly called GetAncestorsFailed")
}
return errors.New("Unexpectedly called GetAncestorsFailed")
}
return nil
}
// Put ...
func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error {
if e.PutF != nil {
@ -259,6 +290,19 @@ func (e *EngineTest) Put(validatorID ids.ShortID, requestID uint32, containerID
return nil
}
// MultiPut ...
func (e *EngineTest) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) error {
if e.MultiPutF != nil {
return e.MultiPutF(validatorID, requestID, containers)
} else if e.CantMultiPut {
if e.T != nil {
e.T.Fatalf("Unexpectedly called MultiPut")
}
return errors.New("Unexpectedly called MultiPut")
}
return nil
}
// PushQuery ...
func (e *EngineTest) PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) error {
if e.PushQueryF != nil {

View File

@ -15,7 +15,7 @@ type SenderTest struct {
CantGetAcceptedFrontier, CantAcceptedFrontier,
CantGetAccepted, CantAccepted,
CantGet, CantPut,
CantGet, CantGetAncestors, CantPut, CantMultiPut,
CantPullQuery, CantPushQuery, CantChits,
CantGossip bool
@ -24,7 +24,9 @@ type SenderTest struct {
GetAcceptedF func(ids.ShortSet, uint32, ids.Set)
AcceptedF func(ids.ShortID, uint32, ids.Set)
GetF func(ids.ShortID, uint32, ids.ID)
GetAncestorsF func(ids.ShortID, uint32, ids.ID)
PutF func(ids.ShortID, uint32, ids.ID, []byte)
MultiPutF func(ids.ShortID, uint32, [][]byte)
PushQueryF func(ids.ShortSet, uint32, ids.ID, []byte)
PullQueryF func(ids.ShortSet, uint32, ids.ID)
ChitsF func(ids.ShortID, uint32, ids.Set)
@ -38,7 +40,9 @@ func (s *SenderTest) Default(cant bool) {
s.CantGetAccepted = cant
s.CantAccepted = cant
s.CantGet = cant
s.CantGetAncestors = cant
s.CantPut = cant
s.CantMultiPut = cant
s.CantPullQuery = cant
s.CantPushQuery = cant
s.CantChits = cant
@ -100,6 +104,17 @@ func (s *SenderTest) Get(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
}
}
// GetAncestors calls GetAncestorsF if it was initialized. If it
// wasn't initialized and this function shouldn't be called and testing was
// initialized, then testing will fail.
func (s *SenderTest) GetAncestors(validatorID ids.ShortID, requestID uint32, vtxID ids.ID) {
if s.GetAncestorsF != nil {
s.GetAncestorsF(validatorID, requestID, vtxID)
} else if s.CantGetAncestors && s.T != nil {
s.T.Fatalf("Unexpectedly called CantGetAncestors")
}
}
// Put calls PutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
@ -111,6 +126,17 @@ func (s *SenderTest) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtx []
}
}
// MultiPut calls MultiPutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *SenderTest) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte) {
if s.MultiPutF != nil {
s.MultiPutF(vdr, requestID, vtxs)
} else if s.CantMultiPut && s.T != nil {
s.T.Fatalf("Unexpectedly called MultiPut")
}
}
// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
// and this function shouldn't be called and testing was initialized, then
// testing will fail.

View File

@ -19,19 +19,22 @@ var (
type VMTest struct {
T *testing.T
CantInitialize, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool
CantInitialize, CantBootstrapping, CantBootstrapped, CantShutdown, CantCreateHandlers, CantCreateStaticHandlers bool
InitializeF func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error
ShutdownF func() error
CreateHandlersF func() map[string]*HTTPHandler
CreateStaticHandlersF func() map[string]*HTTPHandler
InitializeF func(*snow.Context, database.Database, []byte, chan<- Message, []*Fx) error
BootstrappingF, BootstrappedF, ShutdownF func() error
CreateHandlersF func() map[string]*HTTPHandler
CreateStaticHandlersF func() map[string]*HTTPHandler
}
// Default ...
func (vm *VMTest) Default(cant bool) {
vm.CantInitialize = cant
vm.CantBootstrapping = cant
vm.CantBootstrapped = cant
vm.CantShutdown = cant
vm.CantCreateHandlers = cant
vm.CantCreateStaticHandlers = cant
}
// Initialize ...
@ -45,6 +48,32 @@ func (vm *VMTest) Initialize(ctx *snow.Context, db database.Database, initState
return errInitialize
}
// Bootstrapping ...
func (vm *VMTest) Bootstrapping() error {
if vm.BootstrappingF != nil {
return vm.BootstrappingF()
} else if vm.CantBootstrapping {
if vm.T != nil {
vm.T.Fatalf("Unexpectedly called Bootstrapping")
}
return errors.New("Unexpectedly called Bootstrapping")
}
return nil
}
// Bootstrapped ...
func (vm *VMTest) Bootstrapped() error {
if vm.BootstrappedF != nil {
return vm.BootstrappedF()
} else if vm.CantBootstrapped {
if vm.T != nil {
vm.T.Fatalf("Unexpectedly called Bootstrapped")
}
return errors.New("Unexpectedly called Bootstrapped")
}
return nil
}
// Shutdown ...
func (vm *VMTest) Shutdown() error {
if vm.ShutdownF != nil {

View File

@ -12,8 +12,7 @@ import (
type VM interface {
// Initialize this VM.
// [ctx]: Metadata about this VM.
// [ctx.networkID]: The ID of the network this VM's chain is running
// on.
// [ctx.networkID]: The ID of the network this VM's chain is running on.
// [ctx.chainID]: The unique ID of the chain this VM is running on.
// [ctx.Log]: Used to log messages
// [ctx.NodeID]: The unique staker ID of this node.
@ -37,6 +36,12 @@ type VM interface {
fxs []*Fx,
) error
// Bootstrapping is called when the node is starting to bootstrap this chain.
Bootstrapping() error
// Bootstrapped is called when the node is done bootstrapping this chain.
Bootstrapped() error
// Shutdown is called when the node is shutting down.
Shutdown() error
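A VM with no bootstrap-specific work can satisfy these hooks with no-ops; a hypothetical sketch (noopVM is not a real type in this repo):
// noopVM sketches the two lifecycle hooks; real VMs might pause gossip,
// batch database writes, etc. while bootstrapping.
type noopVM struct{}

func (noopVM) Bootstrapping() error { return nil } // bootstrapping of this chain began
func (noopVM) Bootstrapped() error  { return nil } // bootstrapping of this chain finished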

View File

@ -22,9 +22,6 @@ type BootstrapConfig struct {
// Blocked tracks operations that are blocked on blocks
Blocked *queue.Jobs
// blocks that have outstanding get requests
blkReqs common.Requests
VM ChainVM
Bootstrapped func()
@ -35,8 +32,19 @@ type bootstrapper struct {
metrics
common.Bootstrapper
pending ids.Set
finished bool
// true if all of the blocks in the original accepted frontier have been processed
processedStartingAcceptedFrontier bool
// Number of blocks processed
numProcessed uint32
// tracks which validators were asked for which containers in which requests
outstandingRequests common.Requests
// true if bootstrapping is done
finished bool
// Called when bootstrapping is done
onFinished func() error
}
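The [outstandingRequests] bookkeeping maps each (validator, requestID) pair to the container asked of it, so replies and timeouts can be matched back to what was requested. An illustrative map-based stand-in for common.Requests, with its shape inferred from the call sites below rather than from the real definition:
// requestTracker sketches the semantics this bootstrapper relies on
type requestTracker struct {
	reqs map[string]ids.ID
}

func reqKey(vdr ids.ShortID, requestID uint32) string {
	return fmt.Sprintf("%s-%d", vdr, requestID)
}

// Add records that [containerID] was requested from [vdr] under [requestID]
func (r *requestTracker) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) {
	r.reqs[reqKey(vdr, requestID)] = containerID
}

// Remove returns which container, if any, was requested of (vdr, requestID)
func (r *requestTracker) Remove(vdr ids.ShortID, requestID uint32) (ids.ID, bool) {
	k := reqKey(vdr, requestID)
	containerID, ok := r.reqs[k]
	delete(r.reqs, k)
	return containerID, ok
}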
@ -56,14 +64,14 @@ func (b *bootstrapper) Initialize(config BootstrapConfig) error {
return nil
}
// CurrentAcceptedFrontier ...
// CurrentAcceptedFrontier returns the last accepted block
func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set {
acceptedFrontier := ids.Set{}
acceptedFrontier.Add(b.VM.LastAccepted())
return acceptedFrontier
}
// FilterAccepted ...
// FilterAccepted returns the blocks in [containerIDs] that we have accepted
func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
acceptedIDs := ids.Set{}
for _, blkID := range containerIDs.List() {
@ -76,106 +84,109 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
// ForceAccepted ...
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
if err := b.VM.Bootstrapping(); err != nil {
return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
err)
}
for _, blkID := range acceptedContainerIDs.List() {
if err := b.fetch(blkID); err != nil {
if blk, err := b.VM.GetBlock(blkID); err == nil {
if err := b.process(blk); err != nil {
return err
}
} else if err := b.fetch(blkID); err != nil {
return err
}
}
if numPending := b.pending.Len(); numPending == 0 {
// TODO: This typically indicates bootstrapping has failed, so this
// should be handled appropriately
b.processedStartingAcceptedFrontier = true
if numPending := b.outstandingRequests.Len(); numPending == 0 {
return b.finish()
}
return nil
}
// Put ...
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
b.BootstrapConfig.Context.Log.Verbo("Put called for blkID %s", blkID)
blk, err := b.VM.ParseBlock(blkBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
b.GetFailed(vdr, requestID)
return nil
}
if !b.pending.Contains(blk.ID()) {
b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested block:\n%s",
vdr,
formatting.DumpBytes{Bytes: blkBytes})
b.GetFailed(vdr, requestID)
return nil
}
return b.addBlock(blk)
}
// GetFailed ...
func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) error {
blkID, ok := b.blkReqs.Remove(vdr, requestID)
if !ok {
b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
vdr)
return nil
}
b.sendRequest(blkID)
return nil
}
// Get block [blkID] and its ancestors from a validator
func (b *bootstrapper) fetch(blkID ids.ID) error {
if b.pending.Contains(blkID) {
// Make sure we haven't already requested this block
if b.outstandingRequests.Contains(blkID) {
return nil
}
blk, err := b.VM.GetBlock(blkID)
if err != nil {
b.sendRequest(blkID)
// Make sure we don't already have this block
if _, err := b.VM.GetBlock(blkID); err == nil {
return nil
}
return b.storeBlock(blk)
}
func (b *bootstrapper) sendRequest(blkID ids.ID) {
validators := b.BootstrapConfig.Validators.Sample(1)
validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
if len(validators) == 0 {
b.BootstrapConfig.Context.Log.Error("Dropping request for %s as there are no validators", blkID)
return
return fmt.Errorf("Dropping request for %s as there are no validators", blkID)
}
validatorID := validators[0].ID()
b.RequestID++
b.blkReqs.RemoveAny(blkID)
b.blkReqs.Add(validatorID, b.RequestID, blkID)
b.pending.Add(blkID)
b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, blkID)
b.numPendingRequests.Set(float64(b.pending.Len()))
}
func (b *bootstrapper) addBlock(blk snowman.Block) error {
if err := b.storeBlock(blk); err != nil {
return err
}
if numPending := b.pending.Len(); numPending == 0 {
return b.finish()
}
b.outstandingRequests.Add(validatorID, b.RequestID, blkID)
b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, blkID) // request block and ancestors
return nil
}
func (b *bootstrapper) storeBlock(blk snowman.Block) error {
// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
// with request ID [requestID]
func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, blks [][]byte) error {
if lenBlks := len(blks); lenBlks > common.MaxContainersPerMultiPut {
b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains more than maximum number of blocks", vdr, requestID)
return b.GetAncestorsFailed(vdr, requestID)
} else if lenBlks == 0 {
b.BootstrapConfig.Context.Log.Debug("MultiPut(%s, %d) contains no blocks", vdr, requestID)
return b.GetAncestorsFailed(vdr, requestID)
}
// Make sure this is in response to a request we made
wantedBlkID, ok := b.outstandingRequests.Remove(vdr, requestID)
if !ok { // this message isn't in response to a request we made
b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
return nil
}
wantedBlk, err := b.VM.ParseBlock(blks[0]) // the block we requested
if err != nil {
b.BootstrapConfig.Context.Log.Debug("Failed to parse requested block %s: %w", wantedBlkID, err)
return b.fetch(wantedBlkID)
} else if actualID := wantedBlk.ID(); !actualID.Equals(wantedBlkID) {
b.BootstrapConfig.Context.Log.Debug("expected the first block to be the requested block, %s, but is %s", wantedBlk, actualID)
return b.fetch(wantedBlkID)
}
for _, blkBytes := range blks {
if _, err := b.VM.ParseBlock(blkBytes); err != nil { // persists the block
b.BootstrapConfig.Context.Log.Debug("Failed to parse block: %w", err)
b.BootstrapConfig.Context.Log.Verbo("block: %s", formatting.DumpBytes{Bytes: blkBytes})
}
}
return b.process(wantedBlk)
}
// GetAncestorsFailed is called when a GetAncestors message we sent fails
func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) error {
blkID, ok := b.outstandingRequests.Remove(vdr, requestID)
if !ok {
b.BootstrapConfig.Context.Log.Debug("GetAncestorsFailed(%s, %d) called but there was no outstanding request to this validator with this ID", vdr, requestID)
return nil
}
// Send another request for this
return b.fetch(blkID)
}
// process a block
func (b *bootstrapper) process(blk snowman.Block) error {
status := blk.Status()
blkID := blk.ID()
for status == choices.Processing {
b.pending.Remove(blkID)
b.numProcessed++ // Progress tracker
if b.numProcessed%common.StatusUpdateFrequency == 0 { // Periodically print progress
b.BootstrapConfig.Context.Log.Info("processed %d blocks", b.numProcessed)
}
if err := b.Blocked.Push(&blockJob{
numAccepted: b.numBootstrapped,
numDropped: b.numDropped,
@ -188,6 +199,7 @@ func (b *bootstrapper) storeBlock(blk snowman.Block) error {
return err
}
// Process this block's parent
blk = blk.Parent()
status = blk.Status()
blkID = blk.ID()
@ -195,15 +207,16 @@ func (b *bootstrapper) storeBlock(blk snowman.Block) error {
switch status := blk.Status(); status {
case choices.Unknown:
b.sendRequest(blkID)
case choices.Accepted:
b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", blkID)
case choices.Rejected:
if err := b.fetch(blkID); err != nil {
return err
}
case choices.Rejected: // Should never happen
return fmt.Errorf("bootstrapping wants to accept %s, however it was previously rejected", blkID)
}
numPending := b.pending.Len()
b.numPendingRequests.Set(float64(numPending))
if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
return b.finish()
}
return nil
}
@ -211,11 +224,17 @@ func (b *bootstrapper) finish() error {
if b.finished {
return nil
}
b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching blocks. executing state transitions...")
if err := b.executeAll(b.Blocked, b.numBlocked); err != nil {
return err
}
if err := b.VM.Bootstrapped(); err != nil {
return fmt.Errorf("failed to notify VM that bootstrapping has finished: %w",
err)
}
// Start consensus
if err := b.onFinished(); err != nil {
return err

View File

@ -52,7 +52,13 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
peerID := peer.ID()
peers.Add(peer)
handler.Initialize(engine, make(chan common.Message), 1)
handler.Initialize(
engine,
make(chan common.Message),
1,
"",
prometheus.NewRegistry(),
)
timeouts.Initialize(0)
router.Initialize(ctx.Log, timeouts, time.Hour, time.Second)
@ -72,8 +78,9 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
}, peerID, sender, vm
}
// Single node in the accepted frontier; no need to fetch parent
func TestBootstrapperSingleFrontier(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
config, _, _, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
@ -98,6 +105,8 @@ func TestBootstrapperSingleFrontier(t *testing.T) {
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID1)
@ -105,57 +114,41 @@ func TestBootstrapperSingleFrontier(t *testing.T) {
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID1):
return nil, errUnknownBlock
return blk1, nil
case blkID.Equals(blkID0):
return blk0, nil
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
reqID := new(uint32)
sender.GetF = func(vdr ids.ShortID, innerReqID uint32, blkID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case blkID.Equals(blkID1):
default:
t.Fatalf("Requested unknown vertex")
}
*reqID = innerReqID
}
bs.ForceAccepted(acceptedIDs)
vm.GetBlockF = nil
sender.GetF = nil
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes1):
return blk1, nil
case bytes.Equal(blkBytes, blkBytes0):
return blk0, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
vm.CantBootstrapping = false
vm.CantBootstrapped = false
bs.Put(peerID, *reqID, blkID1, blkBytes1)
vm.ParseBlockF = nil
bs.onFinished = nil
if !*finished {
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should finish
t.Fatal(err)
} else if !*finished {
t.Fatalf("Bootstrapping should have finished")
}
if blk1.Status() != choices.Accepted {
} else if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
}
// Requests the unknown block and gets back a MultiPut with unexpected request ID.
// Requests again and gets response from unexpected peer.
// Requests again and gets an unexpected block.
// Requests again and gets the expected block.
func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
@ -167,103 +160,6 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
blk1 := &Blk{
parent: blk0,
id: blkID1,
height: 1,
status: choices.Processing,
bytes: blkBytes1,
}
blk2 := &Blk{
parent: blk1,
id: blkID2,
height: 2,
status: choices.Processing,
bytes: blkBytes2,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID1)
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID1):
return nil, errUnknownBlock
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
requestID := new(uint32)
sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1):
default:
t.Fatalf("Requested unknown block")
}
*requestID = reqID
}
bs.ForceAccepted(acceptedIDs)
vm.GetBlockF = nil
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes1):
return blk1, nil
case bytes.Equal(blkBytes, blkBytes2):
return blk2, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
bs.Put(peerID, *requestID, blkID2, blkBytes2)
bs.Put(peerID, *requestID, blkID1, blkBytes1)
vm.ParseBlockF = nil
if !*finished {
t.Fatalf("Bootstrapping should have finished")
}
if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
if blk2.Status() != choices.Processing {
t.Fatalf("Block should be processing")
}
}
func TestBootstrapperDependency(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkID2 := ids.Empty.Prefix(2)
blkBytes0 := []byte{0}
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blk0 := &Blk{
id: blkID0,
height: 0,
@ -288,42 +184,36 @@ func TestBootstrapperDependency(t *testing.T) {
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID2)
parsedBlk1 := false
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID0):
return blk0, nil
case blkID.Equals(blkID1):
if parsedBlk1 {
return blk1, nil
}
return nil, errUnknownBlock
case blkID.Equals(blkID2):
return blk2, nil
default:
t.Fatalf("Requested unknown block")
panic("Requested unknown block")
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
requestID := new(uint32)
sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1):
default:
t.Fatalf("Requested unknown block")
}
*requestID = reqID
}
bs.ForceAccepted(acceptedIDs)
vm.GetBlockF = nil
sender.GetF = nil
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes0):
return blk0, nil
case bytes.Equal(blkBytes, blkBytes1):
blk1.status = choices.Processing
parsedBlk1 = true
return blk1, nil
case bytes.Equal(blkBytes, blkBytes2):
return blk2, nil
@ -332,20 +222,325 @@ func TestBootstrapperDependency(t *testing.T) {
return nil, errUnknownBlock
}
blk1.status = choices.Processing
requestID := new(uint32)
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1):
default:
t.Fatalf("should have requested blk1")
}
*requestID = reqID
}
vm.CantBootstrapping = false
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk1
t.Fatal(err)
}
oldReqID := *requestID
if err := bs.MultiPut(peerID, *requestID+1, [][]byte{blkBytes1}); err != nil { // respond with wrong request ID
t.Fatal(err)
} else if oldReqID != *requestID {
t.Fatal("should not have sent new request")
}
if err := bs.MultiPut(ids.NewShortID([20]byte{1, 2, 3}), *requestID, [][]byte{blkBytes1}); err != nil { // respond from wrong peer
t.Fatal(err)
} else if oldReqID != *requestID {
t.Fatal("should not have sent new request")
}
if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes0}); err != nil { // respond with wrong block
t.Fatal(err)
} else if oldReqID == *requestID {
t.Fatal("should have sent new request")
}
vm.CantBootstrapped = false
if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with right block
t.Fatal(err)
} else if !*finished {
t.Fatalf("Bootstrapping should have finished")
} else if blk0.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk2.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
}
// There are multiple needed blocks and MultiPut returns one at a time
func TestBootstrapperPartialFetch(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkID2 := ids.Empty.Prefix(2)
blkID3 := ids.Empty.Prefix(3)
blkBytes0 := []byte{0}
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blkBytes3 := []byte{3}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
blk1 := &Blk{
parent: blk0,
id: blkID1,
height: 1,
status: choices.Unknown,
bytes: blkBytes1,
}
blk2 := &Blk{
parent: blk1,
id: blkID2,
height: 2,
status: choices.Unknown,
bytes: blkBytes2,
}
blk3 := &Blk{
parent: blk2,
id: blkID3,
height: 3,
status: choices.Processing,
bytes: blkBytes3,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
bs.Put(peerID, *requestID, blkID1, blkBytes1)
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID3)
parsedBlk1 := false
parsedBlk2 := false
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID0):
return blk0, nil
case blkID.Equals(blkID1):
if parsedBlk1 {
return blk1, nil
}
return nil, errUnknownBlock
case blkID.Equals(blkID2):
if parsedBlk2 {
return blk2, nil
}
return nil, errUnknownBlock
case blkID.Equals(blkID3):
return blk3, nil
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes0):
return blk0, nil
case bytes.Equal(blkBytes, blkBytes1):
blk1.status = choices.Processing
parsedBlk1 = true
return blk1, nil
case bytes.Equal(blkBytes, blkBytes2):
blk2.status = choices.Processing
parsedBlk2 = true
return blk2, nil
case bytes.Equal(blkBytes, blkBytes3):
return blk3, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
requestID := new(uint32)
requested := ids.Empty
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1), vtxID.Equals(blkID2):
default:
t.Fatalf("should have requested blk1 or blk2")
}
*requestID = reqID
requested = vtxID
}
vm.CantBootstrapping = false
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2
t.Fatal(err)
}
if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes2}); err != nil { // respond with blk2
t.Fatal(err)
} else if !requested.Equals(blkID1) {
t.Fatal("should have requested blk1")
}
vm.CantBootstrapped = false
if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes1}); err != nil { // respond with blk1
t.Fatal(err)
} else if !requested.Equals(blkID1) {
t.Fatal("should not have requested another block")
}
if !*finished {
t.Fatalf("Bootstrapping should have finished")
}
if blk1.Status() != choices.Accepted {
} else if blk0.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk2.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
if blk2.Status() != choices.Accepted {
}
// There are multiple needed blocks and MultiPut returns all at once
func TestBootstrapperMultiPut(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkID2 := ids.Empty.Prefix(2)
blkID3 := ids.Empty.Prefix(3)
blkBytes0 := []byte{0}
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blkBytes3 := []byte{3}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
blk1 := &Blk{
parent: blk0,
id: blkID1,
height: 1,
status: choices.Unknown,
bytes: blkBytes1,
}
blk2 := &Blk{
parent: blk1,
id: blkID2,
height: 2,
status: choices.Unknown,
bytes: blkBytes2,
}
blk3 := &Blk{
parent: blk2,
id: blkID3,
height: 3,
status: choices.Processing,
bytes: blkBytes3,
}
vm.CantBootstrapping = false
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID3)
parsedBlk1 := false
parsedBlk2 := false
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID0):
return blk0, nil
case blkID.Equals(blkID1):
if parsedBlk1 {
return blk1, nil
}
return nil, errUnknownBlock
case blkID.Equals(blkID2):
if parsedBlk2 {
return blk2, nil
}
return nil, errUnknownBlock
case blkID.Equals(blkID3):
return blk3, nil
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes0):
return blk0, nil
case bytes.Equal(blkBytes, blkBytes1):
blk1.status = choices.Processing
parsedBlk1 = true
return blk1, nil
case bytes.Equal(blkBytes, blkBytes2):
blk2.status = choices.Processing
parsedBlk2 = true
return blk2, nil
case bytes.Equal(blkBytes, blkBytes3):
return blk3, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
requestID := new(uint32)
requested := ids.Empty
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1), vtxID.Equals(blkID2):
default:
t.Fatalf("should have requested blk1 or blk2")
}
*requestID = reqID
requested = vtxID
}
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk2
t.Fatal(err)
}
vm.CantBootstrapped = false
if err := bs.MultiPut(peerID, *requestID, [][]byte{blkBytes2, blkBytes1}); err != nil { // respond with blk2 and blk1
t.Fatal(err)
} else if !requested.Equals(blkID2) {
t.Fatal("should not have requested another block")
}
if !*finished {
t.Fatalf("Bootstrapping should have finished")
} else if blk0.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk2.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
}
@ -410,6 +605,7 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
vm.CantBootstrapping = false
accepted := bs.FilterAccepted(blkIDs)
@ -426,164 +622,3 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
t.Fatalf("Blk shouldn't be accepted")
}
}
func TestBootstrapperPartialFetch(t *testing.T) {
config, _, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkBytes0 := []byte{0}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
acceptedIDs := ids.Set{}
acceptedIDs.Add(
blkID0,
blkID1,
)
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID0):
return blk0, nil
case blkID.Equals(blkID1):
return nil, errUnknownBlock
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
sender.CantGet = false
bs.onFinished = func() error { return nil }
bs.ForceAccepted(acceptedIDs)
if bs.finished {
t.Fatalf("should have requested a block")
}
if bs.pending.Len() != 1 {
t.Fatalf("wrong number pending")
}
}
func TestBootstrapperWrongIDByzantineResponse(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkID2 := ids.Empty.Prefix(2)
blkBytes0 := []byte{0}
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
blk1 := &Blk{
parent: blk0,
id: blkID1,
height: 1,
status: choices.Processing,
bytes: blkBytes1,
}
blk2 := &Blk{
parent: blk1,
id: blkID2,
height: 2,
status: choices.Processing,
bytes: blkBytes2,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID1)
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID1):
return nil, errUnknownBlock
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
requestID := new(uint32)
sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1):
default:
t.Fatalf("Requested unknown block")
}
*requestID = reqID
}
bs.ForceAccepted(acceptedIDs)
vm.GetBlockF = nil
sender.GetF = nil
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes2):
return blk2, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
sender.CantGet = false
bs.Put(peerID, *requestID, blkID1, blkBytes2)
sender.CantGet = true
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes1):
return blk1, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
bs.Put(peerID, *requestID, blkID1, blkBytes1)
vm.ParseBlockF = nil
if !*finished {
t.Fatalf("Bootstrapping should have finished")
}
if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
if blk2.Status() != choices.Processing {
t.Fatalf("Block should be processing")
}
}

View File

@ -4,7 +4,10 @@
package snowman
import (
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/snowman"
@ -14,6 +17,12 @@ import (
"github.com/ava-labs/gecko/utils/wrappers"
)
const (
// TODO define this constant in one place rather than here and in avalanche
// Max total size, in bytes, of the containers in a MultiPut message
maxContainersLen = int(4 * network.DefaultMaxMessageSize / 5)
)
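For intuition only: if DefaultMaxMessageSize were, hypothetically, 2 MiB, this budget would come to roughly 1.6 MiB of container bytes, leaving a fifth of the message for per-container length prefixes and other framing:
const exampleMaxMessage = 1 << 21                        // hypothetical 2 MiB cap; not the real constant
const exampleContainerBudget = 4 * exampleMaxMessage / 5 // 1,677,721 bytes ≈ 1.6 MiB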
// Transitive implements the Engine interface by attempting to fetch all
// transitive dependencies.
type Transitive struct {
@ -44,7 +53,7 @@ type Transitive struct {
// Initialize implements the Engine interface
func (t *Transitive) Initialize(config Config) error {
config.Context.Log.Info("Initializing Snowman consensus")
config.Context.Log.Info("initializing consensus engine")
t.Config = config
t.metrics.Initialize(
@ -78,7 +87,7 @@ func (t *Transitive) finishBootstrapping() error {
// oracle block
tail, err := t.Config.VM.GetBlock(tailID)
if err != nil {
t.Config.Context.Log.Error("Failed to get last accepted block due to: %s", err)
t.Config.Context.Log.Error("failed to get last accepted block due to: %s", err)
return err
}
@ -96,7 +105,7 @@ func (t *Transitive) finishBootstrapping() error {
t.Config.VM.SetPreference(tailID)
}
t.Config.Context.Log.Info("Bootstrapping finished with %s as the last accepted block", tailID)
t.Config.Context.Log.Info("bootstrapping finished with %s as the last accepted block", tailID)
return nil
}
@ -105,18 +114,18 @@ func (t *Transitive) Gossip() error {
blkID := t.Config.VM.LastAccepted()
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
t.Config.Context.Log.Warn("dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
return nil
}
t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", blkID)
t.Config.Context.Log.Verbo("gossiping %s as accepted to the network", blkID)
t.Config.Sender.Gossip(blkID, blk.Bytes())
return nil
}
// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() error {
t.Config.Context.Log.Info("Shutting down Snowman consensus")
t.Config.Context.Log.Info("shutting down consensus engine")
return t.Config.VM.Shutdown()
}
@ -130,9 +139,7 @@ func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error
// If we failed to get the block, that means either an unexpected error
// has occurred, the validator is not following the protocol, or the
// block has been pruned.
t.Config.Context.Log.Warn("Get called for blockID %s errored with %s",
blkID,
err)
t.Config.Context.Log.Debug("Get(%s, %d, %s) failed with: %s", vdr, requestID, blkID, err)
return nil
}
@ -141,22 +148,51 @@ func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) error
return nil
}
// GetAncestors implements the Engine interface
func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.ID) error {
startTime := time.Now()
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil { // Don't have the block. Drop this request.
t.Config.Context.Log.Verbo("couldn't get block %s. dropping GetAncestors(%s, %d, %s)", blkID, vdr, requestID, blkID)
return nil
}
ancestorsBytes := make([][]byte, 1, common.MaxContainersPerMultiPut) // First elt is byte repr. of blk, then its parent, then grandparent, etc.
ancestorsBytes[0] = blk.Bytes()
ancestorsBytesLen := len(blk.Bytes()) + wrappers.IntLen // length, in bytes, of all elements of ancestors
for numFetched := 1; numFetched < common.MaxContainersPerMultiPut && time.Since(startTime) < common.MaxTimeFetchingAncestors; numFetched++ {
blk = blk.Parent()
if blk.Status() == choices.Unknown {
break
}
blkBytes := blk.Bytes()
// Ensure response size isn't too large. Include wrappers.IntLen because each container
// in the message is prefixed with its length, which is represented by an int.
if newLen := wrappers.IntLen + ancestorsBytesLen + len(blkBytes); newLen < maxContainersLen {
ancestorsBytes = append(ancestorsBytes, blkBytes)
ancestorsBytesLen = newLen
} else { // reached maximum response size
break
}
}
t.Config.Sender.MultiPut(vdr, requestID, ancestorsBytes)
return nil
}
// Put implements the Engine interface
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error {
t.Config.Context.Log.Verbo("Put called for blockID %s", blkID)
// if the engine hasn't been bootstrapped, forward the request to the
// bootstrapper
// bootstrapping isn't done --> we didn't send any gets --> this put is invalid
if !t.bootstrapped {
return t.bootstrapper.Put(vdr, requestID, blkID, blkBytes)
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
return nil
}
blk, err := t.Config.VM.ParseBlock(blkBytes)
if err != nil {
t.Config.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
t.Config.Context.Log.Debug("failed to parse block %s: %s", blkID, err)
t.Config.Context.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
// because GetFailed doesn't utilize the assumption that we actually
// sent a Get message, we can safely call GetFailed here to potentially
// abandon the request.
@ -174,10 +210,10 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkByt
// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
// if the engine hasn't been bootstrapped, forward the request to the
// bootstrapper
// not done bootstrapping --> didn't send a get --> this message is invalid
if !t.bootstrapped {
return t.bootstrapper.GetFailed(vdr, requestID)
t.Config.Context.Log.Debug("dropping GetFailed(%s, %d) due to bootstrapping")
return nil
}
// we don't use the assumption that this function is called after a failed
@ -185,8 +221,7 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) error {
// and also get what the request was for if it exists
blkID, ok := t.blkReqs.Remove(vdr, requestID)
if !ok {
t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
vdr)
t.Config.Context.Log.Debug("getFailed(%s, %d) called without having sent corresponding Get", vdr, requestID)
return nil
}
@ -201,8 +236,7 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID)
// if the engine hasn't been bootstrapped, we aren't ready to respond to
// queries
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping",
blkID)
t.Config.Context.Log.Debug("dropping PullQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
return nil
}
@ -234,16 +268,15 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID,
// if the engine hasn't been bootstrapped, we aren't ready to respond to
// queries
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", blkID)
t.Config.Context.Log.Debug("dropping PushQuery(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID)
return nil
}
blk, err := t.Config.VM.ParseBlock(blkBytes)
// If the parsing fails, we just drop the request, as we didn't ask for it
if err != nil {
t.Config.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
t.Config.Context.Log.Debug("failed to parse block %s: %s", blkID, err)
t.Config.Context.Log.Verbo("block:\n%s", formatting.DumpBytes{Bytes: blkBytes})
return nil
}
@ -264,17 +297,13 @@ func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID,
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) error {
// if the engine hasn't been bootstrapped, we shouldn't be receiving chits
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
t.Config.Context.Log.Debug("dropping Chits(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}
// Since this is snowman, there should only be one ID in the vote set
if votes.Len() != 1 {
t.Config.Context.Log.Debug("Chits was called with the wrong number of votes %d. ValidatorID: %s, RequestID: %d",
votes.Len(),
vdr,
requestID)
t.Config.Context.Log.Debug("Chits(%s, %d) was called with %d votes (expected 1)", vdr, requestID, votes.Len())
// because QueryFailed doesn't utilize the assumption that we actually
// sent a Query message, we can safely call QueryFailed here to
// potentially abandon the request.
@ -282,7 +311,7 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) err
}
vote := votes.List()[0]
t.Config.Context.Log.Verbo("Chit was called. RequestID: %v. Vote: %s", requestID, vote)
t.Config.Context.Log.Verbo("Chits(%s, %d) contains vote for %s", vdr, requestID, vote)
v := &voter{
t: t,
@ -310,7 +339,7 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) err
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
// if the engine hasn't been bootstrapped, we won't have sent a query
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping QueryFailed due to bootstrapping")
t.Config.Context.Log.Warn("dropping QueryFailed(%s, %d) due to bootstrapping", vdr, requestID)
return nil
}
@ -326,24 +355,24 @@ func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) error {
func (t *Transitive) Notify(msg common.Message) error {
// if the engine hasn't been bootstrapped, we shouldn't be issuing blocks
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
t.Config.Context.Log.Debug("dropping Notify due to bootstrapping")
return nil
}
t.Config.Context.Log.Verbo("Snowman engine notified of %s from the vm", msg)
t.Config.Context.Log.Verbo("snowman engine notified of %s from the vm", msg)
switch msg {
case common.PendingTxs:
// the pending txs message means we should attempt to build a block.
blk, err := t.Config.VM.BuildBlock()
if err != nil {
t.Config.Context.Log.Verbo("VM.BuildBlock errored with %s", err)
t.Config.Context.Log.Debug("VM.BuildBlock errored with: %s", err)
return nil
}
// a newly created block is expected to be processing. If this check
// fails, there is potentially an error in the VM this engine is running
if status := blk.Status(); status != choices.Processing {
t.Config.Context.Log.Warn("Attempting to issue a block with status: %s, expected Processing", status)
t.Config.Context.Log.Warn("attempting to issue a block with status: %s, expected Processing", status)
}
// the newly created block should be built on top of the preferred
@ -351,7 +380,7 @@ func (t *Transitive) Notify(msg common.Message) error {
// confirmed.
parentID := blk.Parent().ID()
if pref := t.Consensus.Preference(); !parentID.Equals(pref) {
t.Config.Context.Log.Warn("Built block with parent: %s, expected %s", parentID, pref)
t.Config.Context.Log.Warn("built block with parent: %s, expected %s", parentID, pref)
}
added, err := t.insertAll(blk)
@ -361,12 +390,12 @@ func (t *Transitive) Notify(msg common.Message) error {
// inserting the block shouldn't have any missing dependencies
if added {
t.Config.Context.Log.Verbo("Successfully issued new block from the VM")
t.Config.Context.Log.Verbo("successfully issued new block from the VM")
} else {
t.Config.Context.Log.Warn("VM.BuildBlock returned a block that is pending for ancestors")
}
default:
t.Config.Context.Log.Warn("Unexpected message from the VM: %s", msg)
t.Config.Context.Log.Warn("unexpected message from the VM: %s", msg)
}
return nil
}
@ -476,7 +505,7 @@ func (t *Transitive) insert(blk snowman.Block) error {
// block on the parent if needed
if parent := blk.Parent(); !t.Consensus.Issued(parent) {
parentID := parent.ID()
t.Config.Context.Log.Verbo("Block waiting for parent %s", parentID)
t.Config.Context.Log.Verbo("block %s waiting for parent %s", blkID, parentID)
i.deps.Add(parentID)
}
@ -494,10 +523,9 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
return
}
t.Config.Context.Log.Verbo("Sending Get message for %s", blkID)
t.RequestID++
t.blkReqs.Add(vdr, t.RequestID, blkID)
t.Config.Context.Log.Verbo("sending Get(%s, %d, %s)", vdr, t.RequestID, blkID)
t.Config.Sender.Get(vdr, t.RequestID, blkID)
// Tracks performance statistics
@ -506,7 +534,7 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
// send a pull request for this block ID
func (t *Transitive) pullSample(blkID ids.ID) {
t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
t.Config.Context.Log.Verbo("about to sample from: %s", t.Config.Validators)
p := t.Consensus.Parameters()
vdrs := t.Config.Validators.Sample(p.K)
vdrSet := ids.ShortSet{}
@ -515,13 +543,13 @@ func (t *Transitive) pullSample(blkID ids.ID) {
}
if numVdrs := len(vdrs); numVdrs != p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
return
}
t.RequestID++
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
return
}
@ -530,7 +558,7 @@ func (t *Transitive) pullSample(blkID ids.ID) {
// send a push request for this block
func (t *Transitive) pushSample(blk snowman.Block) {
t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
t.Config.Context.Log.Verbo("about to sample from: %s", t.Config.Validators)
p := t.Consensus.Parameters()
vdrs := t.Config.Validators.Sample(p.K)
vdrSet := ids.ShortSet{}
@ -540,13 +568,13 @@ func (t *Transitive) pushSample(blk snowman.Block) {
blkID := blk.ID()
if numVdrs := len(vdrs); numVdrs != p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to an insufficient number of validators", blkID)
return
}
t.RequestID++
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
t.Config.Context.Log.Error("query for %s was dropped due to use of a duplicated requestID", blkID)
return
}
@ -564,7 +592,7 @@ func (t *Transitive) deliver(blk snowman.Block) error {
t.pending.Remove(blkID)
if err := blk.Verify(); err != nil {
t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
t.Config.Context.Log.Debug("block failed verification due to %s, dropping block", err)
// if verify fails, then all descendants are also invalid
t.blocked.Abandon(blkID)
@ -572,7 +600,7 @@ func (t *Transitive) deliver(blk snowman.Block) error {
return t.errs.Err
}
t.Config.Context.Log.Verbo("Adding block to consensus: %s", blkID)
t.Config.Context.Log.Verbo("adding block to consensus: %s", blkID)
t.Consensus.Add(blk)
// Add all the oracle blocks if they exist. We call verify on all the blocks
@ -584,7 +612,7 @@ func (t *Transitive) deliver(blk snowman.Block) error {
case OracleBlock:
for _, blk := range blk.Options() {
if err := blk.Verify(); err != nil {
t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
t.Config.Context.Log.Debug("block failed verification due to %s, dropping block", err)
dropped = append(dropped, blk)
} else {
t.Consensus.Add(blk)

View File

@ -45,7 +45,7 @@ func (v *voter) Update() {
// must be bubbled to the nearest valid block
results = v.bubbleVotes(results)
v.t.Config.Context.Log.Verbo("Finishing poll [%d] with:\n%s", v.requestID, &results)
v.t.Config.Context.Log.Debug("Finishing poll [%d] with:\n%s", v.requestID, &results)
if err := v.t.Consensus.RecordPoll(results); err != nil {
v.t.errs.Add(err)
return
@ -54,11 +54,11 @@ func (v *voter) Update() {
v.t.Config.VM.SetPreference(v.t.Consensus.Preference())
if v.t.Consensus.Finalized() {
v.t.Config.Context.Log.Verbo("Snowman engine can quiesce")
v.t.Config.Context.Log.Debug("Snowman engine can quiesce")
return
}
v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce")
v.t.Config.Context.Log.Debug("Snowman engine can't quiesce")
v.t.repoll()
}

View File

@ -4,14 +4,19 @@
package router
import (
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/prometheus/client_golang/prometheus"
)
// Handler passes incoming messages from the network to the consensus engine
// (Actually, it receives the incoming messages from a ChainRouter, but same difference)
type Handler struct {
metrics
msgs chan message
closed chan struct{}
engine common.Engine
@ -21,7 +26,14 @@ type Handler struct {
}
// Initialize this consensus handler
func (h *Handler) Initialize(engine common.Engine, msgChan <-chan common.Message, bufferSize int) {
func (h *Handler) Initialize(
engine common.Engine,
msgChan <-chan common.Message,
bufferSize int,
namespace string,
metrics prometheus.Registerer,
) {
h.metrics.Initialize(namespace, metrics)
h.msgs = make(chan message, bufferSize)
h.closed = make(chan struct{})
h.engine = engine
@ -47,6 +59,7 @@ func (h *Handler) Dispatch() {
if !ok {
return
}
h.metrics.pending.Dec()
if closing {
log.Debug("dropping message due to closing:\n%s", msg)
continue
@ -73,6 +86,7 @@ func (h *Handler) Dispatch() {
// Returns true iff this consensus handler (and its associated engine) should shut down
// (due to receipt of a shutdown message)
func (h *Handler) dispatchMsg(msg message) bool {
startTime := time.Now()
ctx := h.engine.Context()
ctx.Lock.Lock()
@ -86,36 +100,61 @@ func (h *Handler) dispatchMsg(msg message) bool {
switch msg.messageType {
case getAcceptedFrontierMsg:
err = h.engine.GetAcceptedFrontier(msg.validatorID, msg.requestID)
h.getAcceptedFrontier.Observe(float64(time.Now().Sub(startTime)))
case acceptedFrontierMsg:
err = h.engine.AcceptedFrontier(msg.validatorID, msg.requestID, msg.containerIDs)
h.acceptedFrontier.Observe(float64(time.Now().Sub(startTime)))
case getAcceptedFrontierFailedMsg:
err = h.engine.GetAcceptedFrontierFailed(msg.validatorID, msg.requestID)
h.getAcceptedFrontierFailed.Observe(float64(time.Now().Sub(startTime)))
case getAcceptedMsg:
err = h.engine.GetAccepted(msg.validatorID, msg.requestID, msg.containerIDs)
h.getAccepted.Observe(float64(time.Now().Sub(startTime)))
case acceptedMsg:
err = h.engine.Accepted(msg.validatorID, msg.requestID, msg.containerIDs)
h.accepted.Observe(float64(time.Now().Sub(startTime)))
case getAcceptedFailedMsg:
err = h.engine.GetAcceptedFailed(msg.validatorID, msg.requestID)
h.getAcceptedFailed.Observe(float64(time.Now().Sub(startTime)))
case getAncestorsMsg:
err = h.engine.GetAncestors(msg.validatorID, msg.requestID, msg.containerID)
h.getAncestors.Observe(float64(time.Now().Sub(startTime)))
case getAncestorsFailedMsg:
err = h.engine.GetAncestorsFailed(msg.validatorID, msg.requestID)
h.getAncestorsFailed.Observe(float64(time.Now().Sub(startTime)))
case multiPutMsg:
err = h.engine.MultiPut(msg.validatorID, msg.requestID, msg.containers)
h.multiPut.Observe(float64(time.Now().Sub(startTime)))
case getMsg:
err = h.engine.Get(msg.validatorID, msg.requestID, msg.containerID)
h.get.Observe(float64(time.Now().Sub(startTime)))
case getFailedMsg:
err = h.engine.GetFailed(msg.validatorID, msg.requestID)
h.getFailed.Observe(float64(time.Now().Sub(startTime)))
case putMsg:
err = h.engine.Put(msg.validatorID, msg.requestID, msg.containerID, msg.container)
h.put.Observe(float64(time.Now().Sub(startTime)))
case pushQueryMsg:
err = h.engine.PushQuery(msg.validatorID, msg.requestID, msg.containerID, msg.container)
h.pushQuery.Observe(float64(time.Now().Sub(startTime)))
case pullQueryMsg:
err = h.engine.PullQuery(msg.validatorID, msg.requestID, msg.containerID)
h.pullQuery.Observe(float64(time.Now().Sub(startTime)))
case queryFailedMsg:
err = h.engine.QueryFailed(msg.validatorID, msg.requestID)
h.queryFailed.Observe(float64(time.Now().Sub(startTime)))
case chitsMsg:
err = h.engine.Chits(msg.validatorID, msg.requestID, msg.containerIDs)
h.chits.Observe(float64(time.Now().Sub(startTime)))
case notifyMsg:
err = h.engine.Notify(msg.notification)
h.notify.Observe(float64(time.Now().Sub(startTime)))
case gossipMsg:
err = h.engine.Gossip()
h.gossip.Observe(float64(time.Now().Sub(startTime)))
case shutdownMsg:
err = h.engine.Shutdown()
h.shutdown.Observe(float64(time.Now().Sub(startTime)))
done = true
}
@ -128,6 +167,7 @@ func (h *Handler) dispatchMsg(msg message) bool {
// GetAcceptedFrontier passes a GetAcceptedFrontier message received from the
// network to the consensus engine.
func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getAcceptedFrontierMsg,
validatorID: validatorID,
@ -138,6 +178,7 @@ func (h *Handler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32)
// AcceptedFrontier passes an AcceptedFrontier message received from the network
// to the consensus engine.
func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: acceptedFrontierMsg,
validatorID: validatorID,
@ -149,6 +190,7 @@ func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, co
// GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received
// from the network to the consensus engine.
func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getAcceptedFrontierFailedMsg,
validatorID: validatorID,
@ -159,6 +201,7 @@ func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID u
// GetAccepted passes a GetAccepted message received from the
// network to the consensus engine.
func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getAcceptedMsg,
validatorID: validatorID,
@ -170,6 +213,7 @@ func (h *Handler) GetAccepted(validatorID ids.ShortID, requestID uint32, contain
// Accepted passes an Accepted message received from the network to the consensus
// engine.
func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: acceptedMsg,
validatorID: validatorID,
@ -181,6 +225,7 @@ func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerI
// GetAcceptedFailed passes a GetAcceptedFailed message received from the
// network to the consensus engine.
func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getAcceptedFailedMsg,
validatorID: validatorID,
@ -190,6 +235,7 @@ func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) {
// Get passes a Get message received from the network to the consensus engine.
func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getMsg,
validatorID: validatorID,
@ -198,8 +244,19 @@ func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids
}
}
// GetAncestors passes a GetAncestors message received from the network to the consensus engine.
func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getAncestorsMsg,
validatorID: validatorID,
requestID: requestID,
containerID: containerID,
}
}
// Put passes a Put message received from the network to the consensus engine.
func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: putMsg,
validatorID: validatorID,
@ -209,8 +266,19 @@ func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids
}
}
// MultiPut passes a MultiPut message received from the network to the consensus engine.
func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: multiPutMsg,
validatorID: validatorID,
requestID: requestID,
containers: containers,
}
}
// GetFailed passes a GetFailed message to the consensus engine.
func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getFailedMsg,
validatorID: validatorID,
@ -218,8 +286,18 @@ func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) {
}
}
// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine.
func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: getAncestorsFailedMsg,
validatorID: validatorID,
requestID: requestID,
}
}
// PushQuery passes a PushQuery message received from the network to the consensus engine.
func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: pushQueryMsg,
validatorID: validatorID,
@ -231,6 +309,7 @@ func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID i
// PullQuery passes a PullQuery message received from the network to the consensus engine.
func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: pullQueryMsg,
validatorID: validatorID,
@ -241,6 +320,7 @@ func (h *Handler) PullQuery(validatorID ids.ShortID, requestID uint32, blockID i
// Chits passes a Chits message received from the network to the consensus engine.
func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: chitsMsg,
validatorID: validatorID,
@ -251,6 +331,7 @@ func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set
// QueryFailed passes a QueryFailed message received from the network to the consensus engine.
func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: queryFailedMsg,
validatorID: validatorID,
@ -259,13 +340,20 @@ func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) {
}
// Gossip passes a gossip request to the consensus engine
func (h *Handler) Gossip() { h.msgs <- message{messageType: gossipMsg} }
func (h *Handler) Gossip() {
h.metrics.pending.Inc()
h.msgs <- message{messageType: gossipMsg}
}
// Shutdown shuts down the dispatcher
func (h *Handler) Shutdown() { h.msgs <- message{messageType: shutdownMsg} }
func (h *Handler) Shutdown() {
h.metrics.pending.Inc()
h.msgs <- message{messageType: shutdownMsg}
}
// Notify ...
func (h *Handler) Notify(msg common.Message) {
h.metrics.pending.Inc()
h.msgs <- message{
messageType: notifyMsg,
notification: msg,

View File

@ -31,6 +31,9 @@ const (
notifyMsg
gossipMsg
shutdownMsg
getAncestorsMsg
multiPutMsg
getAncestorsFailedMsg
)
type message struct {
@ -39,6 +42,7 @@ type message struct {
requestID uint32
containerID ids.ID
container []byte
containers [][]byte
containerIDs ids.Set
notification common.Message
}
@ -74,8 +78,12 @@ func (t msgType) String() string {
return "Get Accepted Failed Message"
case getMsg:
return "Get Message"
case getAncestorsMsg:
return "Get Ancestors Message"
case putMsg:
return "Put Message"
case multiPutMsg:
return "MultiPut Message"
case getFailedMsg:
return "Get Failed Message"
case pushQueryMsg:

View File

@ -0,0 +1,79 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package router
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/utils/timer"
"github.com/ava-labs/gecko/utils/wrappers"
)
func initHistogram(namespace, name string, registerer prometheus.Registerer, errs *wrappers.Errs) prometheus.Histogram {
histogram := prometheus.NewHistogram(
prometheus.HistogramOpts{
Namespace: namespace,
Name: name,
Help: "Time spent processing this request in nanoseconds",
Buckets: timer.NanosecondsBuckets,
})
if err := registerer.Register(histogram); err != nil {
errs.Add(fmt.Errorf("failed to register %s statistics due to %s", name, err))
}
return histogram
}
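// The registered series is named <namespace>_<name>; a handler initialized
// with namespace "gecko_handler" (illustrative, not from this change) would
// expose gecko_handler_get_ancestors with the nanosecond buckets above.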
type metrics struct {
pending prometheus.Gauge
getAcceptedFrontier, acceptedFrontier, getAcceptedFrontierFailed,
getAccepted, accepted, getAcceptedFailed,
getAncestors, multiPut, getAncestorsFailed,
get, put, getFailed,
pushQuery, pullQuery, chits, queryFailed,
notify,
gossip,
shutdown prometheus.Histogram
}
// Initialize implements the Engine interface
func (m *metrics) Initialize(namespace string, registerer prometheus.Registerer) error {
errs := wrappers.Errs{}
m.pending = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "pending",
Help: "Number of pending events",
})
if err := registerer.Register(m.pending); err != nil {
errs.Add(fmt.Errorf("failed to register pending statistics due to %s", err))
}
m.getAcceptedFrontier = initHistogram(namespace, "get_accepted_frontier", registerer, &errs)
m.acceptedFrontier = initHistogram(namespace, "accepted_frontier", registerer, &errs)
m.getAcceptedFrontierFailed = initHistogram(namespace, "get_accepted_frontier_failed", registerer, &errs)
m.getAccepted = initHistogram(namespace, "get_accepted", registerer, &errs)
m.accepted = initHistogram(namespace, "accepted", registerer, &errs)
m.getAcceptedFailed = initHistogram(namespace, "get_accepted_failed", registerer, &errs)
m.getAncestors = initHistogram(namespace, "get_ancestors", registerer, &errs)
m.multiPut = initHistogram(namespace, "multi_put", registerer, &errs)
m.getAncestorsFailed = initHistogram(namespace, "get_ancestors_failed", registerer, &errs)
m.get = initHistogram(namespace, "get", registerer, &errs)
m.put = initHistogram(namespace, "put", registerer, &errs)
m.getFailed = initHistogram(namespace, "get_failed", registerer, &errs)
m.pushQuery = initHistogram(namespace, "push_query", registerer, &errs)
m.pullQuery = initHistogram(namespace, "pull_query", registerer, &errs)
m.chits = initHistogram(namespace, "chits", registerer, &errs)
m.queryFailed = initHistogram(namespace, "query_failed", registerer, &errs)
m.notify = initHistogram(namespace, "notify", registerer, &errs)
m.gossip = initHistogram(namespace, "gossip", registerer, &errs)
m.shutdown = initHistogram(namespace, "shutdown", registerer, &errs)
return errs.Err
}

View File

@ -36,7 +36,9 @@ type ExternalRouter interface {
GetAccepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
PushQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
PullQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)
@ -47,5 +49,6 @@ type InternalRouter interface {
GetAcceptedFrontierFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
GetAncestorsFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
QueryFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
}

View File

@ -186,6 +186,20 @@ func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID ui
}
}
// GetAncestors routes an incoming GetAncestors message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
// The maximum number of ancestors to respond with is defined in snow/engine/common/bootstrapper.go
func (sr *ChainRouter) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
sr.lock.RLock()
defer sr.lock.RUnlock()
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAncestors(validatorID, requestID, containerID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
// Put routes an incoming Put request from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
@ -202,6 +216,22 @@ func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID ui
}
}
// MultiPut routes an incoming MultiPut message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
sr.lock.RLock()
defer sr.lock.RUnlock()
// This message came in response to a GetAncestors message from this node, and when we sent that
// message we set a timeout. Since we got a response, cancel the timeout.
sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.MultiPut(validatorID, requestID, containers)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
// GetFailed routes an incoming GetFailed message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) {
@ -216,6 +246,20 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques
}
}
// GetAncestorsFailed routes an incoming GetAncestorsFailed message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) {
sr.lock.RLock()
defer sr.lock.RUnlock()
sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAncestorsFailed(validatorID, requestID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
// PushQuery routes an incoming PushQuery request from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {

View File

@ -15,7 +15,10 @@ type ExternalSender interface {
Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)

View File

@ -93,6 +93,20 @@ func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.
s.sender.Get(validatorID, s.ctx.ChainID, requestID, containerID)
}
// GetAncestors sends a GetAncestors message
func (s *Sender) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
s.ctx.Log.Verbo("Sending GetAncestors to validator %s. RequestID: %d. ContainerID: %s", validatorID, requestID, containerID)
// Sending a GetAncestors to myself will always fail
if validatorID.Equals(s.ctx.NodeID) {
go s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
return
}
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID)
})
s.sender.GetAncestors(validatorID, s.ctx.ChainID, requestID, containerID)
}
// Put sends a Put message to the consensus engine running on the specified chain
// on the specified validator.
// The Put message signifies that this consensus engine is giving to the recipient
@ -102,6 +116,14 @@ func (s *Sender) Put(validatorID ids.ShortID, requestID uint32, containerID ids.
s.sender.Put(validatorID, s.ctx.ChainID, requestID, containerID, container)
}
// MultiPut sends a MultiPut message to the consensus engine running on the specified chain
// on the specified validator.
// The MultiPut message gives the recipient the contents of several containers.
func (s *Sender) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) {
s.ctx.Log.Verbo("Sending MultiPut to validator %s. RequestID: %d. NumContainers: %d", validatorID, requestID, len(containers))
s.sender.MultiPut(validatorID, s.ctx.ChainID, requestID, containers)
}
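// Taken together with GetAncestors above, the exchange is: the requester
// sends GetAncestors, arming a timeout that fires GetAncestorsFailed; the
// responder answers with a single MultiPut carrying the requested container
// and its ancestors (capped as noted in the ChainRouter); and the router
// cancels that timeout when the MultiPut arrives.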
// PushQuery sends a PushQuery message to the consensus engines running on the specified chains
// on the specified validators.
// The PushQuery message signifies that this consensus engine would like each validator to send

View File

@ -15,6 +15,7 @@ import (
"github.com/ava-labs/gecko/snow/networking/router"
"github.com/ava-labs/gecko/snow/networking/timeout"
"github.com/ava-labs/gecko/utils/logging"
"github.com/prometheus/client_golang/prometheus"
)
func TestSenderContext(t *testing.T) {
@ -58,7 +59,13 @@ func TestTimeout(t *testing.T) {
}
handler := router.Handler{}
handler.Initialize(&engine, nil, 1)
handler.Initialize(
&engine,
nil,
1,
"",
prometheus.NewRegistry(),
)
go handler.Dispatch()
chainRouter.AddChain(&handler)

View File

@ -16,7 +16,7 @@ type ExternalSenderTest struct {
CantGetAcceptedFrontier, CantAcceptedFrontier,
CantGetAccepted, CantAccepted,
CantGet, CantPut,
CantGet, CantGetAncestors, CantPut, CantMultiPut,
CantPullQuery, CantPushQuery, CantChits,
CantGossip bool
@ -24,8 +24,9 @@ type ExternalSenderTest struct {
AcceptedFrontierF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
GetAcceptedF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set)
AcceptedF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
GetF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
GetF, GetAncestorsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
PutF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
MultiPutF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte)
PushQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
PullQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)
ChitsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)
@ -39,7 +40,9 @@ func (s *ExternalSenderTest) Default(cant bool) {
s.CantGetAccepted = cant
s.CantAccepted = cant
s.CantGet = cant
s.CantGetAncestors = cant
s.CantPut = cant
s.CantMultiPut = cant
s.CantPullQuery = cant
s.CantPushQuery = cant
s.CantChits = cant
@ -111,6 +114,19 @@ func (s *ExternalSenderTest) Get(vdr ids.ShortID, chainID ids.ID, requestID uint
}
}
// GetAncestors calls GetAncestorsF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *ExternalSenderTest) GetAncestors(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxID ids.ID) {
if s.GetAncestorsF != nil {
s.GetAncestorsF(vdr, chainID, requestID, vtxID)
} else if s.CantGetAncestors && s.T != nil {
s.T.Fatalf("Unexpectedly called GetAncestors")
} else if s.CantGetAncestors && s.B != nil {
s.B.Fatalf("Unexpectedly called GetAncestors")
}
}
// Put calls PutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
@ -124,6 +140,19 @@ func (s *ExternalSenderTest) Put(vdr ids.ShortID, chainID ids.ID, requestID uint
}
}
// MultiPut calls MultiPutF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *ExternalSenderTest) MultiPut(vdr ids.ShortID, chainID ids.ID, requestID uint32, vtxs [][]byte) {
if s.MultiPutF != nil {
s.MultiPutF(vdr, chainID, requestID, vtxs)
} else if s.CantMultiPut && s.T != nil {
s.T.Fatalf("Unexpectedly called MultiPut")
} else if s.CantMultiPut && s.B != nil {
s.B.Fatalf("Unexpectedly called MultiPut")
}
}
// PushQuery calls PushQueryF if it was initialized. If it wasn't initialized
// and this function shouldn't be called and testing was initialized, then
// testing will fail.

View File

@ -5,14 +5,12 @@ package logging
import (
"path"
"github.com/ava-labs/gecko/ids"
)
// Factory ...
type Factory interface {
Make() (Logger, error)
MakeChain(chainID ids.ID, subdir string) (Logger, error)
MakeChain(chainID string, subdir string) (Logger, error)
MakeSubdir(subdir string) (Logger, error)
Close()
}
@ -41,10 +39,10 @@ func (f *factory) Make() (Logger, error) {
}
// MakeChain ...
func (f *factory) MakeChain(chainID ids.ID, subdir string) (Logger, error) {
func (f *factory) MakeChain(chainID string, subdir string) (Logger, error) {
config := f.config
config.MsgPrefix = "chain " + chainID.String()
config.Directory = path.Join(config.Directory, "chain", chainID.String(), subdir)
config.MsgPrefix = chainID + " Chain"
config.Directory = path.Join(config.Directory, "chain", chainID, subdir)
log, err := New(config)
if err == nil {

View File

@ -3,10 +3,6 @@
package logging
import (
"github.com/ava-labs/gecko/ids"
)
// NoFactory ...
type NoFactory struct{}
@ -14,7 +10,7 @@ type NoFactory struct{}
func (NoFactory) Make() (Logger, error) { return NoLog{}, nil }
// MakeChain ...
func (NoFactory) MakeChain(ids.ID, string) (Logger, error) { return NoLog{}, nil }
func (NoFactory) MakeChain(string, string) (Logger, error) { return NoLog{}, nil }
// MakeSubdir ...
func (NoFactory) MakeSubdir(string) (Logger, error) { return NoLog{}, nil }

View File

@ -3,9 +3,13 @@
package timer
import (
"time"
)
// Useful latency buckets
var (
Buckets = []float64{
MillisecondsBuckets = []float64{
10, // 10 ms is ~ instant
100, // 100 ms
250, // 250 ms
@ -18,4 +22,15 @@ var (
10000, // 10 seconds
// anything larger than 10 seconds will be bucketed together
}
NanosecondsBuckets = []float64{
float64(100 * time.Nanosecond),
float64(time.Microsecond),
float64(10 * time.Microsecond),
float64(100 * time.Microsecond),
float64(time.Millisecond),
float64(10 * time.Millisecond),
float64(100 * time.Millisecond),
float64(time.Second),
// anything larger than a second will be bucketed together
}
)
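// Example (a sketch, not part of this change): wiring NanosecondsBuckets into
// a histogram and observing a duration. A time.Duration casts directly to the
// float64 nanosecond count that Observe expects here. Assumes an import of
// github.com/prometheus/client_golang/prometheus; the namespace and metric
// name are illustrative.
func exampleObserve(registerer prometheus.Registerer) error {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Namespace: "example",
		Name:      "request_duration",
		Help:      "Time spent processing this request in nanoseconds",
		Buckets:   NanosecondsBuckets,
	})
	if err := registerer.Register(h); err != nil {
		return err
	}
	start := time.Now()
	// ... the work being measured ...
	h.Observe(float64(time.Since(start))) // e.g. ~2ms lands in the 10ms bucket
	return nil
}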

View File

@ -256,6 +256,24 @@ func (p *Packer) UnpackFixedByteSlices(size int) [][]byte {
return bytes
}
// Pack2DByteSlice appends a 2D byte slice to the byte array
func (p *Packer) Pack2DByteSlice(byteSlices [][]byte) {
p.PackInt(uint32(len(byteSlices)))
for _, bytes := range byteSlices {
p.PackBytes(bytes)
}
}
// Unpack2DByteSlice returns a 2D byte slice from the byte array.
func (p *Packer) Unpack2DByteSlice() [][]byte {
sliceSize := p.UnpackInt()
bytes := [][]byte(nil)
for i := uint32(0); i < sliceSize && !p.Errored(); i++ {
bytes = append(bytes, p.UnpackBytes())
}
return bytes
}
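// As a worked example of the wire format (a sketch; it assumes PackInt's
// big-endian encoding used throughout this packer):
//
//	Pack2DByteSlice([][]byte{{0xAA}, {0xBB, 0xCC}}) writes
//	  00 00 00 02          count: 2 slices
//	  00 00 00 01 AA       first slice: 4-byte length, then its byte
//	  00 00 00 02 BB CC    second slice: 4-byte length, then its bytes
//
// Unpack2DByteSlice reads the same layout back, stopping once the count is
// exhausted or the packer has errored.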
// PackStr appends a string to the byte array
func (p *Packer) PackStr(str string) {
strSize := len(str)
@ -432,6 +450,20 @@ func TryUnpackBytes(packer *Packer) interface{} {
return packer.UnpackBytes()
}
// TryPack2DBytes attempts to pack the value as a 2D byte slice
func TryPack2DBytes(packer *Packer, valIntf interface{}) {
if val, ok := valIntf.([][]byte); ok {
packer.Pack2DByteSlice(val)
} else {
packer.Add(errBadType)
}
}
// TryUnpack2DBytes attempts to unpack the value as a 2D byte slice
func TryUnpack2DBytes(packer *Packer) interface{} {
return packer.Unpack2DByteSlice()
}
// TryPackStr attempts to pack the value as a string
func TryPackStr(packer *Packer, valIntf interface{}) {
if val, ok := valIntf.(string); ok {

View File

@ -506,3 +506,63 @@ func TestPackerUnpackBool(t *testing.T) {
t.Fatalf("Packer.UnpackBool returned %t, expected sentinal value %t", actual, BoolSentinal)
}
}
func TestPacker2DByteSlice(t *testing.T) {
// Case: empty array
p := Packer{MaxSize: 1024}
arr := [][]byte{}
p.Pack2DByteSlice(arr)
if p.Errored() {
t.Fatal(p.Err)
}
p = Packer{MaxSize: 1024, Bytes: p.Bytes}
arrUnpacked := p.Unpack2DByteSlice()
if p.Errored() {
t.Fatal(p.Err)
}
if len(arrUnpacked) != 0 {
t.Fatal("should be empty")
}
// Case: Array has one element
p = Packer{MaxSize: 1024}
arr = [][]byte{
[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
}
p.Pack2DByteSlice(arr)
if p.Errored() {
t.Fatal(p.Err)
}
p = Packer{MaxSize: 1024, Bytes: p.Bytes}
arrUnpacked = p.Unpack2DByteSlice()
if p.Errored() {
t.Fatal(p.Err)
}
if l := len(arrUnpacked); l != 1 {
t.Fatalf("should be length 1 but is length %d", l)
}
if !bytes.Equal(arrUnpacked[0], arr[0]) {
t.Fatal("should match")
}
// Case: Array has multiple elements
p = Packer{MaxSize: 1024}
arr = [][]byte{
[]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
[]byte{11, 12, 3, 4, 5, 6, 7, 8, 9, 10},
}
p.Pack2DByteSlice(arr)
if p.Errored() {
t.Fatal(p.Err)
}
p = Packer{MaxSize: 1024, Bytes: p.Bytes}
arrUnpacked = p.Unpack2DByteSlice()
if p.Errored() {
t.Fatal(p.Err)
}
if l := len(arrUnpacked); l != 2 {
t.Fatalf("should be length 1 but is length %d", l)
}
if !bytes.Equal(arrUnpacked[0], arr[0]) {
t.Fatal("should match")
}
if !bytes.Equal(arrUnpacked[1], arr[1]) {
t.Fatal("should match")
}
}

View File

@ -840,6 +840,16 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
cr := codecRegistry{
index: 1,
typeToFxIndex: vm.typeToFxIndex,
@ -1386,6 +1396,16 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
cr := codecRegistry{
index: 1,
typeToFxIndex: vm.typeToFxIndex,
@ -1538,6 +1558,16 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
cr := codecRegistry{
index: 1,
typeToFxIndex: vm.typeToFxIndex,

View File

@ -151,6 +151,16 @@ func TestIssueExportTx(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
key := keys[0]
tx := &Tx{UnsignedTx: &ExportTx{
@ -297,6 +307,16 @@ func TestClearForceAcceptedExportTx(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
key := keys[0]
tx := &Tx{UnsignedTx: &ExportTx{

View File

@ -19,6 +19,12 @@ type Fx interface {
// return an error if the VM is incompatible.
Initialize(vm interface{}) error
// Notify this Fx that the VM is in bootstrapping
Bootstrapping() error
// Notify this Fx that the VM is bootstrapped
Bootstrapped() error
// VerifyTransfer verifies that the specified transaction can spend the
// provided utxo with no restrictions on the destination. If the transaction
// can't spend the output based on the input and credential, a non-nil error

View File

@ -4,10 +4,12 @@
package avm
type testFx struct {
initialize, verifyTransfer, verifyOperation error
initialize, bootstrapping, bootstrapped, verifyTransfer, verifyOperation error
}
func (fx *testFx) Initialize(_ interface{}) error { return fx.initialize }
func (fx *testFx) Bootstrapping() error { return fx.bootstrapping }
func (fx *testFx) Bootstrapped() error { return fx.bootstrapped }
func (fx *testFx) VerifyTransfer(_, _, _, _ interface{}) error { return fx.verifyTransfer }
func (fx *testFx) VerifyOperation(_, _, _ interface{}, _ []interface{}) error {
return fx.verifyOperation

View File

@ -140,6 +140,16 @@ func TestIssueImportTx(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
key := keys[0]
utxoID := ava.UTXOID{
@ -288,6 +298,16 @@ func TestForceAcceptImportTx(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
key := keys[0]
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)

View File

@ -45,6 +45,7 @@ var (
errGenesisAssetMustHaveState = errors.New("genesis asset must have non-empty state")
errInvalidAddress = errors.New("invalid address")
errWrongBlockchainID = errors.New("wrong blockchain ID")
errBootstrapping = errors.New("chain is currently bootstrapping")
)
// VM implements the avalanche.DAGVM interface
@ -67,6 +68,9 @@ type VM struct {
// State management
state *prefixedState
// Set to true once this VM is marked as `Bootstrapped` by the engine
bootstrapped bool
// Transaction issuing
timer *timer.Timer
batchTimeout time.Duration
@ -197,6 +201,29 @@ func (vm *VM) Initialize(
return vm.db.Commit()
}
// Bootstrapping is called by the consensus engine when it starts bootstrapping
// this chain
func (vm *VM) Bootstrapping() error {
for _, fx := range vm.fxs {
if err := fx.Fx.Bootstrapping(); err != nil {
return err
}
}
return nil
}
// Bootstrapped is called by the consensus engine when it is done bootstrapping
// this chain
func (vm *VM) Bootstrapped() error {
for _, fx := range vm.fxs {
if err := fx.Fx.Bootstrapped(); err != nil {
return err
}
}
vm.bootstrapped = true
return nil
}
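// bootstrapSketch (a sketch, not part of this change) illustrates the call
// order the engine is expected to follow; until Bootstrapped returns, IssueTx
// below rejects transactions with errBootstrapping.
func bootstrapSketch(vm *VM) error {
	if err := vm.Bootstrapping(); err != nil {
		return err
	}
	// ... fetch, verify, and accept ancestor containers ...
	if err := vm.Bootstrapped(); err != nil {
		return err
	}
	// only now will vm.IssueTx accept new transactions
	return nil
}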
// Shutdown implements the avalanche.DAGVM interface
func (vm *VM) Shutdown() error {
if vm.timer == nil {
@ -272,6 +299,9 @@ func (vm *VM) GetTx(txID ids.ID) (snowstorm.Tx, error) {
// either accepted or rejected with the appropriate status. This function will
// go out of scope when the transaction is removed from memory.
func (vm *VM) IssueTx(b []byte, onDecide func(choices.Status)) (ids.ID, error) {
if !vm.bootstrapped {
return ids.ID{}, errBootstrapping
}
tx, err := vm.parseTx(b)
if err != nil {
return ids.ID{}, err

View File

@ -178,6 +178,14 @@ func GenesisVM(t *testing.T) ([]byte, chan common.Message, *VM) {
}
vm.batchTimeout = 0
if err := vm.Bootstrapping(); err != nil {
t.Fatal(err)
}
if err := vm.Bootstrapped(); err != nil {
t.Fatal(err)
}
return genesisBytes, issuer, vm
}
@ -678,6 +686,16 @@ func TestIssueNFT(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{
BaseTx: BaseTx{
NetID: networkID,
@ -841,6 +859,16 @@ func TestIssueProperty(t *testing.T) {
}
vm.batchTimeout = 0
err = vm.Bootstrapping()
if err != nil {
t.Fatal(err)
}
err = vm.Bootstrapped()
if err != nil {
t.Fatal(err)
}
createAssetTx := &Tx{UnsignedTx: &CreateAssetTx{
BaseTx: BaseTx{
NetID: networkID,

View File

@ -81,6 +81,12 @@ func (svm *SnowmanVM) GetBlock(ID ids.ID) (snowman.Block, error) {
return nil, errBadData // Should never happen
}
// Bootstrapping marks this VM as bootstrapping
func (svm *SnowmanVM) Bootstrapping() error { return nil }
// Bootstrapped marks this VM as bootstrapped
func (svm *SnowmanVM) Bootstrapped() error { return nil }
// Shutdown this vm
func (svm *SnowmanVM) Shutdown() error {
if svm.DB == nil {

View File

@ -23,6 +23,11 @@ func (a *Abort) Verify() error {
parent, ok := a.parentBlock().(*ProposalBlock)
// Abort is a decision, so its parent must be a proposal
if !ok {
if err := a.Reject(); err == nil {
a.vm.DB.Commit()
} else {
a.vm.DB.Abort()
}
return errInvalidBlockType
}

View File

@ -23,6 +23,11 @@ func (c *Commit) Verify() error {
// the parent of a Commit block should always be a proposal
parent, ok := c.parentBlock().(*ProposalBlock)
if !ok {
if err := c.Reject(); err == nil {
c.vm.DB.Commit()
} else {
c.vm.DB.Abort()
}
return errInvalidBlockType
}

View File

@ -98,6 +98,11 @@ func (pb *ProposalBlock) Verify() error {
// The parent of a proposal block (i.e. this block) must be a decision block
parent, ok := parentIntf.(decision)
if !ok {
if err := pb.Reject(); err == nil {
pb.vm.DB.Commit()
} else {
pb.vm.DB.Abort()
}
return errInvalidBlockType
}
@ -107,6 +112,11 @@ func (pb *ProposalBlock) Verify() error {
var err error
pb.onCommitDB, pb.onAbortDB, pb.onCommitFunc, pb.onAbortFunc, err = pb.Tx.SemanticVerify(pdb)
if err != nil {
if err := pb.Reject(); err == nil {
pb.vm.DB.Commit()
} else {
pb.vm.DB.Abort()
}
return err
}

View File

@ -54,6 +54,11 @@ func (sb *StandardBlock) Verify() error {
// be a decision.
parent, ok := parentBlock.(decision)
if !ok {
if err := sb.Reject(); err == nil {
sb.vm.DB.Commit()
} else {
sb.vm.DB.Abort()
}
return errInvalidBlockType
}
@ -64,6 +69,11 @@ func (sb *StandardBlock) Verify() error {
for _, tx := range sb.Txs {
onAccept, err := tx.SemanticVerify(sb.onAcceptDB)
if err != nil {
if err := sb.Reject(); err == nil {
sb.vm.DB.Commit()
} else {
sb.vm.DB.Abort()
}
return err
}
if onAccept != nil {

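// Each of the four block types above shares the same failure path: mark the
// block Rejected, then Commit that status write if Reject succeeded or Abort
// the versioned DB if it didn't, so a re-parsed block reports choices.Rejected
// (asserted by TestInvalidAddDefaultSubnetValidatorCommit below). A
// hypothetical helper capturing the pattern (not part of this change; the
// Commit/Abort signatures are assumptions):

type commitAborter interface {
	Commit() error
	Abort()
}

func rejectAndPersist(reject func() error, db commitAborter) {
	if err := reject(); err == nil {
		// best-effort persist; the caller still returns its own error
		_ = db.Commit()
	} else {
		db.Abort()
	}
}

// With it, each failure branch collapses to rejectAndPersist(sb.Reject, sb.vm.DB).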
View File

@ -399,6 +399,12 @@ func (vm *VM) createChain(tx *CreateChainTx) {
vm.chainManager.CreateChain(chainParams)
}
// Bootstrapping marks this VM as bootstrapping
func (vm *VM) Bootstrapping() error { return nil }
// Bootstrapped marks this VM as bootstrapped
func (vm *VM) Bootstrapped() error { return nil }
// Shutdown this blockchain
func (vm *VM) Shutdown() error {
if vm.timer == nil {

View File

@ -376,6 +376,65 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) {
}
}
// Verify that an invalid proposal to add a validator to the default subnet is rejected
func TestInvalidAddDefaultSubnetValidatorCommit(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
startTime := defaultGenesisTime.Add(-Delta).Add(-1 * time.Second)
endTime := startTime.Add(MinimumStakingDuration)
key, _ := vm.factory.NewPrivateKey()
ID := key.PublicKey().Address()
// create invalid tx
tx, err := vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1,
defaultStakeAmount,
uint64(startTime.Unix()),
uint64(endTime.Unix()),
ID,
ID,
NumberOfShares,
testNetworkID,
defaultKey,
)
if err != nil {
t.Fatal(err)
}
blk, err := vm.newProposalBlock(vm.LastAccepted(), tx)
if err != nil {
t.Fatal(err)
}
if err := vm.State.PutBlock(vm.DB, blk); err != nil {
t.Fatal(err)
}
if err := vm.DB.Commit(); err != nil {
t.Fatal(err)
}
if err := blk.Verify(); err == nil {
t.Fatalf("Should have errored during verification")
}
if status := blk.Status(); status != choices.Rejected {
t.Fatalf("Should have marked the block as rejected")
}
parsedBlk, err := vm.GetBlock(blk.ID())
if err != nil {
t.Fatal(err)
}
if status := parsedBlk.Status(); status != choices.Rejected {
t.Fatalf("Should have marked the block as rejected")
}
}
// Reject proposal to add validator to default subnet
func TestAddDefaultSubnetValidatorReject(t *testing.T) {
vm := defaultVM()
@ -1551,8 +1610,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
advanceTimePreference := advanceTimeBlk.Options()[0]
peerID := ids.NewShortID([20]byte{1, 2, 3, 4, 5, 4, 3, 2, 1})
vdrs := validators.NewSet()
vdrs.Add(validators.NewValidator(ctx.NodeID, 1))
vdrs.Add(validators.NewValidator(peerID, 1))
beacons := vdrs
timeoutManager := timeout.Manager{}
@ -1597,7 +1657,13 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
// Asynchronously passes messages from the network to the consensus engine
handler := &router.Handler{}
handler.Initialize(&engine, msgChan, 1000)
handler.Initialize(
&engine,
msgChan,
1000,
"",
prometheus.NewRegistry(),
)
// Allow incoming messages to be routed to the new chain
chainRouter.AddChain(handler)
@ -1617,23 +1683,23 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
frontier := ids.Set{}
frontier.Add(advanceTimeBlkID)
engine.AcceptedFrontier(ctx.NodeID, *reqID, frontier)
engine.AcceptedFrontier(peerID, *reqID, frontier)
externalSender.GetAcceptedF = nil
externalSender.GetF = func(_ ids.ShortID, _ ids.ID, requestID uint32, containerID ids.ID) {
externalSender.GetAncestorsF = func(_ ids.ShortID, _ ids.ID, requestID uint32, containerID ids.ID) {
*reqID = requestID
if !containerID.Equals(advanceTimeBlkID) {
t.Fatalf("wrong block requested")
}
}
engine.Accepted(ctx.NodeID, *reqID, frontier)
engine.Accepted(peerID, *reqID, frontier)
externalSender.GetF = nil
externalSender.CantPushQuery = false
externalSender.CantPullQuery = false
engine.Put(ctx.NodeID, *reqID, advanceTimeBlkID, advanceTimeBlkBytes)
engine.MultiPut(peerID, *reqID, [][]byte{advanceTimeBlkBytes})
externalSender.CantPushQuery = true

View File

@ -129,6 +129,18 @@ func (vm *VMClient) startMessengerServer(opts []grpc.ServerOption) *grpc.Server
return server
}
// Bootstrapping ...
func (vm *VMClient) Bootstrapping() error {
_, err := vm.client.Bootstrapping(context.Background(), &vmproto.BootstrappingRequest{})
return err
}
// Bootstrapped ...
func (vm *VMClient) Bootstrapped() error {
_, err := vm.client.Bootstrapped(context.Background(), &vmproto.BootstrappedRequest{})
return err
}
// Shutdown ...
func (vm *VMClient) Shutdown() error {
vm.lock.Lock()

View File

@ -84,8 +84,18 @@ func (vm *VMServer) Initialize(_ context.Context, req *vmproto.InitializeRequest
return &vmproto.InitializeResponse{}, nil
}
// Bootstrapping ...
func (vm *VMServer) Bootstrapping(context.Context, *vmproto.BootstrappingRequest) (*vmproto.BootstrappingResponse, error) {
return &vmproto.BootstrappingResponse{}, vm.vm.Bootstrapping()
}
// Bootstrapped ...
func (vm *VMServer) Bootstrapped(context.Context, *vmproto.BootstrappedRequest) (*vmproto.BootstrappedResponse, error) {
return &vmproto.BootstrappedResponse{}, vm.vm.Bootstrapped()
}
// Shutdown ...
func (vm *VMServer) Shutdown(_ context.Context, _ *vmproto.ShutdownRequest) (*vmproto.ShutdownResponse, error) {
func (vm *VMServer) Shutdown(context.Context, *vmproto.ShutdownRequest) (*vmproto.ShutdownResponse, error) {
vm.lock.Lock()
defer vm.lock.Unlock()

View File

@ -110,6 +110,130 @@ func (m *InitializeResponse) XXX_DiscardUnknown() {
var xxx_messageInfo_InitializeResponse proto.InternalMessageInfo
type BootstrappingRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootstrappingRequest) Reset() { *m = BootstrappingRequest{} }
func (m *BootstrappingRequest) String() string { return proto.CompactTextString(m) }
func (*BootstrappingRequest) ProtoMessage() {}
func (*BootstrappingRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{2}
}
func (m *BootstrappingRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BootstrappingRequest.Unmarshal(m, b)
}
func (m *BootstrappingRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BootstrappingRequest.Marshal(b, m, deterministic)
}
func (m *BootstrappingRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootstrappingRequest.Merge(m, src)
}
func (m *BootstrappingRequest) XXX_Size() int {
return xxx_messageInfo_BootstrappingRequest.Size(m)
}
func (m *BootstrappingRequest) XXX_DiscardUnknown() {
xxx_messageInfo_BootstrappingRequest.DiscardUnknown(m)
}
var xxx_messageInfo_BootstrappingRequest proto.InternalMessageInfo
type BootstrappingResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootstrappingResponse) Reset() { *m = BootstrappingResponse{} }
func (m *BootstrappingResponse) String() string { return proto.CompactTextString(m) }
func (*BootstrappingResponse) ProtoMessage() {}
func (*BootstrappingResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{3}
}
func (m *BootstrappingResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BootstrappingResponse.Unmarshal(m, b)
}
func (m *BootstrappingResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BootstrappingResponse.Marshal(b, m, deterministic)
}
func (m *BootstrappingResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootstrappingResponse.Merge(m, src)
}
func (m *BootstrappingResponse) XXX_Size() int {
return xxx_messageInfo_BootstrappingResponse.Size(m)
}
func (m *BootstrappingResponse) XXX_DiscardUnknown() {
xxx_messageInfo_BootstrappingResponse.DiscardUnknown(m)
}
var xxx_messageInfo_BootstrappingResponse proto.InternalMessageInfo
type BootstrappedRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootstrappedRequest) Reset() { *m = BootstrappedRequest{} }
func (m *BootstrappedRequest) String() string { return proto.CompactTextString(m) }
func (*BootstrappedRequest) ProtoMessage() {}
func (*BootstrappedRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{4}
}
func (m *BootstrappedRequest) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BootstrappedRequest.Unmarshal(m, b)
}
func (m *BootstrappedRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BootstrappedRequest.Marshal(b, m, deterministic)
}
func (m *BootstrappedRequest) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootstrappedRequest.Merge(m, src)
}
func (m *BootstrappedRequest) XXX_Size() int {
return xxx_messageInfo_BootstrappedRequest.Size(m)
}
func (m *BootstrappedRequest) XXX_DiscardUnknown() {
xxx_messageInfo_BootstrappedRequest.DiscardUnknown(m)
}
var xxx_messageInfo_BootstrappedRequest proto.InternalMessageInfo
type BootstrappedResponse struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *BootstrappedResponse) Reset() { *m = BootstrappedResponse{} }
func (m *BootstrappedResponse) String() string { return proto.CompactTextString(m) }
func (*BootstrappedResponse) ProtoMessage() {}
func (*BootstrappedResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{5}
}
func (m *BootstrappedResponse) XXX_Unmarshal(b []byte) error {
return xxx_messageInfo_BootstrappedResponse.Unmarshal(m, b)
}
func (m *BootstrappedResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
return xxx_messageInfo_BootstrappedResponse.Marshal(b, m, deterministic)
}
func (m *BootstrappedResponse) XXX_Merge(src proto.Message) {
xxx_messageInfo_BootstrappedResponse.Merge(m, src)
}
func (m *BootstrappedResponse) XXX_Size() int {
return xxx_messageInfo_BootstrappedResponse.Size(m)
}
func (m *BootstrappedResponse) XXX_DiscardUnknown() {
xxx_messageInfo_BootstrappedResponse.DiscardUnknown(m)
}
var xxx_messageInfo_BootstrappedResponse proto.InternalMessageInfo
type ShutdownRequest struct {
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
@ -120,7 +244,7 @@ func (m *ShutdownRequest) Reset() { *m = ShutdownRequest{} }
func (m *ShutdownRequest) String() string { return proto.CompactTextString(m) }
func (*ShutdownRequest) ProtoMessage() {}
func (*ShutdownRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{2}
return fileDescriptor_cab246c8c7c5372d, []int{6}
}
func (m *ShutdownRequest) XXX_Unmarshal(b []byte) error {
@ -151,7 +275,7 @@ func (m *ShutdownResponse) Reset() { *m = ShutdownResponse{} }
func (m *ShutdownResponse) String() string { return proto.CompactTextString(m) }
func (*ShutdownResponse) ProtoMessage() {}
func (*ShutdownResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{3}
return fileDescriptor_cab246c8c7c5372d, []int{7}
}
func (m *ShutdownResponse) XXX_Unmarshal(b []byte) error {
@ -182,7 +306,7 @@ func (m *CreateHandlersRequest) Reset() { *m = CreateHandlersRequest{} }
func (m *CreateHandlersRequest) String() string { return proto.CompactTextString(m) }
func (*CreateHandlersRequest) ProtoMessage() {}
func (*CreateHandlersRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{4}
return fileDescriptor_cab246c8c7c5372d, []int{8}
}
func (m *CreateHandlersRequest) XXX_Unmarshal(b []byte) error {
@ -214,7 +338,7 @@ func (m *CreateHandlersResponse) Reset() { *m = CreateHandlersResponse{}
func (m *CreateHandlersResponse) String() string { return proto.CompactTextString(m) }
func (*CreateHandlersResponse) ProtoMessage() {}
func (*CreateHandlersResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{5}
return fileDescriptor_cab246c8c7c5372d, []int{9}
}
func (m *CreateHandlersResponse) XXX_Unmarshal(b []byte) error {
@ -255,7 +379,7 @@ func (m *Handler) Reset() { *m = Handler{} }
func (m *Handler) String() string { return proto.CompactTextString(m) }
func (*Handler) ProtoMessage() {}
func (*Handler) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{6}
return fileDescriptor_cab246c8c7c5372d, []int{10}
}
func (m *Handler) XXX_Unmarshal(b []byte) error {
@ -307,7 +431,7 @@ func (m *BuildBlockRequest) Reset() { *m = BuildBlockRequest{} }
func (m *BuildBlockRequest) String() string { return proto.CompactTextString(m) }
func (*BuildBlockRequest) ProtoMessage() {}
func (*BuildBlockRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{7}
return fileDescriptor_cab246c8c7c5372d, []int{11}
}
func (m *BuildBlockRequest) XXX_Unmarshal(b []byte) error {
@ -341,7 +465,7 @@ func (m *BuildBlockResponse) Reset() { *m = BuildBlockResponse{} }
func (m *BuildBlockResponse) String() string { return proto.CompactTextString(m) }
func (*BuildBlockResponse) ProtoMessage() {}
func (*BuildBlockResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{8}
return fileDescriptor_cab246c8c7c5372d, []int{12}
}
func (m *BuildBlockResponse) XXX_Unmarshal(b []byte) error {
@ -394,7 +518,7 @@ func (m *ParseBlockRequest) Reset() { *m = ParseBlockRequest{} }
func (m *ParseBlockRequest) String() string { return proto.CompactTextString(m) }
func (*ParseBlockRequest) ProtoMessage() {}
func (*ParseBlockRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{9}
return fileDescriptor_cab246c8c7c5372d, []int{13}
}
func (m *ParseBlockRequest) XXX_Unmarshal(b []byte) error {
@ -435,7 +559,7 @@ func (m *ParseBlockResponse) Reset() { *m = ParseBlockResponse{} }
func (m *ParseBlockResponse) String() string { return proto.CompactTextString(m) }
func (*ParseBlockResponse) ProtoMessage() {}
func (*ParseBlockResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{10}
return fileDescriptor_cab246c8c7c5372d, []int{14}
}
func (m *ParseBlockResponse) XXX_Unmarshal(b []byte) error {
@ -488,7 +612,7 @@ func (m *GetBlockRequest) Reset() { *m = GetBlockRequest{} }
func (m *GetBlockRequest) String() string { return proto.CompactTextString(m) }
func (*GetBlockRequest) ProtoMessage() {}
func (*GetBlockRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{11}
return fileDescriptor_cab246c8c7c5372d, []int{15}
}
func (m *GetBlockRequest) XXX_Unmarshal(b []byte) error {
@ -529,7 +653,7 @@ func (m *GetBlockResponse) Reset() { *m = GetBlockResponse{} }
func (m *GetBlockResponse) String() string { return proto.CompactTextString(m) }
func (*GetBlockResponse) ProtoMessage() {}
func (*GetBlockResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{12}
return fileDescriptor_cab246c8c7c5372d, []int{16}
}
func (m *GetBlockResponse) XXX_Unmarshal(b []byte) error {
@ -582,7 +706,7 @@ func (m *SetPreferenceRequest) Reset() { *m = SetPreferenceRequest{} }
func (m *SetPreferenceRequest) String() string { return proto.CompactTextString(m) }
func (*SetPreferenceRequest) ProtoMessage() {}
func (*SetPreferenceRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{13}
return fileDescriptor_cab246c8c7c5372d, []int{17}
}
func (m *SetPreferenceRequest) XXX_Unmarshal(b []byte) error {
@ -620,7 +744,7 @@ func (m *SetPreferenceResponse) Reset() { *m = SetPreferenceResponse{} }
func (m *SetPreferenceResponse) String() string { return proto.CompactTextString(m) }
func (*SetPreferenceResponse) ProtoMessage() {}
func (*SetPreferenceResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{14}
return fileDescriptor_cab246c8c7c5372d, []int{18}
}
func (m *SetPreferenceResponse) XXX_Unmarshal(b []byte) error {
@ -651,7 +775,7 @@ func (m *LastAcceptedRequest) Reset() { *m = LastAcceptedRequest{} }
func (m *LastAcceptedRequest) String() string { return proto.CompactTextString(m) }
func (*LastAcceptedRequest) ProtoMessage() {}
func (*LastAcceptedRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{15}
return fileDescriptor_cab246c8c7c5372d, []int{19}
}
func (m *LastAcceptedRequest) XXX_Unmarshal(b []byte) error {
@ -683,7 +807,7 @@ func (m *LastAcceptedResponse) Reset() { *m = LastAcceptedResponse{} }
func (m *LastAcceptedResponse) String() string { return proto.CompactTextString(m) }
func (*LastAcceptedResponse) ProtoMessage() {}
func (*LastAcceptedResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{16}
return fileDescriptor_cab246c8c7c5372d, []int{20}
}
func (m *LastAcceptedResponse) XXX_Unmarshal(b []byte) error {
@ -722,7 +846,7 @@ func (m *BlockVerifyRequest) Reset() { *m = BlockVerifyRequest{} }
func (m *BlockVerifyRequest) String() string { return proto.CompactTextString(m) }
func (*BlockVerifyRequest) ProtoMessage() {}
func (*BlockVerifyRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{17}
return fileDescriptor_cab246c8c7c5372d, []int{21}
}
func (m *BlockVerifyRequest) XXX_Unmarshal(b []byte) error {
@ -760,7 +884,7 @@ func (m *BlockVerifyResponse) Reset() { *m = BlockVerifyResponse{} }
func (m *BlockVerifyResponse) String() string { return proto.CompactTextString(m) }
func (*BlockVerifyResponse) ProtoMessage() {}
func (*BlockVerifyResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{18}
return fileDescriptor_cab246c8c7c5372d, []int{22}
}
func (m *BlockVerifyResponse) XXX_Unmarshal(b []byte) error {
@ -792,7 +916,7 @@ func (m *BlockAcceptRequest) Reset() { *m = BlockAcceptRequest{} }
func (m *BlockAcceptRequest) String() string { return proto.CompactTextString(m) }
func (*BlockAcceptRequest) ProtoMessage() {}
func (*BlockAcceptRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{19}
return fileDescriptor_cab246c8c7c5372d, []int{23}
}
func (m *BlockAcceptRequest) XXX_Unmarshal(b []byte) error {
@ -830,7 +954,7 @@ func (m *BlockAcceptResponse) Reset() { *m = BlockAcceptResponse{} }
func (m *BlockAcceptResponse) String() string { return proto.CompactTextString(m) }
func (*BlockAcceptResponse) ProtoMessage() {}
func (*BlockAcceptResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{20}
return fileDescriptor_cab246c8c7c5372d, []int{24}
}
func (m *BlockAcceptResponse) XXX_Unmarshal(b []byte) error {
@ -862,7 +986,7 @@ func (m *BlockRejectRequest) Reset() { *m = BlockRejectRequest{} }
func (m *BlockRejectRequest) String() string { return proto.CompactTextString(m) }
func (*BlockRejectRequest) ProtoMessage() {}
func (*BlockRejectRequest) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{21}
return fileDescriptor_cab246c8c7c5372d, []int{25}
}
func (m *BlockRejectRequest) XXX_Unmarshal(b []byte) error {
@ -900,7 +1024,7 @@ func (m *BlockRejectResponse) Reset() { *m = BlockRejectResponse{} }
func (m *BlockRejectResponse) String() string { return proto.CompactTextString(m) }
func (*BlockRejectResponse) ProtoMessage() {}
func (*BlockRejectResponse) Descriptor() ([]byte, []int) {
return fileDescriptor_cab246c8c7c5372d, []int{22}
return fileDescriptor_cab246c8c7c5372d, []int{26}
}
func (m *BlockRejectResponse) XXX_Unmarshal(b []byte) error {
@ -924,6 +1048,10 @@ var xxx_messageInfo_BlockRejectResponse proto.InternalMessageInfo
func init() {
proto.RegisterType((*InitializeRequest)(nil), "vmproto.InitializeRequest")
proto.RegisterType((*InitializeResponse)(nil), "vmproto.InitializeResponse")
proto.RegisterType((*BootstrappingRequest)(nil), "vmproto.BootstrappingRequest")
proto.RegisterType((*BootstrappingResponse)(nil), "vmproto.BootstrappingResponse")
proto.RegisterType((*BootstrappedRequest)(nil), "vmproto.BootstrappedRequest")
proto.RegisterType((*BootstrappedResponse)(nil), "vmproto.BootstrappedResponse")
proto.RegisterType((*ShutdownRequest)(nil), "vmproto.ShutdownRequest")
proto.RegisterType((*ShutdownResponse)(nil), "vmproto.ShutdownResponse")
proto.RegisterType((*CreateHandlersRequest)(nil), "vmproto.CreateHandlersRequest")
@ -950,46 +1078,49 @@ func init() {
func init() { proto.RegisterFile("vm.proto", fileDescriptor_cab246c8c7c5372d) }
var fileDescriptor_cab246c8c7c5372d = []byte{
// 617 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x6d, 0x6f, 0xd2, 0x50,
0x14, 0x4e, 0x21, 0x32, 0x3c, 0xc0, 0x06, 0x17, 0xd8, 0xb0, 0x6e, 0x13, 0x1b, 0xb3, 0x60, 0x62,
0xf8, 0x30, 0x7f, 0xc0, 0x22, 0x8a, 0x6e, 0xf1, 0x6d, 0x96, 0x84, 0x98, 0xe8, 0x97, 0x42, 0x0f,
0x5b, 0x95, 0xb5, 0xf5, 0xde, 0x0b, 0x73, 0xfe, 0x23, 0xff, 0xa5, 0xa1, 0xbd, 0x6d, 0xef, 0xbd,
0xb4, 0x59, 0xe2, 0xb7, 0x9e, 0x73, 0x9e, 0xf3, 0x9c, 0x97, 0x7b, 0x9e, 0x42, 0x75, 0x7d, 0x33,
0x0c, 0x69, 0xc0, 0x03, 0xb2, 0xb3, 0xbe, 0x89, 0x3e, 0xac, 0x5b, 0x68, 0x5d, 0xf8, 0x1e, 0xf7,
0x9c, 0xa5, 0xf7, 0x07, 0x6d, 0xfc, 0xb5, 0x42, 0xc6, 0x89, 0x09, 0x55, 0x77, 0x36, 0x41, 0xba,
0x46, 0xda, 0x33, 0xfa, 0xc6, 0xa0, 0x61, 0xa7, 0x36, 0xb1, 0xa0, 0x7e, 0x85, 0x3e, 0x32, 0x8f,
0x8d, 0xee, 0x38, 0xb2, 0x5e, 0xa9, 0x6f, 0x0c, 0xea, 0xb6, 0xe2, 0xdb, 0x60, 0xd0, 0xbf, 0xf2,
0x7c, 0x14, 0x1c, 0xe5, 0x88, 0x43, 0xf1, 0x59, 0x1d, 0x20, 0x72, 0x61, 0x16, 0x06, 0x3e, 0x43,
0xab, 0x05, 0x7b, 0x93, 0xeb, 0x15, 0x77, 0x83, 0x5b, 0x5f, 0x34, 0x63, 0x11, 0x68, 0x66, 0x2e,
0x01, 0x3b, 0x80, 0xee, 0x6b, 0x8a, 0x0e, 0xc7, 0x73, 0xc7, 0x77, 0x97, 0x48, 0x59, 0x02, 0x7e,
0x0b, 0xfb, 0x7a, 0x20, 0x4e, 0x21, 0x2f, 0xa0, 0x7a, 0x2d, 0x7c, 0x3d, 0xa3, 0x5f, 0x1e, 0xd4,
0x4e, 0x9b, 0x43, 0xb1, 0x84, 0xa1, 0x00, 0xdb, 0x29, 0xc2, 0xfa, 0x06, 0x3b, 0xc2, 0x49, 0xf6,
0xa1, 0x12, 0x52, 0x5c, 0x78, 0xbf, 0xa3, 0x55, 0x3c, 0xb4, 0x85, 0x45, 0xfa, 0x50, 0x5b, 0x06,
0xf3, 0x9f, 0x9f, 0x43, 0xee, 0x05, 0x7e, 0xbc, 0x87, 0x86, 0x2d, 0xbb, 0x36, 0x99, 0x4c, 0x5e,
0x80, 0xb0, 0xac, 0x36, 0xb4, 0x46, 0x2b, 0x6f, 0xe9, 0x8e, 0x36, 0xe0, 0xa4, 0xf3, 0x29, 0x10,
0xd9, 0x29, 0xba, 0xde, 0x85, 0x92, 0xe7, 0x46, 0x85, 0xeb, 0x76, 0xc9, 0x73, 0x37, 0x2f, 0x13,
0x3a, 0x14, 0x7d, 0x7e, 0xf1, 0x46, 0x6c, 0x3e, 0xb5, 0x49, 0x07, 0x1e, 0xcc, 0xa2, 0x27, 0x29,
0x47, 0x81, 0xd8, 0xb0, 0x9e, 0x43, 0xeb, 0xd2, 0xa1, 0x0c, 0xe5, 0x62, 0x19, 0xd4, 0x90, 0xa1,
0x5f, 0x81, 0xc8, 0xd0, 0xff, 0x68, 0x61, 0x33, 0x31, 0x77, 0xf8, 0x8a, 0xa5, 0x13, 0x47, 0x96,
0xf5, 0x14, 0xf6, 0xde, 0x21, 0x57, 0x5a, 0xd0, 0x68, 0xad, 0xef, 0xd0, 0xcc, 0x20, 0xa2, 0xb4,
0x5c, 0xca, 0x28, 0x9a, 0xb6, 0x24, 0x8d, 0x50, 0xd8, 0xc0, 0x09, 0x74, 0x26, 0xc8, 0x2f, 0x29,
0x2e, 0x90, 0xa2, 0x3f, 0xc7, 0xa2, 0x2e, 0x0e, 0xa0, 0xab, 0xe1, 0xc4, 0xc5, 0x75, 0xa1, 0xfd,
0xc1, 0x61, 0xfc, 0xd5, 0x7c, 0x8e, 0x21, 0x47, 0x37, 0x79, 0xb5, 0x13, 0xe8, 0xa8, 0xee, 0xfc,
0xa5, 0x59, 0xcf, 0x80, 0x44, 0xa3, 0x4d, 0x91, 0x7a, 0x8b, 0xbb, 0xa2, 0xea, 0x5d, 0x68, 0x2b,
0x28, 0x51, 0x3b, 0x49, 0x8e, 0xab, 0xdc, 0x97, 0x9c, 0xa0, 0xb4, 0x64, 0x1b, 0x7f, 0xe0, 0xfc,
0xde, 0xe4, 0x04, 0x15, 0x27, 0x9f, 0xfe, 0xad, 0x40, 0x69, 0xfa, 0x91, 0x8c, 0x01, 0x32, 0xad,
0x12, 0x33, 0xd5, 0xcd, 0xd6, 0x9f, 0xc3, 0x7c, 0x9c, 0x1b, 0x13, 0x4b, 0x39, 0x83, 0x6a, 0xa2,
0x64, 0xd2, 0x4b, 0x81, 0x9a, 0xde, 0xcd, 0x47, 0x39, 0x11, 0x41, 0xf0, 0x05, 0x76, 0x55, 0x75,
0x93, 0xe3, 0x14, 0x9c, 0xfb, 0x3f, 0x30, 0x9f, 0x14, 0xc6, 0x05, 0xe5, 0x18, 0x20, 0x93, 0x9d,
0x34, 0xda, 0x96, 0x40, 0xa5, 0xd1, 0x72, 0x74, 0x3a, 0x06, 0xc8, 0xa4, 0x23, 0xd1, 0x6c, 0x49,
0x4f, 0xa2, 0xc9, 0xd1, 0xda, 0x19, 0x54, 0x13, 0x11, 0x48, 0x1b, 0xd2, 0xa4, 0x23, 0x6d, 0x68,
0x4b, 0x31, 0x9f, 0xa0, 0xa1, 0xdc, 0x2f, 0x39, 0xca, 0xb6, 0x99, 0x73, 0xff, 0xe6, 0x71, 0x51,
0x58, 0xf0, 0xbd, 0x87, 0xba, 0x7c, 0xdf, 0xe4, 0x30, 0xc5, 0xe7, 0xa8, 0xc1, 0x3c, 0x2a, 0x88,
0x0a, 0xb2, 0x73, 0xa8, 0x49, 0xe7, 0x4d, 0xa4, 0x85, 0x6e, 0x49, 0xc3, 0x3c, 0xcc, 0x0f, 0x6a,
0x4c, 0x71, 0x09, 0x9d, 0x49, 0xd1, 0x89, 0xce, 0xa4, 0xca, 0x23, 0x65, 0x8a, 0x0f, 0x5f, 0x67,
0x52, 0x44, 0xa3, 0x33, 0xa9, 0x5a, 0x99, 0x55, 0xa2, 0xd0, 0xcb, 0x7f, 0x01, 0x00, 0x00, 0xff,
0xff, 0xbb, 0xac, 0x5b, 0xc8, 0x65, 0x07, 0x00, 0x00,
// 672 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x61, 0x4f, 0x13, 0x41,
0x10, 0x4d, 0x4b, 0x84, 0x3a, 0xb4, 0x40, 0x97, 0x16, 0xea, 0x09, 0x5a, 0x2f, 0x86, 0x60, 0x62,
0xf8, 0x80, 0x3f, 0x80, 0x58, 0x45, 0x21, 0x2a, 0xe2, 0x91, 0x10, 0x13, 0xfd, 0x72, 0xf4, 0x06,
0x38, 0x2d, 0x77, 0xe7, 0xee, 0xb6, 0x88, 0x3f, 0xd0, 0xdf, 0x65, 0xee, 0x6e, 0xef, 0x6e, 0x76,
0xbb, 0x17, 0x12, 0xbf, 0x75, 0x67, 0xde, 0xbc, 0x99, 0x9b, 0xe9, 0x7b, 0xd0, 0x9a, 0xdd, 0xec,
0x25, 0x3c, 0x96, 0x31, 0x5b, 0x9a, 0xdd, 0x64, 0x3f, 0xdc, 0x5b, 0xe8, 0x1e, 0x47, 0xa1, 0x0c,
0xfd, 0x49, 0xf8, 0x07, 0x3d, 0xfc, 0x35, 0x45, 0x21, 0x99, 0x03, 0xad, 0xe0, 0xe2, 0x0c, 0xf9,
0x0c, 0xf9, 0xa0, 0x31, 0x6c, 0xec, 0x76, 0xbc, 0xf2, 0xcd, 0x5c, 0x68, 0x5f, 0x61, 0x84, 0x22,
0x14, 0xa3, 0x3b, 0x89, 0x62, 0xd0, 0x1c, 0x36, 0x76, 0xdb, 0x9e, 0x16, 0x4b, 0x31, 0x18, 0x5d,
0x85, 0x11, 0x2a, 0x8e, 0x85, 0x8c, 0x43, 0x8b, 0xb9, 0x3d, 0x60, 0xb4, 0xb1, 0x48, 0xe2, 0x48,
0xa0, 0xbb, 0x01, 0xbd, 0x51, 0x1c, 0x4b, 0x21, 0xb9, 0x9f, 0x24, 0x61, 0x74, 0xa5, 0x26, 0x72,
0x37, 0xa1, 0x6f, 0xc4, 0x55, 0x41, 0x1f, 0xd6, 0xab, 0x04, 0x06, 0x05, 0x5e, 0xe3, 0x49, 0xc3,
0x0a, 0xde, 0x85, 0xd5, 0xb3, 0xeb, 0xa9, 0x0c, 0xe2, 0xdb, 0xa8, 0x80, 0x32, 0x58, 0xab, 0x42,
0x0a, 0xb6, 0x09, 0xfd, 0x37, 0x1c, 0x7d, 0x89, 0x47, 0x7e, 0x14, 0x4c, 0x90, 0x8b, 0x02, 0xfc,
0x0e, 0x36, 0xcc, 0x44, 0x5e, 0xc2, 0x5e, 0x42, 0xeb, 0x5a, 0xc5, 0x06, 0x8d, 0xe1, 0xc2, 0xee,
0xf2, 0xfe, 0xda, 0x9e, 0x5a, 0xf2, 0x9e, 0x02, 0x7b, 0x25, 0xc2, 0xfd, 0x06, 0x4b, 0x2a, 0xc8,
0x36, 0x60, 0x31, 0xe1, 0x78, 0x19, 0xfe, 0xce, 0x56, 0xfd, 0xd0, 0x53, 0x2f, 0x36, 0x84, 0xe5,
0x49, 0x3c, 0xfe, 0xf9, 0x39, 0x91, 0x61, 0x1c, 0xe5, 0x7b, 0xee, 0x78, 0x34, 0x94, 0x56, 0x0a,
0xba, 0x60, 0xf5, 0x72, 0xd7, 0xa1, 0x3b, 0x9a, 0x86, 0x93, 0x60, 0x94, 0x82, 0x8b, 0xc9, 0xcf,
0x81, 0xd1, 0xa0, 0x9a, 0x7a, 0x05, 0x9a, 0x61, 0x90, 0x35, 0x6e, 0x7b, 0xcd, 0x30, 0x48, 0x2f,
0x9f, 0xf8, 0x1c, 0x23, 0x79, 0xfc, 0x56, 0x5d, 0xb6, 0x7c, 0xb3, 0x1e, 0x3c, 0xb8, 0xc8, 0x4e,
0xbe, 0x90, 0x25, 0xf2, 0x87, 0xfb, 0x02, 0xba, 0xa7, 0x3e, 0x17, 0x48, 0x9b, 0x55, 0xd0, 0x06,
0x85, 0x7e, 0x05, 0x46, 0xa1, 0xff, 0x31, 0x42, 0xfa, 0xc5, 0xd2, 0x97, 0x53, 0x51, 0x7e, 0x71,
0xf6, 0x72, 0x9f, 0xc1, 0xea, 0x7b, 0x94, 0xda, 0x08, 0x06, 0xad, 0xfb, 0x1d, 0xd6, 0x2a, 0x88,
0x6a, 0x4d, 0x5b, 0x35, 0xea, 0xbe, 0xb6, 0x49, 0x3e, 0xa1, 0x76, 0x80, 0x1d, 0xe8, 0x9d, 0xa1,
0x3c, 0xe5, 0x78, 0x89, 0x1c, 0xa3, 0x31, 0xd6, 0x4d, 0xb1, 0x09, 0x7d, 0x03, 0x57, 0xfd, 0x8f,
0x3f, 0xfa, 0x42, 0xbe, 0x1e, 0x8f, 0x31, 0x91, 0xd5, 0xff, 0x78, 0x07, 0x7a, 0x7a, 0xd8, 0xbe,
0x34, 0xf7, 0x39, 0xb0, 0xec, 0xd3, 0xce, 0x91, 0x87, 0x97, 0x77, 0x75, 0xdd, 0x53, 0xb1, 0x50,
0x94, 0xea, 0x5d, 0x14, 0xe7, 0x5d, 0xee, 0x2b, 0x2e, 0x50, 0x46, 0xb1, 0x87, 0x3f, 0x70, 0x7c,
0x6f, 0x71, 0x81, 0xca, 0x8b, 0xf7, 0xff, 0x2e, 0x41, 0xf3, 0xfc, 0x13, 0x3b, 0x04, 0xa8, 0xbc,
0x80, 0x39, 0xa5, 0x6e, 0xe6, 0x9c, 0xc9, 0x79, 0x6c, 0xcd, 0xa9, 0xa5, 0x9c, 0x40, 0x47, 0x33,
0x09, 0xb6, 0x5d, 0xa2, 0x6d, 0xa6, 0xe2, 0x3c, 0xa9, 0x4b, 0x2b, 0xbe, 0x0f, 0xd0, 0xa6, 0x26,
0xc2, 0xb6, 0x2c, 0xf8, 0xf2, 0x54, 0xce, 0x76, 0x4d, 0x56, 0x91, 0x1d, 0x40, 0xab, 0xb0, 0x19,
0x36, 0x28, 0xa1, 0x86, 0x19, 0x39, 0x8f, 0x2c, 0x19, 0x45, 0xf0, 0x05, 0x56, 0x74, 0xeb, 0x61,
0xd5, 0xfc, 0x56, 0xb3, 0x72, 0x9e, 0xd6, 0xe6, 0x15, 0xe5, 0x21, 0x40, 0xe5, 0x09, 0x64, 0xef,
0x73, 0xee, 0x41, 0xf6, 0x6e, 0x31, 0x91, 0x43, 0x80, 0x4a, 0xd7, 0x84, 0x66, 0xce, 0x17, 0x08,
0x8d, 0xc5, 0x08, 0x0e, 0xa0, 0x55, 0x28, 0x94, 0x6c, 0xc8, 0xd0, 0x35, 0xd9, 0xd0, 0x9c, 0x9c,
0x4f, 0xa0, 0xa3, 0x89, 0x8b, 0xdc, 0xdf, 0x26, 0x4e, 0x72, 0x7f, 0xab, 0x26, 0xd3, 0xfb, 0x53,
0xf1, 0x91, 0xfb, 0x5b, 0xa4, 0x4a, 0xee, 0x6f, 0x55, 0xec, 0x11, 0x2c, 0x13, 0xed, 0x31, 0xb2,
0xd0, 0x39, 0xdd, 0x3a, 0x5b, 0xf6, 0xa4, 0xc1, 0x94, 0xb7, 0x30, 0x99, 0x34, 0x11, 0x9b, 0x4c,
0xba, 0x76, 0x4b, 0xa6, 0x5c, 0x95, 0x26, 0x93, 0xa6, 0x68, 0x93, 0x49, 0x17, 0xf2, 0xc5, 0x62,
0x96, 0x7a, 0xf5, 0x2f, 0x00, 0x00, 0xff, 0xff, 0x92, 0xa7, 0x99, 0xc8, 0x62, 0x08, 0x00, 0x00,
}
// Reference imports to suppress errors if they are not otherwise used.
@ -1005,6 +1136,8 @@ const _ = grpc.SupportPackageIsVersion6
// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream.
type VMClient interface {
Initialize(ctx context.Context, in *InitializeRequest, opts ...grpc.CallOption) (*InitializeResponse, error)
Bootstrapping(ctx context.Context, in *BootstrappingRequest, opts ...grpc.CallOption) (*BootstrappingResponse, error)
Bootstrapped(ctx context.Context, in *BootstrappedRequest, opts ...grpc.CallOption) (*BootstrappedResponse, error)
Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error)
CreateHandlers(ctx context.Context, in *CreateHandlersRequest, opts ...grpc.CallOption) (*CreateHandlersResponse, error)
BuildBlock(ctx context.Context, in *BuildBlockRequest, opts ...grpc.CallOption) (*BuildBlockResponse, error)
@ -1034,6 +1167,24 @@ func (c *vMClient) Initialize(ctx context.Context, in *InitializeRequest, opts .
return out, nil
}
func (c *vMClient) Bootstrapping(ctx context.Context, in *BootstrappingRequest, opts ...grpc.CallOption) (*BootstrappingResponse, error) {
out := new(BootstrappingResponse)
err := c.cc.Invoke(ctx, "/vmproto.VM/Bootstrapping", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *vMClient) Bootstrapped(ctx context.Context, in *BootstrappedRequest, opts ...grpc.CallOption) (*BootstrappedResponse, error) {
out := new(BootstrappedResponse)
err := c.cc.Invoke(ctx, "/vmproto.VM/Bootstrapped", in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
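A minimal sketch of driving the two new lifecycle RPCs from the host side. The dial address is a placeholder (the real plugin handshake negotiates it), the import path assumes the generated package lives at github.com/ava-labs/gecko/vms/rpcchainvm/vmproto, and NewVMClient is the constructor protoc-gen-go emits alongside the methods above:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	"github.com/ava-labs/gecko/vms/rpcchainvm/vmproto"
)

func main() {
	// Placeholder address: the real plugin handshake supplies this.
	conn, err := grpc.Dial("127.0.0.1:12345", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := vmproto.NewVMClient(conn)
	ctx := context.Background()

	// Signal that bootstrapping has begun...
	if _, err := client.Bootstrapping(ctx, &vmproto.BootstrappingRequest{}); err != nil {
		log.Fatal(err)
	}
	// ...and later, once the accepted frontier has been executed, that it finished.
	if _, err := client.Bootstrapped(ctx, &vmproto.BootstrappedRequest{}); err != nil {
		log.Fatal(err)
	}
}

The intent, judging from the fx change later in this commit, is that the engine calls Bootstrapping before replaying containers and Bootstrapped once the chain is fully synced.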
func (c *vMClient) Shutdown(ctx context.Context, in *ShutdownRequest, opts ...grpc.CallOption) (*ShutdownResponse, error) {
out := new(ShutdownResponse)
err := c.cc.Invoke(ctx, "/vmproto.VM/Shutdown", in, out, opts...)
@ -1127,6 +1278,8 @@ func (c *vMClient) BlockReject(ctx context.Context, in *BlockRejectRequest, opts
// VMServer is the server API for VM service.
type VMServer interface {
Initialize(context.Context, *InitializeRequest) (*InitializeResponse, error)
Bootstrapping(context.Context, *BootstrappingRequest) (*BootstrappingResponse, error)
Bootstrapped(context.Context, *BootstrappedRequest) (*BootstrappedResponse, error)
Shutdown(context.Context, *ShutdownRequest) (*ShutdownResponse, error)
CreateHandlers(context.Context, *CreateHandlersRequest) (*CreateHandlersResponse, error)
BuildBlock(context.Context, *BuildBlockRequest) (*BuildBlockResponse, error)
@ -1146,6 +1299,12 @@ type UnimplementedVMServer struct {
func (*UnimplementedVMServer) Initialize(ctx context.Context, req *InitializeRequest) (*InitializeResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Initialize not implemented")
}
func (*UnimplementedVMServer) Bootstrapping(ctx context.Context, req *BootstrappingRequest) (*BootstrappingResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Bootstrapping not implemented")
}
func (*UnimplementedVMServer) Bootstrapped(ctx context.Context, req *BootstrappedRequest) (*BootstrappedResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Bootstrapped not implemented")
}
func (*UnimplementedVMServer) Shutdown(ctx context.Context, req *ShutdownRequest) (*ShutdownResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method Shutdown not implemented")
}
@ -1199,6 +1358,42 @@ func _VM_Initialize_Handler(srv interface{}, ctx context.Context, dec func(inter
return interceptor(ctx, in, info, handler)
}
func _VM_Bootstrapping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BootstrappingRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(VMServer).Bootstrapping(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/vmproto.VM/Bootstrapping",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VMServer).Bootstrapping(ctx, req.(*BootstrappingRequest))
}
return interceptor(ctx, in, info, handler)
}
func _VM_Bootstrapped_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(BootstrappedRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(VMServer).Bootstrapped(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/vmproto.VM/Bootstrapped",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(VMServer).Bootstrapped(ctx, req.(*BootstrappedRequest))
}
return interceptor(ctx, in, info, handler)
}
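The handlers above only run once the service is registered on a grpc.Server; a sketch, using the RegisterVMServer helper that protoc-gen-go emits for this service (not shown in the hunks above) and the generated UnimplementedVMServer as a stand-in implementation. With no interceptor installed, each handler takes the direct srv.(VMServer) path shown above:

package main

import (
	"log"
	"net"

	"google.golang.org/grpc"

	"github.com/ava-labs/gecko/vms/rpcchainvm/vmproto"
)

// serve exposes an existing VMServer implementation over gRPC.
func serve(impl vmproto.VMServer) error {
	lis, err := net.Listen("tcp", "127.0.0.1:0") // ephemeral port, for illustration
	if err != nil {
		return err
	}
	s := grpc.NewServer() // no interceptor: handlers call impl directly
	vmproto.RegisterVMServer(s, impl)
	log.Println("serving on", lis.Addr())
	return s.Serve(lis)
}

func main() {
	// UnimplementedVMServer answers every RPC with codes.Unimplemented,
	// which is enough to exercise the wiring.
	if err := serve(&vmproto.UnimplementedVMServer{}); err != nil {
		log.Fatal(err)
	}
}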
func _VM_Shutdown_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ShutdownRequest)
if err := dec(in); err != nil {
@ -1387,6 +1582,14 @@ var _VM_serviceDesc = grpc.ServiceDesc{
MethodName: "Initialize",
Handler: _VM_Initialize_Handler,
},
{
MethodName: "Bootstrapping",
Handler: _VM_Bootstrapping_Handler,
},
{
MethodName: "Bootstrapped",
Handler: _VM_Bootstrapped_Handler,
},
{
MethodName: "Shutdown",
Handler: _VM_Shutdown_Handler,

View File

@ -9,6 +9,14 @@ message InitializeRequest {
message InitializeResponse {}
message BootstrappingRequest {}
message BootstrappingResponse {}
message BootstrappedRequest {}
message BootstrappedResponse {}
message ShutdownRequest {}
message ShutdownResponse {}
@ -86,6 +94,8 @@ message BlockRejectResponse {}
service VM {
rpc Initialize(InitializeRequest) returns (InitializeResponse);
rpc Bootstrapping(BootstrappingRequest) returns (BootstrappingResponse);
rpc Bootstrapped(BootstrappedRequest) returns (BootstrappedResponse);
rpc Shutdown(ShutdownRequest) returns (ShutdownResponse);
rpc CreateHandlers(CreateHandlersRequest) returns (CreateHandlersResponse);
rpc BuildBlock(BuildBlockRequest) returns (BuildBlockResponse);
@ -97,4 +107,4 @@ service VM {
rpc BlockVerify(BlockVerifyRequest) returns (BlockVerifyResponse);
rpc BlockAccept(BlockAcceptRequest) returns (BlockAcceptResponse);
rpc BlockReject(BlockRejectRequest) returns (BlockRejectResponse);
}
}
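For completeness: the vm.pb.go diff earlier in this commit is the output of regenerating this file. Assuming the gRPC plugin bundled with this era of protoc-gen-go (the generated code declares SupportPackageIsVersion6) and protoc on PATH, one way to pin the command next to the proto is a go:generate directive; the exact flags may differ in this repo's build setup:

//go:generate protoc --go_out=plugins=grpc:. vm.proto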

View File

@ -34,8 +34,9 @@ var (
// Fx describes the secp256k1 feature extension
type Fx struct {
VM VM
SECPFactory crypto.FactorySECP256K1R
VM VM
SECPFactory crypto.FactorySECP256K1R
bootstrapped bool
}
// Initialize ...
@ -69,6 +70,12 @@ func (fx *Fx) InitializeVM(vmIntf interface{}) error {
return nil
}
// Bootstrapping ...
func (fx *Fx) Bootstrapping() error { return nil }
// Bootstrapped ...
func (fx *Fx) Bootstrapped() error { fx.bootstrapped = true; return nil }
// VerifyOperation ...
func (fx *Fx) VerifyOperation(txIntf, opIntf, credIntf interface{}, utxosIntf []interface{}) error {
tx, ok := txIntf.(Tx)
@ -156,6 +163,11 @@ func (fx *Fx) VerifyCredentials(tx Tx, in *Input, cred *Credential, out *OutputO
return errInputCredentialSignersMismatch
}
// disable signature verification during bootstrapping
if !fx.bootstrapped {
return nil
}
txBytes := tx.UnsignedBytes()
txHash := hashing.ComputeHash256(txBytes)
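The guard above is the point of the new hooks: while a chain is bootstrapping, every transaction it replays was already accepted by the rest of the network, so re-checking secp256k1 signatures is redundant work; once Bootstrapped sets the flag, full verification resumes (the tests further below assert exactly this bypass-then-enforce behavior). A sketch of the intended call order, in the same package; replay is a hypothetical stand-in for re-executing already-accepted transactions and is not part of this diff:

// bootstrapFx walks an Fx through the new lifecycle.
func bootstrapFx(fx *Fx, vm interface{}, replay func() error) error {
	if err := fx.Initialize(vm); err != nil {
		return err
	}
	// From here on, VerifyCredentials returns early: signatures are skipped.
	if err := fx.Bootstrapping(); err != nil {
		return err
	}
	if err := replay(); err != nil {
		return err
	}
	// The bootstrapped flag is now set; signatures are enforced again.
	return fx.Bootstrapped()
}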

View File

@ -76,6 +76,12 @@ func TestFxVerifyTransfer(t *testing.T) {
if err := fx.Initialize(&vm); err != nil {
t.Fatal(err)
}
if err := fx.Bootstrapping(); err != nil {
t.Fatal(err)
}
if err := fx.Bootstrapped(); err != nil {
t.Fatal(err)
}
tx := &testTx{
bytes: txBytes,
}
@ -470,6 +476,9 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) {
if err := fx.Initialize(&vm); err != nil {
t.Fatal(err)
}
if err := fx.Bootstrapping(); err != nil {
t.Fatal(err)
}
tx := &testTx{
bytes: txBytes,
}
@ -495,6 +504,14 @@ func TestFxVerifyTransferInvalidSignature(t *testing.T) {
},
}
if err := fx.VerifyTransfer(tx, in, cred, out); err != nil {
t.Fatal(err)
}
if err := fx.Bootstrapped(); err != nil {
t.Fatal(err)
}
if err := fx.VerifyTransfer(tx, in, cred, out); err == nil {
t.Fatalf("Should have errored due to an invalid signature")
}
@ -508,6 +525,9 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) {
if err := fx.Initialize(&vm); err != nil {
t.Fatal(err)
}
if err := fx.Bootstrapping(); err != nil {
t.Fatal(err)
}
tx := &testTx{
bytes: txBytes,
}
@ -533,6 +553,14 @@ func TestFxVerifyTransferWrongSigner(t *testing.T) {
},
}
if err := fx.VerifyTransfer(tx, in, cred, out); err != nil {
t.Fatal(err)
}
if err := fx.Bootstrapped(); err != nil {
t.Fatal(err)
}
if err := fx.VerifyTransfer(tx, in, cred, out); err == nil {
t.Fatalf("Should have errored due to a wrong signer")
}

View File

@ -105,7 +105,13 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) {
// Asynchronously passes messages from the network to the consensus engine
handler := &router.Handler{}
handler.Initialize(&engine, msgChan, 1000)
handler.Initialize(
&engine,
msgChan,
1000,
"",
prometheus.NewRegistry(),
)
// Allow incoming messages to be routed to the new chain
chainRouter.AddChain(handler)
@ -238,7 +244,13 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) {
// Asynchronously passes messages from the network to the consensus engine
handler := &router.Handler{}
handler.Initialize(&engine, msgChan, 1000)
handler.Initialize(
&engine,
msgChan,
1000,
"",
prometheus.NewRegistry(),
)
// Allow incoming messages to be routed to the new chain
chainRouter.AddChain(handler)

View File

@ -116,6 +116,12 @@ func (vm *VM) Initialize(
return nil
}
// Bootstrapping marks this VM as bootstrapping
func (vm *VM) Bootstrapping() error { return nil }
// Bootstrapped marks this VM as bootstrapped
func (vm *VM) Bootstrapped() error { return nil }
// Shutdown implements the snowman.ChainVM interface
func (vm *VM) Shutdown() error {
if vm.timer == nil {

View File

@ -128,6 +128,12 @@ func (vm *VM) Initialize(
return vm.db.Commit()
}
// Bootstrapping marks this VM as bootstrapping
func (vm *VM) Bootstrapping() error { return nil }
// Bootstrapped marks this VM as bootstrapped
func (vm *VM) Bootstrapped() error { return nil }
// Shutdown implements the avalanche.DAGVM interface
func (vm *VM) Shutdown() error {
if vm.timer == nil {