Merge branch 'master' into ansible-service

commit 658b6f080b
holisticode 2020-07-13 14:05:55 -05:00, committed by GitHub
GPG Key ID: 4AEE18F83AFDEB23 (no known key found for this signature in database)
174 changed files with 7574 additions and 3224 deletions

.ci/run_e2e_tests.sh (new executable file, +16 lines)

@ -0,0 +1,16 @@
SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd)
SRC_PATH=$(dirname "${SCRIPTS_PATH}")
# Build the runnable Gecko docker image
bash "${SRC_PATH}"/scripts/build_image.sh
GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1)
# Turn off GO111MODULE to pull e2e test source code in order to get run script.
GO111MODULE=off go get -t -v github.com/kurtosis-tech/ava-e2e-tests/...
cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ || exit
bash "./scripts/rebuild_initializer_binary.sh"
bash "./scripts/rebuild_controller_image.sh"
# TODO: Make the controller image label a parameter to rebuild_controller_image script
# Standard controller image label used by above scripts.
CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest"
./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" --test-names="fiveStakingNodeGetValidatorsTest,fiveStakingNodeFullyConnectedTest"


@ -14,6 +14,7 @@ env:
global:
- CODECOV_TOKEN="8c18c993-fc6e-4706-998b-01ddc7987804"
- GECKO_HOME=/go/src/github.com/ava-labs/gecko/
- E2E_TEST_HOME=/go/src/github.com/kurtosis-tech/ava-e2e-tests/
- COMMIT=${TRAVIS_COMMIT::8}
- DOCKERHUB_REPO=avaplatform/gecko
- secure: "L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw="
@ -26,7 +27,7 @@ install:
script:
- if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; fi
- if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; .ci/run_e2e_tests.sh; fi
# Need to push to Docker Hub only from one build
after_success:


@ -10,6 +10,15 @@ import (
"runtime/pprof"
)
const (
// Name of file that CPU profile is written to when StartCPUProfiler called
cpuProfileFile = "cpu.profile"
// Name of file that memory profile is written to when MemoryProfile called
memProfileFile = "mem.profile"
// Name of file that lock profile is written to
lockProfileFile = "lock.profile"
)
var (
errCPUProfilerRunning = errors.New("cpu profiler already running")
errCPUProfilerNotRunning = errors.New("cpu profiler doesn't exist")
@ -20,12 +29,12 @@ var (
type Performance struct{ cpuProfileFile *os.File }
// StartCPUProfiler starts measuring the cpu utilization of this node
func (p *Performance) StartCPUProfiler(filename string) error {
func (p *Performance) StartCPUProfiler() error {
if p.cpuProfileFile != nil {
return errCPUProfilerRunning
}
file, err := os.Create(filename)
file, err := os.Create(cpuProfileFile)
if err != nil {
return err
}
@ -52,8 +61,8 @@ func (p *Performance) StopCPUProfiler() error {
}
// MemoryProfile dumps the current memory utilization of this node
func (p *Performance) MemoryProfile(filename string) error {
file, err := os.Create(filename)
func (p *Performance) MemoryProfile() error {
file, err := os.Create(memProfileFile)
if err != nil {
return err
}
@ -66,8 +75,8 @@ func (p *Performance) MemoryProfile(filename string) error {
}
// LockProfile dumps the current lock statistics of this node
func (p *Performance) LockProfile(filename string) error {
file, err := os.Create(filename)
func (p *Performance) LockProfile() error {
file, err := os.Create(lockProfileFile)
if err != nil {
return err
}


@ -57,7 +57,7 @@ type GetNodeVersionReply struct {
// GetNodeVersion returns the version this node is running
func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
service.log.Debug("Admin: GetNodeVersion called")
service.log.Info("Admin: GetNodeVersion called")
reply.Version = service.version.String()
return nil
@ -70,7 +70,7 @@ type GetNodeIDReply struct {
// GetNodeID returns the node ID of this node
func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Debug("Admin: GetNodeID called")
service.log.Info("Admin: GetNodeID called")
reply.NodeID = service.nodeID
return nil
@ -83,7 +83,7 @@ type GetNetworkIDReply struct {
// GetNetworkID returns the network ID this node is running on
func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Debug("Admin: GetNetworkID called")
service.log.Info("Admin: GetNetworkID called")
reply.NetworkID = cjson.Uint32(service.networkID)
return nil
@ -96,7 +96,7 @@ type GetNetworkNameReply struct {
// GetNetworkName returns the network name this node is running on
func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Debug("Admin: GetNetworkName called")
service.log.Info("Admin: GetNetworkName called")
reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
@ -114,7 +114,7 @@ type GetBlockchainIDReply struct {
// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Debug("Admin: GetBlockchainID called")
service.log.Info("Admin: GetBlockchainID called")
bID, err := service.chainManager.Lookup(args.Alias)
reply.BlockchainID = bID.String()
@ -128,26 +128,21 @@ type PeersReply struct {
// Peers returns the list of connected peers
func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Debug("Admin: Peers called")
service.log.Info("Admin: Peers called")
reply.Peers = service.networking.Peers()
return nil
}
// StartCPUProfilerArgs are the arguments for calling StartCPUProfiler
type StartCPUProfilerArgs struct {
Filename string `json:"filename"`
}
// StartCPUProfilerReply are the results from calling StartCPUProfiler
type StartCPUProfilerReply struct {
Success bool `json:"success"`
}
// StartCPUProfiler starts a cpu profile writing to the specified file
func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error {
service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename)
func (service *Admin) StartCPUProfiler(_ *http.Request, args *struct{}, reply *StartCPUProfilerReply) error {
service.log.Info("Admin: StartCPUProfiler called")
reply.Success = true
return service.performance.StartCPUProfiler(args.Filename)
return service.performance.StartCPUProfiler()
}
// StopCPUProfilerReply are the results from calling StopCPUProfiler
@ -157,31 +152,21 @@ type StopCPUProfilerReply struct {
// StopCPUProfiler stops the cpu profile
func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopCPUProfilerReply) error {
service.log.Debug("Admin: StopCPUProfiler called")
service.log.Info("Admin: StopCPUProfiler called")
reply.Success = true
return service.performance.StopCPUProfiler()
}
// MemoryProfileArgs are the arguments for calling MemoryProfile
type MemoryProfileArgs struct {
Filename string `json:"filename"`
}
// MemoryProfileReply are the results from calling MemoryProfile
type MemoryProfileReply struct {
Success bool `json:"success"`
}
// MemoryProfile runs a memory profile writing to the specified file
func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error {
service.log.Debug("Admin: MemoryProfile called with %s", args.Filename)
func (service *Admin) MemoryProfile(_ *http.Request, args *struct{}, reply *MemoryProfileReply) error {
service.log.Info("Admin: MemoryProfile called")
reply.Success = true
return service.performance.MemoryProfile(args.Filename)
}
// LockProfileArgs are the arguments for calling LockProfile
type LockProfileArgs struct {
Filename string `json:"filename"`
return service.performance.MemoryProfile()
}
// LockProfileReply are the results from calling LockProfile
@ -190,10 +175,10 @@ type LockProfileReply struct {
}
// LockProfile runs a mutex profile writing to the specified file
func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error {
service.log.Debug("Admin: LockProfile called with %s", args.Filename)
func (service *Admin) LockProfile(_ *http.Request, args *struct{}, reply *LockProfileReply) error {
service.log.Info("Admin: LockProfile called")
reply.Success = true
return service.performance.LockProfile(args.Filename)
return service.performance.LockProfile()
}
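With the filename arguments gone, clients send empty params and the node writes to the fixed file names above (cpu.profile, mem.profile, lock.profile). A minimal sketch of such a call, assuming a local node with the admin API mounted at /ext/admin on the default port 9650 and the documented method names (not part of this diff):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// startCPUProfiler now takes no arguments; output goes to cpu.profile.
	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"admin.startCPUProfiler","params":{}}`)
	resp, err := http.Post("http://127.0.0.1:9650/ext/admin", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // e.g. {"jsonrpc":"2.0","result":{"success":true},"id":1}
}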
// AliasArgs are the arguments for calling Alias
@ -209,7 +194,7 @@ type AliasReply struct {
// Alias attempts to alias an HTTP endpoint to a new name
func (service *Admin) Alias(_ *http.Request, args *AliasArgs, reply *AliasReply) error {
service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
service.log.Info("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias)
reply.Success = true
return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias)
}
@ -227,7 +212,7 @@ type AliasChainReply struct {
// AliasChain attempts to alias a chain to a new name
func (service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, reply *AliasChainReply) error {
service.log.Debug("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias)
service.log.Info("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias)
chainID, err := service.chainManager.Lookup(args.Chain)
if err != nil {


@ -20,36 +20,66 @@ type CheckFn func() (interface{}, error)
// Check defines a single health check that we want to monitor and consider as
// part of our wider healthiness
type Check struct {
type Check interface {
// Name is the identifier for this check and must be unique among all Checks
Name string
Name() string
// CheckFn is the function to call to perform the health check
CheckFn CheckFn
// Execute performs the health check. It returns nil if the check passes.
// It can also return additional information to marshal and display to the caller
Execute() (interface{}, error)
// ExecutionPeriod is the duration to wait between executions of this Check
ExecutionPeriod time.Duration
ExecutionPeriod() time.Duration
// InitialDelay is the duration to wait before executing the first time
InitialDelay time.Duration
InitialDelay() time.Duration
// InitiallyPassing is whether or not to consider the Check healthy before the
// initial execution
InitiallyPassing bool
InitiallyPassing() bool
}
// gosundheitCheck implements the health.Check interface backed by a CheckFn
type gosundheitCheck struct {
name string
checkFn CheckFn
// check implements the Check interface
type check struct {
name string
checkFn CheckFn
executionPeriod, initialDelay time.Duration
initiallyPassing bool
}
// Name implements the health.Check interface by returning a unique name
func (c gosundheitCheck) Name() string { return c.name }
// Name is the identifier for this check and must be unique among all Checks
func (c check) Name() string { return c.name }
// Execute implements the health.Check interface by executing the checkFn and
// returning the results
func (c gosundheitCheck) Execute() (interface{}, error) { return c.checkFn() }
// Execute performs the health check. It returns nil if the check passes.
// It can also return additional information to marshal and display to the caller
func (c check) Execute() (interface{}, error) { return c.checkFn() }
// ExecutionPeriod is the duration to wait between executions of this Check
func (c check) ExecutionPeriod() time.Duration { return c.executionPeriod }
// InitialDelay is the duration to wait before executing the first time
func (c check) InitialDelay() time.Duration { return c.initialDelay }
// InitiallyPassing is whether or not to consider the Check healthy before the initial execution
func (c check) InitiallyPassing() bool { return c.initiallyPassing }
// monotonicCheck is a check that will run until it passes once, and after that it will
// always pass without performing any logic. Used for bootstrapping, for example.
type monotonicCheck struct {
passed bool
check
}
func (mc monotonicCheck) Execute() (interface{}, error) {
if mc.passed {
return nil, nil
}
details, pass := mc.check.Execute()
if pass == nil {
mc.passed = true
}
return details, pass
}
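The wrapper in isolation — a standalone, illustrative mock of the monotonic idea (these are not the diff's types; note the mock uses a pointer receiver so the passed flag actually persists between calls):

package main

import (
	"errors"
	"fmt"
)

// monotonic short-circuits to healthy forever after its first pass.
type monotonic struct {
	passed bool
	fn     func() (interface{}, error)
}

func (m *monotonic) Execute() (interface{}, error) {
	if m.passed {
		return nil, nil
	}
	details, err := m.fn()
	if err == nil {
		m.passed = true
	}
	return details, err
}

func main() {
	calls := 0
	m := &monotonic{fn: func() (interface{}, error) {
		calls++
		if calls < 3 {
			return nil, errors.New("still bootstrapping")
		}
		return nil, nil
	}}
	for i := 0; i < 5; i++ {
		_, err := m.Execute()
		fmt.Println(i, err)
	}
	fmt.Println("inner fn ran", calls, "times") // 3: never re-run after passing
}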
// Heartbeater provides a getter to the most recently observed heartbeat
type Heartbeater interface {


@ -7,15 +7,17 @@ import (
"net/http"
"time"
"github.com/AppsFlyer/go-sundheit"
health "github.com/AppsFlyer/go-sundheit"
"github.com/gorilla/rpc/v2"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/json"
"github.com/ava-labs/gecko/utils/logging"
"github.com/gorilla/rpc/v2"
)
// defaultCheckOpts is a Check whose properties represent a default Check
var defaultCheckOpts = Check{ExecutionPeriod: time.Minute}
var defaultCheckOpts = check{executionPeriod: time.Minute}
// Health observes a set of vital signs and makes them available through an HTTP
// API.
@ -36,7 +38,18 @@ func (h *Health) Handler() *common.HTTPHandler {
newServer.RegisterCodec(codec, "application/json")
newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
newServer.RegisterService(h, "health")
return &common.HTTPHandler{LockOptions: common.NoLock, Handler: newServer}
handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.Method == http.MethodGet { // GET request --> return 200 if getLiveness returns true, else 503
if _, healthy := h.health.Results(); healthy {
w.WriteHeader(http.StatusOK)
} else {
w.WriteHeader(http.StatusServiceUnavailable)
}
} else {
newServer.ServeHTTP(w, r) // Other request --> use JSON RPC
}
})
return &common.HTTPHandler{LockOptions: common.NoLock, Handler: handler}
}
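With this branch in place, a liveness probe can be a bare GET — a sketch assuming the health service is mounted at /ext/health on the default port 9650 (not part of this diff):

package main

import (
	"fmt"
	"net/http"
)

func main() {
	resp, err := http.Get("http://127.0.0.1:9650/ext/health")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	// 200 while all registered checks pass, 503 otherwise;
	// POST requests still go through the JSON-RPC handler.
	fmt.Println(resp.StatusCode)
}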
// RegisterHeartbeat adds a check with default options and a CheckFn that checks
@ -48,18 +61,27 @@ func (h *Health) RegisterHeartbeat(name string, hb Heartbeater, max time.Duratio
// RegisterCheckFunc adds a Check with default options and the given CheckFn
func (h *Health) RegisterCheckFunc(name string, checkFn CheckFn) error {
check := defaultCheckOpts
check.Name = name
check.CheckFn = checkFn
check.name = name
check.checkFn = checkFn
return h.RegisterCheck(check)
}
// RegisterMonotonicCheckFunc adds a Check with default options and the given CheckFn
// After it passes once, its logic (checkFunc) is never run again; it just passes
func (h *Health) RegisterMonotonicCheckFunc(name string, checkFn CheckFn) error {
check := monotonicCheck{check: defaultCheckOpts}
check.name = name
check.checkFn = checkFn
return h.RegisterCheck(check)
}
// RegisterCheck adds the given Check
func (h *Health) RegisterCheck(c Check) error {
return h.health.RegisterCheck(&health.Config{
InitialDelay: c.InitialDelay,
ExecutionPeriod: c.ExecutionPeriod,
InitiallyPassing: c.InitiallyPassing,
Check: gosundheitCheck{c.Name, c.CheckFn},
InitialDelay: c.InitialDelay(),
ExecutionPeriod: c.ExecutionPeriod(),
InitiallyPassing: c.InitiallyPassing(),
Check: c,
})
}
@ -74,7 +96,7 @@ type GetLivenessReply struct {
// GetLiveness returns a summation of the health of the node
func (h *Health) GetLiveness(_ *http.Request, _ *GetLivenessArgs, reply *GetLivenessReply) error {
h.log.Debug("Health: GetLiveness called")
h.log.Info("Health: GetLiveness called")
reply.Checks, reply.Healthy = h.health.Results()
return nil
}

api/info/service.go (new file, +160 lines)

@ -0,0 +1,160 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package info
import (
"fmt"
"net/http"
"github.com/gorilla/rpc/v2"
"github.com/ava-labs/gecko/chains"
"github.com/ava-labs/gecko/genesis"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/network"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/version"
cjson "github.com/ava-labs/gecko/utils/json"
)
// Info is the API service for unprivileged info on a node
type Info struct {
version version.Version
nodeID ids.ShortID
networkID uint32
log logging.Logger
networking network.Network
chainManager chains.Manager
}
// NewService returns a new info API service
func NewService(log logging.Logger, version version.Version, nodeID ids.ShortID, networkID uint32, chainManager chains.Manager, peers network.Network) *common.HTTPHandler {
newServer := rpc.NewServer()
codec := cjson.NewCodec()
newServer.RegisterCodec(codec, "application/json")
newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
newServer.RegisterService(&Info{
version: version,
nodeID: nodeID,
networkID: networkID,
log: log,
chainManager: chainManager,
networking: peers,
}, "info")
return &common.HTTPHandler{Handler: newServer}
}
// GetNodeVersionReply are the results from calling GetNodeVersion
type GetNodeVersionReply struct {
Version string `json:"version"`
}
// GetNodeVersion returns the version this node is running
func (service *Info) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error {
service.log.Info("Info: GetNodeVersion called")
reply.Version = service.version.String()
return nil
}
// GetNodeIDReply are the results from calling GetNodeID
type GetNodeIDReply struct {
NodeID ids.ShortID `json:"nodeID"`
}
// GetNodeID returns the node ID of this node
func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error {
service.log.Info("Info: GetNodeID called")
reply.NodeID = service.nodeID
return nil
}
// GetNetworkIDReply are the results from calling GetNetworkID
type GetNetworkIDReply struct {
NetworkID cjson.Uint32 `json:"networkID"`
}
// GetNetworkID returns the network ID this node is running on
func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error {
service.log.Info("Info: GetNetworkID called")
reply.NetworkID = cjson.Uint32(service.networkID)
return nil
}
// GetNetworkNameReply is the result from calling GetNetworkName
type GetNetworkNameReply struct {
NetworkName string `json:"networkName"`
}
// GetNetworkName returns the network name this node is running on
func (service *Info) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error {
service.log.Info("Info: GetNetworkName called")
reply.NetworkName = genesis.NetworkName(service.networkID)
return nil
}
// GetBlockchainIDArgs are the arguments for calling GetBlockchainID
type GetBlockchainIDArgs struct {
Alias string `json:"alias"`
}
// GetBlockchainIDReply are the results from calling GetBlockchainID
type GetBlockchainIDReply struct {
BlockchainID string `json:"blockchainID"`
}
// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied
func (service *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error {
service.log.Info("Info: GetBlockchainID called")
bID, err := service.chainManager.Lookup(args.Alias)
reply.BlockchainID = bID.String()
return err
}
// PeersReply are the results from calling Peers
type PeersReply struct {
Peers []network.PeerID `json:"peers"`
}
// Peers returns the list of connected peers
func (service *Info) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error {
service.log.Info("Info: Peers called")
reply.Peers = service.networking.Peers()
return nil
}
// IsBootstrappedArgs are the arguments for calling IsBootstrapped
type IsBootstrappedArgs struct {
// Alias of the chain
// Can also be the string representation of the chain's ID
Chain string `json:"chain"`
}
// IsBootstrappedResponse are the results from calling IsBootstrapped
type IsBootstrappedResponse struct {
// True iff the chain exists and is done bootstrapping
IsBootstrapped bool `json:"isBootstrapped"`
}
// IsBootstrapped returns nil and sets [reply.IsBootstrapped] == true iff [args.Chain] exists and is done bootstrapping
// Returns an error if the chain doesn't exist
func (service *Info) IsBootstrapped(_ *http.Request, args *IsBootstrappedArgs, reply *IsBootstrappedResponse) error {
service.log.Info("Info: IsBootstrapped called")
if args.Chain == "" {
return fmt.Errorf("argument 'chain' not given")
}
chainID, err := service.chainManager.Lookup(args.Chain)
if err != nil {
return fmt.Errorf("there is no chain with alias/ID '%s'", args.Chain)
}
reply.IsBootstrapped = service.chainManager.IsBootstrapped(chainID)
return nil
}
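Exercising the new endpoint over JSON-RPC — a sketch assuming the info service is mounted at /ext/info on the default port 9650 and "X" is a registered chain alias (assumptions, not part of this diff):

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	body := []byte(`{"jsonrpc":"2.0","id":1,"method":"info.isBootstrapped","params":{"chain":"X"}}`)
	resp, err := http.Post("http://127.0.0.1:9650/ext/info", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	out, _ := io.ReadAll(resp.Body)
	fmt.Println(string(out)) // e.g. {"jsonrpc":"2.0","result":{"isBootstrapped":true},"id":1}
}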


@ -61,6 +61,7 @@ type PublishBlockchainReply struct {
// PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC
func (ipc *IPCs) PublishBlockchain(r *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error {
ipc.log.Info("IPCs: PublishBlockchain called with BlockchainID: %s", args.BlockchainID)
chainID, err := ipc.chainManager.Lookup(args.BlockchainID)
if err != nil {
ipc.log.Error("unknown blockchainID: %s", err)
@ -116,6 +117,7 @@ type UnpublishBlockchainReply struct {
// UnpublishBlockchain closes publishing of a blockchainID
func (ipc *IPCs) UnpublishBlockchain(r *http.Request, args *UnpublishBlockchainArgs, reply *UnpublishBlockchainReply) error {
ipc.log.Info("IPCs: UnpublishBlockchain called with BlockchainID: %s", args.BlockchainID)
chainID, err := ipc.chainManager.Lookup(args.BlockchainID)
if err != nil {
ipc.log.Error("unknown blockchainID %s: %s", args.BlockchainID, err)


@ -8,29 +8,41 @@ import (
"fmt"
"net/http"
"sync"
"testing"
"github.com/gorilla/rpc/v2"
zxcvbn "github.com/nbutton23/zxcvbn-go"
"github.com/ava-labs/gecko/chains/atomic"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/encdb"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/vms/components/codec"
jsoncodec "github.com/ava-labs/gecko/utils/json"
zxcvbn "github.com/nbutton23/zxcvbn-go"
)
const (
// maxUserPassLen is the maximum length of the username or password allowed
maxUserPassLen = 1024
// requiredPassScore defines the score a password must achieve to be accepted
// as a password with strong characteristics by the zxcvbn package
// maxCheckedPassLen limits the length of the password that should be
// strength checked.
//
// As per issue https://github.com/ava-labs/gecko/issues/195 it was found
// the longer the length of password the slower zxcvbn.PasswordStrength()
// performs. To avoid performance issues, and a DoS vector, we only check
// the first 50 characters of the password.
maxCheckedPassLen = 50
// requiredPassScore defines the score a password must achieve to be
// accepted as a password with strong characteristics by the zxcvbn package
//
// The scoring mechanism defined is as follows;
//
@ -135,37 +147,11 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username)
if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen {
return errUserPassMaxLength
}
if args.Username == "" {
return errEmptyUsername
}
if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
return fmt.Errorf("user already exists: %s", args.Username)
}
if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore {
return errWeakPassword
}
usr := &User{}
if err := usr.Initialize(args.Password); err != nil {
ks.log.Info("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username)
if err := ks.AddUser(args.Username, args.Password); err != nil {
return err
}
usrBytes, err := ks.codec.Marshal(usr)
if err != nil {
return err
}
if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil {
return err
}
ks.users[args.Username] = usr
reply.Success = true
return nil
}
@ -183,7 +169,7 @@ func (ks *Keystore) ListUsers(_ *http.Request, args *ListUsersArgs, reply *ListU
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("ListUsers called")
ks.log.Info("Keystore: ListUsers called")
reply.Users = []string{}
@ -211,7 +197,7 @@ func (ks *Keystore) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Exp
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("ExportUser called for %s", args.Username)
ks.log.Info("Keystore: ExportUser called for %s", args.Username)
usr, err := ks.getUser(args.Username)
if err != nil {
@ -264,7 +250,7 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("ImportUser called for %s", args.Username)
ks.log.Info("Keystore: ImportUser called for %s", args.Username)
if args.Username == "" {
return errEmptyUsername
@ -324,7 +310,7 @@ func (ks *Keystore) DeleteUser(_ *http.Request, args *DeleteUserArgs, reply *Del
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("DeleteUser called with %s", args.Username)
ks.log.Info("Keystore: DeleteUser called with %s", args.Username)
if args.Username == "" {
return errEmptyUsername
@ -403,3 +389,51 @@ func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database
return encDB, nil
}
// AddUser attempts to register this username and password as a new user of the
// keystore.
func (ks *Keystore) AddUser(username, password string) error {
if len(username) > maxUserPassLen || len(password) > maxUserPassLen {
return errUserPassMaxLength
}
if username == "" {
return errEmptyUsername
}
if usr, err := ks.getUser(username); err == nil || usr != nil {
return fmt.Errorf("user already exists: %s", username)
}
checkPass := password
if len(password) > maxCheckedPassLen {
checkPass = password[:maxCheckedPassLen]
}
if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore {
return errWeakPassword
}
usr := &User{}
if err := usr.Initialize(password); err != nil {
return err
}
usrBytes, err := ks.codec.Marshal(usr)
if err != nil {
return err
}
if err := ks.userDB.Put([]byte(username), usrBytes); err != nil {
return err
}
ks.users[username] = usr
return nil
}
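The truncation in isolation — a sketch using the same zxcvbn-go package (the repeated phrase is just an arbitrary long password for illustration):

package main

import (
	"fmt"
	"strings"

	zxcvbn "github.com/nbutton23/zxcvbn-go"
)

const maxCheckedPassLen = 50

func main() {
	password := strings.Repeat("correct horse battery staple ", 40)
	checkPass := password
	if len(checkPass) > maxCheckedPassLen {
		checkPass = checkPass[:maxCheckedPassLen] // bound zxcvbn's cost on long inputs
	}
	fmt.Println(zxcvbn.PasswordStrength(checkPass, nil).Score)
}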
// CreateTestKeystore returns a new keystore that can be utilized for testing
func CreateTestKeystore(t *testing.T) *Keystore {
ks := &Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
return ks
}


@ -10,9 +10,7 @@ import (
"reflect"
"testing"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/logging"
)
var (
@ -22,8 +20,7 @@ var (
)
func TestServiceListNoUsers(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
reply := ListUsersReply{}
if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil {
@ -35,8 +32,7 @@ func TestServiceListNoUsers(t *testing.T) {
}
func TestServiceCreateUser(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -75,8 +71,7 @@ func genStr(n int) string {
// TestServiceCreateUserArgsChecks generates excessively long usernames or
// passwords to assure the sanity checks on string length are not exceeded
func TestServiceCreateUserArgsCheck(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -117,8 +112,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) {
// TestServiceCreateUserWeakPassword tests creating a new user with a weak
// password to ensure the password strength check is working
func TestServiceCreateUserWeakPassword(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -138,8 +132,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) {
}
func TestServiceCreateDuplicate(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -166,8 +159,7 @@ func TestServiceCreateDuplicate(t *testing.T) {
}
func TestServiceCreateUserNoName(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
reply := CreateUserReply{}
if err := ks.CreateUser(nil, &CreateUserArgs{
@ -178,8 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) {
}
func TestServiceUseBlockchainDB(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -218,8 +209,7 @@ func TestServiceUseBlockchainDB(t *testing.T) {
}
func TestServiceExportImport(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -252,8 +242,7 @@ func TestServiceExportImport(t *testing.T) {
t.Fatal(err)
}
newKS := Keystore{}
newKS.Initialize(logging.NoLog{}, memdb.New())
newKS := CreateTestKeystore(t)
{
reply := ImportUserReply{}
@ -358,11 +347,10 @@ func TestServiceDeleteUser(t *testing.T) {
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
if tt.setup != nil {
if err := tt.setup(&ks); err != nil {
if err := tt.setup(ks); err != nil {
t.Fatalf("failed to create user setup in keystore: %v", err)
}
}


@ -4,9 +4,10 @@
package metrics
import (
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/ava-labs/gecko/snow/engine/common"
)
// NewService returns a new prometheus service

cache/lru_cache.go (vendored, 8 changed lines)

@ -10,6 +10,10 @@ import (
"github.com/ava-labs/gecko/ids"
)
const (
minCacheSize = 32
)
type entry struct {
Key ids.ID
Value interface{}
@ -59,7 +63,7 @@ func (c *LRU) Flush() {
func (c *LRU) init() {
if c.entryMap == nil {
c.entryMap = make(map[[32]byte]*list.Element)
c.entryMap = make(map[[32]byte]*list.Element, minCacheSize)
}
if c.entryList == nil {
c.entryList = list.New()
@ -134,6 +138,6 @@ func (c *LRU) evict(key ids.ID) {
func (c *LRU) flush() {
c.init()
c.entryMap = make(map[[32]byte]*list.Element)
c.entryMap = make(map[[32]byte]*list.Element, minCacheSize)
c.entryList = list.New()
}

cache/lru_cache_benchmark_test.go (vendored, new file, +53 lines)

@ -0,0 +1,53 @@
package cache
import (
"crypto/rand"
"testing"
"github.com/ava-labs/gecko/ids"
)
func BenchmarkLRUCachePutSmall(b *testing.B) {
smallLen := 5
cache := &LRU{Size: smallLen}
for n := 0; n < b.N; n++ {
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}
func BenchmarkLRUCachePutMedium(b *testing.B) {
mediumLen := 250
cache := &LRU{Size: mediumLen}
for n := 0; n < b.N; n++ {
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}
func BenchmarkLRUCachePutLarge(b *testing.B) {
largeLen := 10000
cache := &LRU{Size: largeLen}
for n := 0; n < b.N; n++ {
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
cache.Put(ids.NewID(idBytes), n)
}
b.StopTimer()
cache.Flush()
b.StartTimer()
}
}


@ -10,9 +10,9 @@ import (
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/vms/components/codec"
)
type rcLock struct {


@ -76,6 +76,9 @@ type Manager interface {
// Add an alias to a chain
Alias(ids.ID, string) error
// Returns true iff the chain with the given ID exists and is finished bootstrapping
IsBootstrapped(ids.ID) bool
Shutdown()
}
@ -114,6 +117,10 @@ type manager struct {
keystore *keystore.Keystore
sharedMemory *atomic.SharedMemory
// Key: Chain's ID
// Value: The chain
chains map[[32]byte]*router.Handler
unblocked bool
blockedChains []ChainParameters
}
@ -131,7 +138,7 @@ func New(
decisionEvents *triggers.EventDispatcher,
consensusEvents *triggers.EventDispatcher,
db database.Database,
router router.Router,
rtr router.Router,
net network.Network,
consensusParams avacon.Parameters,
validators validators.Manager,
@ -145,7 +152,7 @@ func New(
timeoutManager.Initialize(requestTimeout)
go log.RecoverAndPanic(timeoutManager.Dispatch)
router.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout)
rtr.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout)
m := &manager{
stakingEnabled: stakingEnabled,
@ -155,7 +162,7 @@ func New(
decisionEvents: decisionEvents,
consensusEvents: consensusEvents,
db: db,
chainRouter: router,
chainRouter: rtr,
net: net,
timeoutManager: &timeoutManager,
consensusParams: consensusParams,
@ -165,6 +172,7 @@ func New(
server: server,
keystore: keystore,
sharedMemory: sharedMemory,
chains: make(map[[32]byte]*router.Handler),
}
m.Initialize()
return m
@ -454,7 +462,7 @@ func (m *manager) createAvalancheChain(
eng: &engine,
})
}
m.chains[ctx.ChainID.Key()] = handler
return nil
}
@ -546,9 +554,20 @@ func (m *manager) createSnowmanChain(
eng: &engine,
})
}
m.chains[ctx.ChainID.Key()] = handler
return nil
}
func (m *manager) IsBootstrapped(id ids.ID) bool {
chain, exists := m.chains[id.Key()]
if !exists {
return false
}
chain.Context().Lock.Lock()
defer chain.Context().Lock.Unlock()
return chain.Engine().IsBootstrapped()
}
// Shutdown stops all the chains
func (m *manager) Shutdown() { m.chainRouter.Shutdown() }


@ -35,3 +35,6 @@ func (mm MockManager) Alias(ids.ID, string) error { return nil }
// Shutdown ...
func (mm MockManager) Shutdown() {}
// IsBootstrapped ...
func (mm MockManager) IsBootstrapped(ids.ID) bool { return false }

database/common.go (new file, +14 lines)

@ -0,0 +1,14 @@
package database
const (
// MaxExcessCapacityFactor ...
// If, when a batch is reset, the cap(batch)/len(batch) > MaxExcessCapacityFactor,
// the underlying array's capacity will be reduced by a factor of capacityReductionFactor.
// Higher value for MaxExcessCapacityFactor --> less aggressive array downsizing --> less memory allocations
// but more unnecessary data in the underlying array that can't be garbage collected.
// Higher value for CapacityReductionFactor --> more aggressive array downsizing --> more memory allocations
// but less unnecessary data in the underlying array that can't be garbage collected.
MaxExcessCapacityFactor = 4
// CapacityReductionFactor ...
CapacityReductionFactor = 2
)
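A toy run of the trade-off these constants encode — a self-contained sketch mirroring the Reset logic the batch implementations below adopt:

package main

import "fmt"

type keyValue struct{ key, value []byte }

// reset shrinks the backing array when it is much larger than needed,
// paying one allocation so the oversized array can be garbage collected.
func reset(writes []keyValue) []keyValue {
	const maxExcessCapacityFactor = 4 // mirrors database.MaxExcessCapacityFactor
	const capacityReductionFactor = 2 // mirrors database.CapacityReductionFactor
	if cap(writes) > len(writes)*maxExcessCapacityFactor {
		return make([]keyValue, 0, cap(writes)/capacityReductionFactor)
	}
	return writes[:0]
}

func main() {
	b := make([]keyValue, 10, 100) // cap 100 > len 10 * 4, so shrink
	b = reset(b)
	fmt.Println(cap(b)) // 50: capacity halved instead of retained
}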


@ -13,8 +13,8 @@ import (
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/nodb"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/vms/components/codec"
)
// Database encrypts all values that are provided
@ -201,7 +201,11 @@ func (b *batch) Write() error {
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.Batch.Reset()
}


@ -6,14 +6,15 @@ package leveldb
import (
"bytes"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/utils"
"github.com/syndtr/goleveldb/leveldb"
"github.com/syndtr/goleveldb/leveldb/errors"
"github.com/syndtr/goleveldb/leveldb/filter"
"github.com/syndtr/goleveldb/leveldb/iterator"
"github.com/syndtr/goleveldb/leveldb/opt"
"github.com/syndtr/goleveldb/leveldb/util"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/utils"
)
const (


@ -13,8 +13,10 @@ import (
"github.com/ava-labs/gecko/utils"
)
// DefaultSize is the default initial size of the memory database
const DefaultSize = 1 << 10
const (
// DefaultSize is the default initial size of the memory database
DefaultSize = 1 << 10
)
// Database is an ephemeral key-value store that implements the Database
// interface.
@ -191,7 +193,11 @@ func (b *batch) Write() error {
// Reset implements the Batch interface
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.size = 0
}


@ -17,7 +17,7 @@ func (*Database) Has([]byte) (bool, error) { return false, database.ErrClosed }
func (*Database) Get([]byte) ([]byte, error) { return nil, database.ErrClosed }
// Put returns nil
func (*Database) Put(_ []byte, _ []byte) error { return database.ErrClosed }
func (*Database) Put(_, _ []byte) error { return database.ErrClosed }
// Delete returns nil
func (*Database) Delete([]byte) error { return database.ErrClosed }


@ -199,7 +199,11 @@ func (b *batch) Write() error {
// Reset resets the batch for reuse.
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.Batch.Reset()
}


@ -27,7 +27,7 @@ func NewClient(client rpcdbproto.DatabaseClient) *DatabaseClient {
return &DatabaseClient{client: client}
}
// Has returns false, nil
// Has attempts to return whether the database contains the provided key.
func (db *DatabaseClient) Has(key []byte) (bool, error) {
resp, err := db.client.Has(context.Background(), &rpcdbproto.HasRequest{
Key: key,
@ -38,7 +38,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) {
return resp.Has, nil
}
// Get returns nil, error
// Get attempts to return the value that was mapped to the key that was provided
func (db *DatabaseClient) Get(key []byte) ([]byte, error) {
resp, err := db.client.Get(context.Background(), &rpcdbproto.GetRequest{
Key: key,
@ -49,7 +49,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) {
return resp.Value, nil
}
// Put returns nil
// Put attempts to set the value this key maps to
func (db *DatabaseClient) Put(key, value []byte) error {
_, err := db.client.Put(context.Background(), &rpcdbproto.PutRequest{
Key: key,
@ -58,7 +58,7 @@ func (db *DatabaseClient) Put(key, value []byte) error {
return updateError(err)
}
// Delete returns nil
// Delete attempts to remove any mapping from the key
func (db *DatabaseClient) Delete(key []byte) error {
_, err := db.client.Delete(context.Background(), &rpcdbproto.DeleteRequest{
Key: key,
@ -99,7 +99,7 @@ func (db *DatabaseClient) NewIteratorWithStartAndPrefix(start, prefix []byte) da
}
}
// Stat returns an error
// Stat attempts to return the statistic of this database
func (db *DatabaseClient) Stat(property string) (string, error) {
resp, err := db.client.Stat(context.Background(), &rpcdbproto.StatRequest{
Property: property,
@ -110,7 +110,7 @@ func (db *DatabaseClient) Stat(property string) (string, error) {
return resp.Stat, nil
}
// Compact returns nil
// Compact attempts to optimize the space utilization in the provided range
func (db *DatabaseClient) Compact(start, limit []byte) error {
_, err := db.client.Compact(context.Background(), &rpcdbproto.CompactRequest{
Start: start,
@ -119,7 +119,7 @@ func (db *DatabaseClient) Compact(start, limit []byte) error {
return updateError(err)
}
// Close returns nil
// Close attempts to close the database
func (db *DatabaseClient) Close() error {
_, err := db.client.Close(context.Background(), &rpcdbproto.CloseRequest{})
return updateError(err)
@ -180,7 +180,11 @@ func (b *batch) Write() error {
}
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.size = 0
}
@ -207,7 +211,8 @@ type iterator struct {
err error
}
// Next returns false
// Next attempts to move the iterator to the next element and returns whether
// this succeeded
func (it *iterator) Next() bool {
resp, err := it.db.client.IteratorNext(context.Background(), &rpcdbproto.IteratorNextRequest{
Id: it.id,
@ -221,7 +226,7 @@ func (it *iterator) Next() bool {
return resp.FoundNext
}
// Error returns any errors
// Error returns any error that occurred while iterating
func (it *iterator) Error() error {
if it.err != nil {
return it.err
@ -234,19 +239,21 @@ func (it *iterator) Error() error {
return it.err
}
// Key returns nil
// Key returns the key of the current element
func (it *iterator) Key() []byte { return it.key }
// Value returns nil
// Value returns the value of the current element
func (it *iterator) Value() []byte { return it.value }
// Release does nothing
// Release frees any resources held by the iterator
func (it *iterator) Release() {
it.db.client.IteratorRelease(context.Background(), &rpcdbproto.IteratorReleaseRequest{
Id: it.id,
})
}
// updateError sets the error value to the errors required by the Database
// interface
func updateError(err error) error {
if err == nil {
return nil


@ -34,16 +34,16 @@ func NewServer(db database.Database) *DatabaseServer {
}
}
// Has ...
// Has delegates the Has call to the managed database and returns the result
func (db *DatabaseServer) Has(_ context.Context, req *rpcdbproto.HasRequest) (*rpcdbproto.HasResponse, error) {
has, err := db.db.Has(req.Key)
if err != nil {
return nil, err
}
return &rpcdbproto.HasResponse{Has: has}, nil
return &rpcdbproto.HasResponse{Has: has}, err
}
// Get ...
// Get delegates the Get call to the managed database and returns the result
func (db *DatabaseServer) Get(_ context.Context, req *rpcdbproto.GetRequest) (*rpcdbproto.GetResponse, error) {
value, err := db.db.Get(req.Key)
if err != nil {
@ -52,17 +52,18 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbproto.GetRequest) (*r
return &rpcdbproto.GetResponse{Value: value}, nil
}
// Put ...
// Put delegates the Put call to the managed database and returns the result
func (db *DatabaseServer) Put(_ context.Context, req *rpcdbproto.PutRequest) (*rpcdbproto.PutResponse, error) {
return &rpcdbproto.PutResponse{}, db.db.Put(req.Key, req.Value)
}
// Delete ...
// Delete delegates the Delete call to the managed database and returns the
// result
func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbproto.DeleteRequest) (*rpcdbproto.DeleteResponse, error) {
return &rpcdbproto.DeleteResponse{}, db.db.Delete(req.Key)
}
// Stat ...
// Stat delegates the Stat call to the managed database and returns the result
func (db *DatabaseServer) Stat(_ context.Context, req *rpcdbproto.StatRequest) (*rpcdbproto.StatResponse, error) {
stat, err := db.db.Stat(req.Property)
if err != nil {
@ -71,17 +72,19 @@ func (db *DatabaseServer) Stat(_ context.Context, req *rpcdbproto.StatRequest) (
return &rpcdbproto.StatResponse{Stat: stat}, nil
}
// Compact ...
// Compact delegates the Compact call to the managed database and returns the
// result
func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbproto.CompactRequest) (*rpcdbproto.CompactResponse, error) {
return &rpcdbproto.CompactResponse{}, db.db.Compact(req.Start, req.Limit)
}
// Close ...
func (db *DatabaseServer) Close(_ context.Context, _ *rpcdbproto.CloseRequest) (*rpcdbproto.CloseResponse, error) {
// Close delegates the Close call to the managed database and returns the result
func (db *DatabaseServer) Close(context.Context, *rpcdbproto.CloseRequest) (*rpcdbproto.CloseResponse, error) {
return &rpcdbproto.CloseResponse{}, db.db.Close()
}
// WriteBatch ...
// WriteBatch takes in a set of key-value pairs and atomically writes them to
// the internal database
func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbproto.WriteBatchRequest) (*rpcdbproto.WriteBatchResponse, error) {
db.batch.Reset()
@ -100,7 +103,8 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbproto.WriteBat
return &rpcdbproto.WriteBatchResponse{}, db.batch.Write()
}
// NewIteratorWithStartAndPrefix ...
// NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator
// ID
func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req *rpcdbproto.NewIteratorWithStartAndPrefixRequest) (*rpcdbproto.NewIteratorWithStartAndPrefixResponse, error) {
id := db.nextIteratorID
it := db.db.NewIteratorWithStartAndPrefix(req.Start, req.Prefix)
@ -110,7 +114,7 @@ func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req *
return &rpcdbproto.NewIteratorWithStartAndPrefixResponse{Id: id}, nil
}
// IteratorNext ...
// IteratorNext attempts to call next on the requested iterator
func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbproto.IteratorNextRequest) (*rpcdbproto.IteratorNextResponse, error) {
it, exists := db.iterators[req.Id]
if !exists {
@ -123,7 +127,7 @@ func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbproto.Iterat
}, nil
}
// IteratorError ...
// IteratorError attempts to report any errors that occurred during iteration
func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbproto.IteratorErrorRequest) (*rpcdbproto.IteratorErrorResponse, error) {
it, exists := db.iterators[req.Id]
if !exists {
@ -132,7 +136,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbproto.Itera
return &rpcdbproto.IteratorErrorResponse{}, it.Error()
}
// IteratorRelease ...
// IteratorRelease attempts to release the resources allocated to an iterator
func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbproto.IteratorReleaseRequest) (*rpcdbproto.IteratorReleaseResponse, error) {
it, exists := db.iterators[req.Id]
if exists {


@ -18,9 +18,10 @@ import (
// database, writing changes to the underlying database only when commit is
// called.
type Database struct {
lock sync.RWMutex
mem map[string]valueDelete
db database.Database
lock sync.RWMutex
mem map[string]valueDelete
db database.Database
batch database.Batch
}
type valueDelete struct {
@ -31,8 +32,9 @@ type valueDelete struct {
// New returns a new versioned database
func New(db database.Database) *Database {
return &Database{
mem: make(map[string]valueDelete, memdb.DefaultSize),
db: db,
mem: make(map[string]valueDelete, memdb.DefaultSize),
db: db,
batch: db.NewBatch(),
}
}
@ -169,6 +171,7 @@ func (db *Database) SetDatabase(newDB database.Database) error {
}
db.db = newDB
db.batch = newDB.NewBatch()
return nil
}
@ -192,6 +195,7 @@ func (db *Database) Commit() error {
if err := batch.Write(); err != nil {
return err
}
batch.Reset()
db.abort()
return nil
}
@ -206,7 +210,10 @@ func (db *Database) Abort() {
func (db *Database) abort() { db.mem = make(map[string]valueDelete, memdb.DefaultSize) }
// CommitBatch returns a batch that will commit all pending writes to the underlying database
// CommitBatch returns a batch that contains all uncommitted puts/deletes.
// Calling Write() on the returned batch causes the puts/deletes to be
// written to the underlying database. The returned batch should be written before
// future calls to this DB unless the batch will never be written.
func (db *Database) CommitBatch() (database.Batch, error) {
db.lock.Lock()
defer db.lock.Unlock()
@ -214,26 +221,25 @@ func (db *Database) CommitBatch() (database.Batch, error) {
return db.commitBatch()
}
// Put all of the puts/deletes in memory into db.batch
// and return the batch
func (db *Database) commitBatch() (database.Batch, error) {
if db.mem == nil {
return nil, database.ErrClosed
}
batch := db.db.NewBatch()
db.batch.Reset()
for key, value := range db.mem {
if value.delete {
if err := batch.Delete([]byte(key)); err != nil {
if err := db.batch.Delete([]byte(key)); err != nil {
return nil, err
}
} else if err := batch.Put([]byte(key), value.value); err != nil {
} else if err := db.batch.Put([]byte(key), value.value); err != nil {
return nil, err
}
}
if err := batch.Write(); err != nil {
return nil, err
}
return batch, nil
return db.batch, nil
}
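A usage sketch of the contract the new comment spells out, assuming this file is database/versiondb layered over memdb: write (or abandon) the returned batch before using the DB again, since it is the DB's own recycled batch.

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/database/versiondb"
)

func main() {
	vdb := versiondb.New(memdb.New())
	if err := vdb.Put([]byte("k"), []byte("v")); err != nil {
		panic(err)
	}
	batch, err := vdb.CommitBatch() // pending writes staged in the reused batch
	if err != nil {
		panic(err)
	}
	if err := batch.Write(); err != nil { // persist before touching vdb again
		panic(err)
	}
	vdb.Abort() // drop the now-persisted in-memory layer
	v, _ := vdb.Get([]byte("k"))
	fmt.Println(string(v)) // v, now read through from the underlying memdb
}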
// Close implements the database.Database interface
@ -244,6 +250,7 @@ func (db *Database) Close() error {
if db.mem == nil {
return database.ErrClosed
}
db.batch = nil
db.mem = nil
db.db = nil
return nil
@ -298,7 +305,11 @@ func (b *batch) Write() error {
// Reset implements the Batch interface
func (b *batch) Reset() {
b.writes = b.writes[:0]
if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor {
b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor)
} else {
b.writes = b.writes[:0]
}
b.size = 0
}


@ -299,6 +299,10 @@ func TestCommitBatch(t *testing.T) {
if err := db.Put(key1, value1); err != nil {
t.Fatalf("Unexpected error on db.Put: %s", err)
} else if has, err := baseDB.Has(key1); err != nil {
t.Fatalf("Unexpected error on db.Has: %s", err)
} else if has {
t.Fatalf("Unexpected result of db.Has: %v", has)
}
batch, err := db.CommitBatch()
@ -307,7 +311,11 @@ func TestCommitBatch(t *testing.T) {
}
db.Abort()
if err := batch.Write(); err != nil {
if has, err := db.Has(key1); err != nil {
t.Fatalf("Unexpected error on db.Has: %s", err)
} else if has {
t.Fatalf("Unexpected result of db.Has: %v", has)
} else if err := batch.Write(); err != nil {
t.Fatalf("Unexpected error on batch.Write: %s", err)
}


@ -18,27 +18,27 @@ import (
// Aliases returns the default aliases based on the network ID
func Aliases(networkID uint32) (map[string][]string, map[[32]byte][]string, map[[32]byte][]string, error) {
generalAliases := map[string][]string{
"vm/" + platformvm.ID.String(): []string{"vm/platform"},
"vm/" + avm.ID.String(): []string{"vm/avm"},
"vm/" + EVMID.String(): []string{"vm/evm"},
"vm/" + spdagvm.ID.String(): []string{"vm/spdag"},
"vm/" + spchainvm.ID.String(): []string{"vm/spchain"},
"vm/" + timestampvm.ID.String(): []string{"vm/timestamp"},
"bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"},
"vm/" + platformvm.ID.String(): {"vm/platform"},
"vm/" + avm.ID.String(): {"vm/avm"},
"vm/" + EVMID.String(): {"vm/evm"},
"vm/" + spdagvm.ID.String(): {"vm/spdag"},
"vm/" + spchainvm.ID.String(): {"vm/spchain"},
"vm/" + timestampvm.ID.String(): {"vm/timestamp"},
"bc/" + ids.Empty.String(): {"P", "platform", "bc/P", "bc/platform"},
}
chainAliases := map[[32]byte][]string{
ids.Empty.Key(): []string{"P", "platform"},
ids.Empty.Key(): {"P", "platform"},
}
vmAliases := map[[32]byte][]string{
platformvm.ID.Key(): []string{"platform"},
avm.ID.Key(): []string{"avm"},
EVMID.Key(): []string{"evm"},
spdagvm.ID.Key(): []string{"spdag"},
spchainvm.ID.Key(): []string{"spchain"},
timestampvm.ID.Key(): []string{"timestamp"},
secp256k1fx.ID.Key(): []string{"secp256k1fx"},
nftfx.ID.Key(): []string{"nftfx"},
propertyfx.ID.Key(): []string{"propertyfx"},
platformvm.ID.Key(): {"platform"},
avm.ID.Key(): {"avm"},
EVMID.Key(): {"evm"},
spdagvm.ID.Key(): {"spdag"},
spchainvm.ID.Key(): {"spchain"},
timestampvm.ID.Key(): {"timestamp"},
secp256k1fx.ID.Key(): {"secp256k1fx"},
nftfx.ID.Key(): {"nftfx"},
propertyfx.ID.Key(): {"propertyfx"},
}
genesisBytes, err := Genesis(networkID)
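The hunk above looks like the gofmt -s composite-literal simplification: the element type inside a composite literal can be elided. Illustratively (my own example, not from this diff):

package main

import "fmt"

func main() {
	// Equivalent literals; the second elides the redundant inner type.
	verbose := map[string][]string{"bc/P": []string{"P", "platform"}}
	simple := map[string][]string{"bc/P": {"P", "platform"}}
	fmt.Println(verbose, simple)
}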


@ -50,6 +50,122 @@ func (c *Config) init() error {
// Hard coded genesis constants
var (
EverestConfig = Config{
MintAddresses: []string{
"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
},
FundedAddresses: []string{
"9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17",
"JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn",
"7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM",
"77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6",
"4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt",
"CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi",
"4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa",
"DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9",
"ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h",
"6cesTteH62Y5mLoDBUASaBvCXuL2AthL",
},
StakerIDs: []string{
"LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C",
"hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb",
"2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB",
"4QBwET5o8kUhvt9xArhir4d3R25CtmZho",
"NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk",
},
EVMBytes: []byte{
0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67,
0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69,
0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31,
0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65,
0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64,
0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53,
0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a,
0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69,
0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63,
0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69,
0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68,
0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38,
0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62,
0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32,
0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31,
0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35,
0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34,
0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66,
0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36,
0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22,
0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c,
0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22,
0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75,
0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74,
0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c,
0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a,
0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72,
0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f,
0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22,
0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22,
0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74,
0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30,
0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69,
0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78,
0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22,
0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63,
0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78,
0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x22, 0x2c, 0x22, 0x63, 0x6f, 0x69, 0x6e,
0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30,
0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f,
0x63, 0x22, 0x3a, 0x7b, 0x22, 0x35, 0x37, 0x32,
0x66, 0x34, 0x64, 0x38, 0x30, 0x66, 0x31, 0x30,
0x66, 0x36, 0x36, 0x33, 0x62, 0x35, 0x30, 0x34,
0x39, 0x66, 0x37, 0x38, 0x39, 0x35, 0x34, 0x36,
0x66, 0x32, 0x35, 0x66, 0x37, 0x30, 0x62, 0x62,
0x36, 0x32, 0x61, 0x37, 0x66, 0x22, 0x3a, 0x7b,
0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65,
0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62,
0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30,
0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c,
0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22,
0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22,
0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61,
0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30,
0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22,
0x7d,
},
}
DenaliConfig = Config{
MintAddresses: []string{
"95YUFjhDG892VePMzpwKF9JzewGKvGRi3",
@ -393,6 +509,8 @@ var (
// GetConfig ...
func GetConfig(networkID uint32) *Config {
switch networkID {
case EverestID:
return &EverestConfig
case DenaliID:
return &DenaliConfig
case CascadeID:

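For orientation, a minimal sketch of how a caller resolves one of these configs through GetConfig (import path per this repo's layout; MintAddresses is one of the Config fields shown above):

    import (
        "fmt"

        "github.com/ava-labs/gecko/genesis"
    )

    func printEverestMintAddresses() {
        cfg := genesis.GetConfig(genesis.EverestID) // -> &EverestConfig
        fmt.Println(len(cfg.MintAddresses))
    }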

@ -9,12 +9,12 @@ import (
"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/codec"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/json"
"github.com/ava-labs/gecko/utils/units"
"github.com/ava-labs/gecko/utils/wrappers"
"github.com/ava-labs/gecko/vms/avm"
"github.com/ava-labs/gecko/vms/components/codec"
"github.com/ava-labs/gecko/vms/nftfx"
"github.com/ava-labs/gecko/vms/platformvm"
"github.com/ava-labs/gecko/vms/propertyfx"
@ -156,7 +156,7 @@ func FromConfig(networkID uint32, config *Config) ([]byte, error) {
// Specify the chains that exist upon this network's creation
platformvmArgs.Chains = []platformvm.APIChain{
platformvm.APIChain{
{
GenesisData: avmReply.Bytes,
SubnetID: platformvm.DefaultSubnetID,
VMID: avm.ID,
@ -167,25 +167,25 @@ func FromConfig(networkID uint32, config *Config) ([]byte, error) {
},
Name: "X-Chain",
},
platformvm.APIChain{
{
GenesisData: formatting.CB58{Bytes: config.EVMBytes},
SubnetID: platformvm.DefaultSubnetID,
VMID: EVMID,
Name: "C-Chain",
},
platformvm.APIChain{
{
GenesisData: spdagvmReply.Bytes,
SubnetID: platformvm.DefaultSubnetID,
VMID: spdagvm.ID,
Name: "Simple DAG Payments",
},
platformvm.APIChain{
{
GenesisData: spchainvmReply.Bytes,
SubnetID: platformvm.DefaultSubnetID,
VMID: spchainvm.ID,
Name: "Simple Chain Payments",
},
platformvm.APIChain{
{
GenesisData: formatting.CB58{Bytes: []byte{}}, // There is no genesis data
SubnetID: platformvm.DefaultSubnetID,
VMID: timestampvm.ID,


@ -23,7 +23,10 @@ func TestNetworkName(t *testing.T) {
if name := NetworkName(DenaliID); name != DenaliName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
}
if name := NetworkName(TestnetID); name != DenaliName {
if name := NetworkName(EverestID); name != EverestName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, EverestName)
}
if name := NetworkName(DenaliID); name != DenaliName {
t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName)
}
if name := NetworkName(4294967295); name != "network-4294967295" {


@ -16,6 +16,7 @@ var (
MainnetID uint32 = 1
CascadeID uint32 = 2
DenaliID uint32 = 3
EverestID uint32 = 4
TestnetID uint32 = 3
LocalID uint32 = 12345
@ -23,6 +24,7 @@ var (
MainnetName = "mainnet"
CascadeName = "cascade"
DenaliName = "denali"
EverestName = "everest"
TestnetName = "testnet"
LocalName = "local"
@ -31,6 +33,7 @@ var (
MainnetID: MainnetName,
CascadeID: CascadeName,
DenaliID: DenaliName,
EverestID: EverestName,
LocalID: LocalName,
}
@ -38,6 +41,7 @@ var (
MainnetName: MainnetID,
CascadeName: CascadeID,
DenaliName: DenaliID,
EverestName: EverestID,
TestnetName: TestnetID,
LocalName: LocalID,

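A quick sketch of the name/ID round trip these tables enable, assuming NetworkID and NetworkName are the exported helpers exercised by the tests above:

    id, err := genesis.NetworkID(genesis.EverestName) // 4
    if err != nil {
        // unknown network name
    }
    fmt.Println(genesis.NetworkName(id)) // "everest"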
go.mod

@ -6,10 +6,10 @@ require (
github.com/AppsFlyer/go-sundheit v0.2.0
github.com/allegro/bigcache v1.2.1 // indirect
github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f // indirect
github.com/ava-labs/coreth v0.2.4 // Added manually; don't delete
github.com/ava-labs/coreth v0.2.5 // indirect; Added manually; don't delete
github.com/ava-labs/go-ethereum v1.9.3 // indirect
github.com/deckarep/golang-set v1.7.1 // indirect
github.com/decred/dcrd/dcrec/secp256k1 v1.0.3
github.com/decred/dcrd/dcrec/secp256k1 v1.0.3 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v3 v3.0.0-20200526030155-0c6c7ca85d3b
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/elastic/gosigar v0.10.5 // indirect
@ -20,6 +20,7 @@ require (
github.com/gorilla/mux v1.7.4
github.com/gorilla/rpc v1.2.0
github.com/gorilla/websocket v1.4.2
github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd
github.com/hashicorp/go-plugin v1.3.0
github.com/hashicorp/golang-lru v0.5.4 // indirect
github.com/huin/goupnp v1.0.0
@ -28,7 +29,7 @@ require (
github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9 // indirect
github.com/mattn/go-colorable v0.1.6 // indirect
github.com/mitchellh/go-homedir v1.1.0
github.com/mr-tron/base58 v1.1.3
github.com/mr-tron/base58 v1.2.0
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d
github.com/olekukonko/tablewriter v0.0.4 // indirect
github.com/pborman/uuid v1.2.0 // indirect

go.sum

@ -2,13 +2,16 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT
github.com/AppsFlyer/go-sundheit v0.2.0 h1:FArqX+HbqZ6U32RC3giEAWRUpkggqxHj91KIvxNgwjU=
github.com/AppsFlyer/go-sundheit v0.2.0/go.mod h1:rCRkVTMQo7/krF7xQ9X0XEF1an68viFR6/Gy02q+4ds=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q=
github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU=
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc=
github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM=
@ -19,6 +22,8 @@ github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f/go.mod h1:
github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc=
github.com/ava-labs/coreth v0.2.4 h1:MhnbuRyMcij7WU4+frayp40quc44AMPc4IrxXhmucWw=
github.com/ava-labs/coreth v0.2.4/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
github.com/ava-labs/coreth v0.2.5 h1:2Al753rpPHvvZfcz7w96YbKhGFvrcZzsIZ/sIp0A0Ao=
github.com/ava-labs/coreth v0.2.5/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js=
github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY=
github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@ -38,7 +43,9 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/deckarep/golang-set v1.7.1 h1:SCQV0S6gTtp6itiFrTqI+pfmJ4LN85S1YzhDf9rTHJQ=
github.com/deckarep/golang-set v1.7.1/go.mod h1:93vsz/8Wt4joVM7c2AVqh+YRMiUSc14yDtF28KmMOgQ=
github.com/decred/dcrd v1.3.0 h1:EEXm7BdiROfazDtuFsOu9mfotnyy00bgCuVwUqaszFo=
github.com/decred/dcrd/chaincfg/chainhash v1.0.2 h1:rt5Vlq/jM3ZawwiacWjPa+smINyLRN07EO0cNBV6DGU=
github.com/decred/dcrd/chaincfg/chainhash v1.0.2/go.mod h1:BpbrGgrPTr3YJYRN3Bm+D9NuaFd+zGyNeIKgrhCXK60=
github.com/decred/dcrd/crypto/blake256 v1.0.0 h1:/8DMNYp9SGi5f0w7uCm6d6M4OU2rGFK09Y2A4Xv7EE0=
github.com/decred/dcrd/crypto/blake256 v1.0.0/go.mod h1:sQl2p6Y26YV+ZOcSTP6thNdn47hh8kt6rqSlvmrXFAc=
github.com/decred/dcrd/dcrec v1.0.0 h1:W+z6Es+Rai3MXYVoPAxYr5U1DGis0Co33scJ6uH2J6o=
github.com/decred/dcrd/dcrec/secp256k1 v1.0.3 h1:u4XpHqlscRolxPxt2YHrFBDVZYY1AK+KMV02H1r+HmU=
@ -61,8 +68,10 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw=
github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g=
github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/garyburd/redigo v1.6.0/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY=
github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 h1:f6D9Hr8xV8uYKlyuj8XIruxlh9WjVjdh1gIicAS7ays=
@ -76,6 +85,7 @@ github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -97,6 +107,7 @@ github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/uuid v1.0.0 h1:b4Gk+7WdP/d3HZH8EJsZpvV7EtDOgaZLtnaNGIu1adA=
@ -119,6 +130,7 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l
github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M=
github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo=
github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc=
@ -129,6 +141,7 @@ github.com/jackpal/gateway v1.0.6/go.mod h1:lTpwd4ACLXmpyiCTRtfiNyVnUmqT9RivzCDQ
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jackpal/go-nat-pmp v1.0.2/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o=
github.com/jhump/protoreflect v1.6.0 h1:h5jfMVslIg6l29nsMs0D8Wj17RDVdNYti0vDN/PZZoE=
github.com/jhump/protoreflect v1.6.0/go.mod h1:eaTn3RZAmMBcV0fifFvlm6VHNz3wSkYyXYWUh7ymB74=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -142,8 +155,10 @@ github.com/klauspost/reedsolomon v1.9.3/go.mod h1:CwCi+NUr9pqSVktrkN+Ondf06rkhYZ
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
github.com/mattn/go-colorable v0.1.6 h1:6Su7aK7lXmJ/U79bYtBjLNaha4Fs1Rg9plHpcH+vvnE=
@ -164,6 +179,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc=
github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o=
github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
@ -174,8 +191,10 @@ github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn
github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo=
github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME=
github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc=
github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw=
@ -217,6 +236,7 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70KE0+IUJSidPj/BFS/RXNHmKIJOdckzml2E=
@ -226,6 +246,7 @@ github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM=
github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@ -310,6 +331,7 @@ golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20200221224223-e1da425f72fd/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@ -336,10 +358,13 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo=
gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q=
@ -349,6 +374,7 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLv
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9 h1:ITeyKbRetrVzqR3U1eY+ywgp7IBspGd1U/bkwd1gWu4=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=


@ -8,6 +8,10 @@ import (
"strings"
)
const (
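// The minimum capacity of a bag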
minBagSize = 16
)
// Bag is a multiset of IDs.
//
// A bag has the ability to split and filter on its bits for ease of use for
@ -25,7 +29,7 @@ type Bag struct {
func (b *Bag) init() {
if b.counts == nil {
b.counts = make(map[[32]byte]int)
b.counts = make(map[[32]byte]int, minBagSize)
}
}
@ -72,16 +76,21 @@ func (b *Bag) AddCount(id ID, count int) {
}
// Count returns the number of times the id has been added.
func (b *Bag) Count(id ID) int { return b.counts[*id.ID] }
func (b *Bag) Count(id ID) int {
b.init()
return b.counts[*id.ID]
}
// Len returns the total number of ids that have been added, counting multiplicity.
func (b *Bag) Len() int { return b.size }
// List returns a list of all ids that have been added.
func (b *Bag) List() []ID {
idList := []ID(nil)
idList := make([]ID, len(b.counts))
i := 0
for id := range b.counts {
idList = append(idList, NewID(id))
idList[i] = NewID(id)
i++
}
return idList
}

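Taken together, these Bag changes make the zero value safe to query (Count now self-initializes) and let List fill a pre-sized slice instead of growing one with append. A rough usage sketch, assuming the ids package as shown:

    var bag ids.Bag
    id := ids.NewID([32]byte{1})
    fmt.Println(bag.Count(id)) // 0; safe even before any Add
    bag.AddCount(id, 3)
    fmt.Println(bag.Count(id))   // 3
    fmt.Println(bag.Len())       // 3: adds are counted with multiplicity
    fmt.Println(len(bag.List())) // 1 distinct id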
ids/bag_benchmark_test.go Normal file

@ -0,0 +1,53 @@
package ids
import (
"crypto/rand"
"testing"
)
// Benchmarks for Bag.List at small, medium, and large bag sizes.
func BenchmarkBagListSmall(b *testing.B) {
smallLen := 5
bag := Bag{}
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}
func BenchmarkBagListMedium(b *testing.B) {
mediumLen := 25
bag := Bag{}
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}
func BenchmarkBagListLarge(b *testing.B) {
largeLen := 100000
bag := Bag{}
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
bag.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
bag.List()
}
}

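These three benchmarks differ only in bag size; assuming this repository's layout, they can be run with: go test -bench=BagList -benchmem ./ids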

@ -18,8 +18,8 @@ func TestBagAdd(t *testing.T) {
} else if count := bag.Count(id1); count != 0 {
t.Fatalf("Bag.Count returned %d expected %d", count, 0)
} else if size := bag.Len(); size != 0 {
t.Fatalf("Bag.Len returned %d expected %d", count, 0)
} else if list := bag.List(); list != nil {
t.Fatalf("Bag.Len returned %d elements expected %d", count, 0)
} else if list := bag.List(); len(list) != 0 {
t.Fatalf("Bag.List returned %v expected %v", list, nil)
} else if mode, freq := bag.Mode(); !mode.IsZero() {
t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, ID{})


@ -7,11 +7,19 @@ import (
"strings"
)
const (
// The minimum capacity of a set
minSetSize = 16
)
// Set is a set of IDs
type Set map[[32]byte]bool
func (ids *Set) init(size int) {
if *ids == nil {
if minSetSize > size {
size = minSetSize
}
*ids = make(map[[32]byte]bool, size)
}
}
@ -70,9 +78,32 @@ func (ids *Set) Clear() { *ids = nil }
// List converts this set into a list
func (ids Set) List() []ID {
idList := []ID(nil)
idList := make([]ID, ids.Len())
i := 0
for id := range ids {
idList = append(idList, NewID(id))
idList[i] = NewID(id)
i++
}
return idList
}
// CappedList returns a list of length at most [size].
// Size should be >= 0. If size < 0, returns nil.
func (ids Set) CappedList(size int) []ID {
if size < 0 {
return nil
}
if l := ids.Len(); l < size {
size = l
}
i := 0
idList := make([]ID, size)
for id := range ids {
if i >= size {
break
}
idList[i] = NewID(id)
i++
}
return idList
}

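A short sketch of the new CappedList semantics, assuming the Set API in this diff:

    s := ids.Set{}
    s.Add(ids.NewID([32]byte{1}))
    s.Add(ids.NewID([32]byte{2}))
    s.Add(ids.NewID([32]byte{3}))
    fmt.Println(len(s.CappedList(2)))  // 2 (any two; map iteration order is unspecified)
    fmt.Println(len(s.CappedList(10))) // 3 (capped by the set's size)
    fmt.Println(s.CappedList(-1))      // nil for negative sizes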
ids/set_benchmark_test.go Normal file

@ -0,0 +1,53 @@
package ids
import (
"crypto/rand"
"testing"
)
// Benchmarks for Set.List at small, medium, and large set sizes.
func BenchmarkSetListSmall(b *testing.B) {
smallLen := 5
set := Set{}
for i := 0; i < smallLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}
func BenchmarkSetListMedium(b *testing.B) {
mediumLen := 25
set := Set{}
for i := 0; i < mediumLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}
func BenchmarkSetListLarge(b *testing.B) {
largeLen := 100000
set := Set{}
for i := 0; i < largeLen; i++ {
var idBytes [32]byte
rand.Read(idBytes[:])
set.Add(NewID(idBytes))
}
b.ResetTimer()
for n := 0; n < b.N; n++ {
set.List()
}
}


@ -55,3 +55,46 @@ func TestSet(t *testing.T) {
t.Fatalf("Sets overlap")
}
}
func TestSetCappedList(t *testing.T) {
set := Set{}
id := Empty
if list := set.CappedList(0); len(list) != 0 {
t.Fatalf("List should have been empty but was %v", list)
}
set.Add(id)
if list := set.CappedList(0); len(list) != 0 {
t.Fatalf("List should have been empty but was %v", list)
} else if list := set.CappedList(1); len(list) != 1 {
t.Fatalf("List should have had length %d but had %d", 1, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) {
t.Fatalf("List should have been %s but was %s", id, returnedID)
} else if list := set.CappedList(2); len(list) != 1 {
t.Fatalf("List should have had length %d but had %d", 1, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) {
t.Fatalf("List should have been %s but was %s", id, returnedID)
}
id2 := NewID([32]byte{1})
set.Add(id2)
if list := set.CappedList(0); len(list) != 0 {
t.Fatalf("List should have been empty but was %v", list)
} else if list := set.CappedList(1); len(list) != 1 {
t.Fatalf("List should have had length %d but had %d", 1, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) && !id2.Equals(returnedID) {
t.Fatalf("List should have been %s but was %s", id, returnedID)
} else if list := set.CappedList(2); len(list) != 2 {
t.Fatalf("List should have had length %d but had %d", 2, len(list))
} else if list := set.CappedList(3); len(list) != 2 {
t.Fatalf("List should have had length %d but had %d", 2, len(list))
} else if returnedID := list[0]; !id.Equals(returnedID) && !id2.Equals(returnedID) {
t.Fatalf("list contains unexpected element %s", returnedID)
} else if returnedID := list[1]; !id.Equals(returnedID) && !id2.Equals(returnedID) {
t.Fatalf("list contains unexpected element %s", returnedID)
}
}


@ -5,11 +5,18 @@ package ids
import "strings"
const (
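// The minimum capacity of a short set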
minShortSetSize = 16
)
// ShortSet is a set of ShortIDs
type ShortSet map[[20]byte]bool
func (ids *ShortSet) init(size int) {
if *ids == nil {
if minShortSetSize > size {
size = minShortSetSize
}
*ids = make(map[[20]byte]bool, size)
}
}
@ -50,24 +57,34 @@ func (ids *ShortSet) Remove(idList ...ShortID) {
// Clear empties this set
func (ids *ShortSet) Clear() { *ids = nil }
// CappedList returns a list of length at most [size]. Size should be >= 0
// CappedList returns a list of length at most [size].
// Size should be >= 0. If size < 0, returns nil.
func (ids ShortSet) CappedList(size int) []ShortID {
idList := make([]ShortID, size)[:0]
if size < 0 {
return nil
}
if l := ids.Len(); l < size {
size = l
}
i := 0
idList := make([]ShortID, size)
for id := range ids {
if size <= 0 {
if i >= size {
break
}
size--
idList = append(idList, NewShortID(id))
idList[i] = NewShortID(id)
i++
}
return idList
}
// List converts this set into a list
func (ids ShortSet) List() []ShortID {
idList := make([]ShortID, len(ids))[:0]
idList := make([]ShortID, len(ids))
i := 0
for id := range ids {
idList = append(idList, NewShortID(id))
idList[i] = NewShortID(id)
i++
}
return idList
}


@ -8,12 +8,16 @@ import (
"strings"
)
const (
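// The minimum capacity of a unique bag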
minUniqueBagSize = 16
)
// UniqueBag ...
type UniqueBag map[[32]byte]BitSet
func (b *UniqueBag) init() {
if *b == nil {
*b = make(map[[32]byte]BitSet)
*b = make(map[[32]byte]BitSet, minUniqueBagSize)
}
}


@ -40,12 +40,11 @@ func main() {
defer log.StopOnPanic()
defer Config.DB.Close()
if Config.StakingIP.IsZero() {
log.Warn("NAT traversal has failed. It will be able to connect to less nodes.")
}
// Track if sybil control is enforced
if !Config.EnableStaking {
if !Config.EnableStaking && Config.EnableP2PTLS {
log.Warn("Staking is disabled. Sybil control is not enforced.")
}
if !Config.EnableStaking && !Config.EnableP2PTLS {
log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.")
}
@ -65,11 +64,19 @@ func main() {
log.Debug("assertions are enabled. This may slow down execution")
}
mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko")
mapper := nat.NewPortMapper(log, Config.Nat)
defer mapper.UnmapAllPorts()
mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port)
mapper.MapPort(Config.HTTPPort, Config.HTTPPort)
port, err := mapper.Map("TCP", Config.StakingLocalPort, "gecko-staking") // Open staking port
if err == nil {
Config.StakingIP.Port = port
} else {
log.Warn("NAT traversal has failed. The node will be able to connect to less nodes.")
}
if Config.HTTPHost != "127.0.0.1" && Config.HTTPHost != "localhost" { // Open HTTP port iff HTTP server not listening on localhost
_, _ = mapper.Map("TCP", Config.HTTPPort, "gecko-http")
}
node := node.Node{}


@ -35,21 +35,25 @@ const (
// Results of parsing the CLI
var (
Config = node.Config{}
Err error
defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))
Config = node.Config{}
Err error
defaultNetworkName = genesis.TestnetName
defaultPluginDirs = []string{
"./build/plugins",
"./plugins",
os.ExpandEnv(filepath.Join("$HOME", ".gecko", "plugins")),
homeDir = os.ExpandEnv("$HOME")
defaultDbDir = filepath.Join(homeDir, ".gecko", "db")
defaultStakingKeyPath = filepath.Join(homeDir, ".gecko", "staking", "staker.key")
defaultStakingCertPath = filepath.Join(homeDir, ".gecko", "staking", "staker.crt")
defaultPluginDirs = []string{
filepath.Join(".", "build", "plugins"),
filepath.Join(".", "plugins"),
filepath.Join("/", "usr", "local", "lib", "gecko"),
filepath.Join(homeDir, ".gecko", "plugins"),
}
)
var (
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
errStakingRequiresTLS = errors.New("if staking is enabled, network TLS must also be enabled")
)
// GetIPs returns the default IPs for each network
@ -169,7 +173,7 @@ func init() {
version := fs.Bool("version", false, "If true, print version and quit")
// NetworkID:
networkName := fs.String("network-id", genesis.TestnetName, "Network ID this node will connect to")
networkName := fs.String("network-id", defaultNetworkName, "Network ID this node will connect to")
// Ava fees:
fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva")
@ -188,7 +192,7 @@ func init() {
consensusIP := fs.String("public-ip", "", "Public IP of this node")
// HTTP Server:
httpHost := fs.String("http-host", "", "Address of the HTTP server")
httpHost := fs.String("http-host", "127.0.0.1", "Address of the HTTP server")
httpPort := fs.Uint("http-port", 9650, "Port of the HTTP server")
fs.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs")
fs.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server")
@ -200,7 +204,9 @@ func init() {
// Staking:
consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
// TODO: keeping the same flag for backwards compatibility; it should be renamed to "staking-enabled"
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Enable staking. If enabled, Network TLS is required.")
fs.BoolVar(&Config.EnableP2PTLS, "p2p-tls-enabled", true, "Require TLS to authenticate network communication")
fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")
@ -221,7 +227,8 @@ func init() {
fs.IntVar(&Config.ConsensusParams.ConcurrentRepolls, "snow-concurrent-repolls", 1, "Minimum number of concurrent polls for finalizing consensus")
// Enable/Disable APIs:
fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API")
fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", false, "If true, this node exposes the Admin API")
fs.BoolVar(&Config.InfoAPIEnabled, "api-info-enabled", true, "If true, this node exposes the Info API")
fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API")
fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API")
fs.BoolVar(&Config.HealthAPIEnabled, "api-health-enabled", true, "If true, this node exposes the Health API")
@ -234,7 +241,15 @@ func init() {
ferr := fs.Parse(os.Args[1:])
if *version { // If --version used, print version and exit
fmt.Println(node.Version.String())
networkID, err := genesis.NetworkID(defaultNetworkName)
if errs.Add(err); err != nil {
return
}
networkGeneration := genesis.NetworkName(networkID)
fmt.Printf(
"%s [database=%s, network=%s/%s]\n",
node.Version, dbVersion, defaultNetworkName, networkGeneration,
)
os.Exit(0)
}
@ -269,16 +284,16 @@ func init() {
Config.DB = memdb.New()
}
Config.Nat = nat.NewRouter()
var ip net.IP
// If the public IP isn't specified, ask the NAT device for it
if *consensusIP == "" {
ip, err = Config.Nat.IP()
Config.Nat = nat.GetRouter()
ip, err = Config.Nat.ExternalIP()
if err != nil {
ip = net.IPv4zero // Couldn't get my IP...set to 0.0.0.0
}
} else {
Config.Nat = nat.NewNoRouter()
ip = net.ParseIP(*consensusIP)
}
@ -291,6 +306,7 @@ func init() {
IP: ip,
Port: uint16(*consensusPort),
}
Config.StakingLocalPort = uint16(*consensusPort)
defaultBootstrapIPs, defaultBootstrapIDs := GetDefaultBootstraps(networkID, 5)
@ -318,7 +334,13 @@ func init() {
*bootstrapIDs = strings.Join(defaultBootstrapIDs, ",")
}
}
if Config.EnableStaking {
if Config.EnableStaking && !Config.EnableP2PTLS {
errs.Add(errStakingRequiresTLS)
return
}
if Config.EnableP2PTLS {
i := 0
cb58 := formatting.CB58{}
for _, id := range strings.Split(*bootstrapIDs, ",") {


@ -1,143 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package nat
import (
"sync"
"time"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
)
const (
defaultMappingTimeout = 30 * time.Minute
defaultMappingUpdateInterval = 3 * defaultMappingTimeout / 4
)
// Mapper maps port
type Mapper interface {
MapPort(newInternalPort, newExternalPort uint16) error
UnmapAllPorts() error
}
type mapper struct {
log logging.Logger
router Router
networkProtocol NetworkProtocol
mappingNames string
mappingTimeout time.Duration
mappingUpdateInterval time.Duration
closer chan struct{}
wg sync.WaitGroup
errLock sync.Mutex
errs wrappers.Errs
}
// NewMapper returns a new mapper that can map ports on a router
func NewMapper(
log logging.Logger,
router Router,
networkProtocol NetworkProtocol,
mappingNames string,
mappingTimeout time.Duration,
mappingUpdateInterval time.Duration,
) Mapper {
return &mapper{
log: log,
router: router,
networkProtocol: networkProtocol,
mappingNames: mappingNames,
mappingTimeout: mappingTimeout,
mappingUpdateInterval: mappingUpdateInterval,
closer: make(chan struct{}),
}
}
// NewDefaultMapper returns a new mapper that can map ports on a router with
// default settings
func NewDefaultMapper(
log logging.Logger,
router Router,
networkProtocol NetworkProtocol,
mappingNames string,
) Mapper {
return NewMapper(
log,
router,
networkProtocol,
mappingNames,
defaultMappingTimeout, // uses the default value
defaultMappingUpdateInterval, // uses the default value
)
}
// MapPort maps a local port to a port on the router until UnmapAllPorts is
// called.
func (m *mapper) MapPort(newInternalPort, newExternalPort uint16) error {
m.wg.Add(1)
go m.mapPort(newInternalPort, newExternalPort)
return nil
}
func (m *mapper) mapPort(newInternalPort, newExternalPort uint16) {
// duration is set to 0 here so that the select case will execute
// immediately
updateTimer := time.NewTimer(0)
defer func() {
updateTimer.Stop()
m.errLock.Lock()
m.errs.Add(m.router.UnmapPort(
m.networkProtocol,
newInternalPort,
newExternalPort))
m.errLock.Unlock()
m.log.Debug("Unmapped external port %d to internal port %d",
newExternalPort,
newInternalPort)
m.wg.Done()
}()
for {
select {
case <-updateTimer.C:
err := m.router.MapPort(
m.networkProtocol,
newInternalPort,
newExternalPort,
m.mappingNames,
m.mappingTimeout)
if err != nil {
m.errLock.Lock()
m.errs.Add(err)
m.errLock.Unlock()
m.log.Debug("Failed to add mapping from external port %d to internal port %d due to %s",
newExternalPort,
newInternalPort,
err)
} else {
m.log.Debug("Mapped external port %d to internal port %d",
newExternalPort,
newInternalPort)
}
// remap the port in m.mappingUpdateInterval
updateTimer.Reset(m.mappingUpdateInterval)
case _, _ = <-m.closer:
return // only return when all ports are unmapped
}
}
}
func (m *mapper) UnmapAllPorts() error {
close(m.closer)
m.wg.Wait()
return m.errs.Err
}

nat/nat.go Normal file

@ -0,0 +1,139 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package nat
import (
"errors"
"net"
"sync"
"time"
"github.com/ava-labs/gecko/utils/logging"
)
const (
mapTimeout = 30 * time.Second
mapUpdateTimeout = mapTimeout / 2
maxRetries = 20
)
// Router describes the functionality that a network device must support to be
// able to open ports to an external IP.
type Router interface {
MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error
UnmapPort(protocol string, intPort, extPort uint16) error
ExternalIP() (net.IP, error)
GetPortMappingEntry(extPort uint16, protocol string) (
InternalIP string,
InternalPort uint16,
Description string,
err error,
)
}
// GetRouter returns a router on the current network.
func GetRouter() Router {
if r := getUPnPRouter(); r != nil {
return r
}
if r := getPMPRouter(); r != nil {
return r
}
return NewNoRouter()
}
// Mapper attempts to open a set of ports on a router
type Mapper struct {
log logging.Logger
r Router
closer chan struct{}
wg sync.WaitGroup
}
// NewPortMapper returns an initialized mapper
func NewPortMapper(log logging.Logger, r Router) Mapper {
return Mapper{
log: log,
r: r,
closer: make(chan struct{}),
}
}
// Map sets up a mapping for the given protocol and internal port and returns
// the external port that was finally mapped. It returns an error if mapping
// failed after the maximum number of retries
func (dev *Mapper) Map(protocol string, intPort uint16, desc string) (uint16, error) {
mappedPort := make(chan uint16)
go dev.keepPortMapping(mappedPort, protocol, intPort, desc)
port := <-mappedPort
if port == 0 {
return 0, errors.New("failed to map port")
}
return port, nil
}
// keepPortMapping runs in the background to keep a port mapped. It renews
// the port mapping every mapUpdateTimeout.
func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string,
intPort uint16, desc string) {
updateTimer := time.NewTimer(mapUpdateTimeout)
for i := 0; i <= maxRetries; i++ {
extPort := intPort + uint16(i)
if intaddr, intPort, desc, err := dev.r.GetPortMappingEntry(extPort, protocol); err == nil {
dev.log.Debug("Port %d is taken by %s:%d: %s, retry with the next port",
extPort, intaddr, intPort, desc)
} else if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil {
dev.log.Debug("Map port failed. Protocol %s Internal %d External %d. %s",
protocol, intPort, extPort, err)
} else {
dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol,
intPort, extPort)
dev.wg.Add(1)
mappedPort <- extPort
defer func(extPort uint16) {
updateTimer.Stop()
dev.log.Debug("Unmap protocol %s external port %d", protocol, extPort)
dev.r.UnmapPort(protocol, intPort, extPort)
dev.wg.Done()
}(extPort)
for {
select {
case <-updateTimer.C:
if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil {
dev.log.Error("Renewing port mapping from external port %d to internal port %d failed with %s",
extPort, intPort, err)
} else {
dev.log.Debug("Renewed port mapping from external port %d to internal port %d.",
extPort, intPort)
}
updateTimer.Reset(mapUpdateTimeout)
case <-dev.closer:
return
}
}
}
}
dev.log.Debug("Unable to map port %d~%d", intPort, intPort+maxRetries)
mappedPort <- 0
}
// UnmapAllPorts stops mapping all ports from this mapper and attempts to unmap
// them.
func (dev *Mapper) UnmapAllPorts() {
close(dev.closer)
dev.wg.Wait()
dev.log.Info("Unmapped all ports")
}

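For orientation, the end-to-end flow that main.go adopts earlier in this diff can be sketched as follows (logger construction elided; the port and mapping name are illustrative):

    r := nat.GetRouter() // UPnP first, then NAT-PMP, then the noRouter fallback
    m := nat.NewPortMapper(log, r)
    defer m.UnmapAllPorts()
    extPort, err := m.Map("TCP", 9651, "gecko-staking")
    if err != nil {
        // NAT traversal failed; the node can still run with fewer inbound peers
    } else if extPort != 9651 {
        // port 9651 was taken on the router, so a nearby port was mapped instead
    }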

@ -4,25 +4,57 @@
package nat
import (
"errors"
"fmt"
"net"
"time"
)
var (
errNoRouter = errors.New("no nat enabled router was discovered")
)
const googleDNSServer = "8.8.8.8:80"
type noRouter struct{}
func (noRouter) MapPort(_ NetworkProtocol, _, _ uint16, _ string, _ time.Duration) error {
return errNoRouter
type noRouter struct {
ip net.IP
}
func (noRouter) UnmapPort(_ NetworkProtocol, _, _ uint16) error {
return errNoRouter
func (noRouter) MapPort(_ string, intPort, extPort uint16, _ string, _ time.Duration) error {
if intPort != extPort {
return fmt.Errorf("cannot map port %d to %d", intPort, extPort)
}
return nil
}
func (noRouter) IP() (net.IP, error) {
return nil, errNoRouter
func (noRouter) UnmapPort(string, uint16, uint16) error {
return nil
}
func (r noRouter) ExternalIP() (net.IP, error) {
return r.ip, nil
}
func (noRouter) GetPortMappingEntry(uint16, string) (string, uint16, string, error) {
return "", 0, "", nil
}
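// getOutboundIP reports this machine's preferred outbound IP by "dialing" a
// well-known public address; since the socket is UDP, no packets are actually
// sent and the kernel merely selects a route and source address.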
func getOutboundIP() (net.IP, error) {
conn, err := net.Dial("udp", googleDNSServer)
if err != nil {
return nil, err
}
if udpAddr, ok := conn.LocalAddr().(*net.UDPAddr); ok {
return udpAddr.IP, conn.Close()
}
conn.Close()
return nil, fmt.Errorf("getting outbound IP failed")
}
// NewNoRouter returns a router that assumes the network is public
func NewNoRouter() Router {
ip, err := getOutboundIP()
if err != nil {
return nil
}
return &noRouter{
ip: ip,
}
}


@ -4,11 +4,13 @@
package nat
import (
"fmt"
"net"
"time"
"github.com/jackpal/gateway"
"github.com/jackpal/go-nat-pmp"
natpmp "github.com/jackpal/go-nat-pmp"
)
var (
@ -17,12 +19,12 @@ var (
// pmpRouter adapts the NAT-PMP protocol implementation so it conforms to
// the common Router interface.
type pmpClient struct {
type pmpRouter struct {
client *natpmp.Client
}
func (pmp *pmpClient) MapPort(
networkProtocol NetworkProtocol,
func (pmp *pmpRouter) MapPort(
networkProtocol string,
newInternalPort uint16,
newExternalPort uint16,
mappingName string,
@ -37,8 +39,8 @@ func (pmp *pmpClient) MapPort(
return err
}
func (pmp *pmpClient) UnmapPort(
networkProtocol NetworkProtocol,
func (pmp *pmpRouter) UnmapPort(
networkProtocol string,
internalPort uint16,
_ uint16) error {
protocol := string(networkProtocol)
@ -48,7 +50,7 @@ func (pmp *pmpClient) UnmapPort(
return err
}
func (pmp *pmpClient) IP() (net.IP, error) {
func (pmp *pmpRouter) ExternalIP() (net.IP, error) {
response, err := pmp.client.GetExternalAddress()
if err != nil {
return nil, err
@ -56,14 +58,20 @@ func (pmp *pmpClient) IP() (net.IP, error) {
return response.ExternalIPAddress[:], nil
}
func getPMPRouter() Router {
// go-nat-pmp does not support port mapping entry query
func (pmp *pmpRouter) GetPortMappingEntry(externalPort uint16, protocol string) (
string, uint16, string, error) {
return "", 0, "", fmt.Errorf("port mapping entry not found")
}
func getPMPRouter() *pmpRouter {
gatewayIP, err := gateway.DiscoverGateway()
if err != nil {
return nil
}
pmp := &pmpClient{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)}
if _, err := pmp.IP(); err != nil {
pmp := &pmpRouter{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)}
if _, err := pmp.ExternalIP(); err != nil {
return nil
}


@ -1,65 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
// Package nat performs network address translation and provides helpers for
// routing ports.
package nat
import (
"net"
"time"
)
// NetworkProtocol is a protocol that will be used through a port
type NetworkProtocol string
// Available protocol
const (
TCP NetworkProtocol = "TCP"
UDP NetworkProtocol = "UDP"
)
// Router provides a standard NAT router functions. Specifically, allowing the
// fetching of public IPs and port forwarding to this computer.
type Router interface {
// mapPort creates a mapping between a port on the local computer to an
// external port on the router.
//
// The mappingName is something displayed on the router, so it is included
// for completeness.
MapPort(
networkProtocol NetworkProtocol,
newInternalPort uint16,
newExternalPort uint16,
mappingName string,
mappingDuration time.Duration) error
// UnmapPort clears a mapping that was previous made by a call to MapPort
UnmapPort(
networkProtocol NetworkProtocol,
internalPort uint16,
externalPort uint16) error
// Returns the routers IP address on the network the router considers
// external
IP() (net.IP, error)
}
// NewRouter returns a new router discovered on the local network
func NewRouter() Router {
routers := make(chan Router)
// Because getting a router can take a noticeable amount of time to error,
// we run these requests in parallel
go func() {
routers <- getUPnPRouter()
}()
go func() {
routers <- getPMPRouter()
}()
for i := 0; i < 2; i++ {
if router := <-routers; router != nil {
return router
}
}
return noRouter{}
}


@ -4,7 +4,6 @@
package nat
import (
"errors"
"fmt"
"net"
"time"
@ -15,11 +14,7 @@ import (
)
const (
soapTimeout = time.Second
)
var (
errNoGateway = errors.New("Failed to connect to any avaliable gateways")
soapRequestTimeout = 3 * time.Second
)
// upnpClient is the interface used by goupnp for their client implementations
@ -47,69 +42,30 @@ type upnpClient interface {
// returns if there is rsip available, nat enabled, or an unexpected error.
GetNATRSIPStatus() (newRSIPAvailable bool, natEnabled bool, err error)
}
type upnpRouter struct {
root *goupnp.RootDevice
client upnpClient
}
func (n *upnpRouter) MapPort(
networkProtocol NetworkProtocol,
newInternalPort uint16,
newExternalPort uint16,
mappingName string,
mappingDuration time.Duration,
) error {
ip, err := n.localAddress()
if err != nil {
return err
}
protocol := string(networkProtocol)
// goupnp uses seconds to denote their lifetime
lifetime := uint32(mappingDuration / time.Second)
// UnmapPort's error is intentionally dropped, because the mapping may not
// exist.
n.UnmapPort(networkProtocol, newInternalPort, newExternalPort)
return n.client.AddPortMapping(
"", // newRemoteHost isn't used to limit the mapping to a host
newExternalPort,
protocol,
newInternalPort,
ip.String(), // newInternalClient is the client traffic should be sent to
true, // newEnabled enables port mappings
mappingName,
lifetime,
// attempts to get port mapping information given an external port and protocol
GetSpecificPortMappingEntry(
NewRemoteHost string,
NewExternalPort uint16,
NewProtocol string,
) (
NewInternalPort uint16,
NewInternalClient string,
NewEnabled bool,
NewPortMappingDescription string,
NewLeaseDuration uint32,
err error,
)
}
func (n *upnpRouter) UnmapPort(networkProtocol NetworkProtocol, _, externalPort uint16) error {
protocol := string(networkProtocol)
return n.client.DeletePortMapping(
"", // newRemoteHost isn't used to limit the mapping to a host
externalPort,
protocol)
type upnpRouter struct {
dev *goupnp.RootDevice
client upnpClient
}
func (n *upnpRouter) IP() (net.IP, error) {
ipStr, err := n.client.GetExternalIPAddress()
if err != nil {
return nil, err
}
ip := net.ParseIP(ipStr)
if ip == nil {
return nil, fmt.Errorf("invalid IP %s", ipStr)
}
return ip, nil
}
func (n *upnpRouter) localAddress() (net.IP, error) {
func (r *upnpRouter) localIP() (net.IP, error) {
// attempt to get an address on the router
deviceAddr, err := net.ResolveUDPAddr("udp4", n.root.URLBase.Host)
deviceAddr, err := net.ResolveUDPAddr("udp", r.dev.URLBase.Host)
if err != nil {
return nil, err
}
@ -120,7 +76,7 @@ func (n *upnpRouter) localAddress() (net.IP, error) {
return nil, err
}
// attempt to find one of my ips that the router would know about
// attempt to find one of my IPs that matches router's record
for _, netInterface := range netInterfaces {
addrs, err := netInterface.Addrs()
if err != nil {
@ -128,9 +84,6 @@ func (n *upnpRouter) localAddress() (net.IP, error) {
}
for _, addr := range addrs {
// this is pretty janky, but it seems to be the best way to get the
// ip mask and properly check if the ip references the device we are
// connected to
ipNet, ok := addr.(*net.IPNet)
if !ok {
continue
@ -144,110 +97,119 @@ func (n *upnpRouter) localAddress() (net.IP, error) {
return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP)
}
// getUPnPRouter searches for all Gateway Devices that have avaliable
// connections in the goupnp library and returns the first connection it can
// find.
func getUPnPRouter() Router {
routers := make(chan *upnpRouter)
// Because DiscoverDevices takes a noticeable amount of time to error, we
// run these requests in parallel
go func() {
routers <- connectToGateway(internetgateway1.URN_WANConnectionDevice_1, gateway1)
}()
go func() {
routers <- connectToGateway(internetgateway2.URN_WANConnectionDevice_2, gateway2)
}()
for i := 0; i < 2; i++ {
if router := <-routers; router != nil {
return router
}
func (r *upnpRouter) ExternalIP() (net.IP, error) {
str, err := r.client.GetExternalIPAddress()
if err != nil {
return nil, err
}
return nil
ip := net.ParseIP(str)
if ip == nil {
return nil, fmt.Errorf("invalid IP %s", str)
}
return ip, nil
}
func gateway1(client goupnp.ServiceClient) upnpClient {
func (r *upnpRouter) MapPort(protocol string, intPort, extPort uint16,
desc string, duration time.Duration) error {
ip, err := r.localIP()
if err != nil {
return err
}
lifetime := uint32(duration / time.Second)
return r.client.AddPortMapping("", extPort, protocol, intPort,
ip.String(), true, desc, lifetime)
}
func (r *upnpRouter) UnmapPort(protocol string, _, extPort uint16) error {
return r.client.DeletePortMapping("", extPort, protocol)
}
func (r *upnpRouter) GetPortMappingEntry(extPort uint16, protocol string) (string, uint16, string, error) {
intPort, intAddr, _, desc, _, err := r.client.GetSpecificPortMappingEntry("", extPort, protocol)
return intAddr, intPort, desc, err
}
// create UPnP SOAP service client with URN
func getUPnPClient(client goupnp.ServiceClient) upnpClient {
switch client.Service.ServiceType {
case internetgateway1.URN_WANIPConnection_1:
return &internetgateway1.WANIPConnection1{ServiceClient: client}
case internetgateway1.URN_WANPPPConnection_1:
return &internetgateway1.WANPPPConnection1{ServiceClient: client}
default:
return nil
}
}
func gateway2(client goupnp.ServiceClient) upnpClient {
switch client.Service.ServiceType {
case internetgateway2.URN_WANIPConnection_1:
return &internetgateway2.WANIPConnection1{ServiceClient: client}
case internetgateway2.URN_WANIPConnection_2:
return &internetgateway2.WANIPConnection2{ServiceClient: client}
case internetgateway2.URN_WANPPPConnection_1:
return &internetgateway2.WANPPPConnection1{ServiceClient: client}
default:
return nil
}
}
func connectToGateway(deviceType string, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter {
devs, err := goupnp.DiscoverDevices(deviceType)
// discover tries to find a gateway device matching the given search target
func discover(target string) *upnpRouter {
devs, err := goupnp.DiscoverDevices(target)
if err != nil {
return nil
}
// we are iterating over all the network devices, acting a possible roots
for i := range devs {
dev := &devs[i]
if dev.Root == nil {
// buffered so that senders whose result is never read don't block forever
router := make(chan *upnpRouter, len(devs))
launched := 0
for i := 0; i < len(devs); i++ {
if devs[i].Root == nil {
continue
}
launched++
go func(dev *goupnp.MaybeRootDevice) {
var r *upnpRouter
dev.Root.Device.VisitServices(func(service *goupnp.Service) {
c := goupnp.ServiceClient{
SOAPClient: service.NewSOAPClient(),
RootDevice: dev.Root,
Location: dev.Location,
Service: service,
}
c.SOAPClient.HTTPClient.Timeout = soapRequestTimeout
client := getUPnPClient(c)
if client == nil {
return
}
if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat {
return
}
r = &upnpRouter{dev.Root, client}
})
router <- r
}(&devs[i])
}
// the root device may be a router, so attempt to connect to that
rootDevice := &dev.Root.Device
if upnp := getRouter(dev, rootDevice, toClient); upnp != nil {
return upnp
}
// attempt to connect to any sub devices
devices := rootDevice.Devices
for i := range devices {
if upnp := getRouter(dev, &devices[i], toClient); upnp != nil {
return upnp
}
// wait only for the goroutines that were actually launched
for i := 0; i < launched; i++ {
if r := <-router; r != nil {
return r
}
}
return nil
}
func getRouter(rootDevice *goupnp.MaybeRootDevice, device *goupnp.Device, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter {
for i := range device.Services {
service := &device.Services[i]
// getUPnPRouter searches for an internet gateway using both Device Control
// Protocol versions and returns the first one it can find. It returns nil if
// no UPnP gateway is found
func getUPnPRouter() *upnpRouter {
targets := []string{
internetgateway1.URN_WANConnectionDevice_1,
internetgateway2.URN_WANConnectionDevice_2,
}
soapClient := service.NewSOAPClient()
// make sure the client times out if needed
soapClient.HTTPClient.Timeout = soapTimeout
// buffered so that a discovery whose result is never read doesn't leak its goroutine
routers := make(chan *upnpRouter, len(targets))
// attempt to create a client connection
serviceClient := goupnp.ServiceClient{
SOAPClient: soapClient,
RootDevice: rootDevice.Root,
Location: rootDevice.Location,
Service: service,
}
client := toClient(serviceClient)
if client == nil {
continue
}
for _, urn := range targets {
go func(urn string) {
routers <- discover(urn)
}(urn)
}
// check whether port mapping is enabled
if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat {
continue
}
// we found a router!
return &upnpRouter{
root: rootDevice.Root,
client: client,
for i := 0; i < len(targets); i++ {
if r := <-routers; r != nil {
return r
}
}
return nil
}


@ -33,6 +33,12 @@ func (m Builder) PeerList(ipDescs []utils.IPDesc) (Msg, error) {
return m.Pack(PeerList, map[Field]interface{}{Peers: ipDescs})
}
// Ping message
func (m Builder) Ping() (Msg, error) { return m.Pack(Ping, nil) }
// Pong message
func (m Builder) Pong() (Msg, error) { return m.Pack(Pong, nil) }
// GetAcceptedFrontier message
func (m Builder) GetAcceptedFrontier(chainID ids.ID, requestID uint32) (Msg, error) {
return m.Pack(GetAcceptedFrontier, map[Field]interface{}{


@ -132,6 +132,10 @@ func (op Op) String() string {
return "get_peerlist"
case PeerList:
return "peerlist"
case Ping:
return "ping"
case Pong:
return "pong"
case GetAcceptedFrontier:
return "get_accepted_frontier"
case AcceptedFrontier:
@ -177,11 +181,12 @@ const (
PushQuery
PullQuery
Chits
// Bootstrapping:
// TODO: Move GetAncestors and MultiPut with the rest of the bootstrapping
// commands when we do non-backwards compatible upgrade
// TODO: Reorder these messages when we transition to everest
GetAncestors
MultiPut
Ping
Pong
)
// Defines the messages that can be sent/received with this network
@ -192,6 +197,8 @@ var (
Version: {NetworkID, NodeID, MyTime, IP, VersionStr},
GetPeerList: {},
PeerList: {Peers},
Ping: {},
Pong: {},
// Bootstrapping:
GetAcceptedFrontier: {ChainID, RequestID},
AcceptedFrontier: {ChainID, RequestID, ContainerIDs},

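Since Ping and Pong carry no fields, packing them is trivial. A hypothetical sketch, assuming a Builder value b wired with this package's codec:

    msg, err := b.Ping() // packs the Ping op with an empty field map
    if err != nil {
        // Ping has no fields, so a packing failure indicates a programming error
    }
    _ = msg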

@ -54,6 +54,7 @@ type metrics struct {
getVersion, version,
getPeerlist, peerlist,
ping, pong,
getAcceptedFrontier, acceptedFrontier,
getAccepted, accepted,
get, getAncestors, put, multiPut,
@ -78,6 +79,8 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error {
errs.Add(m.version.initialize(Version, registerer))
errs.Add(m.getPeerlist.initialize(GetPeerList, registerer))
errs.Add(m.peerlist.initialize(PeerList, registerer))
errs.Add(m.ping.initialize(Ping, registerer))
errs.Add(m.pong.initialize(Pong, registerer))
errs.Add(m.getAcceptedFrontier.initialize(GetAcceptedFrontier, registerer))
errs.Add(m.acceptedFrontier.initialize(AcceptedFrontier, registerer))
errs.Add(m.getAccepted.initialize(GetAccepted, registerer))
@ -103,6 +106,10 @@ func (m *metrics) message(msgType Op) *messageMetrics {
return &m.getPeerlist
case PeerList:
return &m.peerlist
case Ping:
return &m.ping
case Pong:
return &m.pong
case GetAcceptedFrontier:
return &m.getAcceptedFrontier
case AcceptedFrontier:


@ -21,6 +21,7 @@ import (
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
@ -42,6 +43,12 @@ const (
defaultGetVersionTimeout = 2 * time.Second
defaultAllowPrivateIPs = true
defaultGossipSize = 50
defaultPingPongTimeout = time.Minute
defaultPingFrequency = 3 * defaultPingPongTimeout / 4
// Request ID used when sending a Put message to gossip an accepted container
// (i.e., not sent in response to a Get)
GossipMsgRequestID = math.MaxUint32
)
// Network defines the functionality of the networking library.
@ -98,6 +105,7 @@ type network struct {
serverUpgrader Upgrader
clientUpgrader Upgrader
vdrs validators.Set // set of current validators in the AVAnet
beacons validators.Set // set of beacons in the AVAnet
router router.Router // router must be thread safe
nodeID uint32
@ -118,6 +126,8 @@ type network struct {
getVersionTimeout time.Duration
allowPrivateIPs bool
gossipSize int
pingPongTimeout time.Duration
pingFrequency time.Duration
executor timer.Executor
@ -150,6 +160,7 @@ func NewDefaultNetwork(
serverUpgrader,
clientUpgrader Upgrader,
vdrs validators.Set,
beacons validators.Set,
router router.Router,
) Network {
return NewNetwork(
@ -165,6 +176,7 @@ func NewDefaultNetwork(
serverUpgrader,
clientUpgrader,
vdrs,
beacons,
router,
defaultInitialReconnectDelay,
defaultMaxReconnectDelay,
@ -179,6 +191,8 @@ func NewDefaultNetwork(
defaultGetVersionTimeout,
defaultAllowPrivateIPs,
defaultGossipSize,
defaultPingPongTimeout,
defaultPingFrequency,
)
}
@ -196,6 +210,7 @@ func NewNetwork(
serverUpgrader,
clientUpgrader Upgrader,
vdrs validators.Set,
beacons validators.Set,
router router.Router,
initialReconnectDelay,
maxReconnectDelay time.Duration,
@ -210,6 +225,8 @@ func NewNetwork(
getVersionTimeout time.Duration,
allowPrivateIPs bool,
gossipSize int,
pingPongTimeout time.Duration,
pingFrequency time.Duration,
) Network {
net := &network{
log: log,
@ -223,6 +240,7 @@ func NewNetwork(
serverUpgrader: serverUpgrader,
clientUpgrader: clientUpgrader,
vdrs: vdrs,
beacons: beacons,
router: router,
nodeID: rand.Uint32(),
initialReconnectDelay: initialReconnectDelay,
@ -238,6 +256,8 @@ func NewNetwork(
getVersionTimeout: getVersionTimeout,
allowPrivateIPs: allowPrivateIPs,
gossipSize: gossipSize,
pingPongTimeout: pingPongTimeout,
pingFrequency: pingFrequency,
disconnectedIPs: make(map[string]struct{}),
connectedIPs: make(map[string]struct{}),
@ -278,8 +298,11 @@ func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d",
containerIDs.Len())
n.log.Error("failed to build AcceptedFrontier(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
return // Packing message failed
}
@ -291,7 +314,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID)
n.log.Debug("failed to send AcceptedFrontier(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.acceptedFrontier.numFailed.Inc()
} else {
n.acceptedFrontier.numSent.Inc()
@ -302,6 +329,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.GetAccepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("failed to build GetAccepted(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
@ -319,6 +351,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send GetAccepted(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
n.getAccepted.numFailed.Inc()
} else {
@ -331,8 +368,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.Accepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d",
containerIDs.Len())
n.log.Error("failed to build Accepted(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
return // Packing message failed
}
@ -344,33 +384,17 @@ func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID ui
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an Accepted message to: %s", validatorID)
n.log.Debug("failed to send Accepted(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.accepted.numFailed.Inc()
} else {
n.accepted.numSent.Inc()
}
}
// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.Get(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Get message to: %s", validatorID)
n.get.numFailed.Inc()
} else {
n.get.numSent.Inc()
}
}
// GetAncestors implements the Sender interface.
func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.GetAncestors(chainID, requestID, containerID)
@ -387,36 +411,18 @@ func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestI
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send GetAncestors(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.GetAncestorsFailed(validatorID, chainID, requestID) })
n.getAncestors.numFailed.Inc()
n.log.Debug("failed to send a GetAncestors message to: %s", validatorID)
} else {
n.getAncestors.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build Put message because of container of size %d", len(container))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Put message to: %s", validatorID)
n.put.numFailed.Inc()
} else {
n.put.numSent.Inc()
}
}
// MultiPut implements the Sender interface.
func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
msg, err := n.b.MultiPut(chainID, requestID, containers)
@ -433,22 +439,90 @@ func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID ui
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a MultiPut message to: %s", validatorID)
n.log.Debug("failed to send MultiPut(%s, %s, %d, %d)",
validatorID,
chainID,
requestID,
len(containers))
n.multiPut.numFailed.Inc()
} else {
n.multiPut.numSent.Inc()
}
}
// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.Get(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send Get(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.GetFailed(validatorID, chainID, requestID) })
n.get.numFailed.Inc()
} else {
n.get.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build Put(%s, %d, %s): %s. len(container) : %d",
chainID,
requestID,
containerID,
err,
len(container))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send Put(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
n.put.numFailed.Inc()
} else {
n.put.numSent.Inc()
}
}
// PushQuery implements the Sender interface.
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.PushQuery(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build PushQuery(%s, %d, %s): %s. len(container): %d",
chainID,
requestID,
containerID,
err,
len(container))
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
}
n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
return // Packing message failed
}
@ -462,7 +536,12 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PushQuery message to: %s", vID)
n.log.Debug("failed to send PushQuery(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pushQuery.numFailed.Inc()
} else {
@ -486,7 +565,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PullQuery message to: %s", vID)
n.log.Debug("failed to send PullQuery(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pullQuery.numFailed.Inc()
} else {
@ -499,7 +582,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
msg, err := n.b.Chits(chainID, requestID, votes)
if err != nil {
n.log.Error("failed to build Chits message because of %d votes", votes.Len())
n.log.Error("failed to build Chits(%s, %d, %s): %s",
chainID,
requestID,
votes,
err)
return
}
@ -511,7 +598,11 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Chits message to: %s", validatorID)
n.log.Debug("failed to send Chits(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
votes)
n.chits.numFailed.Inc()
} else {
n.chits.numSent.Inc()
@ -521,7 +612,8 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
// Gossip attempts to gossip the container to the network
func (n *network) Gossip(chainID, containerID ids.ID, container []byte) {
if err := n.gossipContainer(chainID, containerID, container); err != nil {
n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
n.log.Debug("failed to Gossip(%s, %s): %s", chainID, containerID, err)
n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}
@ -632,7 +724,7 @@ func (n *network) Track(ip utils.IPDesc) {
// assumes the stateLock is not held.
func (n *network) gossipContainer(chainID, containerID ids.ID, container []byte) error {
msg, err := n.b.Put(chainID, math.MaxUint32, containerID, container)
msg, err := n.b.Put(chainID, GossipMsgRequestID, containerID, container)
if err != nil {
return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
}
@ -695,7 +787,9 @@ func (n *network) gossip() {
}
msg, err := n.b.PeerList(ips)
if err != nil {
n.log.Warn("failed to gossip PeerList message due to %s", err)
n.log.Error("failed to build peer list to gossip: %s. len(ips): %d",
err,
len(ips))
continue
}

View File

@ -197,6 +197,7 @@ func TestNewDefaultNetwork(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net)
@ -280,6 +281,7 @@ func TestEstablishConnection(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -297,6 +299,7 @@ func TestEstablishConnection(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -419,6 +422,7 @@ func TestDoubleTrack(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -436,6 +440,7 @@ func TestDoubleTrack(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -559,6 +564,7 @@ func TestDoubleClose(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -576,6 +582,7 @@ func TestDoubleClose(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -704,6 +711,7 @@ func TestRemoveHandlers(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -721,6 +729,7 @@ func TestRemoveHandlers(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -858,6 +867,7 @@ func TestTrackConnected(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -875,6 +885,7 @@ func TestTrackConnected(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)
@ -999,6 +1010,7 @@ func TestTrackConnectedRace(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net0)
@ -1016,6 +1028,7 @@ func TestTrackConnectedRace(t *testing.T) {
serverUpgrader,
clientUpgrader,
vdrs,
vdrs,
handler,
)
assert.NotNil(t, net1)

View File

@ -64,6 +64,24 @@ func (p *peer) Start() {
// Initially send the version to the peer
go p.Version()
go p.requestVersion()
// go p.sendPings()
}
func (p *peer) sendPings() {
t := time.NewTicker(p.net.pingFrequency)
defer t.Stop()
for range t.C {
p.net.stateLock.Lock()
closed := p.closed
p.net.stateLock.Unlock()
if closed {
return
}
p.Ping()
}
}
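As a worked check of the constants defined earlier in this diff, the ping ticker fires comfortably inside the read-deadline window, so a healthy connection keeps resetting its deadline once the commented-out SetReadDeadline calls below are enabled:

timeout := time.Minute       // defaultPingPongTimeout
frequency := 3 * timeout / 4 // defaultPingFrequency
fmt.Println(frequency)       // prints 45s: a ping lands well before the deadline expires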
// request the version from the peer until we receive it
@ -80,6 +98,7 @@ func (p *peer) requestVersion() {
if connected || closed {
return
}
p.GetVersion()
}
}
@ -88,6 +107,11 @@ func (p *peer) requestVersion() {
func (p *peer) ReadMessages() {
defer p.Close()
// if err := p.conn.SetReadDeadline(p.net.clock.Time().Add(p.net.pingPongTimeout)); err != nil {
// p.net.log.Verbo("error on setting the connection read timeout %s", err)
// return
// }
pendingBuffer := wrappers.Packer{}
readBuffer := make([]byte, 1<<10)
for {
@ -218,7 +242,15 @@ func (p *peer) send(msg Msg) bool {
// assumes the stateLock is not held
func (p *peer) handle(msg Msg) {
p.net.heartbeat()
atomic.StoreInt64(&p.lastReceived, p.net.clock.Time().Unix())
currentTime := p.net.clock.Time()
atomic.StoreInt64(&p.lastReceived, currentTime.Unix())
// if err := p.conn.SetReadDeadline(currentTime.Add(p.net.pingPongTimeout)); err != nil {
// p.net.log.Verbo("error on setting the connection read timeout %s, closing the connection", err)
// p.Close()
// return
// }
op := msg.Op()
msgMetrics := p.net.message(op)
@ -235,6 +267,12 @@ func (p *peer) handle(msg Msg) {
case GetVersion:
p.getVersion(msg)
return
case Ping:
p.ping(msg)
return
case Pong:
p.pong(msg)
return
}
if !p.connected {
p.net.log.Debug("dropping message from %s because the connection hasn't been established yet", p.id)
@ -318,6 +356,12 @@ func (p *peer) GetPeerList() {
p.Send(msg)
}
// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
p.PeerList(ips)
}
// assumes the stateLock is not held
func (p *peer) PeerList(peers []utils.IPDesc) {
msg, err := p.net.b.PeerList(peers)
@ -326,7 +370,28 @@ func (p *peer) PeerList(peers []utils.IPDesc) {
return
}
p.Send(msg)
return
}
// assumes the stateLock is not held
func (p *peer) Ping() {
msg, err := p.net.b.Ping()
p.net.log.AssertNoError(err)
if p.Send(msg) {
p.net.ping.numSent.Inc()
} else {
p.net.ping.numFailed.Inc()
}
}
// assumes the stateLock is not held
func (p *peer) Pong() {
msg, err := p.net.b.Pong()
p.net.log.AssertNoError(err)
if p.Send(msg) {
p.net.pong.numSent.Inc()
} else {
p.net.pong.numFailed.Inc()
}
}
// assumes the stateLock is not held
@ -405,8 +470,13 @@ func (p *peer) version(msg Msg) {
}
if p.net.version.Before(peerVersion) {
p.net.log.Info("peer attempting to connect with newer version %s. You may want to update your client",
peerVersion)
if p.net.beacons.Contains(p.id) {
p.net.log.Info("beacon attempting to connect with newer version %s. You may want to update your client",
peerVersion)
} else {
p.net.log.Debug("peer attempting to connect with newer version %s. You may want to update your client",
peerVersion)
}
}
if err := p.net.version.Compatible(peerVersion); err != nil {
@ -458,17 +528,6 @@ func (p *peer) version(msg Msg) {
p.net.connected(p)
}
// assumes the stateLock is not held
func (p *peer) SendPeerList() {
ips := p.net.validatorIPs()
reply, err := p.net.b.PeerList(ips)
if err != nil {
p.net.log.Warn("failed to send PeerList message due to %s", err)
return
}
p.Send(reply)
}
// assumes the stateLock is not held
func (p *peer) getPeerList(_ Msg) { p.SendPeerList() }
@ -488,6 +547,12 @@ func (p *peer) peerList(msg Msg) {
p.net.stateLock.Unlock()
}
// assumes the stateLock is not held
func (p *peer) ping(_ Msg) { p.Pong() }
// assumes the stateLock is not held
func (p *peer) pong(_ Msg) {}
// assumes the stateLock is not held
func (p *peer) getAcceptedFrontier(msg Msg) {
chainID, err := ids.ToID(msg.Get(ChainID).([]byte))

View File

@ -33,10 +33,12 @@ type Config struct {
DB database.Database
// Staking configuration
StakingIP utils.IPDesc
EnableStaking bool
StakingKeyFile string
StakingCertFile string
StakingIP utils.IPDesc
StakingLocalPort uint16
EnableP2PTLS bool
EnableStaking bool
StakingKeyFile string
StakingCertFile string
// Bootstrapping configuration
BootstrapPeers []*Peer
@ -50,6 +52,7 @@ type Config struct {
// Enable/Disable APIs
AdminAPIEnabled bool
InfoAPIEnabled bool
KeystoreAPIEnabled bool
MetricsAPIEnabled bool
HealthAPIEnabled bool

View File

@ -7,6 +7,7 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"io/ioutil"
"net"
@ -18,6 +19,7 @@ import (
"github.com/ava-labs/gecko/api"
"github.com/ava-labs/gecko/api/admin"
"github.com/ava-labs/gecko/api/health"
"github.com/ava-labs/gecko/api/info"
"github.com/ava-labs/gecko/api/ipcs"
"github.com/ava-labs/gecko/api/keystore"
"github.com/ava-labs/gecko/api/metrics"
@ -56,7 +58,7 @@ var (
genesisHashKey = []byte("genesisID")
// Version is the version of this code
Version = version.NewDefaultVersion("avalanche", 0, 5, 5)
Version = version.NewDefaultVersion("avalanche", 0, 5, 7)
versionParser = version.NewDefaultParser()
)
@ -92,6 +94,9 @@ type Node struct {
// Net runs the networking stack
Net network.Network
// this node's initial connections to the network
beacons validators.Set
// current validators of the network
vdrs validators.Manager
@ -112,14 +117,14 @@ type Node struct {
*/
func (n *Node) initNetworking() error {
listener, err := net.Listen(TCP, n.Config.StakingIP.PortString())
listener, err := net.Listen(TCP, fmt.Sprintf(":%d", n.Config.StakingLocalPort))
if err != nil {
return err
}
dialer := network.NewDialer(TCP)
var serverUpgrader, clientUpgrader network.Upgrader
if n.Config.EnableStaking {
if n.Config.EnableP2PTLS {
cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile)
if err != nil {
return err
@ -164,6 +169,7 @@ func (n *Node) initNetworking() error {
serverUpgrader,
clientUpgrader,
defaultSubnetValidators,
n.beacons,
n.Config.ConsensusRouter,
)
@ -253,7 +259,7 @@ func (n *Node) initDatabase() error {
// Otherwise, it is a hash of the TLS certificate that this node
// uses for P2P communication
func (n *Node) initNodeID() error {
if !n.Config.EnableStaking {
if !n.Config.EnableP2PTLS {
n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String())))
n.Log.Info("Set the node's ID to %s", n.ID)
return nil
@ -277,6 +283,14 @@ func (n *Node) initNodeID() error {
return nil
}
// Create the IDs of the peers this node should first connect to
func (n *Node) initBeacons() {
n.beacons = validators.NewSet()
for _, peer := range n.Config.BootstrapPeers {
n.beacons.Add(validators.NewValidator(peer.ID, 1))
}
}
// Create the vmManager and register the following vms:
// AVM, Simple Payments DAG, Simple Payments Chain
// The Platform VM is registered in initStaking because
@ -359,11 +373,6 @@ func (n *Node) initChains() error {
return err
}
beacons := validators.NewSet()
for _, peer := range n.Config.BootstrapPeers {
beacons.Add(validators.NewValidator(peer.ID, 1))
}
genesisBytes, err := genesis.Genesis(n.Config.NetworkID)
if err != nil {
return err
@ -375,7 +384,7 @@ func (n *Node) initChains() error {
SubnetID: platformvm.DefaultSubnetID,
GenesisData: genesisBytes, // Specifies other chains to create
VMAlias: platformvm.ID.String(),
CustomBeacons: beacons,
CustomBeacons: n.beacons,
})
return nil
@ -435,58 +444,105 @@ func (n *Node) initSharedMemory() {
// initKeystoreAPI initializes the keystore service
// Assumes n.APIServer is already set
func (n *Node) initKeystoreAPI() {
n.Log.Info("initializing Keystore API")
func (n *Node) initKeystoreAPI() error {
n.Log.Info("initializing keystore")
keystoreDB := prefixdb.New([]byte("keystore"), n.DB)
n.keystoreServer.Initialize(n.Log, keystoreDB)
keystoreHandler := n.keystoreServer.CreateHandler()
if n.Config.KeystoreAPIEnabled {
n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog)
if !n.Config.KeystoreAPIEnabled {
n.Log.Info("skipping keystore API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing keystore API")
return n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog)
}
// initMetricsAPI initializes the Metrics API
// Assumes n.APIServer is already set
func (n *Node) initMetricsAPI() {
n.Log.Info("initializing Metrics API")
func (n *Node) initMetricsAPI() error {
registry, handler := metrics.NewService()
if n.Config.MetricsAPIEnabled {
n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog)
}
// Components of the system assume the Metrics interface is non-nil,
// so it is set regardless of whether the metrics API is enabled.
n.Config.ConsensusParams.Metrics = registry
if !n.Config.MetricsAPIEnabled {
n.Log.Info("skipping metrics API initialization because it has been disabled")
return nil
}
n.Log.Info("initializing metrics API")
return n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog)
}
// initAdminAPI initializes the Admin API service
// Assumes n.log, n.chainManager, and n.ValidatorAPI already initialized
func (n *Node) initAdminAPI() {
if n.Config.AdminAPIEnabled {
n.Log.Info("initializing Admin API")
service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
func (n *Node) initAdminAPI() error {
if !n.Config.AdminAPIEnabled {
n.Log.Info("skipping admin API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing admin API")
service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
return n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
}
func (n *Node) initInfoAPI() error {
if !n.Config.InfoAPIEnabled {
n.Log.Info("skipping info API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing info API")
service := info.NewService(n.Log, Version, n.ID, n.Config.NetworkID, n.chainManager, n.Net)
return n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "", n.HTTPLog)
}
// initHealthAPI initializes the Health API service
// Assumes n.Log, n.ConsensusAPI, and n.ValidatorAPI already initialized
func (n *Node) initHealthAPI() {
// Assumes n.Log, n.Net, n.APIServer, n.HTTPLog already initialized
func (n *Node) initHealthAPI() error {
if !n.Config.HealthAPIEnabled {
return
n.Log.Info("skipping health API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing Health API")
service := health.NewService(n.Log)
service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute)
n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
if err := service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute); err != nil {
return fmt.Errorf("couldn't register heartbeat health check: %w", err)
}
isBootstrappedFunc := func() (interface{}, error) {
if pChainID, err := n.chainManager.Lookup("P"); err != nil {
return nil, errors.New("P-Chain not created")
} else if !n.chainManager.IsBootstrapped(pChainID) {
return nil, errors.New("P-Chain not bootstrapped")
}
if xChainID, err := n.chainManager.Lookup("X"); err != nil {
return nil, errors.New("X-Chain not created")
} else if !n.chainManager.IsBootstrapped(xChainID) {
return nil, errors.New("X-Chain not bootstrapped")
}
if cChainID, err := n.chainManager.Lookup("C"); err != nil {
return nil, errors.New("C-Chain not created")
} else if !n.chainManager.IsBootstrapped(cChainID) {
return nil, errors.New("C-Chain not bootstrapped")
}
return nil, nil
}
// Passes if the P, X and C chains are finished bootstrapping
if err := service.RegisterMonotonicCheckFunc("chains.default.bootstrapped", isBootstrappedFunc); err != nil {
return err
}
return n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
}
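Any additional check with the same shape could be registered the same way. A hypothetical sketch (the check name and body are illustrative; the "monotonic" naming suggests a result that is latched once it first passes):

// customCheck reports unhealthy until some condition holds.
customCheck := func() (interface{}, error) {
	// return nil, err while unhealthy; nil, nil once healthy
	return nil, nil
}
if err := service.RegisterMonotonicCheckFunc("chains.custom.check", customCheck); err != nil {
	return err
}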
// initIPCAPI initializes the IPC API service
// Assumes n.log and n.chainManager already initialized
func (n *Node) initIPCAPI() {
if n.Config.IPCEnabled {
n.Log.Info("initializing IPC API")
service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer)
n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog)
func (n *Node) initIPCAPI() error {
if !n.Config.IPCEnabled {
n.Log.Info("skipping ipc API initializaion because it has been disabled")
return nil
}
n.Log.Info("initializing ipc API")
service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer)
return n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog)
}
// Give chains and VMs aliases as specified by the genesis information
@ -542,10 +598,16 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
return fmt.Errorf("problem initializing staker ID: %w", err)
}
n.initBeacons()
// Start HTTP APIs
n.initAPIServer() // Start the API Server
n.initKeystoreAPI() // Start the Keystore API
n.initMetricsAPI() // Start the Metrics API
n.initAPIServer() // Start the API Server
if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API
return fmt.Errorf("couldn't initialize keystore API: %w", err)
}
if err := n.initMetricsAPI(); err != nil { // Start the Metrics API
return fmt.Errorf("couldn't initialize metrics API: %w", err)
}
// initialize shared memory
n.initSharedMemory()
@ -561,14 +623,25 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
n.initEventDispatcher() // Set up the event dispatcher
n.initChainManager() // Set up the chain manager
n.initAdminAPI() // Start the Admin API
n.initHealthAPI() // Start the Health API
n.initIPCAPI() // Start the IPC API
if err := n.initAliases(); err != nil { // Set up aliases
return err
if err := n.initAdminAPI(); err != nil { // Start the Admin API
return fmt.Errorf("couldn't initialize admin API: %w", err)
}
return n.initChains() // Start the Platform chain
if err := n.initInfoAPI(); err != nil { // Start the Info API
return fmt.Errorf("couldn't initialize info API: %w", err)
}
if err := n.initHealthAPI(); err != nil { // Start the Health API
return fmt.Errorf("couldn't initialize health API: %w", err)
}
if err := n.initIPCAPI(); err != nil { // Start the IPC API
return fmt.Errorf("couldn't initialize ipc API: %w", err)
}
if err := n.initAliases(); err != nil { // Set up aliases
return fmt.Errorf("couldn't initialize aliases: %w", err)
}
if err := n.initChains(); err != nil { // Start the Platform chain
return fmt.Errorf("couldn't initialize chains: %w", err)
}
return nil
}
// Shutdown this node

View File

@ -15,7 +15,7 @@ GECKO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. && pwd ) # Directory
BUILD_DIR=$GECKO_PATH/build # Where binaries go
PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go
CORETH_VER="0.2.4" # Should match coreth version in go.mod
CORETH_VER="0.2.5" # Should match coreth version in go.mod
CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER"
# Build Gecko

File diff suppressed because it is too large

View File

@ -10,6 +10,10 @@ import (
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)
const (
minMapSize = 16
)
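The second argument to make is a capacity hint, so the maps below start with room for minMapSize entries and skip a few early grow-and-rehash steps on the hot path. The pattern, in one line:

nodes := make(map[[32]byte]Vertex, minMapSize) // pre-sized; grows normally past 16 entries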
// TopologicalFactory implements Factory by returning a topological struct
type TopologicalFactory struct{}
@ -65,12 +69,12 @@ func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier
ta.ctx.Log.Error("%s", err)
}
ta.nodes = make(map[[32]byte]Vertex)
ta.nodes = make(map[[32]byte]Vertex, minMapSize)
ta.cg = &snowstorm.Directed{}
ta.cg.Initialize(ctx, params.Parameters)
ta.frontier = make(map[[32]byte]Vertex)
ta.frontier = make(map[[32]byte]Vertex, minMapSize)
for _, vtx := range frontier {
ta.frontier[vtx.ID().Key()] = vtx
}
@ -141,7 +145,9 @@ func (ta *Topological) RecordPoll(responses ids.UniqueBag) error {
votes := ta.pushVotes(kahns, leaves)
// Update the conflict graph: O(|Transactions|)
ta.ctx.Log.Verbo("Updating consumer confidences based on:\n%s", &votes)
ta.cg.RecordPoll(votes)
if err := ta.cg.RecordPoll(votes); err != nil {
return err
}
// Update the dag: O(|Live Set|)
return ta.updateFrontiers()
}
@ -157,7 +163,7 @@ func (ta *Topological) Finalized() bool { return ta.cg.Finalized() }
// the non-transitively applied votes. Also returns the list of leaf nodes.
func (ta *Topological) calculateInDegree(
responses ids.UniqueBag) (map[[32]byte]kahnNode, []ids.ID) {
kahns := make(map[[32]byte]kahnNode)
kahns := make(map[[32]byte]kahnNode, minMapSize)
leaves := ids.Set{}
for _, vote := range responses.List() {
@ -231,6 +237,7 @@ func (ta *Topological) pushVotes(
kahnNodes map[[32]byte]kahnNode,
leaves []ids.ID) ids.Bag {
votes := make(ids.UniqueBag)
txConflicts := make(map[[32]byte]ids.Set, minMapSize)
for len(leaves) > 0 {
newLeavesSize := len(leaves) - 1
@ -245,6 +252,12 @@ func (ta *Topological) pushVotes(
// Give the votes to the consumer
txID := tx.ID()
votes.UnionSet(txID, kahn.votes)
// Map txID to set of Conflicts
txKey := txID.Key()
if _, exists := txConflicts[txKey]; !exists {
txConflicts[txKey] = ta.cg.Conflicts(tx)
}
}
for _, dep := range vtx.Parents() {
@ -265,6 +278,18 @@ func (ta *Topological) pushVotes(
}
}
// Create bag of votes for conflicting transactions
conflictingVotes := make(ids.UniqueBag)
for txHash, conflicts := range txConflicts {
txID := ids.NewID(txHash)
for conflictTxHash := range conflicts {
conflictTxID := ids.NewID(conflictTxHash)
conflictingVotes.UnionSet(txID, votes.GetSet(conflictTxID))
}
}
votes.Difference(&conflictingVotes)
return votes.Bag(ta.params.Alpha)
}
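An illustrative walk-through of the subtraction above (numbers invented for the example): suppose txA and txB conflict, validator 0 voted on a vertex carrying txA, and validator 1 on a vertex carrying txB.

// votes:            txA -> {0}, txB -> {1}
// conflictingVotes: txA -> {1}, txB -> {0}  (each tx inherits the votes of its conflict set)
// votes.Difference(&conflictingVotes) leaves both sets unchanged here; but had
// validator 0 voted on both branches, it would be removed from both txA and
// txB, so no voter ever counts toward two conflicting transactions at once.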
@ -422,9 +447,9 @@ func (ta *Topological) updateFrontiers() error {
ta.preferred.Clear()
ta.virtuous.Clear()
ta.orphans.Clear()
ta.frontier = make(map[[32]byte]Vertex)
ta.preferenceCache = make(map[[32]byte]bool)
ta.virtuousCache = make(map[[32]byte]bool)
ta.frontier = make(map[[32]byte]Vertex, minMapSize)
ta.preferenceCache = make(map[[32]byte]bool, minMapSize)
ta.virtuousCache = make(map[[32]byte]bool, minMapSize)
ta.orphans.Union(ta.cg.Virtuous()) // Initially, nothing is preferred

View File

@ -4,758 +4,7 @@
package avalanche
import (
"math"
"testing"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/snowball"
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
)
func TestTopologicalParams(t *testing.T) { ParamsTest(t, TopologicalFactory{}) }
func TestTopologicalAdd(t *testing.T) { AddTest(t, TopologicalFactory{}) }
func TestTopologicalVertexIssued(t *testing.T) { VertexIssuedTest(t, TopologicalFactory{}) }
func TestTopologicalTxIssued(t *testing.T) { TxIssuedTest(t, TopologicalFactory{}) }
func TestAvalancheVoting(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
sm := make(ids.UniqueBag)
sm.Add(0, vtx1.id)
sm.Add(1, vtx1.id)
ta.RecordPoll(sm)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
}
ta.RecordPoll(sm)
if !ta.Finalized() {
t.Fatalf("An avalanche instance finalized too late")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Rejected {
t.Fatalf("Tx should have been rejected")
} else if tx1.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
}
func TestAvalancheTransitiveVoting(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[1])
vtx1 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 2,
status: choices.Processing,
}
vtx2 := &Vtx{
dependencies: []Vertex{vtx1},
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 3,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
ta.Add(vtx2)
sm1 := make(ids.UniqueBag)
sm1.Add(0, vtx0.id)
sm1.Add(1, vtx2.id)
ta.RecordPoll(sm1)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
sm2 := make(ids.UniqueBag)
sm2.Add(0, vtx2.id)
sm2.Add(1, vtx2.id)
ta.RecordPoll(sm2)
if !ta.Finalized() {
t.Fatalf("An avalanche instance finalized too late")
} else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
} else if tx1.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
}
func TestAvalancheSplitVoting(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
sm1 := make(ids.UniqueBag)
sm1.Add(0, vtx0.id)
sm1.Add(1, vtx1.id)
ta.RecordPoll(sm1)
if !ta.Finalized() {
t.Fatalf("An avalanche instance finalized too late")
} else if !ids.UnsortedEquals([]ids.ID{vtx0.id, vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
}
}
func TestAvalancheTransitiveRejection(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
ta.Add(vtx1)
ta.Add(vtx2)
sm := make(ids.UniqueBag)
sm.Add(0, vtx1.id)
sm.Add(1, vtx1.id)
ta.RecordPoll(sm)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
}
ta.RecordPoll(sm)
if ta.Finalized() {
t.Fatalf("An avalanche instance finalized too early")
} else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) {
t.Fatalf("Initial frontier failed to be set")
} else if tx0.Status() != choices.Rejected {
t.Fatalf("Tx should have been rejected")
} else if tx1.Status() != choices.Accepted {
t.Fatalf("Tx should have been accepted")
} else if tx2.Status() != choices.Processing {
t.Fatalf("Tx should not have been decided")
}
ta.preferenceCache = make(map[[32]byte]bool)
ta.virtuousCache = make(map[[32]byte]bool)
ta.update(vtx2)
}
func TestAvalancheVirtuous(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
if virtuous := ta.Virtuous(); virtuous.Len() != 1 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vtx0.id) {
t.Fatalf("Wrong virtuous")
}
ta.Add(vtx1)
if virtuous := ta.Virtuous(); virtuous.Len() != 1 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vtx0.id) {
t.Fatalf("Wrong virtuous")
}
ta.updateFrontiers()
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
ta.Add(vtx2)
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
ta.updateFrontiers()
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
}
func TestAvalancheIsVirtuous(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2,
Alpha: 2,
BetaVirtuous: 1,
BetaRogue: 2,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
if virtuous := ta.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(vts[0].ID()) {
t.Fatalf("Wrong virtuous")
} else if !virtuous.Contains(vts[1].ID()) {
t.Fatalf("Wrong virtuous")
}
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
if !ta.IsVirtuous(tx0) {
t.Fatalf("Should be virtuous.")
} else if !ta.IsVirtuous(tx1) {
t.Fatalf("Should be virtuous.")
}
ta.Add(vtx0)
if !ta.IsVirtuous(tx0) {
t.Fatalf("Should be virtuous.")
} else if ta.IsVirtuous(tx1) {
t.Fatalf("Should not be virtuous.")
}
ta.Add(vtx1)
if ta.IsVirtuous(tx0) {
t.Fatalf("Should not be virtuous.")
} else if ta.IsVirtuous(tx1) {
t.Fatalf("Should not be virtuous.")
}
}
func TestAvalancheQuiesce(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
if ta.Quiesce() {
t.Fatalf("Shouldn't quiesce")
}
ta.Add(vtx1)
if !ta.Quiesce() {
t.Fatalf("Should quiesce")
}
ta.Add(vtx2)
if ta.Quiesce() {
t.Fatalf("Shouldn't quiesce")
}
sm := make(ids.UniqueBag)
sm.Add(0, vtx2.id)
ta.RecordPoll(sm)
if !ta.Quiesce() {
t.Fatalf("Should quiesce")
}
}
func TestAvalancheOrphans(t *testing.T) {
params := Parameters{
Parameters: snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: math.MaxInt32,
BetaRogue: math.MaxInt32,
ConcurrentRepolls: 1,
},
Parents: 2,
BatchSize: 1,
}
vts := []Vertex{&Vtx{
id: GenerateID(),
status: choices.Accepted,
}, &Vtx{
id: GenerateID(),
status: choices.Accepted,
}}
utxos := []ids.ID{GenerateID(), GenerateID()}
ta := Topological{}
ta.Initialize(snow.DefaultContextTest(), params, vts)
tx0 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx0.Ins.Add(utxos[0])
vtx0 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
}
tx1 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx1.Ins.Add(utxos[0])
vtx1 := &Vtx{
dependencies: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 1,
status: choices.Processing,
}
tx2 := &snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
}
tx2.Ins.Add(utxos[1])
vtx2 := &Vtx{
dependencies: []Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx2},
height: 2,
status: choices.Processing,
}
ta.Add(vtx0)
if orphans := ta.Orphans(); orphans.Len() != 0 {
t.Fatalf("Wrong number of orphans")
}
ta.Add(vtx1)
if orphans := ta.Orphans(); orphans.Len() != 0 {
t.Fatalf("Wrong number of orphans")
}
ta.Add(vtx2)
if orphans := ta.Orphans(); orphans.Len() != 0 {
t.Fatalf("Wrong number of orphans")
}
sm := make(ids.UniqueBag)
sm.Add(0, vtx1.id)
ta.RecordPoll(sm)
if orphans := ta.Orphans(); orphans.Len() != 1 {
t.Fatalf("Wrong number of orphans")
} else if !orphans.Contains(tx2.ID()) {
t.Fatalf("Wrong orphan")
}
}
func TestTopological(t *testing.T) { ConsensusTest(t, TopologicalFactory{}) }

View File

@ -19,7 +19,8 @@ type Vtx struct {
height uint64
status choices.Status
bytes []byte
Validity error
bytes []byte
}
func (v *Vtx) ID() ids.ID { return v.id }
@ -28,9 +29,8 @@ func (v *Vtx) Parents() []Vertex { return v.dependencies }
func (v *Vtx) Height() uint64 { return v.height }
func (v *Vtx) Txs() []snowstorm.Tx { return v.txs }
func (v *Vtx) Status() choices.Status { return v.status }
func (v *Vtx) Live() {}
func (v *Vtx) Accept() error { v.status = choices.Accepted; return nil }
func (v *Vtx) Reject() error { v.status = choices.Rejected; return nil }
func (v *Vtx) Accept() error { v.status = choices.Accepted; return v.Validity }
func (v *Vtx) Reject() error { v.status = choices.Rejected; return v.Validity }
func (v *Vtx) Bytes() []byte { return v.bytes }
type sortVts []*Vtx

View File

@ -1,48 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowball
import (
"github.com/ava-labs/gecko/ids"
)
// ByzantineFactory implements Factory by returning a byzantine struct
type ByzantineFactory struct{}
// New implements Factory
func (ByzantineFactory) New() Consensus { return &Byzantine{} }
// Byzantine is a naive implementation of a multi-choice snowball instance
type Byzantine struct {
// params contains all the configurations of a snowball instance
params Parameters
// Hardcode the preference
preference ids.ID
}
// Initialize implements the Consensus interface
func (b *Byzantine) Initialize(params Parameters, choice ids.ID) {
b.params = params
b.preference = choice
}
// Parameters implements the Consensus interface
func (b *Byzantine) Parameters() Parameters { return b.params }
// Add implements the Consensus interface
func (b *Byzantine) Add(choice ids.ID) {}
// Preference implements the Consensus interface
func (b *Byzantine) Preference() ids.ID { return b.preference }
// RecordPoll implements the Consensus interface
func (b *Byzantine) RecordPoll(votes ids.Bag) {}
// RecordUnsuccessfulPoll implements the Consensus interface
func (b *Byzantine) RecordUnsuccessfulPoll() {}
// Finalized implements the Consensus interface
func (b *Byzantine) Finalized() bool { return true }
func (b *Byzantine) String() string { return b.preference.String() }

View File

@ -1,54 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package snowball
import (
"testing"
"github.com/ava-labs/gecko/ids"
"github.com/prometheus/client_golang/prometheus"
)
func TestByzantine(t *testing.T) {
params := Parameters{
Metrics: prometheus.NewRegistry(),
K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5,
}
byzFactory := ByzantineFactory{}
byz := byzFactory.New()
byz.Initialize(params, Blue)
if ret := byz.Parameters(); ret != params {
t.Fatalf("Should have returned the correct params")
}
byz.Add(Green)
if pref := byz.Preference(); !pref.Equals(Blue) {
t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref)
}
oneGreen := ids.Bag{}
oneGreen.Add(Green)
byz.RecordPoll(oneGreen)
if pref := byz.Preference(); !pref.Equals(Blue) {
t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref)
}
byz.RecordUnsuccessfulPoll()
if pref := byz.Preference(); !pref.Equals(Blue) {
t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref)
}
if final := byz.Finalized(); !final {
t.Fatalf("Should be marked as accepted")
}
if str := byz.String(); str != Blue.String() {
t.Fatalf("Wrong string, expected %s returned %s", Blue, str)
}
}

View File

@ -11,6 +11,46 @@ import (
"github.com/ava-labs/gecko/ids"
)
// ByzantineFactory implements Factory by returning a byzantine struct
type ByzantineFactory struct{}
// New implements Factory
func (ByzantineFactory) New() Consensus { return &Byzantine{} }
// Byzantine is a naive implementation of a multi-choice snowball instance
type Byzantine struct {
// params contains all the configurations of a snowball instance
params Parameters
// Hardcode the preference
preference ids.ID
}
// Initialize implements the Consensus interface
func (b *Byzantine) Initialize(params Parameters, choice ids.ID) {
b.params = params
b.preference = choice
}
// Parameters implements the Consensus interface
func (b *Byzantine) Parameters() Parameters { return b.params }
// Add implements the Consensus interface
func (b *Byzantine) Add(choice ids.ID) {}
// Preference implements the Consensus interface
func (b *Byzantine) Preference() ids.ID { return b.preference }
// RecordPoll implements the Consensus interface
func (b *Byzantine) RecordPoll(votes ids.Bag) {}
// RecordUnsuccessfulPoll implements the Consensus interface
func (b *Byzantine) RecordUnsuccessfulPoll() {}
// Finalized implements the Consensus interface
func (b *Byzantine) Finalized() bool { return true }
func (b *Byzantine) String() string { return b.preference.String() }
var (
Red = ids.Empty.Prefix(0)
Blue = ids.Empty.Prefix(1)

View File

@ -34,7 +34,7 @@ func (f *Flat) Parameters() Parameters { return f.params }
// RecordPoll implements the Consensus interface
func (f *Flat) RecordPoll(votes ids.Bag) {
if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha {
f.nnarySnowball.RecordSuccessfulPoll(pollMode)
f.RecordSuccessfulPoll(pollMode)
} else {
f.RecordUnsuccessfulPoll()
}

View File

@ -51,7 +51,7 @@ func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) {
return // This instance is already decided.
}
if preference := sf.nnarySlush.Preference(); preference.Equals(choice) {
if preference := sf.Preference(); preference.Equals(choice) {
sf.confidence++
} else {
// confidence is set to 1 because there has already been 1 successful

View File

@ -125,14 +125,14 @@ func TestParametersAnotherInvalidBetaRogue(t *testing.T) {
func TestParametersInvalidConcurrentRepolls(t *testing.T) {
tests := []Parameters{
Parameters{
{
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 2,
},
Parameters{
{
K: 1,
Alpha: 1,
BetaVirtuous: 1,

View File

@ -27,11 +27,13 @@ func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball {
bs := &binarySnowball{
binarySnowflake: binarySnowflake{
binarySlush: binarySlush{preference: choice},
confidence: sb.confidence,
beta: beta,
finalized: sb.Finalized(),
},
preference: choice,
}
bs.numSuccessfulPolls[choice] = sb.numSuccessfulPolls
return bs
}
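A sketch of what copying confidence and finalized buys (consistent with the TestUnarySnowball hunk below): a unary snowball that has just seen a successful poll carries Confidence = 1 into the binary instance it Extends, so with beta = 2 one further successful poll for the same choice finalizes it instead of restarting the count at zero.

// sb has recorded one successful poll, so its confidence is 1
bs := sb.Extend(2 /* beta */, 0 /* choice */)
bs.RecordSuccessfulPoll(0) // confidence reaches beta == 2: finalized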

View File

@ -42,11 +42,32 @@ func TestUnarySnowball(t *testing.T) {
binarySnowball := sbClone.Extend(beta, 0)
expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF(Confidence = 1, Finalized = false, SL(Preference = 0)))"
if result := binarySnowball.String(); result != expected {
t.Fatalf("Expected:\n%s\nReturned:\n%s", expected, result)
}
binarySnowball.RecordUnsuccessfulPoll()
for i := 0; i < 3; i++ {
if binarySnowball.Preference() != 0 {
t.Fatalf("Wrong preference")
} else if binarySnowball.Finalized() {
t.Fatalf("Should not have finalized")
}
binarySnowball.RecordSuccessfulPoll(1)
binarySnowball.RecordUnsuccessfulPoll()
}
if binarySnowball.Preference() != 1 {
t.Fatalf("Wrong preference")
} else if binarySnowball.Finalized() {
t.Fatalf("Should not have finalized")
}
binarySnowball.RecordSuccessfulPoll(1)
if binarySnowball.Finalized() {
if binarySnowball.Preference() != 1 {
t.Fatalf("Wrong preference")
} else if binarySnowball.Finalized() {
t.Fatalf("Should not have finalized")
}
@ -57,4 +78,9 @@ func TestUnarySnowball(t *testing.T) {
} else if !binarySnowball.Finalized() {
t.Fatalf("Should have finalized")
}
expected = "SB(NumSuccessfulPolls = 2, SF(Confidence = 1, Finalized = false))"
if str := sb.String(); str != expected {
t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str)
}
}

View File

@ -17,6 +17,7 @@ type TestBlock struct {
height int
status choices.Status
bytes []byte
err error
}
func (b *TestBlock) Parent() Block { return b.parent }
@ -27,16 +28,16 @@ func (b *TestBlock) Accept() error {
return errors.New("Dis-agreement")
}
b.status = choices.Accepted
return nil
return b.err
}
func (b *TestBlock) Reject() error {
if b.status.Decided() && b.status != choices.Rejected {
return errors.New("Dis-agreement")
}
b.status = choices.Rejected
return nil
return b.err
}
func (b *TestBlock) Verify() error { return nil }
func (b *TestBlock) Verify() error { return b.err }
func (b *TestBlock) Bytes() []byte { return b.bytes }
type sortBlocks []*TestBlock

View File

@ -4,6 +4,7 @@
package snowman
import (
"errors"
"math/rand"
"testing"
@ -42,6 +43,10 @@ var (
MetricsProcessingErrorTest,
MetricsAcceptedErrorTest,
MetricsRejectedErrorTest,
ErrorOnInitialRejectionTest,
ErrorOnAcceptTest,
ErrorOnRejectSiblingTest,
ErrorOnTransitiveRejectionTest,
RandomizedConsistencyTest,
}
)
@ -71,11 +76,9 @@ func InitializeTest(t *testing.T, factory Factory) {
if p := sm.Parameters(); p != params {
t.Fatalf("Wrong returned parameters")
}
if pref := sm.Preference(); !pref.Equals(GenesisID) {
} else if pref := sm.Preference(); !pref.Equals(GenesisID) {
t.Fatalf("Wrong preference returned")
}
if !sm.Finalized() {
} else if !sm.Finalized() {
t.Fatalf("Wrong should have marked the instance as being finalized")
}
}
@ -101,9 +104,9 @@ func AddToTailTest(t *testing.T, factory Factory) {
}
// Adding to the previous preference will update the preference
sm.Add(block)
if pref := sm.Preference(); !pref.Equals(block.id) {
if err := sm.Add(block); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(block.id) {
t.Fatalf("Wrong preference. Expected %s, got %s", block.id, pref)
}
}
@ -133,17 +136,17 @@ func AddToNonTailTest(t *testing.T, factory Factory) {
}
// Adding to the previous preference will update the preference
sm.Add(firstBlock)
if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
if err := sm.Add(firstBlock); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref)
}
// Adding to something other than the previous preference won't update the
// preference
sm.Add(secondBlock)
if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
if err := sm.Add(secondBlock); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref)
}
}
@ -171,9 +174,9 @@ func AddToUnknownTest(t *testing.T, factory Factory) {
// Adding a block with an unknown parent means the parent must have already
// been rejected. Therefore the block should be immediately rejected
sm.Add(block)
if pref := sm.Preference(); !pref.Equals(GenesisID) {
if err := sm.Add(block); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(GenesisID) {
t.Fatalf("Wrong preference. Expected %s, got %s", GenesisID, pref)
} else if status := block.Status(); status != choices.Rejected {
t.Fatalf("Should have rejected the block")
@ -269,9 +272,9 @@ func IssuedIssuedTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block)
if !sm.Issued(block) {
if err := sm.Add(block); err != nil {
t.Fatal(err)
} else if !sm.Issued(block) {
t.Fatalf("Should have marked a pending block as having been issued")
}
}
@ -296,24 +299,23 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block)
if err := sm.Add(block); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block.id)
sm.RecordPoll(votes)
if pref := sm.Preference(); !pref.Equals(block.id) {
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(block.id) {
t.Fatalf("Preference returned the wrong block")
} else if sm.Finalized() {
t.Fatalf("Snowman instance finalized too soon")
} else if status := block.Status(); status != choices.Processing {
t.Fatalf("Block's status changed unexpectedly")
}
sm.RecordPoll(votes)
if pref := sm.Preference(); !pref.Equals(block.id) {
} else if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(block.id) {
t.Fatalf("Preference returned the wrong block")
} else if !sm.Finalized() {
t.Fatalf("Snowman instance didn't finalize")
@ -347,15 +349,18 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(firstBlock)
sm.Add(secondBlock)
if err := sm.Add(firstBlock); err != nil {
t.Fatal(err)
} else if err := sm.Add(secondBlock); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(firstBlock.id)
sm.RecordPoll(votes)
if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
t.Fatalf("Preference returned the wrong block")
} else if sm.Finalized() {
t.Fatalf("Snowman instance finalized too soon")
@ -363,11 +368,9 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) {
t.Fatalf("Block's status changed unexpectedly")
} else if status := secondBlock.Status(); status != choices.Processing {
t.Fatalf("Block's status changed unexpectedly")
}
sm.RecordPoll(votes)
if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
} else if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if pref := sm.Preference(); !pref.Equals(firstBlock.id) {
t.Fatalf("Preference returned the wrong block")
} else if !sm.Finalized() {
t.Fatalf("Snowman instance didn't finalize")
@ -394,9 +397,9 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) {
votes := ids.Bag{}
votes.Add(GenesisID)
sm.RecordPoll(votes)
if !sm.Finalized() {
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if !sm.Finalized() {
t.Fatalf("Consensus should still be finalized")
} else if pref := sm.Preference(); !GenesisID.Equals(pref) {
t.Fatalf("Wrong preference listed")
@ -433,9 +436,13 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block0)
sm.Add(block1)
sm.Add(block2)
if err := sm.Add(block0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block1); err != nil {
t.Fatal(err)
} else if err := sm.Add(block2); err != nil {
t.Fatal(err)
}
// Current graph structure:
// G
@ -447,7 +454,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) {
votes := ids.Bag{}
votes.Add(block0.id)
sm.RecordPoll(votes)
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
}
// Current graph structure:
// 0
@ -457,9 +466,7 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) {
t.Fatalf("Finalized too late")
} else if pref := sm.Preference(); !block0.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
}
if status := block0.Status(); status != choices.Accepted {
} else if status := block0.Status(); status != choices.Accepted {
t.Fatalf("Wrong status returned")
} else if status := block1.Status(); status != choices.Rejected {
t.Fatalf("Wrong status returned")
@ -503,10 +510,15 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block0)
sm.Add(block1)
sm.Add(block2)
sm.Add(block3)
if err := sm.Add(block0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block1); err != nil {
t.Fatal(err)
} else if err := sm.Add(block2); err != nil {
t.Fatal(err)
} else if err := sm.Add(block3); err != nil {
t.Fatal(err)
}
// Current graph structure:
// G
@ -517,26 +529,24 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) {
votesFor2 := ids.Bag{}
votesFor2.Add(block2.id)
sm.RecordPoll(votesFor2)
if sm.Finalized() {
if err := sm.RecordPoll(votesFor2); err != nil {
t.Fatal(err)
} else if sm.Finalized() {
t.Fatalf("Finalized too early")
} else if pref := sm.Preference(); !block2.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
}
emptyVotes := ids.Bag{}
sm.RecordPoll(emptyVotes)
if sm.Finalized() {
if err := sm.RecordPoll(emptyVotes); err != nil {
t.Fatal(err)
} else if sm.Finalized() {
t.Fatalf("Finalized too early")
} else if pref := sm.Preference(); !block2.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
}
sm.RecordPoll(votesFor2)
if sm.Finalized() {
} else if err := sm.RecordPoll(votesFor2); err != nil {
t.Fatal(err)
} else if sm.Finalized() {
t.Fatalf("Finalized too early")
} else if pref := sm.Preference(); !block2.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
@ -544,23 +554,19 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) {
votesFor3 := ids.Bag{}
votesFor3.Add(block3.id)
sm.RecordPoll(votesFor3)
if sm.Finalized() {
if err := sm.RecordPoll(votesFor3); err != nil {
t.Fatal(err)
} else if sm.Finalized() {
t.Fatalf("Finalized too early")
} else if pref := sm.Preference(); !block2.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
}
sm.RecordPoll(votesFor3)
if !sm.Finalized() {
} else if err := sm.RecordPoll(votesFor3); err != nil {
t.Fatal(err)
} else if !sm.Finalized() {
t.Fatalf("Finalized too late")
} else if pref := sm.Preference(); !block3.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
}
if status := block0.Status(); status != choices.Rejected {
} else if status := block0.Status(); status != choices.Rejected {
t.Fatalf("Wrong status returned")
} else if status := block1.Status(); status != choices.Accepted {
t.Fatalf("Wrong status returned")
@ -592,19 +598,23 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) {
}
unknownBlockID := ids.Empty.Prefix(2)
sm.Add(block)
if err := sm.Add(block); err != nil {
t.Fatal(err)
}
validVotes := ids.Bag{}
validVotes.Add(block.id)
sm.RecordPoll(validVotes)
if err := sm.RecordPoll(validVotes); err != nil {
t.Fatal(err)
}
invalidVotes := ids.Bag{}
invalidVotes.Add(unknownBlockID)
sm.RecordPoll(invalidVotes)
sm.RecordPoll(validVotes)
if sm.Finalized() {
if err := sm.RecordPoll(invalidVotes); err != nil {
t.Fatal(err)
} else if err := sm.RecordPoll(validVotes); err != nil {
t.Fatal(err)
} else if sm.Finalized() {
t.Fatalf("Finalized too early")
} else if pref := sm.Preference(); !block.id.Equals(pref) {
t.Fatalf("Wrong preference listed")
@ -651,11 +661,17 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block0)
sm.Add(block1)
sm.Add(block2)
sm.Add(block3)
sm.Add(block4)
if err := sm.Add(block0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block1); err != nil {
t.Fatal(err)
} else if err := sm.Add(block2); err != nil {
t.Fatal(err)
} else if err := sm.Add(block3); err != nil {
t.Fatal(err)
} else if err := sm.Add(block4); err != nil {
t.Fatal(err)
}
// Current graph structure:
// G
@ -668,10 +684,14 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) {
// Tail = 2
votes0_2_4 := ids.Bag{}
votes0_2_4.Add(block0.id)
votes0_2_4.Add(block2.id)
votes0_2_4.Add(block4.id)
sm.RecordPoll(votes0_2_4)
votes0_2_4.Add(
block0.id,
block2.id,
block4.id,
)
if err := sm.RecordPoll(votes0_2_4); err != nil {
t.Fatal(err)
}
// Current graph structure:
// 0
@ -699,7 +719,9 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) {
dep2_2_2 := ids.Bag{}
dep2_2_2.AddCount(block2.id, 3)
sm.RecordPoll(dep2_2_2)
if err := sm.RecordPoll(dep2_2_2); err != nil {
t.Fatal(err)
}
// Current graph structure:
// 2
@ -757,20 +779,25 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block0)
sm.Add(block1)
if err := sm.Add(block0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block1); err != nil {
t.Fatal(err)
}
votes0 := ids.Bag{}
votes0.Add(block0.id)
sm.RecordPoll(votes0)
sm.Add(block2)
if err := sm.RecordPoll(votes0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block2); err != nil {
t.Fatal(err)
}
// block2 is already rejected.
sm.Add(block3)
if status := block0.Status(); status == choices.Accepted {
if err := sm.Add(block3); err != nil {
t.Fatal(err)
} else if status := block0.Status(); status == choices.Accepted {
t.Fatalf("Shouldn't be accepted yet")
}
@ -778,9 +805,9 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) {
// block0. Because block2 is already rejected, this will accept block0.
votes3 := ids.Bag{}
votes3.Add(block3.id)
sm.RecordPoll(votes3)
if !sm.Finalized() {
if err := sm.RecordPoll(votes3); err != nil {
t.Fatal(err)
} else if !sm.Finalized() {
t.Fatalf("Finalized too late")
} else if status := block0.Status(); status != choices.Accepted {
t.Fatalf("Should be accepted")
@ -818,14 +845,15 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block)
if err := sm.Add(block); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block.id)
sm.RecordPoll(votes)
if !sm.Finalized() {
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if !sm.Finalized() {
t.Fatalf("Snowman instance didn't finalize")
}
}
@ -861,14 +889,15 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block)
if err := sm.Add(block); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block.id)
sm.RecordPoll(votes)
if !sm.Finalized() {
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if !sm.Finalized() {
t.Fatalf("Snowman instance didn't finalize")
}
}
@ -904,18 +933,171 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) {
status: choices.Processing,
}
sm.Add(block)
if err := sm.Add(block); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block.id)
sm.RecordPoll(votes)
if !sm.Finalized() {
if err := sm.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if !sm.Finalized() {
t.Fatalf("Snowman instance didn't finalize")
}
}
func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) {
sm := factory.New()
ctx := snow.DefaultContextTest()
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 1,
}
sm.Initialize(ctx, params, GenesisID)
rejectedBlock := &TestBlock{
id: ids.Empty.Prefix(1),
status: choices.Rejected,
}
block := &TestBlock{
parent: rejectedBlock,
id: ids.Empty.Prefix(2),
status: choices.Processing,
err: errors.New(""),
}
if err := sm.Add(block); err == nil {
t.Fatalf("Should have errored on rejecting the rejectable block")
}
}
func ErrorOnAcceptTest(t *testing.T, factory Factory) {
sm := factory.New()
ctx := snow.DefaultContextTest()
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 1,
}
sm.Initialize(ctx, params, GenesisID)
block := &TestBlock{
parent: Genesis,
id: ids.Empty.Prefix(1),
status: choices.Processing,
err: errors.New(""),
}
if err := sm.Add(block); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block.id)
if err := sm.RecordPoll(votes); err == nil {
t.Fatalf("Should have errored on accepted the block")
}
}
func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) {
sm := factory.New()
ctx := snow.DefaultContextTest()
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 1,
}
sm.Initialize(ctx, params, GenesisID)
block0 := &TestBlock{
parent: Genesis,
id: ids.Empty.Prefix(1),
status: choices.Processing,
}
block1 := &TestBlock{
parent: Genesis,
id: ids.Empty.Prefix(2),
status: choices.Processing,
err: errors.New(""),
}
if err := sm.Add(block0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block1); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block0.id)
if err := sm.RecordPoll(votes); err == nil {
t.Fatalf("Should have errored on rejecting the block's sibling")
}
}
func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) {
sm := factory.New()
ctx := snow.DefaultContextTest()
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1,
Alpha: 1,
BetaVirtuous: 1,
BetaRogue: 1,
ConcurrentRepolls: 1,
}
sm.Initialize(ctx, params, GenesisID)
block0 := &TestBlock{
parent: Genesis,
id: ids.Empty.Prefix(1),
status: choices.Processing,
}
block1 := &TestBlock{
parent: Genesis,
id: ids.Empty.Prefix(2),
status: choices.Processing,
}
block2 := &TestBlock{
parent: block1,
id: ids.Empty.Prefix(3),
status: choices.Processing,
err: errors.New(""),
}
if err := sm.Add(block0); err != nil {
t.Fatal(err)
} else if err := sm.Add(block1); err != nil {
t.Fatal(err)
} else if err := sm.Add(block2); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(block0.id)
if err := sm.RecordPoll(votes); err == nil {
t.Fatalf("Should have errored on transitively rejecting the block")
}
}
func RandomizedConsistencyTest(t *testing.T, factory Factory) {
numColors := 50
numNodes := 100

View File

@ -9,6 +9,10 @@ import (
"github.com/ava-labs/gecko/snow/consensus/snowball"
)
const (
minMapSize = 16
)
// TopologicalFactory implements Factory by returning a topological struct
type TopologicalFactory struct{}
@ -183,7 +187,7 @@ func (ts *Topological) Finalized() bool { return len(ts.blocks) == 1 }
// the non-transitively applied votes. Also returns the list of leaf blocks.
func (ts *Topological) calculateInDegree(
votes ids.Bag) (map[[32]byte]kahnNode, []ids.ID) {
kahns := make(map[[32]byte]kahnNode)
kahns := make(map[[32]byte]kahnNode, minMapSize)
leaves := ids.Set{}
for _, vote := range votes.List() {
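The minMapSize constant above is only a capacity hint: passing it as the second argument to make lets the kahns map start with room for a handful of entries instead of growing from empty on every poll. A tiny sketch of the idiom (only the value 16 is taken from the code above; the rest is illustrative):

package main

import "fmt"

const minMapSize = 16 // initial capacity hint, not an upper bound

func main() {
	// The map still grows past minMapSize if needed; the hint just
	// avoids a few early rehash/grow steps on a hot path.
	counts := make(map[string]int, minMapSize)
	for _, vote := range []string{"a", "b", "a"} {
		counts[vote]++
	}
	fmt.Println(counts["a"], counts["b"]) // 2 1
}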

View File

@ -4,6 +4,7 @@
package snowstorm
import (
"errors"
"testing"
"github.com/prometheus/client_golang/prometheus"
@ -19,6 +20,28 @@ var (
Green = &TestTx{Identifier: ids.Empty.Prefix(1)}
Blue = &TestTx{Identifier: ids.Empty.Prefix(2)}
Alpha = &TestTx{Identifier: ids.Empty.Prefix(3)}
Tests = []func(*testing.T, Factory){
MetricsTest,
ParamsTest,
IssuedTest,
LeftoverInputTest,
LowerConfidenceTest,
MiddleConfidenceTest,
IndependentTest,
VirtuousTest,
IsVirtuousTest,
QuiesceTest,
AcceptingDependencyTest,
RejectingDependencyTest,
VacuouslyAcceptedTest,
ConflictsTest,
VirtuousDependsOnRogueTest,
ErrorOnVacuouslyAcceptedTest,
ErrorOnAcceptedTest,
ErrorOnRejectingLowerConfidenceConflictTest,
ErrorOnRejectingHigherConfidenceConflictTest,
}
)
// R - G - B - A
@ -46,6 +69,52 @@ func Setup() {
Alpha.Reset()
}
// Execute all tests against a consensus implementation
func ConsensusTest(t *testing.T, factory Factory, prefix string) {
for _, test := range Tests {
test(t, factory)
}
StringTest(t, factory, prefix)
}
func MetricsTest(t *testing.T, factory Factory) {
Setup()
{
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2,
}
params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{
Name: "tx_processing",
}))
graph := factory.New()
graph.Initialize(snow.DefaultContextTest(), params)
}
{
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2,
}
params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{
Name: "tx_accepted",
}))
graph := factory.New()
graph.Initialize(snow.DefaultContextTest(), params)
}
{
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2,
}
params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{
Name: "tx_rejected",
}))
graph := factory.New()
graph.Initialize(snow.DefaultContextTest(), params)
}
}
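Each block in MetricsTest above pre-registers a collector under a name the engine will try to register during Initialize, forcing a duplicate-registration failure and exercising that error path. A standalone sketch of the prometheus behavior the test leans on (only the public client_golang API is assumed):

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	reg := prometheus.NewRegistry()
	first := prometheus.NewCounter(prometheus.CounterOpts{Name: "tx_processing"})
	if err := reg.Register(first); err != nil {
		fmt.Println("unexpected:", err)
	}
	// A second collector with the same fully-qualified name is refused
	// with an AlreadyRegisteredError, which the engine has to handle.
	dup := prometheus.NewCounter(prometheus.CounterOpts{Name: "tx_processing"})
	if err := reg.Register(dup); err != nil {
		fmt.Println("duplicate rejected:", err)
	}
}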
func ParamsTest(t *testing.T, factory Factory) {
Setup()
@ -81,15 +150,13 @@ func IssuedTest(t *testing.T, factory Factory) {
if issued := graph.Issued(Red); issued {
t.Fatalf("Haven't issued anything yet.")
}
graph.Add(Red)
if issued := graph.Issued(Red); !issued {
} else if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if issued := graph.Issued(Red); !issued {
t.Fatalf("Have already issued.")
}
Blue.Accept()
_ = Blue.Accept()
if issued := graph.Issued(Blue); !issued {
t.Fatalf("Have already accepted.")
@ -106,10 +173,12 @@ func LeftoverInputTest(t *testing.T, factory Factory) {
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Green)
if prefs := graph.Preferences(); prefs.Len() != 1 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 1 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0])
@ -120,15 +189,13 @@ func LeftoverInputTest(t *testing.T, factory Factory) {
r := ids.Bag{}
r.SetThreshold(2)
r.AddCount(Red.ID(), 2)
graph.RecordPoll(r)
if prefs := graph.Preferences(); prefs.Len() != 0 {
if err := graph.RecordPoll(r); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 0 {
t.Fatalf("Wrong number of preferences.")
} else if !graph.Finalized() {
t.Fatalf("Finalized too late")
}
if Red.Status() != choices.Accepted {
} else if Red.Status() != choices.Accepted {
t.Fatalf("%s should have been accepted", Red.ID())
} else if Green.Status() != choices.Rejected {
t.Fatalf("%s should have been rejected", Green.ID())
@ -145,11 +212,14 @@ func LowerConfidenceTest(t *testing.T, factory Factory) {
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Green)
graph.Add(Blue)
if prefs := graph.Preferences(); prefs.Len() != 1 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if err := graph.Add(Blue); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 1 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0])
@ -160,9 +230,9 @@ func LowerConfidenceTest(t *testing.T, factory Factory) {
r := ids.Bag{}
r.SetThreshold(2)
r.AddCount(Red.ID(), 2)
graph.RecordPoll(r)
if prefs := graph.Preferences(); prefs.Len() != 1 {
if err := graph.RecordPoll(r); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 1 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Blue.ID()) {
t.Fatalf("Wrong preference. Expected %s", Blue.ID())
@ -181,12 +251,16 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) {
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Green)
graph.Add(Alpha)
graph.Add(Blue)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if err := graph.Add(Alpha); err != nil {
t.Fatal(err)
} else if err := graph.Add(Blue); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s", Red.ID())
@ -199,9 +273,9 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) {
r := ids.Bag{}
r.SetThreshold(2)
r.AddCount(Red.ID(), 2)
graph.RecordPoll(r)
if prefs := graph.Preferences(); prefs.Len() != 1 {
if err := graph.RecordPoll(r); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 1 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Alpha.ID()) {
t.Fatalf("Wrong preference. Expected %s", Alpha.ID())
@ -209,6 +283,7 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) {
t.Fatalf("Finalized too early")
}
}
func IndependentTest(t *testing.T, factory Factory) {
Setup()
@ -219,10 +294,12 @@ func IndependentTest(t *testing.T, factory Factory) {
K: 2, Alpha: 2, BetaVirtuous: 2, BetaRogue: 2,
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Alpha)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Alpha); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s", Red.ID())
@ -236,9 +313,9 @@ func IndependentTest(t *testing.T, factory Factory) {
ra.SetThreshold(2)
ra.AddCount(Red.ID(), 2)
ra.AddCount(Alpha.ID(), 2)
graph.RecordPoll(ra)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.RecordPoll(ra); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s", Red.ID())
@ -246,11 +323,9 @@ func IndependentTest(t *testing.T, factory Factory) {
t.Fatalf("Wrong preference. Expected %s", Alpha.ID())
} else if graph.Finalized() {
t.Fatalf("Finalized too early")
}
graph.RecordPoll(ra)
if prefs := graph.Preferences(); prefs.Len() != 0 {
} else if err := graph.RecordPoll(ra); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 0 {
t.Fatalf("Wrong number of preferences.")
} else if !graph.Finalized() {
t.Fatalf("Finalized too late")
@ -267,35 +342,30 @@ func VirtuousTest(t *testing.T, factory Factory) {
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1,
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
if virtuous := graph.Virtuous(); virtuous.Len() != 1 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if virtuous := graph.Virtuous(); virtuous.Len() != 1 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(Red.ID()) {
t.Fatalf("Wrong virtuous. Expected %s", Red.ID())
}
graph.Add(Alpha)
if virtuous := graph.Virtuous(); virtuous.Len() != 2 {
} else if err := graph.Add(Alpha); err != nil {
t.Fatal(err)
} else if virtuous := graph.Virtuous(); virtuous.Len() != 2 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(Red.ID()) {
t.Fatalf("Wrong virtuous. Expected %s", Red.ID())
} else if !virtuous.Contains(Alpha.ID()) {
t.Fatalf("Wrong virtuous. Expected %s", Alpha.ID())
}
graph.Add(Green)
if virtuous := graph.Virtuous(); virtuous.Len() != 1 {
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if virtuous := graph.Virtuous(); virtuous.Len() != 1 {
t.Fatalf("Wrong number of virtuous.")
} else if !virtuous.Contains(Alpha.ID()) {
t.Fatalf("Wrong virtuous. Expected %s", Alpha.ID())
}
graph.Add(Blue)
if virtuous := graph.Virtuous(); virtuous.Len() != 0 {
} else if err := graph.Add(Blue); err != nil {
t.Fatal(err)
} else if virtuous := graph.Virtuous(); virtuous.Len() != 0 {
t.Fatalf("Wrong number of virtuous.")
}
}
@ -319,11 +389,9 @@ func IsVirtuousTest(t *testing.T, factory Factory) {
t.Fatalf("Should be virtuous")
} else if !graph.IsVirtuous(Alpha) {
t.Fatalf("Should be virtuous")
}
graph.Add(Red)
if !graph.IsVirtuous(Red) {
} else if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if !graph.IsVirtuous(Red) {
t.Fatalf("Should be virtuous")
} else if graph.IsVirtuous(Green) {
t.Fatalf("Should not be virtuous")
@ -331,11 +399,9 @@ func IsVirtuousTest(t *testing.T, factory Factory) {
t.Fatalf("Should be virtuous")
} else if !graph.IsVirtuous(Alpha) {
t.Fatalf("Should be virtuous")
}
graph.Add(Green)
if graph.IsVirtuous(Red) {
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if graph.IsVirtuous(Red) {
t.Fatalf("Should not be virtuous")
} else if graph.IsVirtuous(Green) {
t.Fatalf("Should not be virtuous")
@ -357,17 +423,13 @@ func QuiesceTest(t *testing.T, factory Factory) {
if !graph.Quiesce() {
t.Fatalf("Should quiesce")
}
graph.Add(Red)
if graph.Quiesce() {
} else if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if graph.Quiesce() {
t.Fatalf("Shouldn't quiesce")
}
graph.Add(Green)
if !graph.Quiesce() {
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if !graph.Quiesce() {
t.Fatalf("Should quiesce")
}
}
@ -390,11 +452,13 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) {
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Green)
graph.Add(purple)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if err := graph.Add(purple); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s", Red.ID())
@ -410,10 +474,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) {
g := ids.Bag{}
g.Add(Green.ID())
graph.RecordPoll(g)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.RecordPoll(g); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Green.ID()) {
t.Fatalf("Wrong preference. Expected %s", Green.ID())
@ -429,10 +492,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) {
rp := ids.Bag{}
rp.Add(Red.ID(), purple.ID())
graph.RecordPoll(rp)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.RecordPoll(rp); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Green.ID()) {
t.Fatalf("Wrong preference. Expected %s", Green.ID())
@ -448,10 +510,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) {
r := ids.Bag{}
r.Add(Red.ID())
graph.RecordPoll(r)
if prefs := graph.Preferences(); prefs.Len() != 0 {
if err := graph.RecordPoll(r); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 0 {
t.Fatalf("Wrong number of preferences.")
} else if Red.Status() != choices.Accepted {
t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted)
@ -480,12 +541,15 @@ func RejectingDependencyTest(t *testing.T, factory Factory) {
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Green)
graph.Add(Blue)
graph.Add(purple)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if err := graph.Add(Blue); err != nil {
t.Fatal(err)
} else if err := graph.Add(purple); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s", Red.ID())
@ -503,10 +567,9 @@ func RejectingDependencyTest(t *testing.T, factory Factory) {
gp := ids.Bag{}
gp.Add(Green.ID(), purple.ID())
graph.RecordPoll(gp)
if prefs := graph.Preferences(); prefs.Len() != 2 {
if err := graph.RecordPoll(gp); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 2 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Green.ID()) {
t.Fatalf("Wrong preference. Expected %s", Green.ID())
@ -520,11 +583,9 @@ func RejectingDependencyTest(t *testing.T, factory Factory) {
t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing)
} else if purple.Status() != choices.Processing {
t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing)
}
graph.RecordPoll(gp)
if prefs := graph.Preferences(); prefs.Len() != 0 {
} else if err := graph.RecordPoll(gp); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 0 {
t.Fatalf("Wrong number of preferences.")
} else if Red.Status() != choices.Rejected {
t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Rejected)
@ -553,9 +614,9 @@ func VacuouslyAcceptedTest(t *testing.T, factory Factory) {
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(purple)
if prefs := graph.Preferences(); prefs.Len() != 0 {
if err := graph.Add(purple); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 0 {
t.Fatalf("Wrong number of preferences.")
} else if status := purple.Status(); status != choices.Accepted {
t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Accepted)
@ -593,17 +654,15 @@ func ConflictsTest(t *testing.T, factory Factory) {
Ins: insPurple,
}
graph.Add(purple)
if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 {
if err := graph.Add(purple); err != nil {
t.Fatal(err)
} else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 {
t.Fatalf("Wrong number of conflicts")
} else if !orangeConflicts.Contains(purple.Identifier) {
t.Fatalf("Conflicts does not contain the right transaction")
}
graph.Add(orange)
if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 {
} else if err := graph.Add(orange); err != nil {
t.Fatal(err)
} else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 {
t.Fatalf("Wrong number of conflicts")
} else if !orangeConflicts.Contains(purple.Identifier) {
t.Fatalf("Conflicts does not contain the right transaction")
@ -643,17 +702,20 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) {
virtuous.Ins.Add(input2)
graph.Add(rogue1)
graph.Add(rogue2)
graph.Add(virtuous)
if err := graph.Add(rogue1); err != nil {
t.Fatal(err)
} else if err := graph.Add(rogue2); err != nil {
t.Fatal(err)
} else if err := graph.Add(virtuous); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(rogue1.ID())
votes.Add(virtuous.ID())
graph.RecordPoll(votes)
if status := rogue1.Status(); status != choices.Processing {
if err := graph.RecordPoll(votes); err != nil {
t.Fatal(err)
} else if status := rogue1.Status(); status != choices.Processing {
t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing)
} else if status := rogue2.Status(); status != choices.Processing {
t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing)
@ -664,6 +726,135 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) {
}
}
func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) {
Setup()
graph := factory.New()
purple := &TestTx{
Identifier: ids.Empty.Prefix(7),
Stat: choices.Processing,
Validity: errors.New(""),
}
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2,
}
graph.Initialize(snow.DefaultContextTest(), params)
if err := graph.Add(purple); err == nil {
t.Fatalf("Should have errored on acceptance")
}
}
func ErrorOnAcceptedTest(t *testing.T, factory Factory) {
Setup()
graph := factory.New()
purple := &TestTx{
Identifier: ids.Empty.Prefix(7),
Stat: choices.Processing,
Validity: errors.New(""),
}
purple.Ins.Add(ids.Empty.Prefix(4))
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2,
}
graph.Initialize(snow.DefaultContextTest(), params)
if err := graph.Add(purple); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(purple.ID())
if err := graph.RecordPoll(votes); err == nil {
t.Fatalf("Should have errored on accepting an invalid tx")
}
}
func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) {
Setup()
graph := factory.New()
X := ids.Empty.Prefix(4)
purple := &TestTx{
Identifier: ids.Empty.Prefix(7),
Stat: choices.Processing,
}
purple.Ins.Add(X)
pink := &TestTx{
Identifier: ids.Empty.Prefix(8),
Stat: choices.Processing,
Validity: errors.New(""),
}
pink.Ins.Add(X)
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1,
}
graph.Initialize(snow.DefaultContextTest(), params)
if err := graph.Add(purple); err != nil {
t.Fatal(err)
} else if err := graph.Add(pink); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(purple.ID())
if err := graph.RecordPoll(votes); err == nil {
t.Fatalf("Should have errored on rejecting an invalid tx")
}
}
func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) {
Setup()
graph := factory.New()
X := ids.Empty.Prefix(4)
purple := &TestTx{
Identifier: ids.Empty.Prefix(7),
Stat: choices.Processing,
}
purple.Ins.Add(X)
pink := &TestTx{
Identifier: ids.Empty.Prefix(8),
Stat: choices.Processing,
Validity: errors.New(""),
}
pink.Ins.Add(X)
params := snowball.Parameters{
Metrics: prometheus.NewRegistry(),
K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1,
}
graph.Initialize(snow.DefaultContextTest(), params)
if err := graph.Add(pink); err != nil {
t.Fatal(err)
} else if err := graph.Add(purple); err != nil {
t.Fatal(err)
}
votes := ids.Bag{}
votes.Add(purple.ID())
if err := graph.RecordPoll(votes); err == nil {
t.Fatalf("Should have errored on rejecting an invalid tx")
}
}
func StringTest(t *testing.T, factory Factory, prefix string) {
Setup()
@ -674,12 +865,16 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2,
}
graph.Initialize(snow.DefaultContextTest(), params)
graph.Add(Red)
graph.Add(Green)
graph.Add(Blue)
graph.Add(Alpha)
if prefs := graph.Preferences(); prefs.Len() != 1 {
if err := graph.Add(Red); err != nil {
t.Fatal(err)
} else if err := graph.Add(Green); err != nil {
t.Fatal(err)
} else if err := graph.Add(Blue); err != nil {
t.Fatal(err)
} else if err := graph.Add(Alpha); err != nil {
t.Fatal(err)
} else if prefs := graph.Preferences(); prefs.Len() != 1 {
t.Fatalf("Wrong number of preferences.")
} else if !prefs.Contains(Red.ID()) {
t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0])
@ -691,8 +886,11 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
rb.SetThreshold(2)
rb.AddCount(Red.ID(), 2)
rb.AddCount(Blue.ID(), 2)
graph.RecordPoll(rb)
graph.Add(Blue)
if err := graph.RecordPoll(rb); err != nil {
t.Fatal(err)
} else if err := graph.Add(Blue); err != nil {
t.Fatal(err)
}
{
expected := prefix + "(\n" +
@ -720,7 +918,9 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
ga.SetThreshold(2)
ga.AddCount(Green.ID(), 2)
ga.AddCount(Alpha.ID(), 2)
graph.RecordPoll(ga)
if err := graph.RecordPoll(ga); err != nil {
t.Fatal(err)
}
{
expected := prefix + "(\n" +
@ -745,7 +945,9 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
}
empty := ids.Bag{}
graph.RecordPoll(empty)
if err := graph.RecordPoll(empty); err != nil {
t.Fatal(err)
}
{
expected := prefix + "(\n" +
@ -767,10 +969,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
t.Fatalf("Wrong preference. Expected %s", Blue.ID())
} else if graph.Finalized() {
t.Fatalf("Finalized too early")
} else if err := graph.RecordPoll(ga); err != nil {
t.Fatal(err)
}
graph.RecordPoll(ga)
{
expected := prefix + "(\n" +
" Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" +
@ -791,10 +993,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
t.Fatalf("Wrong preference. Expected %s", Alpha.ID())
} else if graph.Finalized() {
t.Fatalf("Finalized too early")
} else if err := graph.RecordPoll(ga); err != nil {
t.Fatal(err)
}
graph.RecordPoll(ga)
{
expected := prefix + "()"
if str := graph.String(); str != expected {
@ -806,9 +1008,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
t.Fatalf("Wrong number of preferences.")
} else if !graph.Finalized() {
t.Fatalf("Finalized too late")
}
if Green.Status() != choices.Accepted {
} else if Green.Status() != choices.Accepted {
t.Fatalf("%s should have been accepted", Green.ID())
} else if Alpha.Status() != choices.Accepted {
t.Fatalf("%s should have been accepted", Alpha.ID())
@ -816,10 +1016,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
t.Fatalf("%s should have been rejected", Red.ID())
} else if Blue.Status() != choices.Rejected {
t.Fatalf("%s should have been rejected", Blue.ID())
} else if err := graph.RecordPoll(rb); err != nil {
t.Fatal(err)
}
graph.RecordPoll(rb)
{
expected := prefix + "()"
if str := graph.String(); str != expected {
@ -831,9 +1031,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) {
t.Fatalf("Wrong number of preferences.")
} else if !graph.Finalized() {
t.Fatalf("Finalized too late")
}
if Green.Status() != choices.Accepted {
} else if Green.Status() != choices.Accepted {
t.Fatalf("%s should have been accepted", Green.ID())
} else if Alpha.Status() != choices.Accepted {
t.Fatalf("%s should have been accepted", Alpha.ID())

View File

@ -7,34 +7,4 @@ import (
"testing"
)
func TestDirectedParams(t *testing.T) { ParamsTest(t, DirectedFactory{}) }
func TestDirectedIssued(t *testing.T) { IssuedTest(t, DirectedFactory{}) }
func TestDirectedLeftoverInput(t *testing.T) { LeftoverInputTest(t, DirectedFactory{}) }
func TestDirectedLowerConfidence(t *testing.T) { LowerConfidenceTest(t, DirectedFactory{}) }
func TestDirectedMiddleConfidence(t *testing.T) { MiddleConfidenceTest(t, DirectedFactory{}) }
func TestDirectedIndependent(t *testing.T) { IndependentTest(t, DirectedFactory{}) }
func TestDirectedVirtuous(t *testing.T) { VirtuousTest(t, DirectedFactory{}) }
func TestDirectedIsVirtuous(t *testing.T) { IsVirtuousTest(t, DirectedFactory{}) }
func TestDirectedConflicts(t *testing.T) { ConflictsTest(t, DirectedFactory{}) }
func TestDirectedQuiesce(t *testing.T) { QuiesceTest(t, DirectedFactory{}) }
func TestDirectedAcceptingDependency(t *testing.T) { AcceptingDependencyTest(t, DirectedFactory{}) }
func TestDirectedRejectingDependency(t *testing.T) { RejectingDependencyTest(t, DirectedFactory{}) }
func TestDirectedVacuouslyAccepted(t *testing.T) { VacuouslyAcceptedTest(t, DirectedFactory{}) }
func TestDirectedVirtuousDependsOnRogue(t *testing.T) {
VirtuousDependsOnRogueTest(t, DirectedFactory{})
}
func TestDirectedString(t *testing.T) { StringTest(t, DirectedFactory{}, "DG") }
func TestDirectedConsensus(t *testing.T) { ConsensusTest(t, DirectedFactory{}, "DG") }

View File

@ -7,32 +7,4 @@ import (
"testing"
)
func TestInputParams(t *testing.T) { ParamsTest(t, InputFactory{}) }
func TestInputIssued(t *testing.T) { IssuedTest(t, InputFactory{}) }
func TestInputLeftoverInput(t *testing.T) { LeftoverInputTest(t, InputFactory{}) }
func TestInputLowerConfidence(t *testing.T) { LowerConfidenceTest(t, InputFactory{}) }
func TestInputMiddleConfidence(t *testing.T) { MiddleConfidenceTest(t, InputFactory{}) }
func TestInputIndependent(t *testing.T) { IndependentTest(t, InputFactory{}) }
func TestInputVirtuous(t *testing.T) { VirtuousTest(t, InputFactory{}) }
func TestInputIsVirtuous(t *testing.T) { IsVirtuousTest(t, InputFactory{}) }
func TestInputConflicts(t *testing.T) { ConflictsTest(t, InputFactory{}) }
func TestInputQuiesce(t *testing.T) { QuiesceTest(t, InputFactory{}) }
func TestInputAcceptingDependency(t *testing.T) { AcceptingDependencyTest(t, InputFactory{}) }
func TestInputRejectingDependency(t *testing.T) { RejectingDependencyTest(t, InputFactory{}) }
func TestInputVacuouslyAccepted(t *testing.T) { VacuouslyAcceptedTest(t, InputFactory{}) }
func TestInputVirtuousDependsOnRogue(t *testing.T) { VirtuousDependsOnRogueTest(t, InputFactory{}) }
func TestInputString(t *testing.T) { StringTest(t, InputFactory{}, "IG") }
func TestInputConsensus(t *testing.T) { ConsensusTest(t, InputFactory{}, "IG") }

View File

@ -31,10 +31,10 @@ func (tx *TestTx) InputIDs() ids.Set { return tx.Ins }
func (tx *TestTx) Status() choices.Status { return tx.Stat }
// Accept implements the Consumer interface
func (tx *TestTx) Accept() error { tx.Stat = choices.Accepted; return nil }
func (tx *TestTx) Accept() error { tx.Stat = choices.Accepted; return tx.Validity }
// Reject implements the Consumer interface
func (tx *TestTx) Reject() error { tx.Stat = choices.Rejected; return nil }
func (tx *TestTx) Reject() error { tx.Stat = choices.Rejected; return tx.Validity }
// Reset sets the status to pending
func (tx *TestTx) Reset() { tx.Stat = choices.Processing }

View File

@ -17,7 +17,12 @@ import (
)
const (
cacheSize = 100000
// We cache processed vertices at heights in [c*stripeDistance, c*stripeDistance + stripeWidth) for c = {0,1,2...}
// This forms a "stripe" of cached DAG vertices at each multiple of stripeDistance.
// This helps to limit the number of repeated DAG traversals performed
stripeDistance = 2000
stripeWidth = 5
cacheSize = 100000
)
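With stripeDistance = 2000 and stripeWidth = 5, the height%stripeDistance < stripeWidth check used later caches vertices at heights 0-4, 2000-2004, 4000-4004, and so on, so repeated traversals can stop at the nearest cached stripe. A small sketch of just that predicate (constants copied from above; everything else is illustrative):

package main

import "fmt"

const (
	stripeDistance = 2000
	stripeWidth    = 5
)

// shouldCache reports whether a processed vertex at this height lands
// on a cached stripe.
func shouldCache(height uint64) bool {
	return height%stripeDistance < stripeWidth
}

func main() {
	for _, h := range []uint64{0, 4, 5, 1999, 2000, 2004, 2005, 4000} {
		fmt.Printf("height %d cached: %v\n", h, shouldCache(h))
	}
	// Prints true for 0, 4, 2000, 2004, 4000; false for the rest.
}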
// BootstrapConfig ...
@ -37,15 +42,16 @@ type bootstrapper struct {
metrics
common.Bootstrapper
// true if all of the vertices in the original accepted frontier have been processed
processedStartingAcceptedFrontier bool
// number of vertices fetched so far
numFetched uint32
// tracks which validators were asked for which containers in which requests
outstandingRequests common.Requests
// IDs of vertices that we will send a GetAncestors request for once we are
// not at the max number of outstanding requests
needToFetch ids.Set
// Contains IDs of vertices that have recently been processed
processedCache *cache.LRU
@ -80,14 +86,15 @@ func (b *bootstrapper) Initialize(config BootstrapConfig) error {
return nil
}
// CurrentAcceptedFrontier ...
// CurrentAcceptedFrontier returns the set of vertices that this node has accepted
// that have no accepted children
func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set {
acceptedFrontier := ids.Set{}
acceptedFrontier.Add(b.State.Edge()...)
return acceptedFrontier
}
// FilterAccepted ...
// FilterAccepted returns the IDs of vertices in [containerIDs] that this node has accepted
func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
acceptedVtxIDs := ids.Set{}
for _, vtxID := range containerIDs.List() {
@ -98,50 +105,64 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set {
return acceptedVtxIDs
}
// Get vertex [vtxID] and its ancestors
func (b *bootstrapper) fetch(vtxID ids.ID) error {
// Make sure we haven't already requested this block
if b.outstandingRequests.Contains(vtxID) {
return nil
}
// Add the vertices in [vtxIDs] to the set of vertices that we need to fetch,
// and then fetch vertices (and their ancestors) until either there are no more
// to fetch or we are at the maximum number of outstanding requests.
func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error {
b.needToFetch.Add(vtxIDs...)
for b.needToFetch.Len() > 0 && b.outstandingRequests.Len() < common.MaxOutstandingRequests {
vtxID := b.needToFetch.CappedList(1)[0]
b.needToFetch.Remove(vtxID)
// Make sure we don't already have this vertex
if _, err := b.State.GetVertex(vtxID); err == nil {
return nil
}
validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
if len(validators) == 0 {
return fmt.Errorf("Dropping request for %s as there are no validators", vtxID)
}
validatorID := validators[0].ID()
b.RequestID++
b.outstandingRequests.Add(validatorID, b.RequestID, vtxID)
b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors
return nil
}
// Process vertices
func (b *bootstrapper) process(vtx avalanche.Vertex) error {
toProcess := []avalanche.Vertex{vtx}
for len(toProcess) > 0 {
newLen := len(toProcess) - 1
vtx := toProcess[newLen]
toProcess = toProcess[:newLen]
if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this
// Make sure we haven't already requested this vertex
if b.outstandingRequests.Contains(vtxID) {
continue
}
// Make sure we don't already have this vertex
if _, err := b.State.GetVertex(vtxID); err == nil {
continue
}
validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to
if len(validators) == 0 {
return fmt.Errorf("Dropping request for %s as there are no validators", vtxID)
}
validatorID := validators[0].ID()
b.RequestID++
b.outstandingRequests.Add(validatorID, b.RequestID, vtxID)
b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors
}
return b.finish()
}
// Process the vertices in [vtxs].
func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error {
// Vertices that we need to process. Store them in a heap for deduplication
// and so we always process vertices further down in the DAG first. This helps
// to reduce the number of repeated DAG traversals.
toProcess := newMaxVertexHeap()
for _, vtx := range vtxs {
if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process a vertex if we haven't already
toProcess.Push(vtx)
}
}
for toProcess.Len() > 0 { // While there are unprocessed vertices
vtx := toProcess.Pop() // Get an unknown vertex or one furthest down the DAG
vtxID := vtx.ID()
switch vtx.Status() {
case choices.Unknown:
if err := b.fetch(vtx.ID()); err != nil {
return err
}
b.needToFetch.Add(vtxID) // We don't have this vertex locally. Mark that we need to fetch it.
case choices.Rejected:
b.needToFetch.Remove(vtxID) // We have this vertex locally. Mark that we don't need to fetch it.
return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID())
case choices.Processing:
if err := b.VtxBlocked.Push(&vertexJob{
b.needToFetch.Remove(vtxID)
if err := b.VtxBlocked.Push(&vertexJob{ // Add to queue of vertices to execute when bootstrapping finishes.
log: b.BootstrapConfig.Context.Log,
numAccepted: b.numBSVtx,
numDropped: b.numBSDroppedVtx,
@ -155,7 +176,7 @@ func (b *bootstrapper) process(vtx avalanche.Vertex) error {
} else {
b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked: %s", err)
}
for _, tx := range vtx.Txs() {
for _, tx := range vtx.Txs() { // Add transactions to queue of transactions to execute when bootstrapping finishes.
if err := b.TxBlocked.Push(&txJob{
log: b.BootstrapConfig.Context.Log,
numAccepted: b.numBSTx,
@ -167,10 +188,14 @@ func (b *bootstrapper) process(vtx avalanche.Vertex) error {
b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked: %s", err)
}
}
for _, parent := range vtx.Parents() {
toProcess = append(toProcess, parent)
for _, parent := range vtx.Parents() { // Process the parents of this vertex (traverse up the DAG)
if _, ok := b.processedCache.Get(parent.ID()); !ok { // But only if we haven't processed the parent
toProcess.Push(parent)
}
}
if vtx.Height()%stripeDistance < stripeWidth { // See comment for stripeDistance
b.processedCache.Put(vtx.ID(), nil)
}
b.processedCache.Put(vtx.ID(), nil)
}
}
@ -181,10 +206,7 @@ func (b *bootstrapper) process(vtx avalanche.Vertex) error {
return err
}
if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
return b.finish()
}
return nil
return b.fetch()
}
// MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr]
@ -200,12 +222,12 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte
// Make sure this is in response to a request we made
neededVtxID, needed := b.outstandingRequests.Remove(vdr, requestID)
if !needed { // this message isn't in response to a request we made
if !needed { // this message isn't in response to a request we made, or is in response to a request that timed out
b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID)
return nil
}
neededVtx, err := b.State.ParseVertex(vtxs[0]) // the vertex we requested
neededVtx, err := b.State.ParseVertex(vtxs[0]) // first vertex should be the one we requested in GetAncestors request
if err != nil {
b.BootstrapConfig.Context.Log.Debug("Failed to parse requested vertex %s: %w", neededVtxID, err)
b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxs[0]})
@ -215,14 +237,20 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte
return b.fetch(neededVtxID)
}
for _, vtxBytes := range vtxs { // Parse/persist all the vertices
if _, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx
processVertices := make([]avalanche.Vertex, 1, len(vtxs)) // Process all of the vertices in this message
processVertices[0] = neededVtx
for _, vtxBytes := range vtxs[1:] { // Parse/persist all the vertices
if vtx, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx
b.BootstrapConfig.Context.Log.Debug("Failed to parse vertex: %w", err)
b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes})
} else {
processVertices = append(processVertices, vtx)
b.needToFetch.Remove(vtx.ID()) // No need to fetch this vertex since we have it now
}
}
return b.process(neededVtx)
return b.process(processVertices...)
}
// GetAncestorsFailed is called when a GetAncestors message we sent fails
@ -236,43 +264,38 @@ func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) err
return b.fetch(vtxID)
}
// ForceAccepted ...
// ForceAccepted starts bootstrapping. Process the vertices in [accepterContainerIDs].
func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error {
if err := b.VM.Bootstrapping(); err != nil {
return fmt.Errorf("failed to notify VM that bootstrapping has started: %w",
err)
}
toProcess := make([]avalanche.Vertex, 0, acceptedContainerIDs.Len())
for _, vtxID := range acceptedContainerIDs.List() {
if vtx, err := b.State.GetVertex(vtxID); err == nil {
if err := b.process(vtx); err != nil {
return err
}
} else if err := b.fetch(vtxID); err != nil {
return err
toProcess = append(toProcess, vtx) // Process this vertex.
} else {
b.needToFetch.Add(vtxID) // We don't have this vertex. Mark that we have to fetch it.
}
}
b.processedStartingAcceptedFrontier = true
if numPending := b.outstandingRequests.Len(); numPending == 0 {
return b.finish()
}
return nil
return b.process(toProcess...)
}
// Finish bootstrapping
func (b *bootstrapper) finish() error {
if b.finished {
// If there are outstanding requests for vertices or we still need to fetch vertices, we can't finish
if b.finished || b.outstandingRequests.Len() > 0 || b.needToFetch.Len() > 0 {
return nil
}
b.BootstrapConfig.Context.Log.Info("finished fetching vertices. executing transaction state transitions...")
b.BootstrapConfig.Context.Log.Info("finished fetching %d vertices. executing transaction state transitions...",
b.numFetched)
if err := b.executeAll(b.TxBlocked, b.numBSBlockedTx); err != nil {
return err
}
b.BootstrapConfig.Context.Log.Info("executing vertex state transitions...")
if err := b.executeAll(b.VtxBlocked, b.numBSBlockedVtx); err != nil {
return err
}
@ -307,5 +330,6 @@ func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge)
b.BootstrapConfig.Context.Log.Info("executed %d operations", numExecuted)
}
}
b.BootstrapConfig.Context.Log.Info("executed %d operations", numExecuted)
return nil
}
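The reworked fetch above replaces one-request-per-call recursion with a work-queue drain: IDs accumulate in needToFetch, and requests go out only while the number of outstanding requests stays under the cap, with finish() running once both are empty. A simplified, self-contained sketch of that control flow (the cap of 10, the fetcher type, and sendRequest are stand-ins, not the real engine API):

package main

import "fmt"

const maxOutstanding = 10 // stand-in for common.MaxOutstandingRequests

type fetcher struct {
	needToFetch []string        // IDs we still have to request
	outstanding map[string]bool // IDs with an in-flight request
}

func (f *fetcher) sendRequest(id string) { fmt.Println("requesting", id) }

// fetch drains the queue until it is empty or the request budget is
// used up; leftover IDs wait for a response or a timeout to retry.
func (f *fetcher) fetch(ids ...string) {
	f.needToFetch = append(f.needToFetch, ids...)
	for len(f.needToFetch) > 0 && len(f.outstanding) < maxOutstanding {
		id := f.needToFetch[0]
		f.needToFetch = f.needToFetch[1:]
		if f.outstanding[id] { // already requested; skip the duplicate
			continue
		}
		f.outstanding[id] = true
		f.sendRequest(id)
	}
}

func main() {
	f := &fetcher{outstanding: map[string]bool{}}
	f.fetch("vtx0", "vtx1", "vtx0") // the repeated vtx0 is skipped
}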

View File

@ -805,3 +805,113 @@ func TestBootstrapperIncompleteMultiPut(t *testing.T) {
t.Fatal("should be accepted")
}
}
func TestBootstrapperFinalized(t *testing.T) {
config, peerID, sender, state, vm := newConfig(t)
vtxID0 := ids.Empty.Prefix(0)
vtxID1 := ids.Empty.Prefix(1)
vtxBytes0 := []byte{0}
vtxBytes1 := []byte{1}
vtx0 := &Vtx{
id: vtxID0,
height: 0,
status: choices.Unknown,
bytes: vtxBytes0,
}
vtx1 := &Vtx{
id: vtxID1,
height: 1,
parents: []avalanche.Vertex{vtx0},
status: choices.Unknown,
bytes: vtxBytes1,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
acceptedIDs := ids.Set{}
acceptedIDs.Add(vtxID0)
acceptedIDs.Add(vtxID1)
parsedVtx0 := false
parsedVtx1 := false
state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
switch {
case vtxID.Equals(vtxID0):
if parsedVtx0 {
return vtx0, nil
}
return nil, errUnknownVertex
case vtxID.Equals(vtxID1):
if parsedVtx1 {
return vtx1, nil
}
return nil, errUnknownVertex
default:
t.Fatal(errUnknownVertex)
panic(errUnknownVertex)
}
}
state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(vtxBytes, vtxBytes0):
vtx0.status = choices.Processing
parsedVtx0 = true
return vtx0, nil
case bytes.Equal(vtxBytes, vtxBytes1):
vtx1.status = choices.Processing
parsedVtx1 = true
return vtx1, nil
}
t.Fatal(errUnknownVertex)
return nil, errUnknownVertex
}
requestIDs := map[[32]byte]uint32{}
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
requestIDs[vtxID.Key()] = reqID
}
vm.CantBootstrapping = false
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx0 and vtx1
t.Fatal(err)
}
reqID, ok := requestIDs[vtxID1.Key()]
if !ok {
t.Fatalf("should have requested vtx1")
}
vm.CantBootstrapped = false
if err := bs.MultiPut(peerID, reqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil {
t.Fatal(err)
}
reqID, ok = requestIDs[vtxID0.Key()]
if !ok {
t.Fatalf("should have requested vtx0")
}
if err := bs.GetAncestorsFailed(peerID, reqID); err != nil {
t.Fatal(err)
}
if !*finished {
t.Fatalf("Bootstrapping should have finished")
} else if vtx0.Status() != choices.Accepted {
t.Fatalf("Vertex should be accepted")
} else if vtx1.Status() != choices.Accepted {
t.Fatalf("Vertex should be accepted")
}
}

View File

@ -78,9 +78,12 @@ func (i *issuer) Update() {
vdrSet.Add(vdr.ID())
}
toSample := ids.ShortSet{} // Copy to a new variable because we may remove an element in sender.Sender
toSample.Union(vdrSet) // and we don't want that to affect the set of validators we wait for [i.e. vdrSet]
i.t.RequestID++
if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) {
i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes())
if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet) {
i.t.Config.Sender.PushQuery(toSample, i.t.RequestID, vtxID, i.vtx.Bytes())
} else if numVdrs < p.K {
i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID)
}
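The toSample copy above exists because sender.Sender may remove elements from the set it is handed, while the engine still needs the untouched vdrSet to know which validators it is waiting on. A minimal illustration of the hazard and the defensive copy (plain map-backed sets standing in for ids.ShortSet):

package main

import "fmt"

type set map[string]bool

// union copies every element of o into s.
func (s set) union(o set) {
	for k := range o {
		s[k] = true
	}
}

// send mutates the set it receives, as the real sender may.
func send(s set) { delete(s, "vdr1") }

func main() {
	vdrSet := set{"vdr1": true, "vdr2": true}
	toSample := set{} // hand the sender a copy, keep the original
	toSample.union(vdrSet)
	send(toSample)
	fmt.Println(len(vdrSet), len(toSample)) // 2 1: vdrSet survives intact
}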

View File

@ -14,7 +14,7 @@ type metrics struct {
numBSVtx, numBSDroppedVtx,
numBSTx, numBSDroppedTx prometheus.Counter
numPolls, numVtxRequests, numTxRequests, numPendingVtx prometheus.Gauge
numVtxRequests, numTxRequests, numPendingVtx prometheus.Gauge
}
// Initialize implements the Engine interface
@ -61,12 +61,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
Name: "av_bs_dropped_txs",
Help: "Number of dropped txs",
})
m.numPolls = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "av_polls",
Help: "Number of pending network polls",
})
m.numVtxRequests = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
@ -107,9 +101,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
if err := registerer.Register(m.numBSDroppedTx); err != nil {
log.Error("Failed to register av_bs_dropped_txs statistics due to %s", err)
}
if err := registerer.Register(m.numPolls); err != nil {
log.Error("Failed to register av_polls statistics due to %s", err)
}
if err := registerer.Register(m.numVtxRequests); err != nil {
log.Error("Failed to register av_vtx_requests statistics due to %s", err)
}

View File

@ -0,0 +1,85 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"fmt"
"github.com/ava-labs/gecko/ids"
)
type earlyTermNoTraversalFactory struct {
alpha int
}
// NewEarlyTermNoTraversalFactory returns a factory that returns polls with
// early termination, without doing DAG traversals
func NewEarlyTermNoTraversalFactory(alpha int) Factory {
return &earlyTermNoTraversalFactory{alpha: alpha}
}
func (f *earlyTermNoTraversalFactory) New(vdrs ids.ShortSet) Poll {
return &earlyTermNoTraversalPoll{
polled: vdrs,
alpha: f.alpha,
}
}
// earlyTermNoTraversalPoll finishes when any remaining validators can't change
// the result of the poll. However, does not terminate tightly with this bound.
// It terminates as quickly as it can without performing any DAG traversals.
type earlyTermNoTraversalPoll struct {
votes ids.UniqueBag
polled ids.ShortSet
alpha int
}
// Vote registers a response for this poll
func (p *earlyTermNoTraversalPoll) Vote(vdr ids.ShortID, votes []ids.ID) {
if !p.polled.Contains(vdr) {
// if the validator wasn't polled or already responded to this poll, we
// should just drop the vote
return
}
// make sure that a validator can't respond multiple times
p.polled.Remove(vdr)
// track the votes the validator responded with
p.votes.Add(uint(p.polled.Len()), votes...)
}
// Finished returns true once the outcome of the poll can no longer change
func (p *earlyTermNoTraversalPoll) Finished() bool {
// If there are no outstanding queries, the poll is finished
numPending := p.polled.Len()
if numPending == 0 {
return true
}
// If there are still enough pending responses to include another vertex,
// then the poll must wait for more responses
if numPending > p.alpha {
return false
}
// Ignore any vertex that has already received alpha votes. To safely skip
// DAG traversal, assume that all votes for vertices with fewer than alpha
// votes will be applied to a single shared ancestor. In this case, the poll
// can terminate early iff there are not enough pending votes for this
// ancestor to receive alpha votes.
partialVotes := ids.BitSet(0)
for _, vote := range p.votes.List() {
if voters := p.votes.GetSet(vote); voters.Len() < p.alpha {
partialVotes.Union(voters)
}
}
return partialVotes.Len()+numPending < p.alpha
}
// Result returns the result of this poll
func (p *earlyTermNoTraversalPoll) Result() ids.UniqueBag { return p.votes }
func (p *earlyTermNoTraversalPoll) String() string {
return fmt.Sprintf("waiting on %s", p.polled)
}
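
To make the bound in Finished concrete, take k = 5 and alpha = 3. After two votes for distinct vertices, partialVotes covers both voters (2) and numPending is 3; 2 + 3 >= 3, so a shared ancestor could still reach alpha and the poll waits. After three identical votes, that vertex already has alpha votes and is ignored, partialVotes is empty, and 0 + 2 < 3 ends the poll early. A hedged, self-contained sketch of that second case, assuming the package is consumed at the import path this diff adds it under (snow/engine/avalanche/poll):

package main

import (
	"fmt"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/engine/avalanche/poll"
)

func main() {
	vtx := ids.NewID([32]byte{1})

	vdrs := ids.ShortSet{}
	for i := byte(1); i <= 5; i++ { // k = 5
		vdrs.Add(ids.NewShortID([20]byte{i}))
	}

	p := poll.NewEarlyTermNoTraversalFactory(3).New(vdrs) // alpha = 3

	// Three identical votes give vtx alpha votes; the two pending
	// responses can no longer change the outcome.
	for i := byte(1); i <= 3; i++ {
		p.Vote(ids.NewShortID([20]byte{i}), []ids.ID{vtx})
	}
	fmt.Println(p.Finished()) // true: terminated with 2 votes outstanding
}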

View File

@ -0,0 +1,207 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"testing"
"github.com/ava-labs/gecko/ids"
)
func TestEarlyTermNoTraversalResults(t *testing.T) {
alpha := 1
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1}) // k = 1
vdrs := ids.ShortSet{}
vdrs.Add(vdr1)
factory := NewEarlyTermNoTraversalFactory(alpha)
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
if !poll.Finished() {
t.Fatalf("Poll did not terminate after receiving k votes")
}
result := poll.Result()
if list := result.List(); len(list) != 1 {
t.Fatalf("Wrong number of vertices returned")
} else if retVtxID := list[0]; !retVtxID.Equals(vtxID) {
t.Fatalf("Wrong vertex returned")
} else if set := result.GetSet(vtxID); set.Len() != 1 {
t.Fatalf("Wrong number of votes returned")
}
}
func TestEarlyTermNoTraversalString(t *testing.T) {
alpha := 2
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2}) // k = 2
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
)
factory := NewEarlyTermNoTraversalFactory(alpha)
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
expected := "waiting on {BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp}"
if result := poll.String(); expected != result {
t.Fatalf("Poll should have returned %s but returned %s", expected, result)
}
}
func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) {
alpha := 2
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2}) // k = 2
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
)
factory := NewEarlyTermNoTraversalFactory(alpha)
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
if poll.Finished() {
t.Fatalf("Poll finished after less than alpha votes")
}
poll.Vote(vdr1, votes)
if poll.Finished() {
t.Fatalf("Poll finished after getting a duplicated vote")
}
poll.Vote(vdr2, votes)
if !poll.Finished() {
t.Fatalf("Poll did not terminate after receiving k votes")
}
}
func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) {
alpha := 3
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2})
vdr3 := ids.NewShortID([20]byte{3})
vdr4 := ids.NewShortID([20]byte{4})
vdr5 := ids.NewShortID([20]byte{5}) // k = 5
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
vdr3,
vdr4,
vdr5,
)
factory := NewEarlyTermNoTraversalFactory(alpha)
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
if poll.Finished() {
t.Fatalf("Poll finished after less than alpha votes")
}
poll.Vote(vdr2, votes)
if poll.Finished() {
t.Fatalf("Poll finished after less than alpha votes")
}
poll.Vote(vdr3, votes)
if !poll.Finished() {
t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices")
}
}
func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) {
alpha := 4
vtxA := ids.NewID([32]byte{1})
vtxB := ids.NewID([32]byte{2})
vtxC := ids.NewID([32]byte{3})
vtxD := ids.NewID([32]byte{4})
// If validators 1-3 vote for frontier vertices
// B, C, and D respectively, which all share the common ancestor
// A, then we cannot terminate early with alpha = k = 4.
// If the final vote is cast for any of A, B, C, or D, then
// vertex A will have transitively received alpha = 4 votes.
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2})
vdr3 := ids.NewShortID([20]byte{3})
vdr4 := ids.NewShortID([20]byte{4})
vdrs := ids.ShortSet{}
vdrs.Add(vdr1)
vdrs.Add(vdr2)
vdrs.Add(vdr3)
vdrs.Add(vdr4)
factory := NewEarlyTermNoTraversalFactory(alpha)
poll := factory.New(vdrs)
poll.Vote(vdr1, []ids.ID{vtxB})
if poll.Finished() {
t.Fatalf("Poll finished early after receiving one vote")
}
poll.Vote(vdr2, []ids.ID{vtxC})
if poll.Finished() {
t.Fatalf("Poll finished early after receiving two votes")
}
poll.Vote(vdr3, []ids.ID{vtxD})
if poll.Finished() {
t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes")
}
poll.Vote(vdr4, []ids.ID{vtxA})
if !poll.Finished() {
t.Fatalf("Poll did not terminate after receiving all outstanding votes")
}
}
func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) {
alpha := 2
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2})
vdr3 := ids.NewShortID([20]byte{3}) // k = 3
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
vdr3,
)
factory := NewEarlyTermNoTraversalFactory(alpha)
poll := factory.New(vdrs)
poll.Vote(vdr1, nil)
if poll.Finished() {
t.Fatalf("Poll finished early after dropping one vote")
}
poll.Vote(vdr2, nil)
if !poll.Finished() {
t.Fatalf("Poll did not terminate after dropping two votes")
}
}

View File

@ -0,0 +1,33 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"fmt"
"github.com/ava-labs/gecko/ids"
)
// Set is a collection of polls
type Set interface {
fmt.Stringer
Add(requestID uint32, vdrs ids.ShortSet) bool
Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.UniqueBag, bool)
Len() int
}
// Poll is an outstanding poll
type Poll interface {
fmt.Stringer
Vote(vdr ids.ShortID, votes []ids.ID)
Finished() bool
Result() ids.UniqueBag
}
// Factory creates a new Poll
type Factory interface {
New(vdrs ids.ShortSet) Poll
}
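
A Set is meant to be driven by the engine's query plumbing: register a poll when a sample goes out, feed responses in as they arrive, and act on the result only once the poll reports itself finished. A minimal sketch of that consumption pattern (handleResult and the function names are illustrative, not part of this diff):

package sketch

import (
	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/engine/avalanche/poll"
)

// handleResult stands in for applying a finished poll's votes to consensus;
// it is illustrative, not part of this diff.
func handleResult(result ids.UniqueBag) {}

// onQuerySent registers an outgoing query with the set. The query should only
// be pushed to the network when the poll registered cleanly, i.e. the
// requestID was not already in flight.
func onQuerySent(s poll.Set, requestID uint32, vdrs ids.ShortSet) bool {
	return s.Add(requestID, vdrs)
}

// onChits routes one validator's response into the set; the result is acted
// on only once the poll reports itself finished.
func onChits(s poll.Set, requestID uint32, vdr ids.ShortID, votes []ids.ID) {
	if result, finished := s.Vote(requestID, vdr, votes); finished {
		handleResult(result)
	}
}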

View File

@ -0,0 +1,52 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"fmt"
"github.com/ava-labs/gecko/ids"
)
type noEarlyTermFactory struct{}
// NewNoEarlyTermFactory returns a factory that returns polls with no early
// termination
func NewNoEarlyTermFactory() Factory { return noEarlyTermFactory{} }
func (noEarlyTermFactory) New(vdrs ids.ShortSet) Poll {
return &noEarlyTermPoll{polled: vdrs}
}
// noEarlyTermPoll finishes when all polled validators either respond to the
// query or a timeout occurs
type noEarlyTermPoll struct {
votes ids.UniqueBag
polled ids.ShortSet
}
// Vote registers a response for this poll
func (p *noEarlyTermPoll) Vote(vdr ids.ShortID, votes []ids.ID) {
if !p.polled.Contains(vdr) {
// if the validator wasn't polled or already responded to this poll, we
// should just drop the vote
return
}
// make sure that a validator can't respond multiple times
p.polled.Remove(vdr)
// track the votes the validator responded with
p.votes.Add(uint(p.polled.Len()), votes...)
}
// Finished returns true when all validators have voted
func (p *noEarlyTermPoll) Finished() bool { return p.polled.Len() == 0 }
// Result returns the result of this poll
func (p *noEarlyTermPoll) Result() ids.UniqueBag { return p.votes }
func (p *noEarlyTermPoll) String() string {
return fmt.Sprintf("waiting on %s", p.polled)
}

View File

@ -0,0 +1,91 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"testing"
"github.com/ava-labs/gecko/ids"
)
func TestNoEarlyTermResults(t *testing.T) {
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1}) // k = 1
vdrs := ids.ShortSet{}
vdrs.Add(vdr1)
factory := NewNoEarlyTermFactory()
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
if !poll.Finished() {
t.Fatalf("Poll did not terminate after receiving k votes")
}
result := poll.Result()
if list := result.List(); len(list) != 1 {
t.Fatalf("Wrong number of vertices returned")
} else if retVtxID := list[0]; !retVtxID.Equals(vtxID) {
t.Fatalf("Wrong vertex returned")
} else if set := result.GetSet(vtxID); set.Len() != 1 {
t.Fatalf("Wrong number of votes returned")
}
}
func TestNoEarlyTermString(t *testing.T) {
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2}) // k = 2
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
)
factory := NewNoEarlyTermFactory()
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
expected := "waiting on {BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp}"
if result := poll.String(); expected != result {
t.Fatalf("Poll should have returned %s but returned %s", expected, result)
}
}
func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) {
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2}) // k = 2
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
)
factory := NewNoEarlyTermFactory()
poll := factory.New(vdrs)
poll.Vote(vdr1, votes)
if poll.Finished() {
t.Fatalf("Poll finished after less than alpha votes")
}
poll.Vote(vdr1, votes)
if poll.Finished() {
t.Fatalf("Poll finished after getting a duplicated vote")
}
poll.Vote(vdr2, votes)
if !poll.Finished() {
t.Fatalf("Poll did not terminate after receiving k votes")
}
}

View File

@ -0,0 +1,130 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"fmt"
"strings"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/timer"
)
type poll struct {
Poll
start time.Time
}
type set struct {
log logging.Logger
numPolls prometheus.Gauge
durPolls prometheus.Histogram
factory Factory
polls map[uint32]poll
}
// NewSet returns a new empty set of polls
func NewSet(
factory Factory,
log logging.Logger,
namespace string,
registerer prometheus.Registerer,
) Set {
numPolls := prometheus.NewGauge(prometheus.GaugeOpts{
Namespace: namespace,
Name: "polls",
Help: "Number of pending network polls",
})
if err := registerer.Register(numPolls); err != nil {
log.Error("failed to register polls statistics due to %s", err)
}
durPolls := prometheus.NewHistogram(prometheus.HistogramOpts{
Namespace: namespace,
Name: "poll_duration",
Help: "Length of time the poll existed in milliseconds",
Buckets: timer.MillisecondsBuckets,
})
if err := registerer.Register(durPolls); err != nil {
log.Error("failed to register poll_duration statistics due to %s", err)
}
return &set{
log: log,
numPolls: numPolls,
durPolls: durPolls,
factory: factory,
polls: make(map[uint32]poll),
}
}
// Add to the current set of polls
// Returns true if the poll was registered correctly and the network sample
// should be made.
func (s *set) Add(requestID uint32, vdrs ids.ShortSet) bool {
if _, exists := s.polls[requestID]; exists {
s.log.Debug("dropping poll due to duplicated requestID: %d", requestID)
return false
}
s.log.Verbo("creating poll with requestID %d and validators %s",
requestID,
vdrs)
s.polls[requestID] = poll{
Poll: s.factory.New(vdrs), // create the new poll
start: time.Now(),
}
s.numPolls.Inc() // increase the metrics
return true
}
// Vote registers the connection's response to a query for [id]. If there was no
// query, or the response has already been registered, nothing is performed.
func (s *set) Vote(
requestID uint32,
vdr ids.ShortID,
votes []ids.ID,
) (ids.UniqueBag, bool) {
poll, exists := s.polls[requestID]
if !exists {
s.log.Verbo("dropping vote from %s to an unknown poll with requestID: %d",
vdr,
requestID)
return nil, false
}
s.log.Verbo("processing vote from %s in the poll with requestID: %d with the votes %v",
vdr,
requestID,
votes)
poll.Vote(vdr, votes)
if !poll.Finished() {
return nil, false
}
s.log.Verbo("poll with requestID %d finished as %s", requestID, poll)
delete(s.polls, requestID) // remove the poll from the current set
s.durPolls.Observe(float64(time.Since(poll.start).Milliseconds()))
s.numPolls.Dec() // decrease the metrics
return poll.Result(), true
}
// Len returns the number of outstanding polls
func (s *set) Len() int { return len(s.polls) }
func (s *set) String() string {
sb := strings.Builder{}
sb.WriteString(fmt.Sprintf("current polls: (Size = %d)", len(s.polls)))
for requestID, poll := range s.polls {
sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll))
}
return sb.String()
}

View File

@ -0,0 +1,97 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"testing"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/logging"
"github.com/prometheus/client_golang/prometheus"
)
func TestNewSetErrorOnMetrics(t *testing.T) {
factory := NewNoEarlyTermFactory()
log := logging.NoLog{}
namespace := ""
registerer := prometheus.NewRegistry()
registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{
Name: "polls",
}))
registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{
Name: "poll_duration",
}))
_ = NewSet(factory, log, namespace, registerer)
}
func TestCreateAndFinishPoll(t *testing.T) {
factory := NewNoEarlyTermFactory()
log := logging.NoLog{}
namespace := ""
registerer := prometheus.NewRegistry()
s := NewSet(factory, log, namespace, registerer)
vtxID := ids.NewID([32]byte{1})
votes := []ids.ID{vtxID}
vdr1 := ids.NewShortID([20]byte{1})
vdr2 := ids.NewShortID([20]byte{2}) // k = 2
vdrs := ids.ShortSet{}
vdrs.Add(
vdr1,
vdr2,
)
if s.Len() != 0 {
t.Fatalf("Shouldn't have any active polls yet")
} else if !s.Add(0, vdrs) {
t.Fatalf("Should have been able to add a new poll")
} else if s.Len() != 1 {
t.Fatalf("Should only have one active poll")
} else if s.Add(0, vdrs) {
t.Fatalf("Shouldn't have been able to add a duplicated poll")
} else if s.Len() != 1 {
t.Fatalf("Should only have one active poll")
} else if _, finished := s.Vote(1, vdr1, votes); finished {
t.Fatalf("Shouldn't have been able to finish a non-existant poll")
} else if _, finished := s.Vote(0, vdr1, votes); finished {
t.Fatalf("Shouldn't have been able to finish an ongoing poll")
} else if _, finished := s.Vote(0, vdr1, votes); finished {
t.Fatalf("Should have dropped a duplicated poll")
} else if result, finished := s.Vote(0, vdr2, votes); !finished {
t.Fatalf("Should have finished the")
} else if list := result.List(); len(list) != 1 {
t.Fatalf("Wrong number of vertices returned")
} else if retVtxID := list[0]; !retVtxID.Equals(vtxID) {
t.Fatalf("Wrong vertex returned")
} else if set := result.GetSet(vtxID); set.Len() != 2 {
t.Fatalf("Wrong number of votes returned")
}
}
func TestSetString(t *testing.T) {
factory := NewNoEarlyTermFactory()
log := logging.NoLog{}
namespace := ""
registerer := prometheus.NewRegistry()
s := NewSet(factory, log, namespace, registerer)
vdr1 := ids.NewShortID([20]byte{1}) // k = 1
vdrs := ids.ShortSet{}
vdrs.Add(vdr1)
expected := "current polls: (Size = 1)\n" +
" 0: waiting on {6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt}"
if !s.Add(0, vdrs) {
t.Fatalf("Should have been able to add a new poll")
} else if str := s.String(); expected != str {
t.Fatalf("Set return wrong string, Expected:\n%s\nReturned:\n%s",
expected,
str)
}
}

View File

@ -1,101 +0,0 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package avalanche
import (
"fmt"
"strings"
"github.com/prometheus/client_golang/prometheus"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/logging"
)
// TODO: There is a conservative early termination case that doesn't require
// DAG traversals that we may want to implement. The algorithm would go as
// follows:
// Keep track of the number of responses that reference an ID. If an ID gets >=
// alpha responses, then remove it from all responses and place it into a chit
// list. Remove all empty responses. If the number of responses + the number of
// pending responses is less than alpha, terminate the poll.
// In the synchronous + virtuous case, when everyone returns the same hash, the
// poll now terminates after receiving alpha responses.
// In the rogue case, it is possible that the poll doesn't terminate as quickly
// as possible, because IDs may reach the alpha threshold only when counting
// transitive votes. In this case, we may keep waiting even when it is no longer
// possible for another ID to earn alpha votes.
// Because alpha is typically set close to k, this may not be performance
// critical. However, early termination may be performance critical with crashed
// nodes.
type polls struct {
log logging.Logger
numPolls prometheus.Gauge
m map[uint32]poll
}
// Add to the current set of polls
// Returns true if the poll was registered correctly and the network sample
// should be made.
func (p *polls) Add(requestID uint32, numPolled int) bool {
poll, exists := p.m[requestID]
if !exists {
poll.numPending = numPolled
p.m[requestID] = poll
p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
}
return !exists
}
// Vote registers the connection's response to a query for [id]. If there was no
// query, or the response has already been registered, nothing is performed.
func (p *polls) Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.UniqueBag, bool) {
p.log.Verbo("Vote. requestID: %d. validatorID: %s.", requestID, vdr)
poll, exists := p.m[requestID]
p.log.Verbo("Poll: %+v", poll)
if !exists {
return nil, false
}
poll.Vote(votes)
if poll.Finished() {
p.log.Verbo("Poll is finished")
delete(p.m, requestID)
p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics
return poll.votes, true
}
p.m[requestID] = poll
return nil, false
}
func (p *polls) String() string {
sb := strings.Builder{}
sb.WriteString(fmt.Sprintf("Current polls: (Size = %d)", len(p.m)))
for requestID, poll := range p.m {
sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll))
}
return sb.String()
}
// poll represents the current state of a network poll for a vertex
type poll struct {
votes ids.UniqueBag
numPending int
}
// Vote registers a vote for this poll
func (p *poll) Vote(votes []ids.ID) {
if p.numPending > 0 {
p.numPending--
p.votes.Add(uint(p.numPending), votes...)
}
}
// Finished returns true if the poll has completed, with no more required
// responses
func (p poll) Finished() bool { return p.numPending <= 0 }
func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.numPending) }
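
The TODO at the top of this (now deleted) file is only partially realized by the new poll package, which skips DAG traversal rather than tracking transitive counts. The bookkeeping the comment describes would look roughly like the following sketch; the type and field names are illustrative, not code from this diff:

package sketch

import "github.com/ava-labs/gecko/ids"

// conservativePoll sketches the early termination described in the TODO:
// count how many responses reference each ID, promote IDs that reach alpha
// into a chit list, and terminate once too few responses remain for any
// other ID to reach alpha.
type conservativePoll struct {
	alpha      int
	numPending int              // queries sent but not yet answered
	responses  [][]ids.ID       // responses still referencing undecided IDs
	counts     map[[32]byte]int // reference count per undecided ID
	chits      []ids.ID         // IDs that reached alpha references
}

func newConservativePoll(alpha, k int) *conservativePoll {
	return &conservativePoll{
		alpha:      alpha,
		numPending: k,
		counts:     map[[32]byte]int{},
	}
}

// vote records one response, promoting any ID that reaches alpha references.
func (p *conservativePoll) vote(votes []ids.ID) {
	p.numPending--
	var remaining []ids.ID
	for _, id := range votes {
		key := id.Key()
		if p.counts[key] >= p.alpha {
			continue // already promoted to the chit list
		}
		p.counts[key]++
		if p.counts[key] == p.alpha {
			p.promote(id)
			continue
		}
		remaining = append(remaining, id)
	}
	if len(remaining) > 0 {
		p.responses = append(p.responses, remaining)
	}
}

// promote moves [id] into the chit list and strips it from every stored
// response, dropping responses that become empty.
func (p *conservativePoll) promote(id ids.ID) {
	p.chits = append(p.chits, id)
	kept := p.responses[:0]
	for _, votes := range p.responses {
		filtered := votes[:0]
		for _, v := range votes {
			if !v.Equals(id) {
				filtered = append(filtered, v)
			}
		}
		if len(filtered) > 0 {
			kept = append(kept, filtered)
		}
	}
	p.responses = kept
}

// finished terminates once every response has arrived or no undecided ID can
// still reach alpha references.
func (p *conservativePoll) finished() bool {
	return p.numPending == 0 || len(p.responses)+p.numPending < p.alpha
}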

View File

@ -54,6 +54,8 @@ func (vtx *uniqueVertex) refresh() {
func (vtx *uniqueVertex) Evict() {
if vtx.v != nil {
vtx.v.unique = false
// make sure the parents are able to be garbage collected
vtx.v.parents = nil
}
}

View File

@ -12,6 +12,7 @@ import (
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/snow/consensus/avalanche"
"github.com/ava-labs/gecko/snow/consensus/snowstorm"
"github.com/ava-labs/gecko/snow/engine/avalanche/poll"
"github.com/ava-labs/gecko/snow/engine/common"
"github.com/ava-labs/gecko/snow/events"
"github.com/ava-labs/gecko/utils/formatting"
@ -31,7 +32,7 @@ type Transitive struct {
Config
bootstrapper
polls polls // track people I have asked for their preference
polls poll.Set // track people I have asked for their preference
// vtxReqs prevents asking validators for the same vertex
vtxReqs common.Requests
@ -57,9 +58,12 @@ func (t *Transitive) Initialize(config Config) error {
t.onFinished = t.finishBootstrapping
t.polls.log = config.Context.Log
t.polls.numPolls = t.numPolls
t.polls.m = make(map[uint32]poll)
factory := poll.NewEarlyTermNoTraversalFactory(int(config.Params.Alpha))
t.polls = poll.NewSet(factory,
config.Context.Log,
config.Params.Namespace,
config.Params.Metrics,
)
return t.bootstrapper.Initialize(config.BootstrapConfig)
}
@ -169,7 +173,11 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxByt
t.Config.Context.Log.Verbo("Put(%s, %d, %s) called", vdr, requestID, vtxID)
if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
if requestID == network.GossipMsgRequestID {
t.Config.Context.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
} else {
t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID)
}
return nil
}
@ -307,7 +315,7 @@ func (t *Transitive) Notify(msg common.Message) error {
}
func (t *Transitive) repoll() error {
if len(t.polls.m) >= t.Params.ConcurrentRepolls || t.errs.Errored() {
if t.polls.Len() >= t.Params.ConcurrentRepolls || t.errs.Errored() {
return nil
}
@ -316,7 +324,7 @@ func (t *Transitive) repoll() error {
return err
}
for i := len(t.polls.m); i < t.Params.ConcurrentRepolls; i++ {
for i := t.polls.Len(); i < t.Params.ConcurrentRepolls; i++ {
if err := t.batch(nil, false /*=force*/, true /*=empty*/); err != nil {
return err
}
@ -335,10 +343,10 @@ func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) (bool, error) {
func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, error) {
issued := true
vts := []avalanche.Vertex{vtx}
for len(vts) > 0 {
vtx := vts[0]
vts = vts[1:]
vertexHeap := newMaxVertexHeap()
vertexHeap.Push(vtx)
for vertexHeap.Len() > 0 {
vtx := vertexHeap.Pop()
if t.Consensus.VertexIssued(vtx) {
continue
@ -353,7 +361,7 @@ func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, er
t.sendRequest(vdr, parent.ID())
issued = false
} else {
vts = append(vts, parent)
vertexHeap.Push(parent)
}
}
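
newMaxVertexHeap isn't shown in this diff, but the switch from a FIFO slice to a height-ordered heap means higher (newer) vertices are processed before their ancestors, so shared ancestors aren't queued and traversed repeatedly. A sketch of such a heap over container/heap, assuming vertices expose a Height() accessor; the real implementation may differ (for instance, by also deduplicating on vertex ID):

package sketch

import (
	"container/heap"

	"github.com/ava-labs/gecko/ids"
)

// vertex is the minimal surface the heap needs; avalanche.Vertex is assumed
// to provide at least this (illustrative).
type vertex interface {
	ID() ids.ID
	Height() uint64
}

// maxVertexHeap pops the highest vertex first, so children are handled
// before their ancestors.
type maxVertexHeap []vertex

func (h maxVertexHeap) Len() int            { return len(h) }
func (h maxVertexHeap) Less(i, j int) bool  { return h[i].Height() > h[j].Height() }
func (h maxVertexHeap) Swap(i, j int)       { h[i], h[j] = h[j], h[i] }
func (h *maxVertexHeap) Push(x interface{}) { *h = append(*h, x.(vertex)) }
func (h *maxVertexHeap) Pop() interface{} {
	old := *h
	n := len(old)
	v := old[n-1]
	*h = old[:n-1]
	return v
}

// PushVtx and PopVtx wrap container/heap so callers don't touch the raw
// heap.Interface methods.
func PushVtx(h *maxVertexHeap, v vertex) { heap.Push(h, v) }
func PopVtx(h *maxVertexHeap) vertex     { return heap.Pop(h).(vertex) }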
@ -471,8 +479,11 @@ func (t *Transitive) issueRepoll() {
vdrSet.Add(vdr.ID())
}
vdrCopy := ids.ShortSet{}
vdrCopy.Union(vdrSet)
t.RequestID++
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrCopy) {
t.Config.Sender.PullQuery(vdrSet, t.RequestID, vtxID)
} else if numVdrs < p.K {
t.Config.Context.Log.Error("re-query for %s was dropped due to an insufficient number of validators", vtxID)
@ -510,3 +521,8 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) {
t.numVtxRequests.Set(float64(t.vtxReqs.Len())) // Tracks performance statistics
}
// IsBootstrapped returns true iff this chain is done bootstrapping
func (t *Transitive) IsBootstrapped() bool {
return t.bootstrapped
}

View File

@ -3085,3 +3085,120 @@ func TestEngineDuplicatedIssuance(t *testing.T) {
te.Notify(common.PendingTxs)
}
func TestEngineDoubleChit(t *testing.T) {
config := DefaultConfig()
config.Params.Alpha = 2
config.Params.K = 2
vdr0 := validators.GenerateRandomValidator(1)
vdr1 := validators.GenerateRandomValidator(1)
vals := validators.NewSet()
vals.Add(vdr0)
vals.Add(vdr1)
config.Validators = vals
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
sender.Default(true)
sender.CantGetAcceptedFrontier = false
st := &stateTest{t: t}
config.State = st
st.Default(true)
gVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
}
mVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
}
vts := []avalanche.Vertex{gVtx, mVtx}
utxos := []ids.ID{GenerateID()}
tx := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx.Ins.Add(utxos[0])
vtx := &Vtx{
parents: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx},
height: 1,
status: choices.Processing,
bytes: []byte{1, 1, 2, 3},
}
st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} }
st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
switch {
case id.Equals(gVtx.ID()):
return gVtx, nil
case id.Equals(mVtx.ID()):
return mVtx, nil
}
t.Fatalf("Unknown vertex")
panic("Should have errored")
}
te := &Transitive{}
te.Initialize(config)
te.finishBootstrapping()
reqID := new(uint32)
sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, _ []byte) {
*reqID = requestID
if inVdrs.Len() != 2 {
t.Fatalf("Wrong number of validators")
}
if !vtxID.Equals(vtx.ID()) {
t.Fatalf("Wrong vertex requested")
}
}
st.getVertex = func(id ids.ID) (avalanche.Vertex, error) {
switch {
case id.Equals(vtx.ID()):
return vtx, nil
}
t.Fatalf("Unknown vertex")
panic("Should have errored")
}
te.insert(vtx)
votes := ids.Set{}
votes.Add(vtx.ID())
if status := tx.Status(); status != choices.Processing {
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing)
}
te.Chits(vdr0.ID(), *reqID, votes)
if status := tx.Status(); status != choices.Processing {
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing)
}
te.Chits(vdr0.ID(), *reqID, votes)
if status := tx.Status(); status != choices.Processing {
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing)
}
te.Chits(vdr1.ID(), *reqID, votes)
if status := tx.Status(); status != choices.Accepted {
t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Accepted)
}
}

View File

@ -17,11 +17,14 @@ const (
// StatusUpdateFrequency ... bootstrapper logs "processed X blocks/vertices" every [StatusUpdateFrequency] blocks/vertices
StatusUpdateFrequency = 2500
// MaxOutstandingRequests is the maximum number of GetAncestors messages sent but not yet responded to or failed
MaxOutstandingRequests = 8
)
var (
// MaxTimeFetchingAncestors is the maximum amount of time to spend fetching vertices during a call to GetAncestors
MaxTimeFetchingAncestors = 100 * time.Millisecond
MaxTimeFetchingAncestors = 50 * time.Millisecond
)
// Bootstrapper implements the Engine interface.

View File

@ -14,6 +14,9 @@ type Engine interface {
// Return the context of the chain this engine is working on
Context() *snow.Context
// Returns true iff the chain is done bootstrapping
IsBootstrapped() bool
}
// Handler defines the functions that are acted on the node

View File

@ -7,6 +7,10 @@ import (
"github.com/ava-labs/gecko/ids"
)
const (
minRequestsSize = 32
)
type req struct {
vdr ids.ShortID
id uint32
@ -22,7 +26,7 @@ type Requests struct {
// are only in one request at a time.
func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) {
if r.reqsToID == nil {
r.reqsToID = make(map[[20]byte]map[uint32]ids.ID)
r.reqsToID = make(map[[20]byte]map[uint32]ids.ID, minRequestsSize)
}
vdrKey := vdr.Key()
vdrReqs, ok := r.reqsToID[vdrKey]
@ -33,7 +37,7 @@ func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) {
vdrReqs[requestID] = containerID
if r.idToReq == nil {
r.idToReq = make(map[[32]byte]req)
r.idToReq = make(map[[32]byte]req, minRequestsSize)
}
r.idToReq[containerID.Key()] = req{
vdr: vdr,

View File

@ -15,6 +15,7 @@ import (
type EngineTest struct {
T *testing.T
CantIsBootstrapped,
CantStartup,
CantGossip,
CantShutdown,
@ -43,6 +44,7 @@ type EngineTest struct {
CantQueryFailed,
CantChits bool
IsBootstrappedF func() bool
ContextF func() *snow.Context
StartupF, GossipF, ShutdownF func() error
NotifyF func(Message) error
@ -58,6 +60,8 @@ var _ Engine = &EngineTest{}
// Default ...
func (e *EngineTest) Default(cant bool) {
e.CantIsBootstrapped = cant
e.CantStartup = cant
e.CantGossip = cant
e.CantShutdown = cant
@ -354,3 +358,14 @@ func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerI
}
return nil
}
// IsBootstrapped ...
func (e *EngineTest) IsBootstrapped() bool {
if e.IsBootstrappedF != nil {
return e.IsBootstrappedF()
}
if e.CantIsBootstrapped && e.T != nil {
e.T.Fatalf("Unexpectedly called IsBootstrapped")
}
return false
}

View File

@ -115,6 +115,9 @@ func (b *bootstrapper) fetch(blkID ids.ID) error {
// Make sure we don't already have this block
if _, err := b.VM.GetBlock(blkID); err == nil {
if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier {
return b.finish()
}
return nil
}
@ -224,7 +227,8 @@ func (b *bootstrapper) finish() error {
if b.finished {
return nil
}
b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching blocks. executing state transitions...")
b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching %d blocks. executing state transitions...",
b.numFetched)
if err := b.executeAll(b.Blocked, b.numBlocked); err != nil {
return err
@ -262,5 +266,6 @@ func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge)
b.BootstrapConfig.Context.Log.Info("executed %d blocks", numExecuted)
}
}
b.BootstrapConfig.Context.Log.Info("executed %d blocks", numExecuted)
return nil
}

View File

@ -622,3 +622,128 @@ func TestBootstrapperFilterAccepted(t *testing.T) {
t.Fatalf("Blk shouldn't be accepted")
}
}
func TestBootstrapperFinalized(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkID2 := ids.Empty.Prefix(2)
blkBytes0 := []byte{0}
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
blk1 := &Blk{
parent: blk0,
id: blkID1,
height: 1,
status: choices.Unknown,
bytes: blkBytes1,
}
blk2 := &Blk{
parent: blk1,
id: blkID2,
height: 2,
status: choices.Unknown,
bytes: blkBytes2,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
finished := new(bool)
bs.onFinished = func() error { *finished = true; return nil }
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID1)
acceptedIDs.Add(blkID2)
parsedBlk1 := false
parsedBlk2 := false
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID0):
return blk0, nil
case blkID.Equals(blkID1):
if parsedBlk1 {
return blk1, nil
}
return nil, errUnknownBlock
case blkID.Equals(blkID2):
if parsedBlk2 {
return blk2, nil
}
return nil, errUnknownBlock
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes0):
return blk0, nil
case bytes.Equal(blkBytes, blkBytes1):
blk1.status = choices.Processing
parsedBlk1 = true
return blk1, nil
case bytes.Equal(blkBytes, blkBytes2):
blk2.status = choices.Processing
parsedBlk2 = true
return blk2, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
requestIDs := map[[32]byte]uint32{}
sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
requestIDs[vtxID.Key()] = reqID
}
vm.CantBootstrapping = false
if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk0 and blk1
t.Fatal(err)
}
reqID, ok := requestIDs[blkID2.Key()]
if !ok {
t.Fatalf("should have requested blk2")
}
vm.CantBootstrapped = false
if err := bs.MultiPut(peerID, reqID, [][]byte{blkBytes2, blkBytes1}); err != nil {
t.Fatal(err)
}
reqID, ok = requestIDs[blkID1.Key()]
if !ok {
t.Fatalf("should have requested blk1")
}
if err := bs.GetAncestorsFailed(peerID, reqID); err != nil {
t.Fatal(err)
}
if !*finished {
t.Fatalf("Bootstrapping should have finished")
} else if blk0.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
} else if blk2.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
}

View File

@ -13,7 +13,7 @@ type metrics struct {
numPendingRequests, numBlocked prometheus.Gauge
numBootstrapped, numDropped prometheus.Counter
numPolls, numBlkRequests, numBlockedBlk prometheus.Gauge
numBlkRequests, numBlockedBlk prometheus.Gauge
}
// Initialize implements the Engine interface
@ -42,12 +42,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
Name: "sm_bs_dropped",
Help: "Number of dropped bootstrap blocks",
})
m.numPolls = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
Name: "sm_polls",
Help: "Number of pending network polls",
})
m.numBlkRequests = prometheus.NewGauge(
prometheus.GaugeOpts{
Namespace: namespace,
@ -73,9 +67,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr
if err := registerer.Register(m.numDropped); err != nil {
log.Error("Failed to register sm_bs_dropped statistics due to %s", err)
}
if err := registerer.Register(m.numPolls); err != nil {
log.Error("Failed to register sm_polls statistics due to %s", err)
}
if err := registerer.Register(m.numBlkRequests); err != nil {
log.Error("Failed to register sm_blk_requests statistics due to %s", err)
}

View File

@ -0,0 +1,73 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package poll
import (
"fmt"
"github.com/ava-labs/gecko/ids"
)
type earlyTermNoTraversalFactory struct {
alpha int
}
// NewEarlyTermNoTraversalFactory returns a factory that returns polls with
// early termination, without doing DAG traversals
func NewEarlyTermNoTraversalFactory(alpha int) Factory {
return &earlyTermNoTraversalFactory{alpha: alpha}
}
func (f *earlyTermNoTraversalFactory) New(vdrs ids.ShortSet) Poll {
return &earlyTermNoTraversalPoll{
polled: vdrs,
alpha: f.alpha,
}
}
// earlyTermNoTraversalPoll finishes when any remaining validators can't change
// the result of the poll. However, it does not terminate tightly at this bound.
// It terminates as quickly as it can without performing any DAG traversals.
type earlyTermNoTraversalPoll struct {
votes ids.Bag
polled ids.ShortSet
alpha int
}
// Vote registers a response for this poll
func (p *earlyTermNoTraversalPoll) Vote(vdr ids.ShortID, vote ids.ID) {
if !p.polled.Contains(vdr) {
// if the validator wasn't polled or already responded to this poll, we
// should just drop the vote
return
}
// make sure that a validator can't respond multiple times
p.polled.Remove(vdr)
// track the votes the validator responded with
p.votes.Add(vote)
}
// Drop any future response for this poll
func (p *earlyTermNoTraversalPoll) Drop(vdr ids.ShortID) {
p.polled.Remove(vdr)
}
// Finished returns true once the outcome of the poll can no longer change
func (p *earlyTermNoTraversalPoll) Finished() bool {
remaining := p.polled.Len()
received := p.votes.Len()
_, freq := p.votes.Mode()
return remaining == 0 || // All k nodes responded
freq >= p.alpha || // An alpha majority has returned
received+remaining < p.alpha // An alpha majority can never return
}
// Result returns the result of this poll
func (p *earlyTermNoTraversalPoll) Result() ids.Bag { return p.votes }
func (p *earlyTermNoTraversalPoll) String() string {
return fmt.Sprintf("waiting on %s", p.polled)
}
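
The three clauses above cover every exit, and plugging in numbers makes the bound easy to check: with k = 5 and alpha = 4, suppose two validators voted (for different blocks) and two responses were dropped via Drop, leaving one pending. Then received = 2, remaining = 1, and the Mode frequency is 1, so 2 + 1 < 4 and the poll can finish without waiting on the straggler. A standalone sketch of that arithmetic (it mirrors Finished rather than calling the poll type):

package main

import "fmt"

func main() {
	// k = 5, alpha = 4: two validators voted for different blocks,
	// two responses were dropped, one is still pending.
	const alpha = 4

	received := 2  // votes recorded in the bag
	remaining := 1 // responses still outstanding
	freq := 1      // highest vote count for any single block (Mode)

	finished := remaining == 0 || // all k nodes responded
		freq >= alpha || // an alpha majority has returned
		received+remaining < alpha // an alpha majority can never return

	fmt.Println(finished) // true: even a unanimous straggler can't reach alpha
}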

Some files were not shown because too many files have changed in this diff.