Merge branch 'master' into log-level-and-format

This commit is contained in:
Gabriel Cardona 2020-06-15 15:07:44 -07:00
commit ef0b9bfb1e
16 changed files with 649 additions and 151 deletions

View File

@ -8,12 +8,14 @@ import (
"fmt"
"net/http"
"sync"
"testing"
"github.com/gorilla/rpc/v2"
"github.com/ava-labs/gecko/chains/atomic"
"github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/database/encdb"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/database/prefixdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/engine/common"
@ -29,8 +31,17 @@ const (
// maxUserPassLen is the maximum length of the username or password allowed
maxUserPassLen = 1024
// requiredPassScore defines the score a password must achieve to be accepted
// as a password with strong characteristics by the zxcvbn package
// maxCheckedPassLen limits the length of the password that should be
// strength checked.
//
// As per issue https://github.com/ava-labs/gecko/issues/195 it was found
// the longer the length of password the slower zxcvbn.PasswordStrength()
// performs. To avoid performance issues, and a DoS vector, we only check
// the first 50 characters of the password.
maxCheckedPassLen = 50
// requiredPassScore defines the score a password must achieve to be
// accepted as a password with strong characteristics by the zxcvbn package
//
// The scoring mechanism is defined as follows:
//
@ -135,37 +146,11 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Info("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username)
if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen {
return errUserPassMaxLength
}
if args.Username == "" {
return errEmptyUsername
}
if usr, err := ks.getUser(args.Username); err == nil || usr != nil {
return fmt.Errorf("user already exists: %s", args.Username)
}
if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore {
return errWeakPassword
}
usr := &User{}
if err := usr.Initialize(args.Password); err != nil {
ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username)
if err := ks.AddUser(args.Username, args.Password); err != nil {
return err
}
usrBytes, err := ks.codec.Marshal(usr)
if err != nil {
return err
}
if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil {
return err
}
ks.users[args.Username] = usr
reply.Success = true
return nil
}
@ -403,3 +388,51 @@ func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database
return encDB, nil
}
// AddUser attempts to register this username and password as a new user of the
// keystore. It validates the credentials, persists the new user to the user
// database, and caches it in memory.
func (ks *Keystore) AddUser(username, password string) error {
	switch {
	case len(username) > maxUserPassLen || len(password) > maxUserPassLen:
		return errUserPassMaxLength
	case username == "":
		return errEmptyUsername
	}

	// Reject the registration if the username is already taken.
	if usr, err := ks.getUser(username); err == nil || usr != nil {
		return fmt.Errorf("user already exists: %s", username)
	}

	// Only strength-check a bounded prefix of the password; zxcvbn slows down
	// markedly on long inputs (see maxCheckedPassLen).
	checked := password
	if len(checked) > maxCheckedPassLen {
		checked = checked[:maxCheckedPassLen]
	}
	if zxcvbn.PasswordStrength(checked, nil).Score < requiredPassScore {
		return errWeakPassword
	}

	newUser := &User{}
	if err := newUser.Initialize(password); err != nil {
		return err
	}
	serialized, err := ks.codec.Marshal(newUser)
	if err != nil {
		return err
	}
	if err := ks.userDB.Put([]byte(username), serialized); err != nil {
		return err
	}
	ks.users[username] = newUser
	return nil
}
// CreateTestKeystore returns a new keystore that can be utilized for testing.
// It is backed by an in-memory database and logs nothing.
func CreateTestKeystore(t *testing.T) *Keystore {
	testKS := &Keystore{}
	testKS.Initialize(logging.NoLog{}, memdb.New())
	return testKS
}

View File

@ -10,9 +10,7 @@ import (
"reflect"
"testing"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/utils/logging"
)
var (
@ -22,8 +20,7 @@ var (
)
func TestServiceListNoUsers(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
reply := ListUsersReply{}
if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil {
@ -35,8 +32,7 @@ func TestServiceListNoUsers(t *testing.T) {
}
func TestServiceCreateUser(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -75,8 +71,7 @@ func genStr(n int) string {
// TestServiceCreateUserArgsChecks generates excessively long usernames or
// passwords to ensure the sanity checks on string length are enforced
func TestServiceCreateUserArgsCheck(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -117,8 +112,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) {
// TestServiceCreateUserWeakPassword tests creating a new user with a weak
// password to ensure the password strength check is working
func TestServiceCreateUserWeakPassword(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -138,8 +132,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) {
}
func TestServiceCreateDuplicate(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -166,8 +159,7 @@ func TestServiceCreateDuplicate(t *testing.T) {
}
func TestServiceCreateUserNoName(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
reply := CreateUserReply{}
if err := ks.CreateUser(nil, &CreateUserArgs{
@ -178,8 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) {
}
func TestServiceUseBlockchainDB(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -218,8 +209,7 @@ func TestServiceUseBlockchainDB(t *testing.T) {
}
func TestServiceExportImport(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
{
reply := CreateUserReply{}
@ -252,8 +242,7 @@ func TestServiceExportImport(t *testing.T) {
t.Fatal(err)
}
newKS := Keystore{}
newKS.Initialize(logging.NoLog{}, memdb.New())
newKS := CreateTestKeystore(t)
{
reply := ImportUserReply{}
@ -358,11 +347,10 @@ func TestServiceDeleteUser(t *testing.T) {
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
ks := CreateTestKeystore(t)
if tt.setup != nil {
if err := tt.setup(&ks); err != nil {
if err := tt.setup(ks); err != nil {
t.Fatalf("failed to create user setup in keystore: %v", err)
}
}

View File

@ -45,7 +45,10 @@ func main() {
}
// Track if sybil control is enforced
if !Config.EnableStaking {
if !Config.EnableStaking && Config.EnableP2PTLS {
log.Warn("Staking is disabled. Sybil control is not enforced.")
}
if !Config.EnableStaking && !Config.EnableP2PTLS {
log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.")
}

View File

@ -37,6 +37,7 @@ const (
var (
Config = node.Config{}
Err error
defaultNetworkName = genesis.TestnetName
defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))
@ -49,7 +50,8 @@ var (
)
var (
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
errStakingRequiresTLS = errors.New("if staking is enabled, network TLS must also be enabled")
)
// GetIPs returns the default IPs for each network
@ -169,7 +171,7 @@ func init() {
version := fs.Bool("version", false, "If true, print version and quit")
// NetworkID:
networkName := fs.String("network-id", genesis.TestnetName, "Network ID this node will connect to")
networkName := fs.String("network-id", defaultNetworkName, "Network ID this node will connect to")
// Ava fees:
fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva")
@ -200,7 +202,9 @@ func init() {
// Staking:
consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
// TODO - keeping same flag for backwards compatibility, should be changed to "staking-enabled"
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Enable staking. If enabled, Network TLS is required.")
fs.BoolVar(&Config.EnableP2PTLS, "p2p-tls-enabled", true, "Require TLS to authenticate network communication")
fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")
@ -234,7 +238,15 @@ func init() {
ferr := fs.Parse(os.Args[1:])
if *version { // If --version used, print version and exit
fmt.Println(node.Version.String())
networkID, err := genesis.NetworkID(defaultNetworkName)
if errs.Add(err); err != nil {
return
}
networkGeneration := genesis.NetworkName(networkID)
fmt.Printf(
"%s [database=%s, network=%s/%s]\n",
node.Version, dbVersion, defaultNetworkName, networkGeneration,
)
os.Exit(0)
}
@ -318,7 +330,13 @@ func init() {
*bootstrapIDs = strings.Join(defaultBootstrapIDs, ",")
}
}
if Config.EnableStaking {
if Config.EnableStaking && !Config.EnableP2PTLS {
errs.Add(errStakingRequiresTLS)
return
}
if Config.EnableP2PTLS {
i := 0
cb58 := formatting.CB58{}
for _, id := range strings.Split(*bootstrapIDs, ",") {

View File

@ -21,6 +21,7 @@ import (
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
@ -278,8 +279,11 @@ func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d",
containerIDs.Len())
n.log.Error("failed to build AcceptedFrontier(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
return // Packing message failed
}
@ -291,7 +295,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID)
n.log.Debug("failed to send AcceptedFrontier(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.acceptedFrontier.numFailed.Inc()
} else {
n.acceptedFrontier.numSent.Inc()
@ -302,6 +310,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ
func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.GetAccepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("failed to build GetAccepted(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
@ -319,6 +332,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send GetAccepted(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) })
n.getAccepted.numFailed.Inc()
} else {
@ -331,8 +349,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request
func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
msg, err := n.b.Accepted(chainID, requestID, containerIDs)
if err != nil {
n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d",
containerIDs.Len())
n.log.Error("failed to build Accepted(%s, %d, %s): %s",
chainID,
requestID,
containerIDs,
err)
return // Packing message failed
}
@ -344,33 +365,17 @@ func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID ui
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send an Accepted message to: %s", validatorID)
n.log.Debug("failed to send Accepted(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerIDs)
n.accepted.numFailed.Inc()
} else {
n.accepted.numSent.Inc()
}
}
// Get implements the Sender interface.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.Get(chainID, requestID, containerID)
n.log.AssertNoError(err)
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Get message to: %s", validatorID)
n.get.numFailed.Inc()
} else {
n.get.numSent.Inc()
}
}
// GetAncestors implements the Sender interface.
func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
msg, err := n.b.GetAncestors(chainID, requestID, containerID)
@ -387,36 +392,18 @@ func (n *network) GetAncestors(validatorID ids.ShortID, chainID ids.ID, requestI
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send GetAncestors(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.GetAncestorsFailed(validatorID, chainID, requestID) })
n.getAncestors.numFailed.Inc()
n.log.Debug("failed to send a GetAncestors message to: %s", validatorID)
} else {
n.getAncestors.numSent.Inc()
}
}
// Put implements the Sender interface.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.Put(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build Put message because of container of size %d", len(container))
return
}
n.stateLock.Lock()
defer n.stateLock.Unlock()
peer, sent := n.peers[validatorID.Key()]
if sent {
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Put message to: %s", validatorID)
n.put.numFailed.Inc()
} else {
n.put.numSent.Inc()
}
}
// MultiPut implements the Sender interface.
func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containers [][]byte) {
msg, err := n.b.MultiPut(chainID, requestID, containers)
@ -433,22 +420,90 @@ func (n *network) MultiPut(validatorID ids.ShortID, chainID ids.ID, requestID ui
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a MultiPut message to: %s", validatorID)
n.log.Debug("failed to send MultiPut(%s, %s, %d, %d)",
validatorID,
chainID,
requestID,
len(containers))
n.multiPut.numFailed.Inc()
} else {
n.multiPut.numSent.Inc()
}
}
// Get implements the Sender interface. It asks [validatorID] to send the
// container [containerID] on chain [chainID]; if the message cannot be
// delivered, the router is notified of the failure.
func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
	msg, err := n.b.Get(chainID, requestID, containerID)
	// A Get message has fixed-size fields, so packing should never fail.
	n.log.AssertNoError(err)

	n.stateLock.Lock()
	defer n.stateLock.Unlock()

	delivered := false
	if peer, connected := n.peers[validatorID.Key()]; connected {
		delivered = peer.send(msg)
	}
	if delivered {
		n.get.numSent.Inc()
		return
	}
	n.log.Debug("failed to send Get(%s, %s, %d, %s)",
		validatorID,
		chainID,
		requestID,
		containerID)
	// Report the failure so the requesting engine does not wait for a timeout.
	n.executor.Add(func() { n.router.GetFailed(validatorID, chainID, requestID) })
	n.get.numFailed.Inc()
}
// Put implements the Sender interface. It sends container [containerID] with
// payload [container] to [validatorID] on chain [chainID]. Build or send
// failures are logged and counted; no retry is attempted here.
func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
	msg, err := n.b.Put(chainID, requestID, containerID, container)
	if err != nil {
		// Packing can fail, e.g. if the container is too large for a message.
		n.log.Error("failed to build Put(%s, %d, %s): %s. len(container) : %d",
			chainID,
			requestID,
			containerID,
			err,
			len(container))
		return
	}

	n.stateLock.Lock()
	defer n.stateLock.Unlock()

	delivered := false
	if peer, connected := n.peers[validatorID.Key()]; connected {
		delivered = peer.send(msg)
	}
	if delivered {
		n.put.numSent.Inc()
		return
	}
	n.log.Debug("failed to send Put(%s, %s, %d, %s)",
		validatorID,
		chainID,
		requestID,
		containerID)
	n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
	n.put.numFailed.Inc()
}
// PushQuery implements the Sender interface.
func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
msg, err := n.b.PushQuery(chainID, requestID, containerID, container)
if err != nil {
n.log.Error("failed to build PushQuery(%s, %d, %s): %s. len(container): %d",
chainID,
requestID,
containerID,
err,
len(container))
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
for _, validatorID := range validatorIDs.List() {
vID := validatorID
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
}
n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
return // Packing message failed
}
@ -462,7 +517,12 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PushQuery message to: %s", vID)
n.log.Debug("failed to send PushQuery(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container})
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pushQuery.numFailed.Inc()
} else {
@ -486,7 +546,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed sending a PullQuery message to: %s", vID)
n.log.Debug("failed to send PullQuery(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
containerID)
n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) })
n.pullQuery.numFailed.Inc()
} else {
@ -499,7 +563,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
msg, err := n.b.Chits(chainID, requestID, votes)
if err != nil {
n.log.Error("failed to build Chits message because of %d votes", votes.Len())
n.log.Error("failed to build Chits(%s, %d, %s): %s",
chainID,
requestID,
votes,
err)
return
}
@ -511,7 +579,11 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
sent = peer.send(msg)
}
if !sent {
n.log.Debug("failed to send a Chits message to: %s", validatorID)
n.log.Debug("failed to send Chits(%s, %s, %d, %s)",
validatorID,
chainID,
requestID,
votes)
n.chits.numFailed.Inc()
} else {
n.chits.numSent.Inc()
@ -521,7 +593,8 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3
// Gossip attempts to gossip the container to the network
func (n *network) Gossip(chainID, containerID ids.ID, container []byte) {
if err := n.gossipContainer(chainID, containerID, container); err != nil {
n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
n.log.Debug("failed to Gossip(%s, %s): %s", chainID, containerID, err)
n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}
@ -695,7 +768,9 @@ func (n *network) gossip() {
}
msg, err := n.b.PeerList(ips)
if err != nil {
n.log.Warn("failed to gossip PeerList message due to %s", err)
n.log.Error("failed to build peer list to gossip: %s. len(ips): %d",
err,
len(ips))
continue
}

View File

@ -34,6 +34,7 @@ type Config struct {
// Staking configuration
StakingIP utils.IPDesc
EnableP2PTLS bool
EnableStaking bool
StakingKeyFile string
StakingCertFile string

View File

@ -119,7 +119,7 @@ func (n *Node) initNetworking() error {
dialer := network.NewDialer(TCP)
var serverUpgrader, clientUpgrader network.Upgrader
if n.Config.EnableStaking {
if n.Config.EnableP2PTLS {
cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile)
if err != nil {
return err
@ -253,7 +253,7 @@ func (n *Node) initDatabase() error {
// Otherwise, it is a hash of the TLS certificate that this node
// uses for P2P communication
func (n *Node) initNodeID() error {
if !n.Config.EnableStaking {
if !n.Config.EnableP2PTLS {
n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String())))
n.Log.Info("Set the node's ID to %s", n.ID)
return nil

View File

@ -7,6 +7,8 @@ import (
"sync"
"time"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking/timeout"
"github.com/ava-labs/gecko/utils/logging"
@ -67,7 +69,7 @@ func (sr *ChainRouter) RemoveChain(chainID ids.ID) {
sr.lock.RLock()
chain, exists := sr.chains[chainID.Key()]
if !exists {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("can't remove unknown chain %s", chainID)
sr.lock.RUnlock()
return
}
@ -95,7 +97,7 @@ func (sr *ChainRouter) GetAcceptedFrontier(validatorID ids.ShortID, chainID ids.
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFrontier(validatorID, requestID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("GetAcceptedFrontier(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
}
@ -111,7 +113,7 @@ func (sr *ChainRouter) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID,
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("AcceptedFrontier(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
}
}
@ -132,7 +134,7 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetAcceptedFrontierFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
@ -147,7 +149,7 @@ func (sr *ChainRouter) GetAccepted(validatorID ids.ShortID, chainID ids.ID, requ
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAccepted(validatorID, requestID, containerIDs)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("GetAccepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
}
}
@ -163,7 +165,7 @@ func (sr *ChainRouter) Accepted(validatorID ids.ShortID, chainID ids.ID, request
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Accepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs)
}
}
@ -183,7 +185,7 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
@ -198,7 +200,7 @@ func (sr *ChainRouter) GetAncestors(validatorID ids.ShortID, chainID ids.ID, req
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAncestors(validatorID, requestID, containerID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("GetAncestors(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
}
@ -215,7 +217,7 @@ func (sr *ChainRouter) MultiPut(validatorID ids.ShortID, chainID ids.ID, request
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("MultiPut(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID, len(containers))
}
}
@ -234,7 +236,7 @@ func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.I
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetAncestorsFailed(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
@ -248,7 +250,7 @@ func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID ui
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Get(validatorID, requestID, containerID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Get(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
}
}
@ -265,7 +267,8 @@ func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID ui
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Put(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}
@ -284,7 +287,7 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("GetFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
@ -298,7 +301,8 @@ func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, reques
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.PushQuery(validatorID, requestID, containerID, container)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("PushQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container})
}
}
@ -311,7 +315,7 @@ func (sr *ChainRouter) PullQuery(validatorID ids.ShortID, chainID ids.ID, reques
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.PullQuery(validatorID, requestID, containerID)
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("PullQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID)
}
}
@ -327,7 +331,7 @@ func (sr *ChainRouter) Chits(validatorID ids.ShortID, chainID ids.ID, requestID
sr.timeouts.Cancel(validatorID, chainID, requestID)
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Debug("Chits(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, votes)
}
}
@ -346,7 +350,7 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ
return
}
} else {
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
sr.log.Error("QueryFailed(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID)
}
sr.timeouts.Cancel(validatorID, chainID, requestID)
}

View File

@ -666,13 +666,20 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I
}
addresses, _ := user.Addresses(db)
addresses = append(addresses, sk.PublicKey().Address())
newAddress := sk.PublicKey().Address()
reply.Address = service.vm.Format(newAddress.Bytes())
for _, address := range addresses {
if newAddress.Equals(address) {
return nil
}
}
addresses = append(addresses, newAddress)
if err := user.SetAddresses(db, addresses); err != nil {
return fmt.Errorf("problem saving addresses: %w", err)
}
reply.Address = service.vm.Format(sk.PublicKey().Address().Bytes())
return nil
}

View File

@ -9,8 +9,10 @@ import (
"github.com/stretchr/testify/assert"
"github.com/ava-labs/gecko/api/keystore"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/utils/crypto"
"github.com/ava-labs/gecko/utils/formatting"
)
@ -340,3 +342,113 @@ func TestCreateVariableCapAsset(t *testing.T) {
t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID)
}
}
// TestImportAvmKey checks that a freshly generated SECP256K1 private key can
// be imported into a user's keystore via the ImportKey service endpoint.
func TestImportAvmKey(t *testing.T) {
	_, vm, s := setup(t)
	defer func() {
		vm.Shutdown()
		ctx.Lock.Unlock()
	}()

	userKeystore := keystore.CreateTestKeystore(t)

	username := "bobby"
	password := "StrnasfqewiurPasswdn56d"
	if err := userKeystore.AddUser(username, password); err != nil {
		t.Fatal(err)
	}

	vm.ctx.Keystore = userKeystore.NewBlockchainKeyStore(vm.ctx.ChainID)
	_, err := vm.ctx.Keystore.GetDatabase(username, password)
	if err != nil {
		t.Fatal(err)
	}

	factory := crypto.FactorySECP256K1R{}
	skIntf, err := factory.NewPrivateKey()
	if err != nil {
		// %s, not %w: the %w verb is only valid in fmt.Errorf, and go vet
		// flags it in t.Fatalf.
		t.Fatalf("problem generating private key: %s", err)
	}
	sk := skIntf.(*crypto.PrivateKeySECP256K1R)

	args := ImportKeyArgs{
		Username:   username,
		Password:   password,
		PrivateKey: formatting.CB58{Bytes: sk.Bytes()},
	}
	reply := ImportKeyReply{}
	if err = s.ImportKey(nil, &args, &reply); err != nil {
		t.Fatal(err)
	}
}
// TestImportAvmKeyNoDuplicates checks that importing the same private key
// twice is idempotent: the returned address is stable and no duplicate
// address entry is created for the user.
func TestImportAvmKeyNoDuplicates(t *testing.T) {
	_, vm, s := setup(t)
	defer func() {
		vm.Shutdown()
		ctx.Lock.Unlock()
	}()

	userKeystore := keystore.CreateTestKeystore(t)

	username := "bobby"
	password := "StrnasfqewiurPasswdn56d"
	if err := userKeystore.AddUser(username, password); err != nil {
		t.Fatal(err)
	}

	vm.ctx.Keystore = userKeystore.NewBlockchainKeyStore(vm.ctx.ChainID)
	_, err := vm.ctx.Keystore.GetDatabase(username, password)
	if err != nil {
		t.Fatal(err)
	}

	factory := crypto.FactorySECP256K1R{}
	skIntf, err := factory.NewPrivateKey()
	if err != nil {
		// %s, not %w: the %w verb is only valid in fmt.Errorf, and go vet
		// flags it in t.Fatalf.
		t.Fatalf("problem generating private key: %s", err)
	}
	sk := skIntf.(*crypto.PrivateKeySECP256K1R)

	args := ImportKeyArgs{
		Username:   username,
		Password:   password,
		PrivateKey: formatting.CB58{Bytes: sk.Bytes()},
	}
	reply := ImportKeyReply{}
	if err = s.ImportKey(nil, &args, &reply); err != nil {
		t.Fatal(err)
	}

	expectedAddress := vm.Format(sk.PublicKey().Address().Bytes())
	if reply.Address != expectedAddress {
		t.Fatalf("Reply address: %s did not match expected address: %s", reply.Address, expectedAddress)
	}

	// Import the identical key a second time; the reply must be unchanged.
	reply2 := ImportKeyReply{}
	if err = s.ImportKey(nil, &args, &reply2); err != nil {
		t.Fatal(err)
	}
	if reply2.Address != expectedAddress {
		t.Fatalf("Reply address: %s did not match expected address: %s", reply2.Address, expectedAddress)
	}

	addrsArgs := ListAddressesArgs{
		Username: username,
		Password: password,
	}
	addrsReply := ListAddressesResponse{}
	if err := s.ListAddresses(nil, &addrsArgs, &addrsReply); err != nil {
		t.Fatal(err)
	}

	if len(addrsReply.Addresses) != 1 {
		t.Fatal("Importing the same key twice created duplicate addresses")
	}
	if addrsReply.Addresses[0] != expectedAddress {
		t.Fatal("List addresses returned an incorrect address")
	}
}

View File

@ -1275,7 +1275,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is
if err := tx.initialize(service.vm); err != nil {
return fmt.Errorf("error initializing tx: %s", err)
}
service.vm.unissuedEvents.Push(tx)
service.vm.unissuedEvents.Add(tx)
response.TxID = tx.ID()
case DecisionTx:
if err := tx.initialize(service.vm); err != nil {
@ -1290,7 +1290,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is
service.vm.unissuedAtomicTxs = append(service.vm.unissuedAtomicTxs, tx)
response.TxID = tx.ID()
default:
return errors.New("Could not parse given tx. Must be a TimedTx, DecisionTx, or AtomicTx")
return errors.New("Could not parse given tx. Provided tx needs to be a TimedTx, DecisionTx, or AtomicTx")
}
service.vm.resetTimer()

View File

@ -6,6 +6,9 @@ package platformvm
import (
"encoding/json"
"testing"
"time"
"github.com/ava-labs/gecko/utils/formatting"
)
func TestAddDefaultSubnetValidator(t *testing.T) {
@ -50,3 +53,184 @@ func TestImportKey(t *testing.T) {
t.Fatal(err)
}
}
func TestIssueTxKeepsTimedEventsSorted(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
service := Service{vm: vm}
pendingValidatorStartTime1 := defaultGenesisTime.Add(3 * time.Second)
pendingValidatorEndTime1 := pendingValidatorStartTime1.Add(MinimumStakingDuration)
nodeIDKey1, _ := vm.factory.NewPrivateKey()
nodeID1 := nodeIDKey1.PublicKey().Address()
addPendingValidatorTx1, err := vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1,
defaultStakeAmount,
uint64(pendingValidatorStartTime1.Unix()),
uint64(pendingValidatorEndTime1.Unix()),
nodeID1,
nodeID1,
NumberOfShares,
testNetworkID,
defaultKey,
)
if err != nil {
t.Fatal(err)
}
txBytes1, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx1})
if err != nil {
t.Fatal(err)
}
args1 := &IssueTxArgs{}
args1.Tx = formatting.CB58{Bytes: txBytes1}
reply1 := IssueTxResponse{}
err = service.IssueTx(nil, args1, &reply1)
if err != nil {
t.Fatal(err)
}
pendingValidatorStartTime2 := defaultGenesisTime.Add(2 * time.Second)
pendingValidatorEndTime2 := pendingValidatorStartTime2.Add(MinimumStakingDuration)
nodeIDKey2, _ := vm.factory.NewPrivateKey()
nodeID2 := nodeIDKey2.PublicKey().Address()
addPendingValidatorTx2, err := vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1,
defaultStakeAmount,
uint64(pendingValidatorStartTime2.Unix()),
uint64(pendingValidatorEndTime2.Unix()),
nodeID2,
nodeID2,
NumberOfShares,
testNetworkID,
defaultKey,
)
if err != nil {
t.Fatal(err)
}
txBytes2, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx2})
if err != nil {
t.Fatal(err)
}
args2 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes2}}
reply2 := IssueTxResponse{}
err = service.IssueTx(nil, &args2, &reply2)
if err != nil {
t.Fatal(err)
}
pendingValidatorStartTime3 := defaultGenesisTime.Add(10 * time.Second)
pendingValidatorEndTime3 := pendingValidatorStartTime3.Add(MinimumStakingDuration)
nodeIDKey3, _ := vm.factory.NewPrivateKey()
nodeID3 := nodeIDKey3.PublicKey().Address()
addPendingValidatorTx3, err := vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1,
defaultStakeAmount,
uint64(pendingValidatorStartTime3.Unix()),
uint64(pendingValidatorEndTime3.Unix()),
nodeID3,
nodeID3,
NumberOfShares,
testNetworkID,
defaultKey,
)
if err != nil {
t.Fatal(err)
}
txBytes3, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx3})
if err != nil {
t.Fatal(err)
}
args3 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes3}}
reply3 := IssueTxResponse{}
err = service.IssueTx(nil, &args3, &reply3)
if err != nil {
t.Fatal(err)
}
pendingValidatorStartTime4 := defaultGenesisTime.Add(1 * time.Second)
pendingValidatorEndTime4 := pendingValidatorStartTime4.Add(MinimumStakingDuration)
nodeIDKey4, _ := vm.factory.NewPrivateKey()
nodeID4 := nodeIDKey4.PublicKey().Address()
addPendingValidatorTx4, err := vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1,
defaultStakeAmount,
uint64(pendingValidatorStartTime4.Unix()),
uint64(pendingValidatorEndTime4.Unix()),
nodeID4,
nodeID4,
NumberOfShares,
testNetworkID,
defaultKey,
)
if err != nil {
t.Fatal(err)
}
txBytes4, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx4})
if err != nil {
t.Fatal(err)
}
args4 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes4}}
reply4 := IssueTxResponse{}
err = service.IssueTx(nil, &args4, &reply4)
if err != nil {
t.Fatal(err)
}
pendingValidatorStartTime5 := defaultGenesisTime.Add(50 * time.Second)
pendingValidatorEndTime5 := pendingValidatorStartTime5.Add(MinimumStakingDuration)
nodeIDKey5, _ := vm.factory.NewPrivateKey()
nodeID5 := nodeIDKey5.PublicKey().Address()
addPendingValidatorTx5, err := vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1,
defaultStakeAmount,
uint64(pendingValidatorStartTime5.Unix()),
uint64(pendingValidatorEndTime5.Unix()),
nodeID5,
nodeID5,
NumberOfShares,
testNetworkID,
defaultKey,
)
if err != nil {
t.Fatal(err)
}
txBytes5, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx5})
if err != nil {
t.Fatal(err)
}
args5 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes5}}
reply5 := IssueTxResponse{}
err = service.IssueTx(nil, &args5, &reply5)
if err != nil {
t.Fatal(err)
}
currentEvent := vm.unissuedEvents.Remove()
for vm.unissuedEvents.Len() > 0 {
nextEvent := vm.unissuedEvents.Remove()
if !currentEvent.StartTime().Before(nextEvent.StartTime()) {
t.Fatal("IssueTx does not keep event heap ordered")
}
currentEvent = nextEvent
}
}

View File

@ -4,7 +4,6 @@
package platformvm
import (
"container/heap"
"errors"
"net/http"
@ -174,8 +173,8 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl
return errAccountHasNoValue
}
accounts = append(accounts, newAccount(
account.Address, // ID
0, // nonce
account.Address, // ID
0, // nonce
uint64(account.Balance), // balance
))
}
@ -210,7 +209,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl
return err
}
heap.Push(validators, tx)
validators.Add(tx)
}
// Specify the chains that exist at genesis.

View File

@ -111,3 +111,77 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) {
t.Fatalf("Should have errored due to an invalid end time")
}
}
// TestBuildGenesisReturnsSortedValidators builds a genesis from three
// validators whose end times are given out of order (20, 15, 10) and checks
// that the resulting validator heap pops them in ascending end-time order.
func TestBuildGenesisReturnsSortedValidators(t *testing.T) {
	id := ids.NewShortID([20]byte{1})
	account := APIAccount{
		Address: id,
		Balance: 123456789,
	}

	weight := json.Uint64(987654321)
	// End times (20, 15, 10) are deliberately unsorted relative to insertion
	// order so that BuildGenesis, not the caller, must establish heap order.
	validator1 := APIDefaultSubnetValidator{
		APIValidator: APIValidator{
			StartTime: 0,
			EndTime:   20,
			Weight:    &weight,
			ID:        id,
		},
		Destination: id,
	}
	validator2 := APIDefaultSubnetValidator{
		APIValidator: APIValidator{
			StartTime: 3,
			EndTime:   15,
			Weight:    &weight,
			ID:        id,
		},
		Destination: id,
	}
	validator3 := APIDefaultSubnetValidator{
		APIValidator: APIValidator{
			StartTime: 1,
			EndTime:   10,
			Weight:    &weight,
			ID:        id,
		},
		Destination: id,
	}

	args := BuildGenesisArgs{
		Accounts: []APIAccount{
			account,
		},
		Validators: []APIDefaultSubnetValidator{
			validator1,
			validator2,
			validator3,
		},
		Time: 5,
	}

	reply := BuildGenesisReply{}
	ss := StaticService{}
	if err := ss.BuildGenesis(nil, &args, &reply); err != nil {
		t.Fatalf("BuildGenesis should not have errored")
	}

	genesis := &Genesis{}
	if err := Codec.Unmarshal(reply.Bytes.Bytes, genesis); err != nil {
		t.Fatal(err)
	}
	validators := genesis.Validators
	// Was `== 0`, which let a genesis with 1 or 2 validators slip through
	// despite the failure message; require exactly the 3 we supplied.
	if validators.Len() != 3 {
		t.Fatalf("Validators should contain 3 validators but contains %d", validators.Len())
	}

	// Popping the heap must yield non-decreasing end times (min heap).
	currentValidator := validators.Remove()
	for validators.Len() > 0 {
		nextValidator := validators.Remove()
		if currentValidator.EndTime().Unix() > nextValidator.EndTime().Unix() {
			t.Fatalf("Validators returned by genesis should be a min heap sorted by end time")
		}
		currentValidator = nextValidator
	}
}

View File

@ -4,7 +4,6 @@
package platformvm
import (
"container/heap"
"errors"
"fmt"
"time"
@ -698,7 +697,7 @@ func (vm *VM) resetTimer() {
vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeAddValidator
return
}
// If the tx doesn't meet the syncrony bound, drop it
// If the tx doesn't meet the synchrony bound, drop it
vm.unissuedEvents.Remove()
vm.Ctx.Log.Debug("dropping tx to add validator because its start time has passed")
}
@ -780,8 +779,8 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub
if timestamp.Before(nextTx.StartTime()) {
break
}
heap.Push(current, nextTx)
heap.Pop(pending)
current.Add(nextTx)
pending.Remove()
started.Add(nextTx.Vdr().ID())
}
return current, pending, started, stopped, nil

View File

@ -5,7 +5,6 @@ package platformvm
import (
"bytes"
"container/heap"
"errors"
"testing"
"time"
@ -193,6 +192,8 @@ func defaultVM() *VM {
panic("no subnets found")
} // end delete
vm.registerDBTypes()
return vm
}
@ -226,7 +227,7 @@ func GenesisCurrentValidators() *EventHeap {
testNetworkID, // network ID
key, // key paying tx fee and stake
)
heap.Push(validators, validator)
validators.Add(validator)
}
return validators
}
@ -1011,7 +1012,7 @@ func TestCreateSubnet(t *testing.T) {
t.Fatal(err)
}
vm.unissuedEvents.Push(addValidatorTx)
vm.unissuedEvents.Add(addValidatorTx)
blk, err = vm.BuildBlock() // should add validator to the new subnet
if err != nil {
t.Fatal(err)