Merge branch 'master' into editorconfig

Stephen Buttolph 2020-05-12 22:19:15 -04:00 committed by GitHub
commit 18533a87e3
57 changed files with 2192 additions and 436 deletions

View File

@ -301,6 +301,70 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp
return nil
}
// DeleteUserArgs are arguments for passing into DeleteUser requests
type DeleteUserArgs struct {
Username string `json:"username"`
Password string `json:"password"`
}
// DeleteUserReply is the response from calling DeleteUser
type DeleteUserReply struct {
Success bool `json:"success"`
}
// DeleteUser deletes the user with the provided username and password.
func (ks *Keystore) DeleteUser(_ *http.Request, args *DeleteUserArgs, reply *DeleteUserReply) error {
ks.lock.Lock()
defer ks.lock.Unlock()
ks.log.Verbo("DeleteUser called with %s", args.Username)
if args.Username == "" {
return errEmptyUsername
}
// check that the user exists and that the password is correct.
usr, err := ks.getUser(args.Username)
switch {
case err != nil || usr == nil:
return fmt.Errorf("user doesn't exist: %s", args.Username)
case !usr.CheckPassword(args.Password):
return fmt.Errorf("incorrect password for user %q", args.Username)
}
userNameBytes := []byte(args.Username)
userBatch := ks.userDB.NewBatch()
if err := userBatch.Delete(userNameBytes); err != nil {
return err
}
userDataDB := prefixdb.New(userNameBytes, ks.bcDB)
dataBatch := userDataDB.NewBatch()
it := userDataDB.NewIterator()
defer it.Release()
for it.Next() {
if err = dataBatch.Delete(it.Key()); err != nil {
return err
}
}
if err = it.Error(); err != nil {
return err
}
if err := atomic.WriteAll(dataBatch, userBatch); err != nil {
return err
}
// delete from users map.
delete(ks.users, args.Username)
reply.Success = true
return nil
}
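For context, a condensed lifecycle sketch of the new handler, mirroring the service test further below (direct in-process calls; the *http.Request argument is unused and may be nil; the username/password values are the illustrative ones from that test, and the same logging/memdb imports are assumed):
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
// Create a user, then delete it with the same credentials.
if err := ks.CreateUser(nil, &CreateUserArgs{Username: "testUser", Password: "passwTest@fake01ord"}, &CreateUserReply{}); err != nil {
	// handle error
}
reply := DeleteUserReply{}
if err := ks.DeleteUser(nil, &DeleteUserArgs{Username: "testUser", Password: "passwTest@fake01ord"}, &reply); err != nil {
	// handle error
}
// reply.Success is now true and the username is free to be created again.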
// NewBlockchainKeyStore ...
func (ks *Keystore) NewBlockchainKeyStore(blockchainID ids.ID) *BlockchainKeystore {
return &BlockchainKeystore{

View File

@ -7,6 +7,7 @@ import (
"bytes"
"fmt"
"math/rand"
"reflect"
"testing"
"github.com/ava-labs/gecko/database/memdb"
@ -280,3 +281,89 @@ func TestServiceExportImport(t *testing.T) {
}
}
}
func TestServiceDeleteUser(t *testing.T) {
testUser := "testUser"
password := "passwTest@fake01ord"
tests := []struct {
desc string
setup func(ks *Keystore) error
request *DeleteUserArgs
want *DeleteUserReply
wantError bool
}{{
desc: "empty user name case",
request: &DeleteUserArgs{},
wantError: true,
}, {
desc: "user not exists case",
request: &DeleteUserArgs{Username: "dummy"},
wantError: true,
}, {
desc: "user exists and invalid password case",
request: &DeleteUserArgs{Username: testUser, Password: "password"},
wantError: true,
}, {
desc: "user exists and valid password case",
setup: func(ks *Keystore) error {
return ks.CreateUser(nil, &CreateUserArgs{Username: testUser, Password: password}, &CreateUserReply{})
},
request: &DeleteUserArgs{Username: testUser, Password: password},
want: &DeleteUserReply{Success: true},
}, {
desc: "delete a user, imported from import api case",
setup: func(ks *Keystore) error {
reply := CreateUserReply{}
if err := ks.CreateUser(nil, &CreateUserArgs{Username: testUser, Password: password}, &reply); err != nil {
return err
}
// write some data to the user's database
db, err := ks.GetDatabase(ids.Empty, testUser, password)
if err != nil {
return err
}
if err := db.Put([]byte("hello"), []byte("world")); err != nil {
return err
}
return nil
},
request: &DeleteUserArgs{Username: testUser, Password: password},
want: &DeleteUserReply{Success: true},
}}
for _, tt := range tests {
t.Run(tt.desc, func(t *testing.T) {
ks := Keystore{}
ks.Initialize(logging.NoLog{}, memdb.New())
if tt.setup != nil {
if err := tt.setup(&ks); err != nil {
t.Fatalf("failed to create user setup in keystore: %v", err)
}
}
got := &DeleteUserReply{}
err := ks.DeleteUser(nil, tt.request, got)
if (err != nil) != tt.wantError {
t.Fatalf("DeleteUser() failed: error %v, wantError %v", err, tt.wantError)
}
if !tt.wantError && !reflect.DeepEqual(tt.want, got) {
t.Fatalf("DeleteUser() failed: got %v, want %v", got, tt.want)
}
if err == nil && got.Success { // delete is successful
if _, ok := ks.users[testUser]; ok {
t.Fatalf("DeleteUser() failed: expected the user %s should be delete from users map", testUser)
}
// after deletion, the username should be available for re-creation.
if err = ks.CreateUser(nil, &CreateUserArgs{Username: testUser, Password: password}, &CreateUserReply{}); err != nil {
t.Fatalf("failed to create user: %v", err)
}
}
})
}
}

View File

@ -1,5 +0,0 @@
#!/bin/sh
set -ex
openssl genrsa -out `dirname "$0"`/rootCA.key 4096
openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt

View File

@ -1,13 +0,0 @@
#!/bin/sh
set -ex
keypath=$GOPATH/src/github.com/ava-labs/gecko/keys
if test -f "$keypath/staker.key" || test -f "$keypath/staker.crt"; then
echo "staker.key or staker.crt already exists. Not generating new key/certificiate."
exit 1
fi
openssl genrsa -out `dirname "$0"`/staker.key 4096
openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr
openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256

View File

@ -1,34 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB
dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN
AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw
MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM
Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV
BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw
DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg
jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93
QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU
m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0
lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB
KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW
cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44
RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH
bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW
T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB
J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU
KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei
73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E
BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj
FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG
XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY
omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv
Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC
XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0
gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn
3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N
W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s
scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU
kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD
DB8IRfWqBx2nWw==
-----END CERTIFICATE-----

View File

@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1
6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ
mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h
Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL
AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk
tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd
CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu
TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV
Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll
JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt
RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA
AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg
O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6
WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc
fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o
WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y
243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM
Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv
/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF
2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3
wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R
WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1
POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC
T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW
jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc
23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK
XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl
jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+
/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P
rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl
KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD
E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C
cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE
r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu
GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy
7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr
RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF
SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor
Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY
KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t
Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM
/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6
YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt
I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy
+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f
UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER
KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW
MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe
f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA==
-----END RSA PRIVATE KEY-----

View File

@ -1 +0,0 @@
BAF3B5C5C6D0D166

View File

@ -10,6 +10,7 @@ import (
"net"
"os"
"path"
"path/filepath"
"strings"
"github.com/ava-labs/gecko/database/leveldb"
@ -19,23 +20,29 @@ import (
"github.com/ava-labs/gecko/nat"
"github.com/ava-labs/gecko/node"
"github.com/ava-labs/gecko/snow/networking/router"
"github.com/ava-labs/gecko/staking"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
"github.com/mitchellh/go-homedir"
)
const (
dbVersion = "v0.2.0"
defaultDbDir = "~/.gecko/db"
dbVersion = "v0.2.0"
)
// Results of parsing the CLI
var (
Config = node.Config{}
Err error
Config = node.Config{}
Err error
defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))
)
var (
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
)
// GetIPs returns the default IPs for each network
@ -54,17 +61,15 @@ func GetIPs(networkID uint32) []string {
}
}
var (
errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
)
// Parse the CLI arguments
func init() {
errs := &wrappers.Errs{}
defer func() { Err = errs.Err }()
loggingConfig, err := logging.DefaultConfig()
errs.Add(err)
if errs.Add(err); errs.Errored() {
return
}
fs := flag.NewFlagSet("gecko", flag.ContinueOnError)
@ -100,8 +105,8 @@ func init() {
// Staking:
consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "keys/staker.key", "TLS private key file for staking connections")
fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "keys/staker.crt", "TLS certificate file for staking connections")
fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")
// Plugins:
fs.StringVar(&Config.PluginDir, "plugin-dir", "./build/plugins", "Plugin directory for Ava VMs")
@ -142,22 +147,22 @@ func init() {
}
networkID, err := genesis.NetworkID(*networkName)
errs.Add(err)
if errs.Add(err); err != nil {
return
}
Config.NetworkID = networkID
// DB:
if *db && err == nil {
// TODO: Add better params here
if *dbDir == defaultDbDir {
if *dbDir, err = homedir.Expand(defaultDbDir); err != nil {
errs.Add(fmt.Errorf("couldn't resolve default db path: %v", err))
}
}
if *db {
*dbDir = os.ExpandEnv(*dbDir) // parse any env variables
dbPath := path.Join(*dbDir, genesis.NetworkName(Config.NetworkID), dbVersion)
db, err := leveldb.New(dbPath, 0, 0, 0)
if err != nil {
errs.Add(fmt.Errorf("couldn't create db at %s: %w", dbPath, err))
return
}
Config.DB = db
errs.Add(err)
} else {
Config.DB = memdb.New()
}
@ -169,7 +174,7 @@ func init() {
if *consensusIP == "" {
ip, err = Config.Nat.IP()
if err != nil {
ip = net.IPv4zero
ip = net.IPv4zero // Couldn't get my IP...set to 0.0.0.0
}
} else {
ip = net.ParseIP(*consensusIP)
@ -177,7 +182,9 @@ func init() {
if ip == nil {
errs.Add(fmt.Errorf("Invalid IP Address %s", *consensusIP))
return
}
Config.StakingIP = utils.IPDesc{
IP: ip,
Port: uint16(*consensusPort),
@ -190,7 +197,10 @@ func init() {
for _, ip := range strings.Split(*bootstrapIPs, ",") {
if ip != "" {
addr, err := utils.ToIPDesc(ip)
errs.Add(err)
if err != nil {
errs.Add(fmt.Errorf("couldn't parse ip: %w", err))
return
}
Config.BootstrapPeers = append(Config.BootstrapPeers, &node.Peer{
IP: addr,
})
@ -209,20 +219,27 @@ func init() {
cb58 := formatting.CB58{}
for _, id := range strings.Split(*bootstrapIDs, ",") {
if id != "" {
errs.Add(cb58.FromString(id))
cert, err := ids.ToShortID(cb58.Bytes)
errs.Add(err)
err = cb58.FromString(id)
if err != nil {
errs.Add(fmt.Errorf("couldn't parse bootstrap peer id to bytes: %w", err))
return
}
peerID, err := ids.ToShortID(cb58.Bytes)
if err != nil {
errs.Add(fmt.Errorf("couldn't parse bootstrap peer id: %w", err))
return
}
if len(Config.BootstrapPeers) <= i {
errs.Add(errBootstrapMismatch)
continue
return
}
Config.BootstrapPeers[i].ID = cert
Config.BootstrapPeers[i].ID = peerID
i++
}
}
if len(Config.BootstrapPeers) != i {
errs.Add(fmt.Errorf("More bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i))
return
}
} else {
for _, peer := range Config.BootstrapPeers {
@ -230,6 +247,27 @@ func init() {
}
}
// Staking
Config.StakingCertFile = os.ExpandEnv(Config.StakingCertFile) // parse any env variable
Config.StakingKeyFile = os.ExpandEnv(Config.StakingKeyFile)
switch {
// If staking key/cert locations are specified but not found, error
case Config.StakingKeyFile != defaultStakingKeyPath || Config.StakingCertFile != defaultStakingCertPath:
if _, err := os.Stat(Config.StakingKeyFile); os.IsNotExist(err) {
errs.Add(fmt.Errorf("couldn't find staking key at %s", Config.StakingKeyFile))
return
} else if _, err := os.Stat(Config.StakingCertFile); os.IsNotExist(err) {
errs.Add(fmt.Errorf("couldn't find staking certificate at %s", Config.StakingCertFile))
return
}
default:
// Only generates a staking key/cert if one doesn't already exist at the default paths
if err := staking.GenerateStakingKeyCert(Config.StakingKeyFile, Config.StakingCertFile); err != nil {
errs.Add(fmt.Errorf("couldn't generate staking key/cert: %w", err))
return
}
}
// HTTP:
Config.HTTPPort = uint16(*httpPort)
@ -238,14 +276,18 @@ func init() {
loggingConfig.Directory = *logsDir
}
logFileLevel, err := logging.ToLevel(*logLevel)
errs.Add(err)
if errs.Add(err); err != nil {
return
}
loggingConfig.LogLevel = logFileLevel
if *logDisplayLevel == "" {
*logDisplayLevel = *logLevel
}
displayLevel, err := logging.ToLevel(*logDisplayLevel)
errs.Add(err)
if errs.Add(err); err != nil {
return
}
loggingConfig.DisplayLevel = displayLevel
Config.LoggingConfig = loggingConfig

View File

@ -209,7 +209,7 @@ func (nm *Handshake) ConnectTo(peer salticidae.PeerID, stakerID ids.ShortID, add
return
}
nm.log.Info("Attempting to connect to %s", stakerID)
nm.log.Debug("attempting to connect to %s", stakerID)
nm.net.AddPeer(peer)
nm.net.SetPeerAddr(peer, addr)
@ -239,7 +239,7 @@ func (nm *Handshake) Connect(addr salticidae.NetAddr) {
}
if !nm.enableStaking {
nm.log.Info("Adding peer %s", ip)
nm.log.Debug("adding peer %s", ip)
peer := salticidae.NewPeerIDFromNetAddr(addr, true)
nm.ConnectTo(peer, toShortID(ip), addr)
@ -254,7 +254,7 @@ func (nm *Handshake) Connect(addr salticidae.NetAddr) {
return
}
nm.log.Info("Adding peer %s", ip)
nm.log.Debug("adding peer %s", ip)
count := new(int)
*count = 100
@ -281,7 +281,7 @@ func (nm *Handshake) Connect(addr salticidae.NetAddr) {
return
}
nm.log.Info("Attempting to discover peer at %s", ipStr)
nm.log.Debug("attempting to discover peer at %s", ipStr)
msgNet := nm.net.AsMsgNetwork()
msgNet.Connect(addr)
@ -374,7 +374,7 @@ func (nm *Handshake) SendVersion(peer salticidae.PeerID) error {
build := Builder{}
v, err := build.Version(nm.networkID, nm.clock.Unix(), toIPDesc(nm.myAddr), ClientVersion)
if err != nil {
return fmt.Errorf("packing Version failed due to %s", err)
return fmt.Errorf("packing version failed due to: %w", err)
}
nm.send(v, peer)
nm.numVersionSent.Inc()
@ -397,16 +397,16 @@ func (nm *Handshake) SendPeerList(peers ...salticidae.PeerID) error {
}
if len(ipsToSend) == 0 {
nm.log.Debug("No IPs to send to %d peer(s)", len(peers))
nm.log.Debug("no IPs to send to %d peer(s)", len(peers))
return nil
}
nm.log.Verbo("Sending %d ips to %d peer(s)", len(ipsToSend), len(peers))
nm.log.Verbo("sending %d ips to %d peer(s)", len(ipsToSend), len(peers))
build := Builder{}
pl, err := build.PeerList(ipsToSend)
if err != nil {
return fmt.Errorf("Packing Peerlist failed due to %w", err)
return fmt.Errorf("packing peer list failed due to: %w", err)
}
nm.send(pl, peers...)
nm.numPeerlistSent.Add(float64(len(peers)))
@ -449,7 +449,7 @@ func connHandler(_conn *C.struct_msgnetwork_conn_t, connected C.bool, _ unsafe.P
HandshakeNet.requestedTimeout.Remove(ipID)
if _, exists := HandshakeNet.requested[ipStr]; !exists {
HandshakeNet.log.Debug("connHandler called with %s", ip)
HandshakeNet.log.Debug("connHandler called with ip %s", ip)
return true
}
delete(HandshakeNet.requested, ipStr)
@ -476,7 +476,7 @@ func (nm *Handshake) connectedToPeer(conn *C.struct_peernetwork_conn_t, peer sal
cert = ids.NewShortID(key)
}
nm.log.Debug("Connected to %s", cert)
nm.log.Debug("connected to %s", cert)
nm.reconnectTimeout.Remove(peerID)
@ -494,10 +494,10 @@ func (nm *Handshake) disconnectedFromPeer(peer salticidae.PeerID) {
cert := ids.ShortID{}
if pendingCert, exists := nm.pending.GetID(peer); exists {
cert = pendingCert
nm.log.Info("Disconnected from pending peer %s", cert)
nm.log.Debug("disconnected from pending peer %s", cert)
} else if connectedCert, exists := nm.connections.GetID(peer); exists {
cert = connectedCert
nm.log.Info("Disconnected from peer %s", cert)
nm.log.Debug("disconnected from peer %s", cert)
} else {
return
}
@ -537,29 +537,29 @@ func (nm *Handshake) disconnectedFromPeer(peer salticidae.PeerID) {
// checkCompatibility checks that the peer and I speak the same language.
func (nm *Handshake) checkCompatibility(peerVersion string) bool {
if !strings.HasPrefix(peerVersion, VersionPrefix) {
nm.log.Warn("Peer attempted to connect with an invalid version prefix")
nm.log.Debug("peer attempted to connect with an invalid version prefix")
return false
}
peerVersion = peerVersion[len(VersionPrefix):]
splitPeerVersion := strings.SplitN(peerVersion, VersionSeparator, 3)
if len(splitPeerVersion) != 3 {
nm.log.Warn("Peer attempted to connect with an invalid number of subversions")
nm.log.Debug("peer attempted to connect with an invalid number of subversions")
return false
}
major, err := strconv.Atoi(splitPeerVersion[0])
if err != nil {
nm.log.Warn("Peer attempted to connect with an invalid major version")
nm.log.Debug("peer attempted to connect with an invalid major version")
return false
}
minor, err := strconv.Atoi(splitPeerVersion[1])
if err != nil {
nm.log.Warn("Peer attempted to connect with an invalid minor version")
nm.log.Debug("peer attempted to connect with an invalid minor version")
return false
}
patch, err := strconv.Atoi(splitPeerVersion[2])
if err != nil {
nm.log.Warn("Peer attempted to connect with an invalid patch version")
nm.log.Debug("peer attempted to connect with an invalid patch version")
return false
}
@ -568,7 +568,7 @@ func (nm *Handshake) checkCompatibility(peerVersion string) bool {
// peers major version is too low
return false
case major > MajorVersion:
nm.log.Warn("Peer attempted to connect with a higher major version, this client may need to be updated")
nm.log.Warn("peer attempted to connect with a higher major version, this client may need to be updated")
return false
}
@ -577,12 +577,12 @@ func (nm *Handshake) checkCompatibility(peerVersion string) bool {
// peers minor version is too low
return false
case minor > MinorVersion:
nm.log.Warn("Peer attempted to connect with a higher minor version, this client may need to be updated")
nm.log.Warn("peer attempted to connect with a higher minor version, this client may need to be updated")
return false
}
if patch > PatchVersion {
nm.log.Warn("Peer is connecting with a higher patch version, this client may need to be updated")
nm.log.Warn("peer is connecting with a higher patch version, this client may need to be updated")
}
return true
}
@ -612,7 +612,7 @@ func unknownPeerHandler(_addr *C.netaddr_t, _cert *C.x509_t, _ unsafe.Pointer) {
addr := salticidae.NetAddrFromC(salticidae.CNetAddr(_addr)).Copy(true)
ip := toIPDesc(addr)
HandshakeNet.log.Info("Adding peer %s", ip)
HandshakeNet.log.Debug("adding peer at %s", ip)
var peer salticidae.PeerID
var id ids.ShortID
@ -685,7 +685,7 @@ func version(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.P
id, exists := HandshakeNet.pending.GetID(peer)
if !exists {
HandshakeNet.log.Warn("Dropping Version message because the peer isn't pending")
HandshakeNet.log.Debug("dropping Version message because the peer isn't pending")
return
}
HandshakeNet.pending.Remove(peer, id)
@ -693,14 +693,14 @@ func version(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.P
build := Builder{}
pMsg, err := build.Parse(Version, msg.GetPayloadByMove())
if err != nil {
HandshakeNet.log.Warn("Failed to parse Version message")
HandshakeNet.log.Debug("failed to parse Version message")
HandshakeNet.net.DelPeer(peer)
return
}
if networkID := pMsg.Get(NetworkID).(uint32); networkID != HandshakeNet.networkID {
HandshakeNet.log.Warn("Peer's network ID doesn't match our networkID: Peer's = %d ; Ours = %d", networkID, HandshakeNet.networkID)
HandshakeNet.log.Debug("peer's network ID doesn't match our networkID: Peer's = %d ; Ours = %d", networkID, HandshakeNet.networkID)
HandshakeNet.net.DelPeer(peer)
return
@ -708,14 +708,14 @@ func version(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.P
myTime := float64(HandshakeNet.clock.Unix())
if peerTime := float64(pMsg.Get(MyTime).(uint64)); math.Abs(peerTime-myTime) > MaxClockDifference.Seconds() {
HandshakeNet.log.Warn("Peer's clock is too far out of sync with mine. His = %d, Mine = %d (seconds)", uint64(peerTime), uint64(myTime))
HandshakeNet.log.Debug("peer's clock is too far out of sync with mine. Peer's = %d, Ours = %d (seconds)", uint64(peerTime), uint64(myTime))
HandshakeNet.net.DelPeer(peer)
return
}
if peerVersion := pMsg.Get(VersionStr).(string); !HandshakeNet.checkCompatibility(peerVersion) {
HandshakeNet.log.Debug("Dropping connection due to an incompatible version from peer")
HandshakeNet.log.Debug("peer version, %s, is not compatible. dropping connection.", peerVersion)
HandshakeNet.net.DelPeer(peer)
return
@ -774,7 +774,7 @@ func peerList(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.
build := Builder{}
pMsg, err := build.Parse(PeerList, msg.GetPayloadByMove())
if err != nil {
HandshakeNet.log.Warn("Failed to parse PeerList message due to %s", err)
HandshakeNet.log.Debug("failed to parse PeerList message due to %s", err)
// TODO: What should we do here?
return
}

View File

@ -109,7 +109,7 @@ func (s *Voting) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
peers = append(peers, peer)
s.log.Verbo("Sending a GetAcceptedFrontier to %s", vID)
} else {
s.log.Debug("Attempted to send a GetAcceptedFrontier message to a disconnected validator: %s", vID)
s.log.Debug("attempted to send a GetAcceptedFrontier message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.GetAcceptedFrontierFailed(vID, chainID, requestID) })
}
}
@ -134,14 +134,14 @@ func (s *Voting) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID,
func (s *Voting) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("Attempted to send an AcceptedFrontier message to a disconnected validator: %s", validatorID)
s.log.Debug("attempted to send an AcceptedFrontier message to disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.AcceptedFrontier(chainID, requestID, containerIDs)
if err != nil {
s.log.Error("Attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d", containerIDs.Len())
s.log.Error("attempted to pack too large of an AcceptedFrontier message.\nNumber of containerIDs: %d", containerIDs.Len())
return // Packing message failed
}
@ -167,9 +167,9 @@ func (s *Voting) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestI
vID := validatorID
if peer, exists := s.conns.GetPeerID(validatorID); exists {
peers = append(peers, peer)
s.log.Verbo("Sending a GetAccepted to %s", vID)
s.log.Verbo("sending a GetAccepted to %s", vID)
} else {
s.log.Debug("Attempted to send a GetAccepted message to a disconnected validator: %s", vID)
s.log.Debug("attempted to send a GetAccepted message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.GetAcceptedFailed(vID, chainID, requestID) })
}
}
@ -182,7 +182,7 @@ func (s *Voting) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestI
s.executor.Add(func() { s.router.GetAcceptedFailed(validatorID, chainID, requestID) })
}
}
s.log.Debug("Attempted to pack too large of a GetAccepted message.\nNumber of containerIDs: %d", containerIDs.Len())
s.log.Debug("attempted to pack too large of a GetAccepted message.\nNumber of containerIDs: %d", containerIDs.Len())
return // Packing message failed
}
@ -204,14 +204,14 @@ func (s *Voting) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestI
func (s *Voting) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("Attempted to send an Accepted message to a disconnected validator: %s", validatorID)
s.log.Debug("attempted to send an Accepted message to a disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.Accepted(chainID, requestID, containerIDs)
if err != nil {
s.log.Error("Attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d", containerIDs.Len())
s.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d", containerIDs.Len())
return // Packing message failed
}
@ -233,8 +233,8 @@ func (s *Voting) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uin
func (s *Voting) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("Attempted to send a Get message to a disconnected validator: %s", validatorID)
s.executor.Add(func() { s.router.GetFailed(validatorID, chainID, requestID, containerID) })
s.log.Debug("attempted to send a Get message to a disconnected validator: %s", validatorID)
s.executor.Add(func() { s.router.GetFailed(validatorID, chainID, requestID) })
return // Validator is not connected
}
@ -260,14 +260,14 @@ func (s *Voting) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32,
func (s *Voting) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("Attempted to send a Container message to a disconnected validator: %s", validatorID)
s.log.Debug("attempted to send a Container message to a disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.Put(chainID, requestID, containerID, container)
if err != nil {
s.log.Error("Attempted to pack too large of a Put message.\nContainer length: %d", len(container))
s.log.Error("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
return // Packing message failed
}
@ -297,7 +297,7 @@ func (s *Voting) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
peers = append(peers, peer)
s.log.Verbo("Sending a PushQuery to %s", vID)
} else {
s.log.Debug("Attempted to send a PushQuery message to a disconnected validator: %s", vID)
s.log.Debug("attempted to send a PushQuery message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) })
}
}
@ -310,7 +310,7 @@ func (s *Voting) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
s.executor.Add(func() { s.router.QueryFailed(validatorID, chainID, requestID) })
}
}
s.log.Error("Attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
s.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container))
return // Packing message failed
}
@ -340,7 +340,7 @@ func (s *Voting) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
peers = append(peers, peer)
s.log.Verbo("Sending a PullQuery to %s", vID)
} else {
s.log.Warn("Attempted to send a PullQuery message to a disconnected validator: %s", vID)
s.log.Debug("attempted to send a PullQuery message to a disconnected validator: %s", vID)
s.executor.Add(func() { s.router.QueryFailed(vID, chainID, requestID) })
}
}
@ -367,14 +367,14 @@ func (s *Voting) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID
func (s *Voting) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) {
peer, exists := s.conns.GetPeerID(validatorID)
if !exists {
s.log.Debug("Attempted to send a Chits message to a disconnected validator: %s", validatorID)
s.log.Debug("attempted to send a Chits message to a disconnected validator: %s", validatorID)
return // Validator is not connected
}
build := Builder{}
msg, err := build.Chits(chainID, requestID, votes)
if err != nil {
s.log.Error("Attempted to pack too large of a Chits message.\nChits length: %d", votes.Len())
s.log.Error("attempted to pack too large of a Chits message.\nChits length: %d", votes.Len())
return // Packing message failed
}
@ -395,7 +395,7 @@ func (s *Voting) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32
// Gossip attempts to gossip the container to the network
func (s *Voting) Gossip(chainID, containerID ids.ID, container []byte) {
if err := s.gossip(chainID, containerID, container); err != nil {
s.log.Error("Error gossiping container %s to %s\n%s", containerID, chainID, err)
s.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err)
}
}
@ -433,7 +433,7 @@ func (s *Voting) gossip(chainID, containerID ids.ID, container []byte) error {
build := Builder{}
msg, err := build.Put(chainID, math.MaxUint32, containerID, container)
if err != nil {
return fmt.Errorf("Attempted to pack too large of a Put message.\nContainer length: %d: %w", len(container), err)
return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container))
}
s.log.Verbo("Sending a Put message to peers."+
@ -459,7 +459,7 @@ func getAcceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t
validatorID, chainID, requestID, _, err := VotingNet.sanitize(_msg, _conn, GetAcceptedFrontier)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize getAcceptedFrontier message due to: %s", err)
return
}
@ -473,7 +473,7 @@ func acceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, AcceptedFrontier)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize acceptedFrontier message due to: %s", err)
return
}
@ -481,7 +481,7 @@ func acceptedFrontier(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
VotingNet.log.Warn("Error parsing ContainerID: %v", containerIDBytes)
VotingNet.log.Debug("error parsing ContainerID %v: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
@ -497,7 +497,7 @@ func getAccepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsa
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, GetAccepted)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize getAccepted message due to: %s", err)
return
}
@ -505,7 +505,7 @@ func getAccepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsa
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
VotingNet.log.Warn("Error parsing ContainerID: %v", containerIDBytes)
VotingNet.log.Debug("error parsing ContainerID %v: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
@ -521,7 +521,7 @@ func accepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Accepted)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize accepted message due to: %s", err)
return
}
@ -529,7 +529,7 @@ func accepted(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.
for _, containerIDBytes := range msg.Get(ContainerIDs).([][]byte) {
containerID, err := ids.ToID(containerIDBytes)
if err != nil {
VotingNet.log.Warn("Error parsing ContainerID: %v", containerIDBytes)
VotingNet.log.Debug("error parsing ContainerID %v: %s", containerIDBytes, err)
return
}
containerIDs.Add(containerID)
@ -545,7 +545,7 @@ func get(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Point
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Get)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize get message due to: %s", err)
return
}
@ -561,7 +561,7 @@ func put(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Point
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Put)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize put message due to: %s", err)
return
}
@ -579,7 +579,7 @@ func pushQuery(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, PushQuery)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize pushQuery message due to: %s", err)
return
}
@ -597,7 +597,7 @@ func pullQuery(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, PullQuery)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize pullQuery message due to: %s", err)
return
}
@ -613,7 +613,7 @@ func chits(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Poi
validatorID, chainID, requestID, msg, err := VotingNet.sanitize(_msg, _conn, Chits)
if err != nil {
VotingNet.log.Error("Failed to sanitize message due to: %s", err)
VotingNet.log.Debug("failed to sanitize chits message due to: %s", err)
return
}
@ -621,7 +621,7 @@ func chits(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Poi
for _, voteBytes := range msg.Get(ContainerIDs).([][]byte) {
vote, err := ids.ToID(voteBytes)
if err != nil {
VotingNet.log.Warn("Error parsing chit: %v", voteBytes)
VotingNet.log.Debug("error parsing chit %v: %s", voteBytes, err)
return
}
votes.Add(vote)
@ -637,16 +637,16 @@ func (s *Voting) sanitize(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_
validatorID, exists := s.conns.GetID(peer)
if !exists {
return ids.ShortID{}, ids.ID{}, 0, nil, fmt.Errorf("message received from an un-registered peer")
return ids.ShortID{}, ids.ID{}, 0, nil, fmt.Errorf("received message from un-registered peer %s", validatorID)
}
s.log.Verbo("Receiving message from %s", validatorID)
s.log.Verbo("received message from %s", validatorID)
msg := salticidae.MsgFromC(salticidae.CMsg(_msg))
codec := Codec{}
pMsg, err := codec.Parse(op, msg.GetPayloadByMove())
if err != nil {
return ids.ShortID{}, ids.ID{}, 0, nil, err // The message couldn't be parsed
return ids.ShortID{}, ids.ID{}, 0, nil, fmt.Errorf("couldn't parse payload: %w", err) // The message couldn't be parsed
}
chainID, err := ids.ToID(pMsg.Get(ChainID).([]byte))

View File

@ -4,7 +4,7 @@
package node
// #include "salticidae/network.h"
// void onTerm(int sig, void *);
// void onTerm(threadcall_handle_t *, void *);
// void errorHandler(SalticidaeCError *, bool, int32_t, void *);
import "C"
@ -14,6 +14,7 @@ import (
"errors"
"fmt"
"io/ioutil"
"os"
"path"
"sync"
"unsafe"
@ -35,6 +36,7 @@ import (
"github.com/ava-labs/gecko/networking/xputtest"
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
@ -92,6 +94,10 @@ type Node struct {
// Event loop manager
EC salticidae.EventContext
// Caller to the event context
TCall salticidae.ThreadCall
// Network that manages validator peers
PeerNet salticidae.PeerNetwork
// Network that manages clients
@ -115,6 +121,9 @@ type Node struct {
// This node's configuration
Config *Config
// channel for closing the node
nodeCloser chan<- os.Signal
}
/*
@ -124,7 +133,7 @@ type Node struct {
*/
//export onTerm
func onTerm(C.int, unsafe.Pointer) {
func onTerm(*C.threadcall_handle_t, unsafe.Pointer) {
MainNode.Log.Debug("Terminate signal received")
MainNode.EC.Stop()
}
@ -143,12 +152,11 @@ func errorHandler(_err *C.struct_SalticidaeCError, fatal C.bool, asyncID C.int32
func (n *Node) initNetlib() error {
// Create main event context
n.EC = salticidae.NewEventContext()
n.TCall = salticidae.NewThreadCall(n.EC)
// Set up interrupt signal and terminate signal handlers
evInt := salticidae.NewSigEvent(n.EC, salticidae.SigEventCallback(C.onTerm), nil)
evInt.Add(salticidae.SIGINT)
evTerm := salticidae.NewSigEvent(n.EC, salticidae.SigEventCallback(C.onTerm), nil)
evTerm.Add(salticidae.SIGTERM)
n.nodeCloser = utils.HandleSignals(func(os.Signal) {
n.TCall.AsyncCall(salticidae.ThreadCallCallback(C.onTerm), nil)
}, os.Interrupt, os.Kill)
// Create peer network config, may have tls enabled
peerConfig := salticidae.NewPeerNetworkConfig()
@ -655,4 +663,5 @@ func (n *Node) Shutdown() {
n.ValidatorAPI.Shutdown()
n.ConsensusAPI.Shutdown()
n.chainManager.Shutdown()
utils.ClearSignals(n.nodeCloser)
}
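For context, a hedged sketch of the HandleSignals/ClearSignals pairing introduced above, assuming (from this diff) that utils.HandleSignals registers the callback for the listed signals and returns the write-only channel that utils.ClearSignals later deregisters:
closer := utils.HandleSignals(func(os.Signal) {
	// react to the signal, e.g. schedule an event-loop stop via a thread call
}, os.Interrupt, os.Kill)
// ... run the node ...
utils.ClearSignals(closer) // on shutdown, as in Shutdown above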

View File

@ -7,8 +7,8 @@
vars:
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
repo_name: ava-labs/gecko-internal
repo_branch: platformvm-proposal-accept
repo_name: ava-labs/gecko
repo_branch: master
roles:
- name: ava-stop
- name: ava-build

View File

@ -1,3 +1,3 @@
- name: Kill Node
command: killall ava
command: killall -SIGINT ava
ignore_errors: true

View File

@ -2,8 +2,8 @@ borealis_bootstrap:
hosts:
bootstrap1:
ansible_host: 3.84.129.247
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker1.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker1.crt"
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker1.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker1.crt"
vars:
ansible_connection: ssh
ansible_user: ubuntu
@ -44,20 +44,20 @@ borealis_node:
hosts:
node1:
ansible_host: 35.153.99.244
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker2.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker2.crt"
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker2.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker2.crt"
node2:
ansible_host: 34.201.137.119
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker3.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker3.crt"
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker3.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker3.crt"
node3:
ansible_host: 54.146.1.110
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker4.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker4.crt"
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker4.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker4.crt"
node4:
ansible_host: 54.91.255.231
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker5.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker5.crt"
staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker5.key"
staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker5.crt"
vars:
ansible_connection: ssh
ansible_user: ubuntu

View File

@ -7,8 +7,8 @@
vars:
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
repo_name: ava-labs/gecko-internal
repo_branch: platformvm-proposal-accept
repo_name: ava-labs/gecko
repo_branch: master
roles:
- name: ava-stop
- name: ava-build

View File

@ -1,4 +1,8 @@
#!/bin/bash -e
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
# Ted: contact me when you make any changes

View File

@ -1,4 +1,9 @@
#!/bin/bash -e
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
SRC_DIR="$(dirname "${BASH_SOURCE[0]}")"
export GOPATH="$SRC_DIR/.build_image_gopath"
WORKPREFIX="$GOPATH/src/github.com/ava-labs/"

View File

@ -1,4 +1,8 @@
#!/bin/bash -e
#!/bin/bash
set -o errexit
set -o nounset
set -o pipefail
# Ted: contact me when you make any changes

View File

@ -16,7 +16,7 @@ type snowmanBlock struct {
// block that this node contains. For the genesis, this value will be nil
blk Block
// shouldFalter is set to true if this node, and all its decendants received
// shouldFalter is set to true if this node, and all its descendants received
// less than Alpha votes
shouldFalter bool

View File

@ -149,7 +149,7 @@ func (ts *Topological) Preference() ids.ID { return ts.tail }
// During the sort, votes are pushed towards the genesis. To prevent iterating
// over all blocks that had unsuccessful polls, we set a flag on the block to
// know that any future traversal through that block should register an
// unsuccessful poll on that block and every decendant block.
// unsuccessful poll on that block and every descendant block.
//
// The complexity of this function is:
// - Runtime = 3 * |live set| + |votes|
@ -408,7 +408,7 @@ func (ts *Topological) getPreferredDecendent(blkID ids.ID) ids.ID {
// accept the preferred child of the provided snowman block. By accepting the
// preferred child, all other children will be rejected. When these children are
// rejected, all their decendants will be rejected.
// rejected, all their descendants will be rejected.
func (ts *Topological) accept(n *snowmanBlock) {
// We are finalizing the block's child, so we need to get the preference
pref := n.sb.Preference()
@ -451,11 +451,11 @@ func (ts *Topological) accept(n *snowmanBlock) {
rejects = append(rejects, childID)
}
// reject all the decendants of the blocks we just rejected
// reject all the descendants of the blocks we just rejected
ts.rejectTransitively(rejects)
}
// Takes in a list of rejected ids and rejects all decendants of these IDs
// Takes in a list of rejected ids and rejects all descendants of these IDs
func (ts *Topological) rejectTransitively(rejected []ids.ID) {
// the rejected array is treated as a queue, with the next element at index
// 0 and the last element at the end of the slice.

View File

@ -30,6 +30,9 @@ type bootstrapper struct {
metrics
common.Bootstrapper
// vtxReqs prevents asking validators for the same vertex
vtxReqs common.Requests
pending ids.Set
finished bool
onFinished func()
@ -90,16 +93,22 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) {
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) {
b.BootstrapConfig.Context.Log.Verbo("Put called for vertexID %s", vtxID)
if !b.pending.Contains(vtxID) {
vtx, err := b.State.ParseVertex(vtxBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: vtxBytes})
b.GetFailed(vdr, requestID)
return
}
vtx, err := b.State.ParseVertex(vtxBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s",
err,
if !b.pending.Contains(vtx.ID()) {
b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested vertex:\n%s",
vdr,
formatting.DumpBytes{Bytes: vtxBytes})
b.GetFailed(vdr, requestID, vtxID)
b.GetFailed(vdr, requestID)
return
}
@ -107,7 +116,16 @@ func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxB
}
// GetFailed ...
func (b *bootstrapper) GetFailed(_ ids.ShortID, _ uint32, vtxID ids.ID) { b.sendRequest(vtxID) }
func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) {
vtxID, ok := b.vtxReqs.Remove(vdr, requestID)
if !ok {
b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
vdr)
return
}
b.sendRequest(vtxID)
}
func (b *bootstrapper) fetch(vtxID ids.ID) {
if b.pending.Contains(vtxID) {
@ -131,6 +149,9 @@ func (b *bootstrapper) sendRequest(vtxID ids.ID) {
validatorID := validators[0].ID()
b.RequestID++
b.vtxReqs.RemoveAny(vtxID)
b.vtxReqs.Add(validatorID, b.RequestID, vtxID)
b.pending.Add(vtxID)
b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, vtxID)
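Taken together, the request bookkeeping added to this file follows one lifecycle; a condensed sketch using only the common.Requests methods exercised in this diff:
// When sending: drop any stale request for the vertex, then record the
// (validator, requestID) pair that maps back to it.
b.RequestID++
b.vtxReqs.RemoveAny(vtxID)
b.vtxReqs.Add(validatorID, b.RequestID, vtxID)
b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, vtxID)
// On GetFailed: the same pair recovers which vertex to re-request.
if vtxID, ok := b.vtxReqs.Remove(vdr, requestID); ok {
	b.sendRequest(vtxID)
}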

View File

@ -289,7 +289,6 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
bs.ForceAccepted(acceptedIDs)
state.getVertex = nil
sender.GetF = nil
state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) {
switch {
@ -1008,3 +1007,111 @@ func TestBootstrapperPartialFetch(t *testing.T) {
t.Fatalf("wrong number pending")
}
}
func TestBootstrapperWrongIDByzantineResponse(t *testing.T) {
config, peerID, sender, state, _ := newConfig(t)
vtxID0 := ids.Empty.Prefix(0)
vtxID1 := ids.Empty.Prefix(1)
vtxBytes0 := []byte{0}
vtxBytes1 := []byte{1}
vtx0 := &Vtx{
id: vtxID0,
height: 0,
status: choices.Processing,
bytes: vtxBytes0,
}
vtx1 := &Vtx{
id: vtxID1,
height: 0,
status: choices.Processing,
bytes: vtxBytes1,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
acceptedIDs := ids.Set{}
acceptedIDs.Add(
vtxID0,
)
state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
switch {
case vtxID.Equals(vtxID0):
return nil, errUnknownVertex
default:
t.Fatal(errUnknownVertex)
panic(errUnknownVertex)
}
}
requestID := new(uint32)
sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(vtxID0):
default:
t.Fatalf("Requested unknown vertex")
}
*requestID = reqID
}
bs.ForceAccepted(acceptedIDs)
state.getVertex = nil
sender.GetF = nil
state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(vtxBytes, vtxBytes0):
return vtx0, nil
case bytes.Equal(vtxBytes, vtxBytes1):
return vtx1, nil
}
t.Fatal(errParsedUnknownVertex)
return nil, errParsedUnknownVertex
}
state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
switch {
case vtxID.Equals(vtxID0):
return vtx0, nil
case vtxID.Equals(vtxID1):
return vtx1, nil
default:
t.Fatal(errUnknownVertex)
panic(errUnknownVertex)
}
}
finished := new(bool)
bs.onFinished = func() { *finished = true }
sender.CantGet = false
bs.Put(peerID, *requestID, vtxID0, vtxBytes1)
sender.CantGet = true
bs.Put(peerID, *requestID, vtxID0, vtxBytes0)
state.parseVertex = nil
state.edge = nil
bs.onFinished = nil
if !*finished {
t.Fatalf("Bootstrapping should have finished")
}
if vtx0.Status() != choices.Accepted {
t.Fatalf("Vertex should be accepted")
}
if vtx1.Status() != choices.Processing {
t.Fatalf("Vertex should be processing")
}
}

View File

@ -76,10 +76,8 @@ func (i *issuer) Update() {
}
i.t.RequestID++
polled := false
if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) {
i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes())
polled = true
} else if numVdrs < p.K {
i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID)
}
@ -89,9 +87,7 @@ func (i *issuer) Update() {
i.t.txBlocked.Fulfill(tx.ID())
}
if polled && len(i.t.polls.m) < i.t.Params.ConcurrentRepolls {
i.t.repoll()
}
i.t.repoll()
}
type vtxIssuer struct{ i *issuer }

View File

@ -23,8 +23,10 @@ type Transitive struct {
polls polls // track people I have asked for their preference
// vtxReqs prevents asking validators for the same vertex
vtxReqs common.Requests
// missingTxs tracks transactions that are missing
vtxReqs, missingTxs, pending ids.Set
missingTxs, pending ids.Set
// vtxBlocked tracks operations that are blocked on vertices
// txBlocked tracks operations that are blocked on transactions
@ -60,6 +62,8 @@ func (t *Transitive) finishBootstrapping() {
}
t.Consensus.Initialize(t.Config.Context, t.Params, frontier)
t.bootstrapped = true
t.Config.Context.Log.Info("Bootstrapping finished with %d vertices in the accepted frontier", len(frontier))
}
// Gossip implements the Engine interface
@ -110,25 +114,30 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxByt
vtx, err := t.Config.State.ParseVertex(vtxBytes)
if err != nil {
t.Config.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s",
t.Config.Context.Log.Debug("ParseVertex failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: vtxBytes})
t.GetFailed(vdr, requestID, vtxID)
t.GetFailed(vdr, requestID)
return
}
t.insertFrom(vdr, vtx)
}
// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32, vtxID ids.ID) {
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) {
if !t.bootstrapped {
t.bootstrapper.GetFailed(vdr, requestID, vtxID)
t.bootstrapper.GetFailed(vdr, requestID)
return
}
vtxID, ok := t.vtxReqs.Remove(vdr, requestID)
if !ok {
t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
vdr)
return
}
t.pending.Remove(vtxID)
t.vtxBlocked.Abandon(vtxID)
t.vtxReqs.Remove(vtxID)
if t.vtxReqs.Len() == 0 {
for _, txID := range t.missingTxs.List() {
@ -140,7 +149,6 @@ func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32, vtxID ids.ID)
// Track performance statistics
t.numVtxRequests.Set(float64(t.vtxReqs.Len()))
t.numTxRequests.Set(float64(t.missingTxs.Len()))
t.numBlockedVtx.Set(float64(t.pending.Len()))
}
// PullQuery implements the Engine interface
@ -165,20 +173,28 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID)
}
// PushQuery implements the Engine interface
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtx []byte) {
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxBytes []byte) {
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", vtxID)
return
}
t.Put(vdr, requestID, vtxID, vtx)
t.PullQuery(vdr, requestID, vtxID)
vtx, err := t.Config.State.ParseVertex(vtxBytes)
if err != nil {
t.Config.Context.Log.Warn("ParseVertex failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: vtxBytes})
return
}
t.insertFrom(vdr, vtx)
t.PullQuery(vdr, requestID, vtx.ID())
}
// Chits implements the Engine interface
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping Chits due to bootstrapping")
t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
return
}
@ -218,8 +234,16 @@ func (t *Transitive) Notify(msg common.Message) {
}
func (t *Transitive) repoll() {
if len(t.polls.m) >= t.Params.ConcurrentRepolls {
return
}
txs := t.Config.VM.PendingTxs()
t.batch(txs, false /*=force*/, true /*=empty*/)
for i := len(t.polls.m); i < t.Params.ConcurrentRepolls; i++ {
t.batch(nil, false /*=force*/, true /*=empty*/)
}
}
func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) bool {
@ -264,7 +288,7 @@ func (t *Transitive) insert(vtx avalanche.Vertex) {
vtxID := vtx.ID()
t.pending.Add(vtxID)
t.vtxReqs.Remove(vtxID)
t.vtxReqs.RemoveAny(vtxID)
i := &issuer{
t: t,
@ -393,10 +417,10 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) {
return
}
t.vtxReqs.Add(vtxID)
t.RequestID++
t.vtxReqs.Add(vdr, t.RequestID, vtxID)
t.Config.Sender.Get(vdr, t.RequestID, vtxID)
t.numVtxRequests.Set(float64(t.vtxReqs.Len())) // Tracks performance statistics
t.RequestID++
t.Config.Sender.Get(vdr, t.RequestID, vtxID)
}

View File

@ -40,6 +40,7 @@ func TestEngineShutdown(t *testing.T) {
t.Fatal("Shutting down the Transitive did not shutdown the VM")
}
}
func TestEngineAdd(t *testing.T) {
config := DefaultConfig()
@ -85,7 +86,9 @@ func TestEngineAdd(t *testing.T) {
}
asked := new(bool)
sender.GetF = func(inVdr ids.ShortID, _ uint32, vtxID ids.ID) {
reqID := new(uint32)
sender.GetF = func(inVdr ids.ShortID, requestID uint32, vtxID ids.ID) {
*reqID = requestID
if *asked {
t.Fatalf("Asked multiple times")
}
@ -119,7 +122,7 @@ func TestEngineAdd(t *testing.T) {
st.parseVertex = func(b []byte) (avalanche.Vertex, error) { return nil, errFailedParsing }
te.Put(vdr.ID(), 0, vtx.parents[0].ID(), nil)
te.Put(vdr.ID(), *reqID, vtx.parents[0].ID(), nil)
st.parseVertex = nil
@ -485,7 +488,9 @@ func TestEngineMultipleQuery(t *testing.T) {
}
asked := new(bool)
sender.GetF = func(inVdr ids.ShortID, _ uint32, vtxID ids.ID) {
reqID := new(uint32)
sender.GetF = func(inVdr ids.ShortID, requestID uint32, vtxID ids.ID) {
*reqID = requestID
if *asked {
t.Fatalf("Asked multiple times")
}
@ -512,7 +517,7 @@ func TestEngineMultipleQuery(t *testing.T) {
// Should be dropped because the query was marked as failed
te.Chits(vdr1.ID(), *queryRequestID, s0)
te.GetFailed(vdr0.ID(), 0, vtx1.ID())
te.GetFailed(vdr0.ID(), *reqID)
if vtx0.Status() != choices.Accepted {
t.Fatalf("Should have executed vertex")
@ -598,6 +603,12 @@ func TestEngineAbandonResponse(t *testing.T) {
st := &stateTest{t: t}
config.State = st
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
sender.Default(true)
gVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
@ -629,8 +640,13 @@ func TestEngineAbandonResponse(t *testing.T) {
te.Initialize(config)
te.finishBootstrapping()
reqID := new(uint32)
sender.GetF = func(vID ids.ShortID, requestID uint32, vtxID ids.ID) {
*reqID = requestID
}
te.PullQuery(vdr.ID(), 0, vtx.ID())
te.GetFailed(vdr.ID(), 0, vtx.ID())
te.GetFailed(vdr.ID(), *reqID)
if len(te.vtxBlocked) != 0 {
t.Fatalf("Should have removed blocking event")
@ -2098,7 +2114,7 @@ func TestEngineReissueAbortedVertex(t *testing.T) {
sender.GetF = nil
st.parseVertex = nil
te.GetFailed(vdrID, *requestID, vtxID0)
te.GetFailed(vdrID, *requestID)
requested := new(bool)
sender.GetF = func(_ ids.ShortID, _ uint32, vtxID ids.ID) {
@ -2587,3 +2603,375 @@ func TestEngineGossip(t *testing.T) {
t.Fatalf("Should have gossiped the vertex")
}
}
func TestEngineInvalidVertexIgnoredFromUnexpectedPeer(t *testing.T) {
config := DefaultConfig()
vdr := validators.GenerateRandomValidator(1)
secondVdr := validators.GenerateRandomValidator(1)
vals := validators.NewSet()
config.Validators = vals
vals.Add(vdr)
vals.Add(secondVdr)
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
st := &stateTest{t: t}
config.State = st
gVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
bytes: []byte{0},
}
vts := []avalanche.Vertex{gVtx}
utxos := []ids.ID{GenerateID(), GenerateID()}
tx0 := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx0.Ins.Add(utxos[0])
tx1 := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx1.Ins.Add(utxos[1])
vtx0 := &Vtx{
parents: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Unknown,
bytes: []byte{1},
}
vtx1 := &Vtx{
parents: []avalanche.Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 2,
status: choices.Processing,
bytes: []byte{2},
}
te := &Transitive{}
te.Initialize(config)
te.finishBootstrapping()
parsed := new(bool)
st.parseVertex = func(b []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(b, vtx1.Bytes()):
*parsed = true
return vtx1, nil
}
return nil, errUnknownVertex
}
st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
if !*parsed {
return nil, errUnknownVertex
}
switch {
case vtxID.Equals(vtx1.ID()):
return vtx1, nil
}
return nil, errUnknownVertex
}
reqID := new(uint32)
sender.GetF = func(reqVdr ids.ShortID, requestID uint32, vtxID ids.ID) {
*reqID = requestID
if !reqVdr.Equals(vdr.ID()) {
t.Fatalf("Wrong validator requested")
}
if !vtxID.Equals(vtx0.ID()) {
t.Fatalf("Wrong vertex requested")
}
}
te.PushQuery(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes())
te.Put(secondVdr.ID(), *reqID, vtx0.ID(), []byte{3})
*parsed = false
st.parseVertex = func(b []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(b, vtx0.Bytes()):
*parsed = true
return vtx0, nil
}
return nil, errUnknownVertex
}
st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
if !*parsed {
return nil, errUnknownVertex
}
switch {
case vtxID.Equals(vtx0.ID()):
return vtx0, nil
}
return nil, errUnknownVertex
}
sender.CantPushQuery = false
sender.CantChits = false
vtx0.status = choices.Processing
te.Put(vdr.ID(), *reqID, vtx0.ID(), vtx0.Bytes())
prefs := te.Consensus.Preferences()
if !prefs.Contains(vtx1.ID()) {
t.Fatalf("Shouldn't have abandoned the pending vertex")
}
}
func TestEnginePushQueryRequestIDConflict(t *testing.T) {
config := DefaultConfig()
vdr := validators.GenerateRandomValidator(1)
vals := validators.NewSet()
config.Validators = vals
vals.Add(vdr)
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
st := &stateTest{t: t}
config.State = st
gVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
bytes: []byte{0},
}
vts := []avalanche.Vertex{gVtx}
utxos := []ids.ID{GenerateID(), GenerateID()}
tx0 := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx0.Ins.Add(utxos[0])
tx1 := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx1.Ins.Add(utxos[1])
vtx0 := &Vtx{
parents: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Unknown,
bytes: []byte{1},
}
vtx1 := &Vtx{
parents: []avalanche.Vertex{vtx0},
id: GenerateID(),
txs: []snowstorm.Tx{tx1},
height: 2,
status: choices.Processing,
bytes: []byte{2},
}
randomVtxID := GenerateID()
te := &Transitive{}
te.Initialize(config)
te.finishBootstrapping()
parsed := new(bool)
st.parseVertex = func(b []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(b, vtx1.Bytes()):
*parsed = true
return vtx1, nil
}
return nil, errUnknownVertex
}
st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
if !*parsed {
return nil, errUnknownVertex
}
switch {
case vtxID.Equals(vtx1.ID()):
return vtx1, nil
}
return nil, errUnknownVertex
}
reqID := new(uint32)
sender.GetF = func(reqVdr ids.ShortID, requestID uint32, vtxID ids.ID) {
*reqID = requestID
if !reqVdr.Equals(vdr.ID()) {
t.Fatalf("Wrong validator requested")
}
if !vtxID.Equals(vtx0.ID()) {
t.Fatalf("Wrong vertex requested")
}
}
te.PushQuery(vdr.ID(), 0, vtx1.ID(), vtx1.Bytes())
sender.GetF = nil
sender.CantGet = false
te.PushQuery(vdr.ID(), *reqID, randomVtxID, []byte{3})
*parsed = false
st.parseVertex = func(b []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(b, vtx0.Bytes()):
*parsed = true
return vtx0, nil
}
return nil, errUnknownVertex
}
st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
if !*parsed {
return nil, errUnknownVertex
}
switch {
case vtxID.Equals(vtx0.ID()):
return vtx0, nil
}
return nil, errUnknownVertex
}
sender.CantPushQuery = false
sender.CantChits = false
vtx0.status = choices.Processing
te.Put(vdr.ID(), *reqID, vtx0.ID(), vtx0.Bytes())
prefs := te.Consensus.Preferences()
if !prefs.Contains(vtx1.ID()) {
t.Fatalf("Shouldn't have abandoned the pending vertex")
}
}
func TestEngineAggressivePolling(t *testing.T) {
config := DefaultConfig()
config.Params.ConcurrentRepolls = 3
vdr := validators.GenerateRandomValidator(1)
vals := validators.NewSet()
config.Validators = vals
vals.Add(vdr)
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
st := &stateTest{t: t}
config.State = st
gVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
bytes: []byte{0},
}
vts := []avalanche.Vertex{gVtx}
utxos := []ids.ID{GenerateID(), GenerateID()}
tx0 := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx0.Ins.Add(utxos[0])
tx1 := &TestTx{
TestTx: snowstorm.TestTx{
Identifier: GenerateID(),
Stat: choices.Processing,
},
}
tx1.Ins.Add(utxos[1])
vtx := &Vtx{
parents: vts,
id: GenerateID(),
txs: []snowstorm.Tx{tx0},
height: 1,
status: choices.Processing,
bytes: []byte{1},
}
te := &Transitive{}
te.Initialize(config)
te.finishBootstrapping()
parsed := new(bool)
st.parseVertex = func(b []byte) (avalanche.Vertex, error) {
switch {
case bytes.Equal(b, vtx.Bytes()):
*parsed = true
return vtx, nil
}
return nil, errUnknownVertex
}
st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
if !*parsed {
return nil, errUnknownVertex
}
switch {
case vtxID.Equals(vtx.ID()):
return vtx, nil
}
return nil, errUnknownVertex
}
numPushQueries := new(int)
sender.PushQueryF = func(ids.ShortSet, uint32, ids.ID, []byte) { *numPushQueries++ }
numPullQueries := new(int)
sender.PullQueryF = func(ids.ShortSet, uint32, ids.ID) { *numPullQueries++ }
te.Put(vdr.ID(), 0, vtx.ID(), vtx.Bytes())
if *numPushQueries != 1 {
t.Fatalf("should have issued one push query")
}
if *numPullQueries != 2 {
t.Fatalf("should have issued one pull query")
}
}

View File

@ -59,10 +59,7 @@ func (v *voter) Update() {
}
v.t.Config.Context.Log.Verbo("Avalanche engine can't quiesce")
if len(v.t.polls.m) < v.t.Config.Params.ConcurrentRepolls {
v.t.repoll()
}
v.t.repoll()
}
func (v *voter) bubbleVotes(votes ids.UniqueBag) ids.UniqueBag {

View File

@ -122,9 +122,9 @@ func (b *Bootstrapper) Accepted(validatorID ids.ShortID, requestID uint32, conta
}
if size := accepted.Len(); size == 0 && b.Config.Beacons.Len() > 0 {
b.Context.Log.Warn("Bootstrapping finished with no accepted frontier. This is likely a result of failing to be able to connect to the specified bootstraps, or no transactions have been issued on this network yet")
b.Context.Log.Warn("Bootstrapping finished with no accepted frontier. This is likely a result of failing to be able to connect to the specified bootstraps, or no transactions have been issued on this chain yet")
} else {
b.Context.Log.Info("Bootstrapping finished with %d vertices in the accepted frontier", size)
b.Context.Log.Info("Bootstrapping started syncing with %d vertices in the accepted frontier", size)
}
b.Bootstrapable.ForceAccepted(accepted)

View File

@ -34,75 +34,181 @@ type ExternalHandler interface {
// FrontierHandler defines how a consensus engine reacts to frontier messages
// from other validators
type FrontierHandler interface {
// GetAcceptedFrontier notifies this consensus engine that its accepted
// frontier is requested by the specified validator
// Notify this engine of a request for the accepted frontier of vertices.
//
// The accepted frontier is the set of accepted vertices that do not have
// any accepted descendants.
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID. However, the validatorID is
// assumed to be authenticated.
//
// This engine should respond with an AcceptedFrontier message with the same
// requestID, and the engine's current accepted frontier.
GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32)
// AcceptedFrontier notifies this consensus engine of the specified
// validators current accepted frontier
AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
// Notify this engine of an accepted frontier.
//
// This function can be called by any validator. It is not safe to assume
// this message is in response to a GetAcceptedFrontier message, is utilizing a
// unique requestID, or that the containerIDs form a valid frontier.
// However, the validatorID is assumed to be authenticated.
AcceptedFrontier(
validatorID ids.ShortID,
requestID uint32,
containerIDs ids.Set,
)
// GetAcceptedFrontierFailed notifies this consensus engine that the
// requested accepted frontier from the specified validator should be
// considered lost
// Notify this engine that a get accepted frontier request it issued has
// failed.
//
// This function will be called if the engine sent a GetAcceptedFrontier
// message that is not anticipated to be responded to. This could be because
// the recipient of the message is unknown or if the message request has
// timed out.
//
// The validatorID, and requestID, are assumed to be the same as those sent
// in the GetAcceptedFrontier message.
GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32)
}
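To make the contract above concrete, here is a minimal sketch of a type satisfying FrontierHandler that echoes the requestID back with its local frontier; the sender field and the frontier helper are assumptions made for this illustration and are not part of this change.
// minimalFrontierHandler is an illustrative sketch only. It assumes a
// hypothetical sender with an AcceptedFrontier method and a frontier helper
// that returns the locally accepted frontier.
type minimalFrontierHandler struct {
	sender interface {
		AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
	}
	frontier func() ids.Set
}

// GetAcceptedFrontier replies with the same requestID and the local frontier.
func (h *minimalFrontierHandler) GetAcceptedFrontier(validatorID ids.ShortID, requestID uint32) {
	h.sender.AcceptedFrontier(validatorID, requestID, h.frontier())
}

// AcceptedFrontier and GetAcceptedFrontierFailed are no-ops in this sketch.
func (h *minimalFrontierHandler) AcceptedFrontier(ids.ShortID, uint32, ids.Set)  {}
func (h *minimalFrontierHandler) GetAcceptedFrontierFailed(ids.ShortID, uint32) {}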
// AcceptedHandler defines how a consensus engine reacts to messages pertaining
// to accepted containers from other validators
type AcceptedHandler interface {
// GetAccepted notifies this consensus engine that it should send the set of
// containerIDs that it has accepted from the provided set to the specified
// validator
// Notify this engine of a request to filter non-accepted vertices.
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID. However, the validatorID is
// assumed to be authenticated.
//
// This engine should respond with an Accepted message with the same
// requestID, and the subset of the containerIDs that this node has decided
// are accepted.
GetAccepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
// Accepted notifies this consensus engine of a set of accepted containerIDs
// Notify this engine of a set of accepted vertices.
//
// This function can be called by any validator. It is not safe to assume
// this message is in response to a GetAccepted message, is utilizing a
// unique requestID, or that the containerIDs are a subset of the
// containerIDs from a GetAccepted message. However, the validatorID is
// assumed to be authenticated.
Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
// GetAcceptedFailed notifies this consensus engine that the requested
// accepted containers requested from the specified validator should be
// considered lost
// Notify this engine that a get accepted request it issued has failed.
//
// This function will be called if the engine sent a GetAccepted message
// that is not anticipated to be responded to. This could be because the
// recipient of the message is unknown or if the message request has timed
// out.
//
// The validatorID, and requestID, are assumed to be the same as those sent
// in the GetAccepted message.
GetAcceptedFailed(validatorID ids.ShortID, requestID uint32)
}
// FetchHandler defines how a consensus engine reacts to retrieval messages from
// other validators
type FetchHandler interface {
// Get notifies this consensus engine that the specified validator requested
// that this engine send the specified container to it
// Notify this engine of a request for a container.
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID. It is also not safe to
// assume the requested containerID exists. However, the validatorID is
// assumed to be authenticated.
//
// There should never be a situation where a virtuous node sends a Get
// request to another virtuous node that does not have the requested
// container, unless that container was pruned from the active set.
//
// This engine should respond with a Put message with the same requestID if
// the container was locally available. Otherwise, the message can be safely
// dropped.
Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
// Put the container with the specified ID and body.
// Notify this engine of a container.
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID or even that the containerID
// matches the ID of the container bytes. However, the validatorID is
// assumed to be authenticated.
//
// This engine needs to request and receive missing ancestors of the
// container before adding the container to consensus. Once all ancestor
// containers are added, the engine should push the container into consensus.
Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)
Put(
validatorID ids.ShortID,
requestID uint32,
containerID ids.ID,
container []byte,
)
// Notify this engine that a get request it issued has failed.
GetFailed(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
//
// This function will be called if the engine sent a Get message that is not
// anticipated to be responded to. This could be because the recipient of
// the message is unknown or if the message request has timed out.
//
// The validatorID and requestID are assumed to be the same as those sent in
// the Get message.
GetFailed(validatorID ids.ShortID, requestID uint32)
}
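As a rough illustration of the Get/Put contract described above, a node serving a Get request mirrors the requestID back in its Put response when the container is locally available, and otherwise drops the request. The sender interface and store lookup below are placeholders assumed for this sketch, not part of this change.
// serveGet is an illustrative sketch of the Get/Put contract. The sender
// interface and the store lookup function are assumptions for this example.
func serveGet(
	sender interface {
		Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)
	},
	store func(ids.ID) ([]byte, bool),
	validatorID ids.ShortID,
	requestID uint32,
	containerID ids.ID,
) {
	if container, ok := store(containerID); ok {
		// respond with the same requestID so the peer can match the response
		sender.Put(validatorID, requestID, containerID, container)
		return
	}
	// the container isn't available locally, so the request is safely dropped
}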
// QueryHandler defines how a consensus engine reacts to query messages from
// other validators
type QueryHandler interface {
// Notify this engine that the specified validator queried it about the
// specified container. That is, the validator would like to know whether
// this engine prefers the specified container. If the ancestry of the
// container is incomplete, or the container is unknown, request the missing
// data. Once complete, sends this validator the current preferences.
// Notify this engine of a request for our preferences.
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID. However, the validatorID is
// assumed to be authenticated.
//
// If the container or its ancestry is incomplete, this engine is expected
// to request the missing containers from the validator. Once the ancestry
// is complete, this engine should send this validator the current
// preferences in a Chits message. The Chits message should have the same
// requestID that was passed in here.
PullQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
// Notify this engine that the specified validator queried it about the
// specified container. That is, the validator would like to know whether
// this engine prefers the specified container. If the ancestry of the
// container is incomplete, request it. Once complete, sends this validator
// the current preferences.
PushQuery(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)
// Notify this engine of a request for our preferences.
//
// This function can be called by any validator. It is not safe to assume
// this message is utilizing a unique requestID or even that the containerID
// matches the ID of the container bytes. However, the validatorID is
// assumed to be authenticated.
//
// This function is meant to behave the same way as PullQuery, except the
// container is optimistically provided to potentially remove the need for
// a series of Get/Put messages.
//
// If the ancestry of the container is incomplete, this engine is expected
// to request the ancestry from the validator. Once the ancestry is
// complete, this engine should send this validator the current preferences
// in a Chits message. The Chits message should have the same requestID that
// was passed in here.
PushQuery(
validatorID ids.ShortID,
requestID uint32,
containerID ids.ID,
container []byte,
)
// Notify this engine of the specified validator's preferences.
//
// This function can be called by any validator. It is not safe to assume
// this message is in response to a PullQuery or a PushQuery message.
// However, the validatorID is assumed to be authenticated.
Chits(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
// Notify this engine that a query it issued has failed.
//
// This function will be called if the engine sent a PullQuery or PushQuery
// message that is not anticipated to be responded to. This could be because
// the recipient of the message is unknown or if the message request has
// timed out.
//
// The validatorID and the requestID are assumed to be the same as those
// sent in the Query message.
QueryFailed(validatorID ids.ShortID, requestID uint32)
}
@ -110,14 +216,19 @@ type QueryHandler interface {
// other components of this validator
type InternalHandler interface {
// Startup this engine.
//
// This function will be called once the environment is configured to be
// able to run the engine.
Startup()
// Gossip to the network a container on the accepted frontier
Gossip()
// Shutdown this engine.
//
// This function will be called when the environment is exiting.
Shutdown()
// Notify this engine that the vm has sent a message to it.
// Notify this engine of a message from the virtual machine.
Notify(Message)
}

View File

@ -0,0 +1,88 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package common
import (
"github.com/ava-labs/gecko/ids"
)
type req struct {
vdr ids.ShortID
id uint32
}
// Requests tracks outstanding container requests sent to peers.
type Requests struct {
reqsToID map[[20]byte]map[uint32]ids.ID
idToReq map[[32]byte]req
}
// Add a request. Assumes that requestIDs are unique. Assumes that containerIDs
// are only in one request at a time.
func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) {
if r.reqsToID == nil {
r.reqsToID = make(map[[20]byte]map[uint32]ids.ID)
}
vdrKey := vdr.Key()
vdrReqs, ok := r.reqsToID[vdrKey]
if !ok {
vdrReqs = make(map[uint32]ids.ID)
r.reqsToID[vdrKey] = vdrReqs
}
vdrReqs[requestID] = containerID
if r.idToReq == nil {
r.idToReq = make(map[[32]byte]req)
}
r.idToReq[containerID.Key()] = req{
vdr: vdr,
id: requestID,
}
}
// Remove attempts to abandon a requestID sent to a validator. If the request is
// currently outstanding, the requested ID will be returned along with true. If
// the request isn't currently outstanding, false will be returned.
func (r *Requests) Remove(vdr ids.ShortID, requestID uint32) (ids.ID, bool) {
vdrKey := vdr.Key()
vdrReqs, ok := r.reqsToID[vdrKey]
if !ok {
return ids.ID{}, false
}
containerID, ok := vdrReqs[requestID]
if !ok {
return ids.ID{}, false
}
if len(vdrReqs) == 1 {
delete(r.reqsToID, vdrKey)
} else {
delete(vdrReqs, requestID)
}
delete(r.idToReq, containerID.Key())
return containerID, true
}
// RemoveAny removes any outstanding request for the container ID. True is
// returned if the container ID had an outstanding request.
func (r *Requests) RemoveAny(containerID ids.ID) bool {
req, ok := r.idToReq[containerID.Key()]
if !ok {
return false
}
r.Remove(req.vdr, req.id)
return true
}
// Len returns the total number of outstanding requests.
func (r *Requests) Len() int { return len(r.idToReq) }
// Contains returns true if there is an outstanding request for the container
// ID.
func (r *Requests) Contains(containerID ids.ID) bool {
_, ok := r.idToReq[containerID.Key()]
return ok
}
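The intended usage pattern, which the bootstrapper and engines in this commit follow, is to record each Get message in Requests when it is sent and to look it up again when the matching Put or GetFailed arrives. Below is a minimal sketch of that pattern, assuming a hypothetical sender interface and a local requestID counter.
// trackedGetter is an illustrative sketch of how Requests is meant to be used;
// the sender interface and requestID counter are assumptions for this example.
type trackedGetter struct {
	requestID uint32
	reqs      Requests
	sender    interface {
		Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
	}
}

// sendGet records the request before issuing the Get message.
func (g *trackedGetter) sendGet(vdr ids.ShortID, containerID ids.ID) {
	g.requestID++
	g.reqs.Add(vdr, g.requestID, containerID)
	g.sender.Get(vdr, g.requestID, containerID)
}

// getFailed drops the bookkeeping for a request that will never be answered
// and reports which container the failed request was for.
func (g *trackedGetter) getFailed(vdr ids.ShortID, requestID uint32) (ids.ID, bool) {
	return g.reqs.Remove(vdr, requestID)
}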

View File

@ -0,0 +1,90 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package common
import (
"testing"
"github.com/stretchr/testify/assert"
"github.com/ava-labs/gecko/ids"
)
func TestRequests(t *testing.T) {
req := Requests{}
length := req.Len()
assert.Equal(t, 0, length, "should have had no outstanding requests")
_, removed := req.Remove(ids.ShortEmpty, 0)
assert.False(t, removed, "shouldn't have removed the request")
removed = req.RemoveAny(ids.Empty)
assert.False(t, removed, "shouldn't have removed the request")
contains := req.Contains(ids.Empty)
assert.False(t, contains, "shouldn't contain this request")
req.Add(ids.ShortEmpty, 0, ids.Empty)
length = req.Len()
assert.Equal(t, 1, length, "should have had one outstanding request")
_, removed = req.Remove(ids.ShortEmpty, 1)
assert.False(t, removed, "shouldn't have removed the request")
_, removed = req.Remove(ids.NewShortID([20]byte{1}), 0)
assert.False(t, removed, "shouldn't have removed the request")
contains = req.Contains(ids.Empty)
assert.True(t, contains, "should contain this request")
length = req.Len()
assert.Equal(t, 1, length, "should have had one outstanding request")
req.Add(ids.ShortEmpty, 10, ids.Empty.Prefix(0))
length = req.Len()
assert.Equal(t, 2, length, "should have had two outstanding requests")
_, removed = req.Remove(ids.ShortEmpty, 1)
assert.False(t, removed, "shouldn't have removed the request")
_, removed = req.Remove(ids.NewShortID([20]byte{1}), 0)
assert.False(t, removed, "shouldn't have removed the request")
contains = req.Contains(ids.Empty)
assert.True(t, contains, "should contain this request")
length = req.Len()
assert.Equal(t, 2, length, "should have had two outstanding requests")
removedID, removed := req.Remove(ids.ShortEmpty, 0)
assert.True(t, removedID.Equals(ids.Empty), "should have removed the requested ID")
assert.True(t, removed, "should have removed the request")
removedID, removed = req.Remove(ids.ShortEmpty, 10)
assert.True(t, removedID.Equals(ids.Empty.Prefix(0)), "should have removed the requested ID")
assert.True(t, removed, "should have removed the request")
length = req.Len()
assert.Equal(t, 0, length, "should have had no outstanding requests")
req.Add(ids.ShortEmpty, 0, ids.Empty)
length = req.Len()
assert.Equal(t, 1, length, "should have had one outstanding request")
removed = req.RemoveAny(ids.Empty)
assert.True(t, removed, "should have removed the request")
length = req.Len()
assert.Equal(t, 0, length, "should have had no outstanding requests")
removed = req.RemoveAny(ids.Empty)
assert.False(t, removed, "shouldn't have removed the request")
length = req.Len()
assert.Equal(t, 0, length, "should have had no outstanding requests")
}

View File

@ -42,7 +42,8 @@ type EngineTest struct {
StartupF, GossipF, ShutdownF func()
ContextF func() *snow.Context
NotifyF func(Message)
GetF, GetFailedF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
GetF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
GetFailedF func(validatorID ids.ShortID, requestID uint32)
PutF, PushQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte)
GetAcceptedFrontierF, GetAcceptedFrontierFailedF, GetAcceptedFailedF, QueryFailedF func(validatorID ids.ShortID, requestID uint32)
AcceptedFrontierF, GetAcceptedF, AcceptedF, ChitsF func(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set)
@ -187,9 +188,9 @@ func (e *EngineTest) Get(validatorID ids.ShortID, requestID uint32, containerID
}
// GetFailed ...
func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
func (e *EngineTest) GetFailed(validatorID ids.ShortID, requestID uint32) {
if e.GetFailedF != nil {
e.GetFailedF(validatorID, requestID, containerID)
e.GetFailedF(validatorID, requestID)
} else if e.CantGetFailed && e.T != nil {
e.T.Fatalf("Unexpectedly called GetFailed")
}

View File

@ -20,6 +20,9 @@ type BootstrapConfig struct {
// Blocked tracks operations that are blocked on blocks
Blocked *queue.Jobs
// blocks that have outstanding get requests
blkReqs common.Requests
VM ChainVM
Bootstrapped func()
@ -84,16 +87,22 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) {
func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
b.BootstrapConfig.Context.Log.Verbo("Put called for blkID %s", blkID)
if !b.pending.Contains(blkID) {
blk, err := b.VM.ParseBlock(blkBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
b.GetFailed(vdr, requestID)
return
}
blk, err := b.VM.ParseBlock(blkBytes)
if err != nil {
b.BootstrapConfig.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
err,
if !b.pending.Contains(blk.ID()) {
b.BootstrapConfig.Context.Log.Debug("Validator %s sent an unrequested block:\n%s",
vdr,
formatting.DumpBytes{Bytes: blkBytes})
b.GetFailed(vdr, requestID, blkID)
b.GetFailed(vdr, requestID)
return
}
@ -101,7 +110,15 @@ func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkB
}
// GetFailed ...
func (b *bootstrapper) GetFailed(_ ids.ShortID, _ uint32, blkID ids.ID) { b.sendRequest(blkID) }
func (b *bootstrapper) GetFailed(vdr ids.ShortID, requestID uint32) {
blkID, ok := b.blkReqs.Remove(vdr, requestID)
if !ok {
b.BootstrapConfig.Context.Log.Debug("GetFailed called without sending the corresponding Get message from %s",
vdr)
return
}
b.sendRequest(blkID)
}
func (b *bootstrapper) fetch(blkID ids.ID) {
if b.pending.Contains(blkID) {
@ -125,6 +142,9 @@ func (b *bootstrapper) sendRequest(blkID ids.ID) {
validatorID := validators[0].ID()
b.RequestID++
b.blkReqs.RemoveAny(blkID)
b.blkReqs.Add(validatorID, b.RequestID, blkID)
b.pending.Add(blkID)
b.BootstrapConfig.Sender.Get(validatorID, b.RequestID, blkID)

View File

@ -223,12 +223,13 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) {
bs.ForceAccepted(acceptedIDs)
vm.GetBlockF = nil
sender.GetF = nil
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes1):
return blk1, nil
case bytes.Equal(blkBytes, blkBytes2):
return blk2, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
@ -477,3 +478,113 @@ func TestBootstrapperPartialFetch(t *testing.T) {
t.Fatalf("wrong number pending")
}
}
func TestBootstrapperWrongIDByzantineResponse(t *testing.T) {
config, peerID, sender, vm := newConfig(t)
blkID0 := ids.Empty.Prefix(0)
blkID1 := ids.Empty.Prefix(1)
blkID2 := ids.Empty.Prefix(2)
blkBytes0 := []byte{0}
blkBytes1 := []byte{1}
blkBytes2 := []byte{2}
blk0 := &Blk{
id: blkID0,
height: 0,
status: choices.Accepted,
bytes: blkBytes0,
}
blk1 := &Blk{
parent: blk0,
id: blkID1,
height: 1,
status: choices.Processing,
bytes: blkBytes1,
}
blk2 := &Blk{
parent: blk1,
id: blkID2,
height: 2,
status: choices.Processing,
bytes: blkBytes2,
}
bs := bootstrapper{}
bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry())
bs.Initialize(config)
acceptedIDs := ids.Set{}
acceptedIDs.Add(blkID1)
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(blkID1):
return nil, errUnknownBlock
default:
t.Fatal(errUnknownBlock)
panic(errUnknownBlock)
}
}
requestID := new(uint32)
sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) {
if !vdr.Equals(peerID) {
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr)
}
switch {
case vtxID.Equals(blkID1):
default:
t.Fatalf("Requested unknown block")
}
*requestID = reqID
}
bs.ForceAccepted(acceptedIDs)
vm.GetBlockF = nil
sender.GetF = nil
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes2):
return blk2, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
sender.CantGet = false
bs.Put(peerID, *requestID, blkID1, blkBytes2)
sender.CantGet = true
vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) {
switch {
case bytes.Equal(blkBytes, blkBytes1):
return blk1, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
finished := new(bool)
bs.onFinished = func() { *finished = true }
bs.Put(peerID, *requestID, blkID1, blkBytes1)
vm.ParseBlockF = nil
if !*finished {
t.Fatalf("Bootstrapping should have finished")
}
if blk1.Status() != choices.Accepted {
t.Fatalf("Block should be accepted")
}
if blk2.Status() != choices.Processing {
t.Fatalf("Block should be processing")
}
}

View File

@ -19,12 +19,22 @@ type Transitive struct {
Config
bootstrapper
polls polls // track people I have asked for their preference
// track outstanding preference requests
polls polls
blkReqs, pending ids.Set // prevent asking validators for the same block
// blocks that have outstanding get requests
blkReqs common.Requests
blocked events.Blocker // track operations that are blocked on blocks
// blocks that are fetched but haven't been issued due to missing
// dependencies
pending ids.Set
// operations that are blocked on a block being issued. This could be
// issuing another block, responding to a query, or applying votes to
// consensus
blocked events.Blocker
// mark for if the engine has been bootstrapped or not
bootstrapped bool
}
@ -33,7 +43,11 @@ func (t *Transitive) Initialize(config Config) {
config.Context.Log.Info("Initializing Snowman consensus")
t.Config = config
t.metrics.Initialize(config.Context.Log, config.Params.Namespace, config.Params.Metrics)
t.metrics.Initialize(
config.Context.Log,
config.Params.Namespace,
config.Params.Metrics,
)
t.onFinished = t.finishBootstrapping
t.bootstrapper.Initialize(config.BootstrapConfig)
@ -44,11 +58,19 @@ func (t *Transitive) Initialize(config Config) {
t.polls.m = make(map[uint32]poll)
}
// when bootstrapping is finished, this will be called. This initializes the
// consensus engine with the last accepted block.
func (t *Transitive) finishBootstrapping() {
// set the bootstrapped mark to switch consensus modes
t.bootstrapped = true
// initialize consensus to the last accepted blockID
tailID := t.Config.VM.LastAccepted()
t.Consensus.Initialize(t.Config.Context, t.Params, tailID)
// to maintain the invariant that oracle blocks are issued with the correct
// preferences, we need to handle the case that we are bootstrapping into an
// oracle block
tail, err := t.Config.VM.GetBlock(tailID)
if err != nil {
t.Config.Context.Log.Error("Failed to get last accepted block due to: %s", err)
@ -58,11 +80,16 @@ func (t *Transitive) finishBootstrapping() {
switch blk := tail.(type) {
case OracleBlock:
for _, blk := range blk.Options() {
// note that deliver will set the VM's preference
t.deliver(blk)
}
default:
// if there aren't blocks we need to deliver on startup, we need to set
// the preference to the last accepted block
t.Config.VM.SetPreference(tailID)
}
t.Config.Context.Log.Info("Bootstrapping finished with %s as the last accepted block", tailID)
}
// Gossip implements the Engine interface
@ -89,15 +116,27 @@ func (t *Transitive) Context() *snow.Context { return t.Config.Context }
// Get implements the Engine interface
func (t *Transitive) Get(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
if blk, err := t.Config.VM.GetBlock(blkID); err == nil {
t.Config.Sender.Put(vdr, requestID, blkID, blk.Bytes())
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
// If we failed to get the block, that means either an unexpected error
// has occurred, the validator is not following the protocol, or the
// block has been pruned.
t.Config.Context.Log.Warn("Get called for blockID %s errored with %s",
blkID,
err)
return
}
// Respond to the validator with the fetched block and the same requestID.
t.Config.Sender.Put(vdr, requestID, blkID, blk.Bytes())
}
// Put implements the Engine interface
func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
t.Config.Context.Log.Verbo("Put called for blockID %s", blkID)
// if the engine hasn't been bootstrapped, forward the request to the
// bootstrapper
if !t.bootstrapped {
t.bootstrapper.Put(vdr, requestID, blkID, blkBytes)
return
@ -105,35 +144,56 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkByt
blk, err := t.Config.VM.ParseBlock(blkBytes)
if err != nil {
t.Config.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
t.Config.Context.Log.Debug("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
t.GetFailed(vdr, requestID, blkID)
// because GetFailed doesn't utilize the assumption that we actually
// sent a Get message, we can safely call GetFailed here to potentially
// abandon the request.
t.GetFailed(vdr, requestID)
return
}
// insert the block into consensus. If the block has already been issued,
// this will be a noop. If this block has missing dependencies, vdr will
// receive requests to fill the ancestry. Dependencies that have already
// been fetched, but that have missing dependencies themselves, won't be
// requested from the vdr.
t.insertFrom(vdr, blk)
}
// GetFailed implements the Engine interface
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
func (t *Transitive) GetFailed(vdr ids.ShortID, requestID uint32) {
// if the engine hasn't been bootstrapped, forward the request to the
// bootstrapper
if !t.bootstrapped {
t.bootstrapper.GetFailed(vdr, requestID, blkID)
t.bootstrapper.GetFailed(vdr, requestID)
return
}
t.pending.Remove(blkID)
t.blocked.Abandon(blkID)
t.blkReqs.Remove(blkID)
// we don't assume that this function is only called after a failed Get
// message. So we first check whether we have an outstanding request and, if
// one exists, retrieve what the request was for
blkID, ok := t.blkReqs.Remove(vdr, requestID)
if !ok {
t.Config.Context.Log.Warn("GetFailed called without sending the corresponding Get message from %s",
vdr)
return
}
// Tracks performance statistics
t.numBlockedBlk.Set(float64(t.pending.Len()))
// because the Get request was dropped, we no longer expect blkID to be
// issued.
t.blocked.Abandon(blkID)
}
// PullQuery implements the Engine interface
func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID) {
// if the engine hasn't been bootstrapped, we aren't ready to respond to
// queries
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping", blkID)
t.Config.Context.Log.Debug("Dropping PullQuery for %s due to bootstrapping",
blkID)
return
}
@ -144,6 +204,8 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID)
requestID: requestID,
}
// if we weren't able to issue this block, then it is a dependency for this
// reply
if !t.reinsertFrom(vdr, blkID) {
c.deps.Add(blkID)
}
@ -152,26 +214,52 @@ func (t *Transitive) PullQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID)
}
// PushQuery implements the Engine interface
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID, blk []byte) {
func (t *Transitive) PushQuery(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) {
// if the engine hasn't been bootstrapped, we aren't ready to respond to
// queries
if !t.bootstrapped {
t.Config.Context.Log.Debug("Dropping PushQuery for %s due to bootstrapping", blkID)
return
}
t.Put(vdr, requestID, blkID, blk)
t.PullQuery(vdr, requestID, blkID)
blk, err := t.Config.VM.ParseBlock(blkBytes)
// If the parsing fails, we just drop the request, as we didn't ask for it
if err != nil {
t.Config.Context.Log.Warn("ParseBlock failed due to %s for block:\n%s",
err,
formatting.DumpBytes{Bytes: blkBytes})
return
}
// insert the block into consensus. If the block has already been issued,
// this will be a noop. If this block has missing dependencies, vdr will
// receive requests to fill the ancestry. Dependencies that have already
// been fetched, but that have missing dependencies themselves, won't be
// requested from the vdr.
t.insertFrom(vdr, blk)
// register the chit request
t.PullQuery(vdr, requestID, blk.ID())
}
// Chits implements the Engine interface
func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
// if the engine hasn't been bootstrapped, we shouldn't be receiving chits
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping Chits due to bootstrapping")
t.Config.Context.Log.Debug("Dropping Chits due to bootstrapping")
return
}
// Since this is snowman, there should only be one ID in the vote set
if votes.Len() != 1 {
t.Config.Context.Log.Warn("Chits was called with the wrong number of votes %d. ValidatorID: %s, RequestID: %d", votes.Len(), vdr, requestID)
t.Config.Context.Log.Debug("Chits was called with the wrong number of votes %d. ValidatorID: %s, RequestID: %d",
votes.Len(),
vdr,
requestID)
// because QueryFailed doesn't utilize the assumption that we actually
// sent a Query message, we can safely call QueryFailed here to
// potentially abandon the request.
t.QueryFailed(vdr, requestID)
return
}
@ -186,6 +274,8 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
response: vote,
}
// if we weren't able to issue the vote's block, then it is a dependency for
// applying the vote
if !t.reinsertFrom(vdr, vote) {
v.deps.Add(vote)
}
@ -195,6 +285,7 @@ func (t *Transitive) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
// QueryFailed implements the Engine interface
func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) {
// if the engine hasn't been bootstrapped, we won't have sent a query
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping QueryFailed due to bootstrapping")
return
@ -209,6 +300,7 @@ func (t *Transitive) QueryFailed(vdr ids.ShortID, requestID uint32) {
// Notify implements the Engine interface
func (t *Transitive) Notify(msg common.Message) {
// if the engine hasn't been bootstrapped, we shouldn't be issuing blocks
if !t.bootstrapped {
t.Config.Context.Log.Warn("Dropping Notify due to bootstrapping")
return
@ -217,21 +309,32 @@ func (t *Transitive) Notify(msg common.Message) {
t.Config.Context.Log.Verbo("Snowman engine notified of %s from the vm", msg)
switch msg {
case common.PendingTxs:
if blk, err := t.Config.VM.BuildBlock(); err == nil {
if status := blk.Status(); status != choices.Processing {
t.Config.Context.Log.Warn("Attempting to issue a block with status: %s, expected Processing", status)
}
parentID := blk.Parent().ID()
if pref := t.Consensus.Preference(); !parentID.Equals(pref) {
t.Config.Context.Log.Warn("Built block with parent: %s, expected %s", parentID, pref)
}
if t.insertAll(blk) {
t.Config.Context.Log.Verbo("Successfully issued new block from the VM")
} else {
t.Config.Context.Log.Warn("VM.BuildBlock returned a block that is pending for ancestors")
}
} else {
// the pending txs message means we should attempt to build a block.
blk, err := t.Config.VM.BuildBlock()
if err != nil {
t.Config.Context.Log.Verbo("VM.BuildBlock errored with %s", err)
return
}
// a newly created block is expected to be processing. If this check
// fails, there is potentially an error in the VM this engine is running
if status := blk.Status(); status != choices.Processing {
t.Config.Context.Log.Warn("Attempting to issue a block with status: %s, expected Processing", status)
}
// the newly created block should be built on top of the preferred
// block. Otherwise, the new block doesn't have the best chance of being
// confirmed.
parentID := blk.Parent().ID()
if pref := t.Consensus.Preference(); !parentID.Equals(pref) {
t.Config.Context.Log.Warn("Built block with parent: %s, expected %s", parentID, pref)
}
// the block being inserted shouldn't have any missing dependencies
if t.insertAll(blk) {
t.Config.Context.Log.Verbo("Successfully issued new block from the VM")
} else {
t.Config.Context.Log.Warn("VM.BuildBlock returned a block that is pending for ancestors")
}
default:
t.Config.Context.Log.Warn("Unexpected message from the VM: %s", msg)
@ -239,10 +342,20 @@ func (t *Transitive) Notify(msg common.Message) {
}
func (t *Transitive) repoll() {
// if we are issuing a repoll, we should gossip our current preferences to
// propagate the most likely branch as quickly as possible
prefID := t.Consensus.Preference()
t.pullSample(prefID)
for i := len(t.polls.m); i < t.Params.ConcurrentRepolls; i++ {
t.pullSample(prefID)
}
}
// reinsertFrom attempts to issue the branch ending with a block, from only its
// ID, to consensus. Returns true if the block was added, or was previously
// added, to consensus. This is useful to check the local DB before requesting a
// block in case we have the block for some reason. If the block or a dependency
// is missing, the validator will be sent a Get message.
func (t *Transitive) reinsertFrom(vdr ids.ShortID, blkID ids.ID) bool {
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
@ -252,44 +365,81 @@ func (t *Transitive) reinsertFrom(vdr ids.ShortID, blkID ids.ID) bool {
return t.insertFrom(vdr, blk)
}
// insertFrom attempts to issue the branch ending with a block to consensus.
// Returns true if the block was added, or was previously added, to consensus.
// This is useful to check the local DB before requesting a block in case we
// have the block for some reason. If a dependency is missing, the validator
// will be sent a Get message.
func (t *Transitive) insertFrom(vdr ids.ShortID, blk snowman.Block) bool {
blkID := blk.ID()
// if the block has been issued, we don't need to insert it. If the block is
// already pending, we shouldn't attempt to insert it again yet
for !t.Consensus.Issued(blk) && !t.pending.Contains(blkID) {
t.insert(blk)
parent := blk.Parent()
parentID := parent.ID()
if parentStatus := parent.Status(); !parentStatus.Fetched() {
t.sendRequest(vdr, parentID)
blk = blk.Parent()
blkID = blk.ID()
// if the parent hasn't been fetched, we need to request it to issue the
// newly inserted block
if !blk.Status().Fetched() {
t.sendRequest(vdr, blkID)
return false
}
blk = parent
blkID = parentID
}
return !t.pending.Contains(blkID)
return t.Consensus.Issued(blk)
}
// insertAll attempts to issue the branch ending with a block to consensus.
// Returns true if the block was added, or was previously added, to consensus.
// This is useful to check the local DB before requesting a block in case we
// have the block for some reason. If a dependency is missing and the dependency
// hasn't been requested, the issuance will be abandoned.
func (t *Transitive) insertAll(blk snowman.Block) bool {
blkID := blk.ID()
for blk.Status().Fetched() && !t.Consensus.Issued(blk) && !t.pending.Contains(blkID) {
t.insert(blk)
blk = blk.Parent()
blkID = blk.ID()
}
return !t.pending.Contains(blkID)
// if issuance of the block was successful, this is the happy path
if t.Consensus.Issued(blk) {
return true
}
// if this branch is waiting on a block that we supposedly have a source of,
// we can just wait for that request to succeed or fail
if t.blkReqs.Contains(blkID) {
return false
}
// if we have no reason to expect that this block will be inserted, we
// should abandon the block to avoid a memory leak
t.blocked.Abandon(blkID)
return false
}
// attempt to insert the block into consensus. If the block's parent hasn't been
// issued, the insertion will block until the parent's issuance is abandoned or
// fulfilled
func (t *Transitive) insert(blk snowman.Block) {
blkID := blk.ID()
// mark that the block has been fetched but is pending
t.pending.Add(blkID)
t.blkReqs.Remove(blkID)
// if we have any outstanding requests for this block, remove the pending
// requests
t.blkReqs.RemoveAny(blkID)
i := &issuer{
t: t,
blk: blk,
}
// block on the parent if needed
if parent := blk.Parent(); !t.Consensus.Issued(parent) {
parentID := parent.ID()
t.Config.Context.Log.Verbo("Block waiting for parent %s", parentID)
@ -304,17 +454,22 @@ func (t *Transitive) insert(blk snowman.Block) {
}
func (t *Transitive) sendRequest(vdr ids.ShortID, blkID ids.ID) {
if !t.blkReqs.Contains(blkID) {
t.blkReqs.Add(blkID)
t.numBlkRequests.Set(float64(t.blkReqs.Len())) // Tracks performance statistics
t.RequestID++
t.Config.Context.Log.Verbo("Sending Get message for %s", blkID)
t.Config.Sender.Get(vdr, t.RequestID, blkID)
// only send one request at a time for a block
if t.blkReqs.Contains(blkID) {
return
}
t.Config.Context.Log.Verbo("Sending Get message for %s", blkID)
t.RequestID++
t.blkReqs.Add(vdr, t.RequestID, blkID)
t.Config.Sender.Get(vdr, t.RequestID, blkID)
// Tracks performance statistics
t.numBlkRequests.Set(float64(t.blkReqs.Len()))
}
// send a pull request for this block ID
func (t *Transitive) pullSample(blkID ids.ID) {
t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
p := t.Consensus.Parameters()
@ -324,15 +479,22 @@ func (t *Transitive) pullSample(blkID ids.ID) {
vdrSet.Add(vdr.ID())
}
t.RequestID++
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Sender.PullQuery(vdrSet, t.RequestID, blkID)
} else if numVdrs < p.K {
if numVdrs := len(vdrs); numVdrs != p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
return
}
t.RequestID++
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
return
}
t.Config.Sender.PullQuery(vdrSet, t.RequestID, blkID)
}
func (t *Transitive) pushSample(blk snowman.Block) bool {
// send a push request for this block
func (t *Transitive) pushSample(blk snowman.Block) {
t.Config.Context.Log.Verbo("About to sample from: %s", t.Config.Validators)
p := t.Consensus.Parameters()
vdrs := t.Config.Validators.Sample(p.K)
@ -341,15 +503,20 @@ func (t *Transitive) pushSample(blk snowman.Block) bool {
vdrSet.Add(vdr.ID())
}
t.RequestID++
queryIssued := false
if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes())
queryIssued = true
} else if numVdrs < p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blk.ID())
blkID := blk.ID()
if numVdrs := len(vdrs); numVdrs != p.K {
t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID)
return
}
return queryIssued
t.RequestID++
if !t.polls.Add(t.RequestID, vdrSet.Len()) {
t.Config.Context.Log.Error("Query for %s was dropped due to use of a duplicated requestID", blkID)
return
}
t.Config.Sender.PushQuery(vdrSet, t.RequestID, blkID, blk.Bytes())
return
}
func (t *Transitive) deliver(blk snowman.Block) {
@ -357,11 +524,14 @@ func (t *Transitive) deliver(blk snowman.Block) {
return
}
// we are adding the block to consensus, so it is no longer pending
blkID := blk.ID()
t.pending.Remove(blkID)
if err := blk.Verify(); err != nil {
t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
// if verify fails, then all descendants are also invalid
t.blocked.Abandon(blkID)
t.numBlockedBlk.Set(float64(t.pending.Len())) // Tracks performance statistics
return
@ -369,8 +539,10 @@ func (t *Transitive) deliver(blk snowman.Block) {
t.Config.Context.Log.Verbo("Adding block to consensus: %s", blkID)
t.Consensus.Add(blk)
polled := t.pushSample(blk)
// Add all the oracle blocks if they exist. We call verify on all the blocks
// and add them to consensus before marking anything as fulfilled to avoid
// any potential reentrant bugs.
added := []snowman.Block{}
dropped := []snowman.Block{}
switch blk := blk.(type) {
@ -378,20 +550,23 @@ func (t *Transitive) deliver(blk snowman.Block) {
for _, blk := range blk.Options() {
if err := blk.Verify(); err != nil {
t.Config.Context.Log.Debug("Block failed verification due to %s, dropping block", err)
t.blocked.Abandon(blk.ID())
dropped = append(dropped, blk)
} else {
t.Consensus.Add(blk)
t.pushSample(blk)
added = append(added, blk)
}
}
}
t.Config.VM.SetPreference(t.Consensus.Preference())
t.blocked.Fulfill(blkID)
// launch a query for the newly added block
t.pushSample(blk)
t.blocked.Fulfill(blkID)
for _, blk := range added {
t.pushSample(blk)
blkID := blk.ID()
t.pending.Remove(blkID)
t.blocked.Fulfill(blkID)
@ -402,9 +577,8 @@ func (t *Transitive) deliver(blk snowman.Block) {
t.blocked.Abandon(blkID)
}
if polled && len(t.polls.m) < t.Params.ConcurrentRepolls {
t.repoll()
}
// If we should issue multiple queries at the same time, we need to repoll
t.repoll()
// Tracks performance statistics
t.numBlkRequests.Set(float64(t.blkReqs.Len()))

View File

@ -102,7 +102,9 @@ func TestEngineAdd(t *testing.T) {
}
asked := new(bool)
sender.GetF = func(inVdr ids.ShortID, _ uint32, blkID ids.ID) {
reqID := new(uint32)
sender.GetF = func(inVdr ids.ShortID, requestID uint32, blkID ids.ID) {
*reqID = requestID
if *asked {
t.Fatalf("Asked multiple times")
}
@ -136,7 +138,7 @@ func TestEngineAdd(t *testing.T) {
vm.ParseBlockF = func(b []byte) (snowman.Block, error) { return nil, errParseBlock }
te.Put(vdr.ID(), 0, blk.Parent().ID(), nil)
te.Put(vdr.ID(), *reqID, blk.Parent().ID(), nil)
vm.ParseBlockF = nil
@ -906,7 +908,11 @@ func TestEngineAbandonQuery(t *testing.T) {
panic("Should have failed")
}
}
sender.CantGet = false
reqID := new(uint32)
sender.GetF = func(_ ids.ShortID, requestID uint32, _ ids.ID) {
*reqID = requestID
}
te.PullQuery(vdr.ID(), 0, blkID)
@ -914,7 +920,7 @@ func TestEngineAbandonQuery(t *testing.T) {
t.Fatalf("Should have blocked on request")
}
te.GetFailed(vdr.ID(), 0, blkID)
te.GetFailed(vdr.ID(), *reqID)
if len(te.blocked) != 0 {
t.Fatalf("Should have removed request")
@ -947,7 +953,12 @@ func TestEngineAbandonChit(t *testing.T) {
panic("Should have failed")
}
}
sender.CantGet = false
reqID := new(uint32)
sender.GetF = func(_ ids.ShortID, requestID uint32, _ ids.ID) {
*reqID = requestID
}
fakeBlkIDSet := ids.Set{}
fakeBlkIDSet.Add(fakeBlkID)
te.Chits(vdr.ID(), 0, fakeBlkIDSet)
@ -956,7 +967,7 @@ func TestEngineAbandonChit(t *testing.T) {
t.Fatalf("Should have blocked on request")
}
te.GetFailed(vdr.ID(), 0, fakeBlkID)
te.GetFailed(vdr.ID(), *reqID)
if len(te.blocked) != 0 {
t.Fatalf("Should have removed request")
@ -1105,14 +1116,18 @@ func TestEngineRetryFetch(t *testing.T) {
}
vm.CantGetBlock = false
sender.CantGet = false
reqID := new(uint32)
sender.GetF = func(_ ids.ShortID, requestID uint32, _ ids.ID) {
*reqID = requestID
}
te.PullQuery(vdr.ID(), 0, missingBlk.ID())
vm.CantGetBlock = true
sender.CantGet = true
sender.GetF = nil
te.GetFailed(vdr.ID(), 0, missingBlk.ID())
te.GetFailed(vdr.ID(), *reqID)
vm.CantGetBlock = false
@ -1124,7 +1139,7 @@ func TestEngineRetryFetch(t *testing.T) {
te.PullQuery(vdr.ID(), 0, missingBlk.ID())
vm.CantGetBlock = true
sender.CantGet = true
sender.GetF = nil
if !*called {
t.Fatalf("Should have requested the block again")
@ -1220,3 +1235,290 @@ func TestEngineGossip(t *testing.T) {
t.Fatalf("Should have gossiped the block")
}
}
func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) {
vdr, vdrs, sender, vm, te, gBlk := setup(t)
secondVdr := validators.GenerateRandomValidator(1)
vdrs.Add(secondVdr)
sender.Default(true)
missingBlk := &Blk{
parent: gBlk,
id: GenerateID(),
height: 1,
status: choices.Unknown,
bytes: []byte{1},
}
pendingBlk := &Blk{
parent: missingBlk,
id: GenerateID(),
height: 2,
status: choices.Processing,
bytes: []byte{2},
}
parsed := new(bool)
vm.ParseBlockF = func(b []byte) (snowman.Block, error) {
switch {
case bytes.Equal(b, pendingBlk.Bytes()):
*parsed = true
return pendingBlk, nil
}
return nil, errUnknownBlock
}
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
if !*parsed {
return nil, errUnknownBlock
}
switch {
case blkID.Equals(pendingBlk.ID()):
return pendingBlk, nil
}
return nil, errUnknownBlock
}
reqID := new(uint32)
sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) {
*reqID = requestID
if !reqVdr.Equals(vdr.ID()) {
t.Fatalf("Wrong validator requested")
}
if !blkID.Equals(missingBlk.ID()) {
t.Fatalf("Wrong block requested")
}
}
te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes())
te.Put(secondVdr.ID(), *reqID, missingBlk.ID(), []byte{3})
*parsed = false
vm.ParseBlockF = func(b []byte) (snowman.Block, error) {
switch {
case bytes.Equal(b, missingBlk.Bytes()):
*parsed = true
return missingBlk, nil
}
return nil, errUnknownBlock
}
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
if !*parsed {
return nil, errUnknownBlock
}
switch {
case blkID.Equals(missingBlk.ID()):
return missingBlk, nil
}
return nil, errUnknownBlock
}
sender.CantPushQuery = false
sender.CantChits = false
missingBlk.status = choices.Processing
te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes())
pref := te.Consensus.Preference()
if !pref.Equals(pendingBlk.ID()) {
t.Fatalf("Shouldn't have abandoned the pending block")
}
}
func TestEnginePushQueryRequestIDConflict(t *testing.T) {
vdr, _, sender, vm, te, gBlk := setup(t)
sender.Default(true)
missingBlk := &Blk{
parent: gBlk,
id: GenerateID(),
height: 1,
status: choices.Unknown,
bytes: []byte{1},
}
pendingBlk := &Blk{
parent: missingBlk,
id: GenerateID(),
height: 2,
status: choices.Processing,
bytes: []byte{2},
}
randomBlkID := GenerateID()
parsed := new(bool)
vm.ParseBlockF = func(b []byte) (snowman.Block, error) {
switch {
case bytes.Equal(b, pendingBlk.Bytes()):
*parsed = true
return pendingBlk, nil
}
return nil, errUnknownBlock
}
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
if !*parsed {
return nil, errUnknownBlock
}
switch {
case blkID.Equals(pendingBlk.ID()):
return pendingBlk, nil
}
return nil, errUnknownBlock
}
reqID := new(uint32)
sender.GetF = func(reqVdr ids.ShortID, requestID uint32, blkID ids.ID) {
*reqID = requestID
if !reqVdr.Equals(vdr.ID()) {
t.Fatalf("Wrong validator requested")
}
if !blkID.Equals(missingBlk.ID()) {
t.Fatalf("Wrong block requested")
}
}
te.PushQuery(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes())
sender.GetF = nil
sender.CantGet = false
te.PushQuery(vdr.ID(), *reqID, randomBlkID, []byte{3})
*parsed = false
vm.ParseBlockF = func(b []byte) (snowman.Block, error) {
switch {
case bytes.Equal(b, missingBlk.Bytes()):
*parsed = true
return missingBlk, nil
}
return nil, errUnknownBlock
}
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
if !*parsed {
return nil, errUnknownBlock
}
switch {
case blkID.Equals(missingBlk.ID()):
return missingBlk, nil
}
return nil, errUnknownBlock
}
sender.CantPushQuery = false
sender.CantChits = false
te.Put(vdr.ID(), *reqID, missingBlk.ID(), missingBlk.Bytes())
pref := te.Consensus.Preference()
if !pref.Equals(pendingBlk.ID()) {
t.Fatalf("Shouldn't have abandoned the pending block")
}
}
func TestEngineAggressivePolling(t *testing.T) {
config := DefaultConfig()
config.Params.ConcurrentRepolls = 2
vdr := validators.GenerateRandomValidator(1)
vals := validators.NewSet()
config.Validators = vals
vals.Add(vdr)
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
sender.Default(true)
vm := &VMTest{}
vm.T = t
config.VM = vm
vm.Default(true)
vm.CantSetPreference = false
gBlk := &Blk{
id: GenerateID(),
status: choices.Accepted,
}
vm.LastAcceptedF = func() ids.ID { return gBlk.ID() }
sender.CantGetAcceptedFrontier = false
te := &Transitive{}
te.Initialize(config)
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
if !blkID.Equals(gBlk.ID()) {
t.Fatalf("Wrong block requested")
}
return gBlk, nil
}
te.finishBootstrapping()
vm.GetBlockF = nil
vm.LastAcceptedF = nil
sender.CantGetAcceptedFrontier = true
sender.Default(true)
pendingBlk := &Blk{
parent: gBlk,
id: GenerateID(),
height: 2,
status: choices.Processing,
bytes: []byte{1},
}
parsed := new(bool)
vm.ParseBlockF = func(b []byte) (snowman.Block, error) {
switch {
case bytes.Equal(b, pendingBlk.Bytes()):
*parsed = true
return pendingBlk, nil
}
return nil, errUnknownBlock
}
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
if !*parsed {
return nil, errUnknownBlock
}
switch {
case blkID.Equals(pendingBlk.ID()):
return pendingBlk, nil
}
return nil, errUnknownBlock
}
numPushed := new(int)
sender.PushQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID, _ []byte) { *numPushed++ }
numPulled := new(int)
sender.PullQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID) { *numPulled++ }
te.Put(vdr.ID(), 0, pendingBlk.ID(), pendingBlk.Bytes())
if *numPushed != 1 {
t.Fatalf("Should have initially sent a push query")
}
if *numPulled != 1 {
t.Fatalf("Should have sent an additional pull query")
}
}

View File

@ -56,10 +56,7 @@ func (v *voter) Update() {
}
v.t.Config.Context.Log.Verbo("Snowman engine can't quiesce")
-if len(v.t.polls.m) < v.t.Config.Params.ConcurrentRepolls {
-v.t.repoll()
-}
+v.t.repoll()
}
func (v *voter) bubbleVotes(votes ids.Bag) ids.Bag {

View File

@ -78,7 +78,7 @@ func (h *Handler) dispatchMsg(msg message) bool {
case getMsg:
h.engine.Get(msg.validatorID, msg.requestID, msg.containerID)
case getFailedMsg:
-h.engine.GetFailed(msg.validatorID, msg.requestID, msg.containerID)
+h.engine.GetFailed(msg.validatorID, msg.requestID)
case putMsg:
h.engine.Put(msg.validatorID, msg.requestID, msg.containerID, msg.container)
case pushQueryMsg:
@ -185,12 +185,11 @@ func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids
}
// GetFailed passes a GetFailed message to the consensus engine.
-func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32, containerID ids.ID) {
+func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) {
h.msgs <- message{
messageType: getFailedMsg,
validatorID: validatorID,
requestID: requestID,
-containerID: containerID,
}
}

View File

@ -42,6 +42,6 @@ type ExternalRouter interface {
type InternalRouter interface {
GetAcceptedFrontierFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
-GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID)
+GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
QueryFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32)
}

View File

@ -50,7 +50,7 @@ func (sr *ChainRouter) AddChain(chain *handler.Handler) {
defer sr.lock.Unlock()
chainID := chain.Context().ChainID
sr.log.Debug("Adding %s to the routing table", chainID)
sr.log.Debug("registering chain %s with chain router", chainID)
sr.chains[chainID.Key()] = chain
}
@ -64,7 +64,7 @@ func (sr *ChainRouter) RemoveChain(chainID ids.ID) {
chain.Shutdown()
delete(sr.chains, chainID.Key())
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -78,7 +78,7 @@ func (sr *ChainRouter) GetAcceptedFrontier(validatorID ids.ShortID, chainID ids.
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFrontier(validatorID, requestID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -93,7 +93,7 @@ func (sr *ChainRouter) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID,
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.AcceptedFrontier(validatorID, requestID, containerIDs)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -108,7 +108,7 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFrontierFailed(validatorID, requestID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -122,7 +122,7 @@ func (sr *ChainRouter) GetAccepted(validatorID ids.ShortID, chainID ids.ID, requ
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAccepted(validatorID, requestID, containerIDs)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -137,7 +137,7 @@ func (sr *ChainRouter) Accepted(validatorID ids.ShortID, chainID ids.ID, request
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Accepted(validatorID, requestID, containerIDs)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -152,7 +152,7 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.GetAcceptedFailed(validatorID, requestID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -165,7 +165,7 @@ func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID ui
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Get(validatorID, requestID, containerID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -181,21 +181,21 @@ func (sr *ChainRouter) Put(validatorID ids.ShortID, chainID ids.ID, requestID ui
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Put(validatorID, requestID, containerID, container)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
// GetFailed routes an incoming GetFailed message from the validator with ID [validatorID]
// to the consensus engine working on the chain with ID [chainID]
-func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID) {
+func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, requestID uint32) {
sr.lock.RLock()
defer sr.lock.RUnlock()
sr.timeouts.Cancel(validatorID, chainID, requestID)
if chain, exists := sr.chains[chainID.Key()]; exists {
-chain.GetFailed(validatorID, requestID, containerID)
+chain.GetFailed(validatorID, requestID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -208,7 +208,7 @@ func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, reques
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.PushQuery(validatorID, requestID, containerID, container)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -221,7 +221,7 @@ func (sr *ChainRouter) PullQuery(validatorID ids.ShortID, chainID ids.ID, reques
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.PullQuery(validatorID, requestID, containerID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -236,7 +236,7 @@ func (sr *ChainRouter) Chits(validatorID ids.ShortID, chainID ids.ID, requestID
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.Chits(validatorID, requestID, votes)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}
@ -250,7 +250,7 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ
if chain, exists := sr.chains[chainID.Key()]; exists {
chain.QueryFailed(validatorID, requestID)
} else {
sr.log.Warn("Message referenced a chain, %s, this validator is not validating", chainID)
sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID)
}
}

View File

@ -88,7 +88,7 @@ func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.
// Add a timeout -- if we don't get a response before the timeout expires,
// send this consensus engine a GetFailed message
s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() {
-s.router.GetFailed(validatorID, s.ctx.ChainID, requestID, containerID)
+s.router.GetFailed(validatorID, s.ctx.ChainID, requestID)
})
s.sender.Get(validatorID, s.ctx.ChainID, requestID, containerID)
}

staking/gen_staker_key.go Normal file
View File

@ -0,0 +1,74 @@
package staking
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"math/big"
"os"
"path/filepath"
"time"
)
// GenerateStakingKeyCert generates a self-signed TLS key/cert pair to use in staking
// The key and cert will be placed at [keyPath] and [certPath], respectively
// If there is already a file at [keyPath], returns nil
func GenerateStakingKeyCert(keyPath, certPath string) error {
// If there is already a file at [keyPath], do nothing
if _, err := os.Stat(keyPath); !os.IsNotExist(err) {
return nil
}
// Create key to sign cert with
key, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return fmt.Errorf("couldn't generate rsa key: %w", err)
}
// Create self-signed staking cert
certTemplate := &x509.Certificate{
SerialNumber: big.NewInt(0),
NotBefore: time.Date(2000, time.January, 0, 0, 0, 0, 0, time.UTC),
NotAfter: time.Now().AddDate(100, 0, 0),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment,
BasicConstraintsValid: true,
}
certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &key.PublicKey, key)
if err != nil {
return fmt.Errorf("couldn't create certificate: %w", err)
}
// Write cert to disk
if err := os.MkdirAll(filepath.Dir(certPath), 0755); err != nil {
return fmt.Errorf("couldn't create path for key/cert: %w", err)
}
certOut, err := os.Create(certPath)
if err != nil {
return fmt.Errorf("couldn't create cert file: %w", err)
}
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: certBytes}); err != nil {
return fmt.Errorf("couldn't write cert file: %w", err)
}
if err := certOut.Close(); err != nil {
return fmt.Errorf("couldn't close cert file: %w", err)
}
// Write key to disk
keyOut, err := os.Create(keyPath)
if err != nil {
return fmt.Errorf("couldn't create key file: %w", err)
}
privBytes, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return fmt.Errorf("couldn't marshal private key: %w", err)
}
if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil {
return fmt.Errorf("couldn't write private key: %w", err)
}
if err := keyOut.Close(); err != nil {
return fmt.Errorf("couldn't close key file: %w", err)
}
return nil
}
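
A usage sketch for the new helper (illustrative only, not part of this diff); the paths below are hypothetical placeholders, and a real node would take them from its configuration:

package main

import (
	"log"

	"github.com/ava-labs/gecko/staking"
)

func main() {
	// Hypothetical locations for the staking key and cert.
	keyPath := "/tmp/staker.key"
	certPath := "/tmp/staker.crt"

	// Creates a 4096-bit RSA key and a self-signed cert, unless a file
	// already exists at keyPath (in which case this is a no-op).
	if err := staking.GenerateStakingKeyCert(keyPath, certPath); err != nil {
		log.Fatalf("couldn't generate staking key/cert: %s", err)
	}
}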

utils/signal.go Normal file
View File

@ -0,0 +1,46 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package utils
import (
"os"
"os/signal"
)
// HandleSignals calls f with the received signal whenever the go runtime
// receives any of the provided signals.
//
// If f is nil or there are no provided signals, then nil will be returned.
// Otherwise, a signal channel will be returned that can be used to clear the
// signals registered by this function by calling ClearSignals.
func HandleSignals(f func(os.Signal), sigs ...os.Signal) chan<- os.Signal {
if f == nil || len(sigs) == 0 {
return nil
}
// register signals
c := make(chan os.Signal, 1)
for _, sig := range sigs {
signal.Notify(c, sig)
}
go func() {
for sig := range c {
f(sig)
}
}()
return c
}
// ClearSignals clears any signals that have been registered on the provided
// channel and closes the channel.
func ClearSignals(c chan<- os.Signal) {
if c == nil {
return
}
signal.Stop(c)
close(c)
}
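
A minimal usage sketch for the two helpers above, assuming the package is imported as github.com/ava-labs/gecko/utils; the chosen signals and the shutdown callback are illustrative only:

package main

import (
	"os"
	"syscall"

	"github.com/ava-labs/gecko/utils"
)

func main() {
	// Run the callback whenever SIGINT or SIGTERM is delivered.
	c := utils.HandleSignals(func(sig os.Signal) {
		// A real node would begin a graceful shutdown here.
	}, syscall.SIGINT, syscall.SIGTERM)

	// ... do work ...

	// Stop listening for the registered signals and close the channel.
	utils.ClearSignals(c)
}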

View File

@ -96,6 +96,36 @@ func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, repl
return nil
}
// GetTxArgs are arguments for passing into GetTx requests
type GetTxArgs struct {
TxID ids.ID `json:"txID"`
}
// GetTxReply defines the GetTx replies returned from the API
type GetTxReply struct {
Tx formatting.CB58 `json:"tx"`
}
// GetTx returns the specified transaction
func (service *Service) GetTx(r *http.Request, args *GetTxArgs, reply *GetTxReply) error {
service.vm.ctx.Log.Verbo("GetTx called with %s", args.TxID)
if args.TxID.IsZero() {
return errNilTxID
}
tx := UniqueTx{
vm: service.vm,
txID: args.TxID,
}
if status := tx.Status(); !status.Fetched() {
return errUnknownTx
}
reply.Tx.Bytes = tx.Bytes()
return nil
}
// GetUTXOsArgs are arguments for passing into GetUTXOs requests
type GetUTXOsArgs struct {
Addresses []string `json:"addresses"`
@ -188,7 +218,8 @@ type GetBalanceArgs struct {
// GetBalanceReply defines the GetBalance replies returned from the API
type GetBalanceReply struct {
-Balance json.Uint64 `json:"balance"`
+Balance json.Uint64 `json:"balance"`
+UTXOIDs []ava.UTXOID `json:"utxoIDs"`
}
// GetBalance returns the amount of an asset that an address at least partially owns
@ -217,18 +248,21 @@ func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply
}
for _, utxo := range utxos {
-if utxo.AssetID().Equals(assetID) {
-transferable, ok := utxo.Out.(ava.Transferable)
-if !ok {
-continue
-}
-amt, err := safemath.Add64(transferable.Amount(), uint64(reply.Balance))
-if err != nil {
-return err
-}
-reply.Balance = json.Uint64(amt)
-}
+if !utxo.AssetID().Equals(assetID) {
+continue
+}
+transferable, ok := utxo.Out.(ava.Transferable)
+if !ok {
+continue
+}
+amt, err := safemath.Add64(transferable.Amount(), uint64(reply.Balance))
+if err != nil {
+return err
+}
+reply.Balance = json.Uint64(amt)
+reply.UTXOIDs = append(reply.UTXOIDs, utxo.UTXOID)
}
return nil
}
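
For illustration only, a hedged sketch of calling the new GetTx endpoint above over JSON-RPC; the URL, the avm.getTx method name, and the placeholder txID are assumptions rather than values taken from this diff:

package main

import (
	"bytes"
	"fmt"
	"io/ioutil"
	"net/http"
)

func main() {
	// Placeholder txID; substitute a real transaction ID.
	body := []byte(`{
		"jsonrpc": "2.0",
		"id": 1,
		"method": "avm.getTx",
		"params": {"txID": "PLACEHOLDER_TX_ID"}
	}`)

	// Assumes the default local API endpoint for the X-Chain.
	resp, err := http.Post("http://127.0.0.1:9650/ext/bc/X", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()

	reply, _ := ioutil.ReadAll(resp.Body)
	fmt.Println(string(reply)) // the "tx" field holds the CB58-encoded transaction bytes
}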

View File

@ -7,9 +7,10 @@ import (
"fmt"
"testing"
"github.com/ava-labs/gecko/snow/choices"
"github.com/stretchr/testify/assert"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/utils/formatting"
)
@ -87,6 +88,70 @@ func TestServiceGetTxStatus(t *testing.T) {
}
}
func TestServiceGetBalance(t *testing.T) {
genesisBytes, vm, s := setup(t)
defer ctx.Lock.Unlock()
defer vm.Shutdown()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
assetID := genesisTx.ID()
addr := keys[0].PublicKey().Address()
balanceArgs := &GetBalanceArgs{
Address: fmt.Sprintf("%s-%s", vm.ctx.ChainID, addr),
AssetID: assetID.String(),
}
balanceReply := &GetBalanceReply{}
err := s.GetBalance(nil, balanceArgs, balanceReply)
assert.NoError(t, err)
assert.Equal(t, uint64(balanceReply.Balance), uint64(300000))
assert.Len(t, balanceReply.UTXOIDs, 4, "should have only returned four utxoIDs")
}
func TestServiceGetTx(t *testing.T) {
genesisBytes, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
genesisTxBytes := genesisTx.Bytes()
txID := genesisTx.ID()
reply := GetTxReply{}
err := s.GetTx(nil, &GetTxArgs{
TxID: txID,
}, &reply)
assert.NoError(t, err)
assert.Equal(t, genesisTxBytes, reply.Tx.Bytes, "Wrong tx returned from service.GetTx")
}
func TestServiceGetNilTx(t *testing.T) {
_, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
reply := GetTxReply{}
err := s.GetTx(nil, &GetTxArgs{}, &reply)
assert.Error(t, err, "Nil TxID should have returned an error")
}
func TestServiceGetUnknownTx(t *testing.T) {
_, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
reply := GetTxReply{}
err := s.GetTx(nil, &GetTxArgs{TxID: ids.Empty}, &reply)
assert.Error(t, err, "Unknown TxID should have returned an error")
}
func TestServiceGetUTXOsInvalidAddress(t *testing.T) {
_, vm, s := setup(t)
defer func() {

View File

@ -24,7 +24,7 @@ type UTXOID struct {
OutputIndex uint32 `serialize:"true" json:"outputIndex"`
// Symbol is false if the UTXO should be part of the DB
-Symbol bool
+Symbol bool `json:"-"`
// id is the unique ID of a UTXO, it is calculated from TxID and OutputIndex
id ids.ID
}

View File

@ -164,7 +164,6 @@ func (cb *CommonBlock) parentBlock() Block {
parentID := cb.ParentID()
parent, err := cb.vm.getBlock(parentID)
if err != nil {
-cb.vm.Ctx.Log.Debug("could not get parent (ID %s) of block %s", parentID, cb.ID())
return nil
}
return parent.(Block)

View File

@ -1632,6 +1632,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
externalSender.GetF = nil
externalSender.CantPushQuery = false
externalSender.CantPullQuery = false
engine.Put(ctx.NodeID, *reqID, advanceTimeBlkID, advanceTimeBlkBytes)