Merge branch 'master' into delete_keystore_user

Commit 939226f29f, authored by Stephen Buttolph on 2020-05-12 11:42:30 -04:00, committed by GitHub.
71 changed files with 1171 additions and 380 deletions


@@ -40,6 +40,7 @@ import (
const (
defaultChannelSize = 1000
requestTimeout = 2 * time.Second
+gossipFrequency = 10 * time.Second
)
// Manager manages the chains running on this node.
@@ -146,7 +147,7 @@ func New(
timeoutManager.Initialize(requestTimeout)
go log.RecoverAndPanic(timeoutManager.Dispatch)
-router.Initialize(log, &timeoutManager)
+router.Initialize(log, &timeoutManager, gossipFrequency)
m := &manager{
stakingEnabled: stakingEnabled,


@ -1,5 +0,0 @@
#!/bin/sh
set -ex
openssl genrsa -out `dirname "$0"`/rootCA.key 4096
openssl req -x509 -new -nodes -key `dirname "$0"`/rootCA.key -sha256 -days 365250 -out `dirname "$0"`/rootCA.crt


@ -1,13 +0,0 @@
#!/bin/sh
set -ex
keypath=$GOPATH/src/github.com/ava-labs/gecko/keys
if test -f "$keypath/staker.key" || test -f "$keypath/staker.crt"; then
echo "staker.key or staker.crt already exists. Not generating new key/certificiate."
exit 1
fi
openssl genrsa -out `dirname "$0"`/staker.key 4096
openssl req -new -sha256 -key `dirname "$0"`/staker.key -subj "/C=US/ST=NY/O=Avalabs/CN=ava" -out `dirname "$0"`/staker.csr
openssl x509 -req -in `dirname "$0"`/staker.csr -CA `dirname "$0"`/rootCA.crt -CAkey `dirname "$0"`/rootCA.key -CAcreateserial -out `dirname "$0"`/staker.crt -days 365250 -sha256


@ -1,34 +0,0 @@
-----BEGIN CERTIFICATE-----
MIIF1jCCA76gAwIBAgIJALI1DF9cpwfEMA0GCSqGSIb3DQEBCwUAMH8xCzAJBgNV
BAYTAlVTMQswCQYDVQQIDAJOWTEPMA0GA1UEBwwGSXRoYWNhMRAwDgYDVQQKDAdB
dmFsYWJzMQ4wDAYDVQQLDAVHZWNrbzEMMAoGA1UEAwwDYXZhMSIwIAYJKoZIhvcN
AQkBFhNzdGVwaGVuQGF2YWxhYnMub3JnMCAXDTE5MDIyODIwNTkyNFoYDzMwMTkw
MzA4MjA1OTI0WjB/MQswCQYDVQQGEwJVUzELMAkGA1UECAwCTlkxDzANBgNVBAcM
Bkl0aGFjYTEQMA4GA1UECgwHQXZhbGFiczEOMAwGA1UECwwFR2Vja28xDDAKBgNV
BAMMA2F2YTEiMCAGCSqGSIb3DQEJARYTc3RlcGhlbkBhdmFsYWJzLm9yZzCCAiIw
DQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAJ45ScWV8tsCNO+NTIBuUYsPkhcg
jrp0HEyCHY3XEkxsLuDqtesNyv39YA0xQ3M3FP1e29tjFeHWJzyzV8O1H+6yco93
QAtzh9xELYD301Yq+x55yZrSjZxNIC5Tmz1ewTfD315lNR04M6JmqjrStIuLsWFU
m6P4OgXs4daqnyq9au4PYSrejcbexW59rKxLryK6Acv+E9Ax04oS33g9KqPmlRx0
lfu3x4nkIKIl+VaK1wC5CwJDYZ91KpEbC8Z2YvTeVDH+/hz/MvKl1CEaqK/4G5FB
KGEyd/bGRxMVQF41G7liJLaXzPLyZnKO2n21ZuJhkA9MZelt1U0LuQU505qU7IzW
cmKFEIb1MOrclaF19Is7HQlJWKyDo2/hfjSCZO8zH7eR9EGzKyQwZhwkYCycJD44
RKEHq6s/Z2dHUlpLIgRJ7k171TNkL9+xLntu8v1lzTkhemSNeO9asqJ7VcvpnMHH
bQXpDxJpi8jTnV8In8EolSqaKeN6/nzwbbSJ7gHehgpDhC1DlXPRzTt/ktQKlNGW
T5bdNdvYFyYTd9fu78aJZSbJo8jS2fykWuBgOgnlV8VmwpDa7iHM3EECByhf5GKB
J1jBlXO1ZyfJ7sNTbuVM7Uc2JkB4ASKdm3GZ3sFv95HjSTJAUORjE4pQ1es4kfDU
KqzDHH+bEHaGIGJTAgMBAAGjUzBRMB0GA1UdDgQWBBQr2T0duSMkvGXe3bSdWcei
73QtwzAfBgNVHSMEGDAWgBQr2T0duSMkvGXe3bSdWcei73QtwzAPBgNVHRMBAf8E
BTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQBpP18zCdzvnSdPigg9wx+a8Znr4aJj
FxZYwBY6/BmKb56ke9g+zKKCw2dYYkRYDcTOEfuBgBvNeCSJv4R5rmkukkL8RCIG
XV/WfSn2d3Mnz5KTgGQS6Q9s5qx+8ydkiGZthi+8a8ltXczyYrvWgd47U0NWTcOY
omjgF6XF+hVLWLgiwmA468pd7wyCsuJJkyxxeyDPXQ422I1AJW/7c5JQQa+lDNsv
Vna6420mZ/DiQd3stFkdjhRjmBZvGQ09g6l3zo6TgI1TWr5TMYPrempBVCWPNilC
XaMysU77+tPutI+7kMBuGvLuZtPrH/2uTYdXWPodyORm5i2ABF6In3VISPD9YNc0
gWx3PYGi2BfdnZepCojsffUMlhT3SsiAKMYv5FhW8LQBNMRR4721U1Vf5f8fzNQn
3E55TthV5HXZQ6HcLhkmOvH8CMqtWGETTbBtYSA2AVMjoqs7QDGkfsCH9UuwGd1N
W12JOf53XyOQT2XwWerSQC2kv7elsTh6Bk7PhvrCi0OwCVSGny5IQY/aXM1n6Z6s
scJlZmq6P3AJZ3tRtBt9yDK7iIW7mzNLTb/kAjsNQh06oETJIJ0CIgL0Bn6CANYU
kNqB4oTxmAhdOPKNgqaIwdZAL1VDIVaQEcvGeZRduo7yZvA/MhuQD8IIKSeOBFaD
DB8IRfWqBx2nWw==
-----END CERTIFICATE-----


@ -1,51 +0,0 @@
-----BEGIN RSA PRIVATE KEY-----
MIIJJwIBAAKCAgEAnjlJxZXy2wI0741MgG5Riw+SFyCOunQcTIIdjdcSTGwu4Oq1
6w3K/f1gDTFDczcU/V7b22MV4dYnPLNXw7Uf7rJyj3dAC3OH3EQtgPfTVir7HnnJ
mtKNnE0gLlObPV7BN8PfXmU1HTgzomaqOtK0i4uxYVSbo/g6Bezh1qqfKr1q7g9h
Kt6Nxt7Fbn2srEuvIroBy/4T0DHTihLfeD0qo+aVHHSV+7fHieQgoiX5VorXALkL
AkNhn3UqkRsLxnZi9N5UMf7+HP8y8qXUIRqor/gbkUEoYTJ39sZHExVAXjUbuWIk
tpfM8vJmco7afbVm4mGQD0xl6W3VTQu5BTnTmpTsjNZyYoUQhvUw6tyVoXX0izsd
CUlYrIOjb+F+NIJk7zMft5H0QbMrJDBmHCRgLJwkPjhEoQerqz9nZ0dSWksiBEnu
TXvVM2Qv37Eue27y/WXNOSF6ZI1471qyontVy+mcwcdtBekPEmmLyNOdXwifwSiV
Kpop43r+fPBttInuAd6GCkOELUOVc9HNO3+S1AqU0ZZPlt0129gXJhN31+7vxoll
JsmjyNLZ/KRa4GA6CeVXxWbCkNruIczcQQIHKF/kYoEnWMGVc7VnJ8nuw1Nu5Uzt
RzYmQHgBIp2bcZnewW/3keNJMkBQ5GMTilDV6ziR8NQqrMMcf5sQdoYgYlMCAwEA
AQKCAgAhNota05AoEv2Dr5h4eS/azgjvm+D6GLd8A/AqPxRTQH5SrlJDpiCPUmmg
O1AaVlyslwX1toX4YxjXcBojNdkfJQxRO0oRXU4Oma0nnl4Zf2o5Sn1cZ4hcYAA6
WUiECGjsyMwRp5MPsCV+mKhxMpu9kzRH5xfIwqmDZuc9RZGlyh8xG79c3VzLeyXc
fLsLa9O2qW8JICuOj3cFS9LnDYfu4c85Kuv06+4R7vY+s1P0q65YM3+xGO3cKB8o
WJIPNfityCHKYOl8ssFCGDdAP7VbQuyegBv20z5FafevdM2POPy53HUycwkNkn6Y
243Xx4VyTeKMo4/dATY+NxC+nRXiz4jLna5a7IIIzjAHl2kF6iJVasd3+X/xWHsM
Lx9iDRjERf+J+y58GaDxetXL1C0xm7Rv28yMYVPAzpucvS4i72Xj7X8JkO3az3Qv
/wqBnxj8ouh+5jvT0nqCJsFZwK0F7Dr3na2lSf34XBCTnd9//FfSIY7mDIddxuVF
2rKKOl2KkvbDUuSKVZwdJeAp1CccN6SfLnxKy+436Z5hYzBIeGyejpCMWivDJ2I3
wjs4w4IPobT5dqaSdPYFTKJnoDv62vYbIN3o8pQ3QUXwmRPyKoPuxe7OZZyec43R
WUtajiW6AXjxUoEtPPPHAT/3pGKG2a0VGtDfjLjpp5OtQmteiQKCAQEAz62n9Lh1
POdC4088GEqrGPhq5MUz2j2pOCjEZ7utmD+Lo8x95McCU+jf4UJ+rbaf96dvjPRC
T0Sc6X6IvvQycJubzQe6XX6eyZsr67qpuY2MGze+NvmO4LcCOfNHerRyLK2DoGLW
jQVxJNsBIFo0T7iSuUICbfxKdKxfH+27rPANEvpqS5BJalAfeGPEL0GgUTKQmswc
23Pnu5mkb7TWLKNVq7o/5PxvXyKmJQaFHCV98pqQr/HhXd79dMD12TPZRvsNgPGK
XOsmPtC5RHhbs/Wmdk3X3ihoVezou2VPeWCIrCANCuU0zZBK1igVC3FGeUK8u1Dl
jrTkRsNTLbBiPwKCAQEAwwngBBjbdRCVvUVWIBQBOk/t/6WyeAVH4O/fq32KklW+
/SN5yeZhXjwMrFhSOqFUDipg/C4Imf5S3V+MlXO4lQsZzZa0d0euBIBt0SEcGE8P
rAkGcvwPfISBfYCnPem1ax01ixNJBxWDrgkfHZchywNPFgopiqpYR7X5ttACctCl
KLaDOXn667QmG1icsVgZV3L8gBxEdyjhmUGWFH/auS28oxqhUgiXrUQXbJKCesGD
E39r/SyOAGP5ZtTkWmNDp2+va8lSJwL1Ix+6qvexi/hIIGoFlSh5w+BwnBlxBL4C
cUanaXRtIqQ9rcO/xhZ7izmQzruNARLDPGIJ59MS7QKCAQBGR3wJAssZ2yD1j4DE
r7AK+TYjSODtP+SeDp24hPiQByEYQ0FvRDFzd+Ebd8cqvhyQUGcdiiNOc+et1JYu
GLFhDifBUJYuwYS2sP5B/Z8mHdKF+20xaW6CeSwVtFBCJAJnQCjFA+2bN3Y8hKhy
7FO7jriIXOA5nCEOLq7aPTc/pNSn0XpbK+7MPWUI9qoTW+AG2le5Ks2xLh4DjFDr
RIUeAgAh5xtsQEjoJu+WpAgzqDRg/xFrmS0s+SNIeWw5HqSuspK1SggKvcDpjPTF
SP2vfrfgXSNqGL6GJW/0yqoEZziZFxeS0lH2JphMtK+6eZDhxEXeFdg5XNnLYJor
Yf89AoIBAHbRLESys/c0HFTKybYPGdRhXzcvxXKynOBeoZ9Cgsm1LP3fv9EM5WJY
KMxRnf6Ty7Y5gQ4AKUNPGUI9dFKTxe4ebiC938EOzOd3Ke+OQSRZ/c0rTl98SR7t
Rkmjt77TAq93gufv3rxPEgJTEj6flHmt0V8236nXLqK5LKB/Rg6WJxePYJACTKeM
/u4H5KVxazbIGSUek2MYZ59KwlhIr4HCaDng/kgQbf6jDbYZ5x1LiEO3i50XqIZ6
YTSRG3ApKsz1ECQU6FRVy+sS6FBBR0ti/OWqUS5WEyAOOewO37go3SoPBewLfnTt
I5oZN1pA1hCyCBK5VSRDPucpPqmY/90CggEAbFRUDyEkq9p7/Va/PYJLMe+1zGoy
+jCC1nm5LioxrUdpE+CV1t1cVutnlI3sRD+79oX/zwlwQ+pCx1XOMCmGs4uZUx5f
UtpGnsPamlyQKyQfPam3N4+4gaY9LLPiYCrI/XQh+vZQNlQTStuKLtb0R8+4wEER
KDTtC2cNN5fSnexEifpvq5yK3x6bH66pPyuRE27vVQ7diPar9A+VwkLs+zGbfnWW
MP/zYUbuiatC/LozcYLs/01m3Nu6oYi0OP/nFofepXNpQoZO8jKpnGRVVJ0EfgSe
f3qb9nkygj+gqGWT+PY6H39xKFz0h7dmmcP3Z7CrYXFEFfTCsUgbOKulAA==
-----END RSA PRIVATE KEY-----


@ -1 +0,0 @@
BAF3B5C5C6D0D166


@@ -10,6 +10,7 @@ import (
"net"
"os"
"path"
+"path/filepath"
"strings"
"github.com/ava-labs/gecko/database/leveldb"
@@ -19,23 +20,29 @@ import (
"github.com/ava-labs/gecko/nat"
"github.com/ava-labs/gecko/node"
"github.com/ava-labs/gecko/snow/networking/router"
+"github.com/ava-labs/gecko/staking"
"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
-"github.com/mitchellh/go-homedir"
)
const (
dbVersion = "v0.2.0"
-defaultDbDir = "~/.gecko/db"
)
// Results of parsing the CLI
var (
Config = node.Config{}
Err error
+defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db"))
+defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
+defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))
+)
+var (
+errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
)
// GetIPs returns the default IPs for each network
@@ -54,17 +61,15 @@ func GetIPs(networkID uint32) []string {
}
}
-var (
-errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs")
-)
// Parse the CLI arguments
func init() {
errs := &wrappers.Errs{}
defer func() { Err = errs.Err }()
loggingConfig, err := logging.DefaultConfig()
-errs.Add(err)
+if errs.Add(err); errs.Errored() {
+return
+}
fs := flag.NewFlagSet("gecko", flag.ContinueOnError)
@@ -100,8 +105,8 @@ func init() {
// Staking:
consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server")
fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections")
-fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", "keys/staker.key", "TLS private key file for staking connections")
-fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", "keys/staker.crt", "TLS certificate file for staking connections")
+fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking")
+fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking")
// Plugins:
fs.StringVar(&Config.PluginDir, "plugin-dir", "./build/plugins", "Plugin directory for Ava VMs")
@@ -142,22 +147,22 @@ func init() {
}
networkID, err := genesis.NetworkID(*networkName)
-errs.Add(err)
+if errs.Add(err); err != nil {
+return
+}
Config.NetworkID = networkID
// DB:
-if *db && err == nil {
-// TODO: Add better params here
-if *dbDir == defaultDbDir {
-if *dbDir, err = homedir.Expand(defaultDbDir); err != nil {
-errs.Add(fmt.Errorf("couldn't resolve default db path: %v", err))
-}
-}
+if *db {
+*dbDir = os.ExpandEnv(*dbDir) // parse any env variables
dbPath := path.Join(*dbDir, genesis.NetworkName(Config.NetworkID), dbVersion)
db, err := leveldb.New(dbPath, 0, 0, 0)
+if err != nil {
+errs.Add(fmt.Errorf("couldn't create db at %s: %w", dbPath, err))
+return
+}
Config.DB = db
-errs.Add(err)
} else {
Config.DB = memdb.New()
}
@@ -169,7 +174,7 @@ func init() {
if *consensusIP == "" {
ip, err = Config.Nat.IP()
if err != nil {
-ip = net.IPv4zero
+ip = net.IPv4zero // Couldn't get my IP...set to 0.0.0.0
}
} else {
ip = net.ParseIP(*consensusIP)
@@ -177,7 +182,9 @@ func init() {
if ip == nil {
errs.Add(fmt.Errorf("Invalid IP Address %s", *consensusIP))
+return
}
Config.StakingIP = utils.IPDesc{
IP: ip,
Port: uint16(*consensusPort),
@@ -190,7 +197,10 @@ func init() {
for _, ip := range strings.Split(*bootstrapIPs, ",") {
if ip != "" {
addr, err := utils.ToIPDesc(ip)
-errs.Add(err)
+if err != nil {
+errs.Add(fmt.Errorf("couldn't parse ip: %w", err))
+return
+}
Config.BootstrapPeers = append(Config.BootstrapPeers, &node.Peer{
IP: addr,
})
@@ -209,20 +219,27 @@ func init() {
cb58 := formatting.CB58{}
for _, id := range strings.Split(*bootstrapIDs, ",") {
if id != "" {
-errs.Add(cb58.FromString(id))
-cert, err := ids.ToShortID(cb58.Bytes)
-errs.Add(err)
+err = cb58.FromString(id)
+if err != nil {
+errs.Add(fmt.Errorf("couldn't parse bootstrap peer id to bytes: %w", err))
+return
+}
+peerID, err := ids.ToShortID(cb58.Bytes)
+if err != nil {
+errs.Add(fmt.Errorf("couldn't parse bootstrap peer id: %w", err))
+return
+}
if len(Config.BootstrapPeers) <= i {
errs.Add(errBootstrapMismatch)
-continue
+return
}
-Config.BootstrapPeers[i].ID = cert
+Config.BootstrapPeers[i].ID = peerID
i++
}
}
if len(Config.BootstrapPeers) != i {
errs.Add(fmt.Errorf("More bootstrap IPs, %d, provided than bootstrap IDs, %d", len(Config.BootstrapPeers), i))
+return
}
} else {
for _, peer := range Config.BootstrapPeers {
@@ -230,6 +247,27 @@ func init() {
}
}
// Staking
Config.StakingCertFile = os.ExpandEnv(Config.StakingCertFile) // parse any env variable
Config.StakingKeyFile = os.ExpandEnv(Config.StakingKeyFile)
switch {
// If staking key/cert locations are specified but not found, error
case Config.StakingKeyFile != defaultStakingKeyPath || Config.StakingCertFile != defaultStakingCertPath:
if _, err := os.Stat(Config.StakingKeyFile); os.IsNotExist(err) {
errs.Add(fmt.Errorf("couldn't find staking key at %s", Config.StakingKeyFile))
return
} else if _, err := os.Stat(Config.StakingCertFile); os.IsNotExist(err) {
errs.Add(fmt.Errorf("couldn't find staking certificate at %s", Config.StakingCertFile))
return
}
default:
// Only creates staking key/cert if [stakingKeyPath] doesn't exist
if err := staking.GenerateStakingKeyCert(Config.StakingKeyFile, Config.StakingCertFile); err != nil {
errs.Add(fmt.Errorf("couldn't generate staking key/cert: %w", err))
return
}
}
// HTTP:
Config.HTTPPort = uint16(*httpPort)
@@ -238,14 +276,18 @@ func init() {
loggingConfig.Directory = *logsDir
}
logFileLevel, err := logging.ToLevel(*logLevel)
-errs.Add(err)
+if errs.Add(err); err != nil {
+return
+}
loggingConfig.LogLevel = logFileLevel
if *logDisplayLevel == "" {
*logDisplayLevel = *logLevel
}
displayLevel, err := logging.ToLevel(*logDisplayLevel)
-errs.Add(err)
+if errs.Add(err); err != nil {
+return
+}
loggingConfig.DisplayLevel = displayLevel
Config.LoggingConfig = loggingConfig


@@ -18,6 +18,7 @@ import "C"
import (
"errors"
"fmt"
+"math"
"unsafe"
"github.com/prometheus/client_golang/prometheus"
@@ -29,9 +30,15 @@ import (
"github.com/ava-labs/gecko/snow/validators"
"github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/utils/logging"
+"github.com/ava-labs/gecko/utils/random"
"github.com/ava-labs/gecko/utils/timer"
)
+// GossipSize is the maximum number of peers to gossip a container to
+const (
+GossipSize = 50
+)
var (
// VotingNet implements the SenderExternal interface.
VotingNet = Voting{}
@@ -89,34 +96,7 @@ func (s *Voting) Shutdown() { s.executor.Stop() }
// Accept is called after every consensus decision
func (s *Voting) Accept(chainID, containerID ids.ID, container []byte) error {
-peers := []salticidae.PeerID(nil)
-allPeers, allIDs, _ := s.conns.Conns()
-for i, id := range allIDs {
-if !s.vdrs.Contains(id) {
-peers = append(peers, allPeers[i])
-}
-}
-build := Builder{}
-msg, err := build.Put(chainID, 0, containerID, container)
-if err != nil {
-return fmt.Errorf("Attempted to pack too large of a Put message.\nContainer length: %d: %w", len(container), err)
-}
-s.log.Verbo("Sending a Put message to non-validators."+
-"\nNumber of Non-Validators: %d"+
-"\nChain: %s"+
-"\nContainer ID: %s"+
-"\nContainer:\n%s",
-len(peers),
-chainID,
-containerID,
-formatting.DumpBytes{Bytes: container},
-)
-s.send(msg, peers...)
-s.numPutSent.Add(float64(len(peers)))
-return nil
+return s.gossip(chainID, containerID, container)
}
@@ -412,6 +392,13 @@ func (s *Voting) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32
s.numChitsSent.Inc()
}
+// Gossip attempts to gossip the container to the network
+func (s *Voting) Gossip(chainID, containerID ids.ID, container []byte) {
+if err := s.gossip(chainID, containerID, container); err != nil {
+s.log.Error("Error gossiping container %s to %s\n%s", containerID, chainID, err)
+}
+}
func (s *Voting) send(msg Msg, peers ...salticidae.PeerID) {
ds := msg.DataStream()
defer ds.Free()
@@ -429,6 +416,41 @@ func (s *Voting) send(msg Msg, peers ...salticidae.PeerID) {
}
}
func (s *Voting) gossip(chainID, containerID ids.ID, container []byte) error {
allPeers := s.conns.PeerIDs()
numToGossip := GossipSize
if numToGossip > len(allPeers) {
numToGossip = len(allPeers)
}
peers := make([]salticidae.PeerID, numToGossip)
sampler := random.Uniform{N: len(allPeers)}
for i := range peers {
peers[i] = allPeers[sampler.Sample()]
}
build := Builder{}
msg, err := build.Put(chainID, math.MaxUint32, containerID, container)
if err != nil {
return fmt.Errorf("Attempted to pack too large of a Put message.\nContainer length: %d: %w", len(container), err)
}
s.log.Verbo("Sending a Put message to peers."+
"\nNumber of Peers: %d"+
"\nChain: %s"+
"\nContainer ID: %s"+
"\nContainer:\n%s",
len(peers),
chainID,
containerID,
formatting.DumpBytes{Bytes: container},
)
s.send(msg, peers...)
s.numPutSent.Add(float64(len(peers)))
return nil
}
// getAcceptedFrontier handles the recept of a getAcceptedFrontier container
// message for a chain
//export getAcceptedFrontier


@@ -4,7 +4,7 @@
package node
// #include "salticidae/network.h"
-// void onTerm(int sig, void *);
+// void onTerm(threadcall_handle_t *, void *);
// void errorHandler(SalticidaeCError *, bool, int32_t, void *);
import "C"
@@ -14,6 +14,7 @@ import (
"errors"
"fmt"
"io/ioutil"
+"os"
"path"
"sync"
"unsafe"
@@ -35,6 +36,7 @@ import (
"github.com/ava-labs/gecko/networking/xputtest"
"github.com/ava-labs/gecko/snow/triggers"
"github.com/ava-labs/gecko/snow/validators"
+"github.com/ava-labs/gecko/utils"
"github.com/ava-labs/gecko/utils/hashing"
"github.com/ava-labs/gecko/utils/logging"
"github.com/ava-labs/gecko/utils/wrappers"
@@ -92,6 +94,10 @@ type Node struct {
// Event loop manager
EC salticidae.EventContext
+// Caller to the event context
+TCall salticidae.ThreadCall
// Network that manages validator peers
PeerNet salticidae.PeerNetwork
// Network that manages clients
@@ -115,6 +121,9 @@ type Node struct {
// This node's configuration
Config *Config
+// channel for closing the node
+nodeCloser chan<- os.Signal
}
/*
@@ -124,7 +133,7 @@ type Node struct {
*/
//export onTerm
-func onTerm(C.int, unsafe.Pointer) {
+func onTerm(*C.threadcall_handle_t, unsafe.Pointer) {
MainNode.Log.Debug("Terminate signal received")
MainNode.EC.Stop()
}
@@ -143,12 +152,11 @@ func errorHandler(_err *C.struct_SalticidaeCError, fatal C.bool, asyncID C.int32
func (n *Node) initNetlib() error {
// Create main event context
n.EC = salticidae.NewEventContext()
+n.TCall = salticidae.NewThreadCall(n.EC)
-// Set up interrupt signal and terminate signal handlers
-evInt := salticidae.NewSigEvent(n.EC, salticidae.SigEventCallback(C.onTerm), nil)
-evInt.Add(salticidae.SIGINT)
-evTerm := salticidae.NewSigEvent(n.EC, salticidae.SigEventCallback(C.onTerm), nil)
-evTerm.Add(salticidae.SIGTERM)
+n.nodeCloser = utils.HandleSignals(func(os.Signal) {
+n.TCall.AsyncCall(salticidae.ThreadCallCallback(C.onTerm), nil)
+}, os.Interrupt, os.Kill)
// Create peer network config, may have tls enabled
peerConfig := salticidae.NewPeerNetworkConfig()
@@ -655,4 +663,5 @@ func (n *Node) Shutdown() {
n.ValidatorAPI.Shutdown()
n.ConsensusAPI.Shutdown()
n.chainManager.Shutdown()
+utils.ClearSignals(n.nodeCloser)
}


@@ -7,8 +7,8 @@
vars:
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
-repo_name: ava-labs/gecko-internal
-repo_branch: platformvm-proposal-accept
+repo_name: ava-labs/gecko
+repo_branch: master
roles:
- name: ava-stop
- name: ava-build


@@ -1,3 +1,3 @@
- name: Kill Node
-command: killall ava
+command: killall -SIGINT ava
ignore_errors: true


@@ -2,8 +2,8 @@ borealis_bootstrap:
hosts:
bootstrap1:
ansible_host: 3.84.129.247
-staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker1.key"
-staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker1.crt"
+staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker1.key"
+staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker1.crt"
vars:
ansible_connection: ssh
ansible_user: ubuntu
@@ -44,20 +44,20 @@ borealis_node:
hosts:
node1:
ansible_host: 35.153.99.244
-staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker2.key"
-staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker2.crt"
+staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker2.key"
+staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker2.crt"
node2:
ansible_host: 34.201.137.119
-staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker3.key"
-staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker3.crt"
+staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker3.key"
+staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker3.crt"
node3:
ansible_host: 54.146.1.110
-staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker4.key"
-staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker4.crt"
+staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker4.key"
+staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker4.crt"
node4:
ansible_host: 54.91.255.231
-staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker5.key"
-staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/keys/local/staker5.crt"
+staking_tls_key_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker5.key"
+staking_tls_cert_file: "/home/ubuntu/go/src/github.com/ava-labs/gecko/staking/local/staker5.crt"
vars:
ansible_connection: ssh
ansible_user: ubuntu


@@ -7,8 +7,8 @@
vars:
ava_binary: ~/go/src/github.com/ava-labs/gecko/build/ava
repo_folder: ~/go/src/github.com/ava-labs/gecko
-repo_name: ava-labs/gecko-internal
-repo_branch: platformvm-proposal-accept
+repo_name: ava-labs/gecko
+repo_branch: master
roles:
- name: ava-stop
- name: ava-build


@@ -5,4 +5,4 @@
SRC_DIR="$(dirname "${BASH_SOURCE[0]}")"
source "$SRC_DIR/env.sh"
-go test -race -coverprofile=coverage.out -covermode=atomic ./...
+go test -race -timeout="30s" -coverprofile="coverage.out" -covermode="atomic" ./...


@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"testing"
+"time"
"github.com/prometheus/client_golang/prometheus"
@@ -60,7 +61,7 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
handler.Initialize(engine, make(chan common.Message), 1)
timeouts.Initialize(0)
-router.Initialize(ctx.Log, timeouts)
+router.Initialize(ctx.Log, timeouts, time.Hour)
vtxBlocker, _ := queue.New(prefixdb.New([]byte("vtx"), db))
txBlocker, _ := queue.New(prefixdb.New([]byte("tx"), db))


@@ -62,6 +62,26 @@ func (t *Transitive) finishBootstrapping() {
t.bootstrapped = true
}
// Gossip implements the Engine interface
func (t *Transitive) Gossip() {
edge := t.Config.State.Edge()
if len(edge) == 0 {
t.Config.Context.Log.Debug("Dropping gossip request as no vertices have been accepted")
return
}
sampler := random.Uniform{N: len(edge)}
vtxID := edge[sampler.Sample()]
vtx, err := t.Config.State.GetVertex(vtxID)
if err != nil {
t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", vtxID, err)
return
}
t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", vtxID)
t.Config.Sender.Gossip(vtxID, vtx.Bytes())
}
// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() {
t.Config.Context.Log.Info("Shutting down Avalanche consensus")


@@ -2536,3 +2536,54 @@ func TestEnginePartiallyValidVertex(t *testing.T) {
te.insert(vtx)
}
func TestEngineGossip(t *testing.T) {
config := DefaultConfig()
sender := &common.SenderTest{}
sender.T = t
config.Sender = sender
sender.Default(true)
st := &stateTest{t: t}
config.State = st
gVtx := &Vtx{
id: GenerateID(),
status: choices.Accepted,
}
te := &Transitive{}
te.Initialize(config)
te.finishBootstrapping()
st.edge = func() []ids.ID { return []ids.ID{gVtx.ID()} }
st.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) {
switch {
case vtxID.Equals(gVtx.ID()):
return gVtx, nil
}
t.Fatal(errUnknownVertex)
return nil, errUnknownVertex
}
called := new(bool)
sender.GossipF = func(vtxID ids.ID, vtxBytes []byte) {
*called = true
switch {
case !vtxID.Equals(gVtx.ID()):
t.Fatal(errUnknownVertex)
}
switch {
case !bytes.Equal(vtxBytes, gVtx.Bytes()):
t.Fatal(errUnknownVertex)
}
}
te.Gossip()
if !*called {
t.Fatalf("Should have gossiped the vertex")
}
}


@@ -112,6 +112,9 @@ type InternalHandler interface {
// Startup this engine.
Startup()
+// Gossip to the network a container on the accepted frontier
+Gossip()
// Shutdown this engine.
Shutdown()


@@ -14,6 +14,7 @@ type Sender interface {
AcceptedSender
FetchSender
QuerySender
+Gossiper
}
// FrontierSender defines how a consensus engine sends frontier messages to
@@ -70,3 +71,10 @@ type QuerySender interface {
// Chits sends chits to the specified validator
Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set)
}
+// Gossiper defines how a consensus engine gossips a container on the accepted
+// frontier to other validators
+type Gossiper interface {
+// Gossip gossips the provided container throughout the network
+Gossip(containerID ids.ID, container []byte)
+}


@@ -15,6 +15,7 @@ type EngineTest struct {
T *testing.T
CantStartup,
+CantGossip,
CantShutdown,
CantContext,
@@ -38,7 +39,7 @@ type EngineTest struct {
CantQueryFailed,
CantChits bool
-StartupF, ShutdownF func()
+StartupF, GossipF, ShutdownF func()
ContextF func() *snow.Context
NotifyF func(Message)
GetF, GetFailedF, PullQueryF func(validatorID ids.ShortID, requestID uint32, containerID ids.ID)
@@ -50,6 +51,7 @@ type EngineTest struct {
// Default ...
func (e *EngineTest) Default(cant bool) {
e.CantStartup = cant
+e.CantGossip = cant
e.CantShutdown = cant
e.CantContext = cant
@@ -83,6 +85,15 @@ func (e *EngineTest) Startup() {
}
}
+// Gossip ...
+func (e *EngineTest) Gossip() {
+if e.GossipF != nil {
+e.GossipF()
+} else if e.CantGossip && e.T != nil {
+e.T.Fatalf("Unexpectedly called Gossip")
+}
+}
// Shutdown ...
func (e *EngineTest) Shutdown() {
if e.ShutdownF != nil {


@@ -16,7 +16,8 @@ type SenderTest struct {
CantGetAcceptedFrontier, CantAcceptedFrontier,
CantGetAccepted, CantAccepted,
CantGet, CantPut,
-CantPullQuery, CantPushQuery, CantChits bool
+CantPullQuery, CantPushQuery, CantChits,
+CantGossip bool
GetAcceptedFrontierF func(ids.ShortSet, uint32)
AcceptedFrontierF func(ids.ShortID, uint32, ids.Set)
@@ -27,6 +28,7 @@ type SenderTest struct {
PushQueryF func(ids.ShortSet, uint32, ids.ID, []byte)
PullQueryF func(ids.ShortSet, uint32, ids.ID)
ChitsF func(ids.ShortID, uint32, ids.Set)
+GossipF func(ids.ID, []byte)
}
// Default set the default callable value to [cant]
@@ -40,6 +42,7 @@ func (s *SenderTest) Default(cant bool) {
s.CantPullQuery = cant
s.CantPushQuery = cant
s.CantChits = cant
+s.CantGossip = cant
}
// GetAcceptedFrontier calls GetAcceptedFrontierF if it was initialized. If it
@@ -140,3 +143,14 @@ func (s *SenderTest) Chits(vdr ids.ShortID, requestID uint32, votes ids.Set) {
s.T.Fatalf("Unexpectedly called Chits")
}
}
// Gossip calls GossipF if it was initialized. If it wasn't initialized and this
// function shouldn't be called and testing was initialized, then testing will
// fail.
func (s *SenderTest) Gossip(containerID ids.ID, container []byte) {
if s.GossipF != nil {
s.GossipF(containerID, container)
} else if s.CantGossip && s.T != nil {
s.T.Fatalf("Unexpectedly called Gossip")
}
}


@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"testing"
+"time"
"github.com/prometheus/client_golang/prometheus"
@@ -54,7 +55,7 @@ func newConfig(t *testing.T) (BootstrapConfig, ids.ShortID, *common.SenderTest,
handler.Initialize(engine, make(chan common.Message), 1)
timeouts.Initialize(0)
-router.Initialize(ctx.Log, timeouts)
+router.Initialize(ctx.Log, timeouts, time.Hour)
blocker, _ := queue.New(db)


@@ -65,6 +65,19 @@ func (t *Transitive) finishBootstrapping() {
}
}
// Gossip implements the Engine interface
func (t *Transitive) Gossip() {
blkID := t.Config.VM.LastAccepted()
blk, err := t.Config.VM.GetBlock(blkID)
if err != nil {
t.Config.Context.Log.Warn("Dropping gossip request as %s couldn't be loaded due to %s", blkID, err)
return
}
t.Config.Context.Log.Debug("Gossiping %s as accepted to the network", blkID)
t.Config.Sender.Gossip(blkID, blk.Bytes())
}
// Shutdown implements the Engine interface
func (t *Transitive) Shutdown() {
t.Config.Context.Log.Info("Shutting down Snowman consensus")


@@ -1187,3 +1187,36 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) {
t.Fatalf("Should have bubbled invalid votes to the valid parent")
}
}
func TestEngineGossip(t *testing.T) {
_, _, sender, vm, te, gBlk := setup(t)
vm.LastAcceptedF = func() ids.ID { return gBlk.ID() }
vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) {
switch {
case blkID.Equals(gBlk.ID()):
return gBlk, nil
}
t.Fatal(errUnknownBlock)
return nil, errUnknownBlock
}
called := new(bool)
sender.GossipF = func(blkID ids.ID, blkBytes []byte) {
*called = true
switch {
case !blkID.Equals(gBlk.ID()):
t.Fatal(errUnknownBlock)
}
switch {
case !bytes.Equal(blkBytes, gBlk.Bytes()):
t.Fatal(errUnknownBytes)
}
}
te.Gossip()
if !*called {
t.Fatalf("Should have gossiped the block")
}
}


@@ -91,6 +91,8 @@ func (h *Handler) dispatchMsg(msg message) bool {
h.engine.Chits(msg.validatorID, msg.requestID, msg.containerIDs)
case notifyMsg:
h.engine.Notify(msg.notification)
+case gossipMsg:
+h.engine.Gossip()
case shutdownMsg:
h.engine.Shutdown()
return false
@@ -232,6 +234,9 @@ func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) {
}
}
+// Gossip passes a gossip request to the consensus engine
+func (h *Handler) Gossip() { h.msgs <- message{messageType: gossipMsg} }
// Shutdown shuts down the dispatcher
func (h *Handler) Shutdown() { h.msgs <- message{messageType: shutdownMsg}; h.wg.Wait() }


@@ -29,6 +29,7 @@ const (
chitsMsg
queryFailedMsg
notifyMsg
+gossipMsg
shutdownMsg
)
@@ -87,6 +88,8 @@ func (t msgType) String() string {
return "Query Failed Message"
case notifyMsg:
return "Notify Message"
+case gossipMsg:
+return "Gossip Message"
case shutdownMsg:
return "Shutdown Message"
default:


@@ -4,6 +4,8 @@
package router
import (
+"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking/handler"
"github.com/ava-labs/gecko/snow/networking/timeout"
@@ -19,7 +21,7 @@ type Router interface {
AddChain(chain *handler.Handler)
RemoveChain(chainID ids.ID)
Shutdown()
-Initialize(log logging.Logger, timeouts *timeout.Manager)
+Initialize(log logging.Logger, timeouts *timeout.Manager, gossipFrequency time.Duration)
}
// ExternalRouter routes messages from the network to the


@@ -5,11 +5,13 @@ package router
import (
"sync"
+"time"
"github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/networking/handler"
"github.com/ava-labs/gecko/snow/networking/timeout"
"github.com/ava-labs/gecko/utils/logging"
+"github.com/ava-labs/gecko/utils/timer"
)
// ChainRouter routes incoming messages from the validator network
@@ -21,15 +23,24 @@ type ChainRouter struct {
lock sync.RWMutex
chains map[[32]byte]*handler.Handler
timeouts *timeout.Manager
+gossiper *timer.Repeater
}
-// Initialize the router
-// When this router receives an incoming message, it cancels the timeout in [timeouts]
-// associated with the request that caused the incoming message, if applicable
-func (sr *ChainRouter) Initialize(log logging.Logger, timeouts *timeout.Manager) {
+// Initialize the router.
+//
+// When this router receives an incoming message, it cancels the timeout in
+// [timeouts] associated with the request that caused the incoming message, if
+// applicable.
+//
+// This router also fires a gossip event every [gossipFrequency] to the engine,
+// notifying the engine it should gossip it's accepted set.
+func (sr *ChainRouter) Initialize(log logging.Logger, timeouts *timeout.Manager, gossipFrequency time.Duration) {
sr.log = log
sr.chains = make(map[[32]byte]*handler.Handler)
sr.timeouts = timeouts
+sr.gossiper = timer.NewRepeater(sr.Gossip, gossipFrequency)
+go log.RecoverAndPanic(sr.gossiper.Dispatch)
}
// AddChain registers the specified chain so that incoming
@@ -255,4 +266,19 @@ func (sr *ChainRouter) shutdown() {
for _, chain := range sr.chains {
chain.Shutdown()
}
+sr.gossiper.Stop()
+}
+// Gossip accepted containers
+func (sr *ChainRouter) Gossip() {
+sr.lock.RLock()
+defer sr.lock.RUnlock()
+sr.gossip()
+}
+func (sr *ChainRouter) gossip() {
+for _, chain := range sr.chains {
+chain.Gossip()
+}
}
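The gossip loop above is driven by the repeating timer from the utils/timer package. A minimal standalone sketch of that pattern follows; the printed message and one-second interval are illustrative, and the router itself wraps Dispatch in log.RecoverAndPanic rather than a bare goroutine.

package main

import (
	"fmt"
	"time"

	"github.com/ava-labs/gecko/utils/timer"
)

func main() {
	// NewRepeater invokes the callback on every interval once Dispatch is running,
	// mirroring how ChainRouter.Initialize schedules sr.Gossip.
	gossiper := timer.NewRepeater(func() { fmt.Println("gossip tick") }, time.Second)
	go gossiper.Dispatch()

	time.Sleep(3 * time.Second)

	// Stop ends the repeater, as ChainRouter.shutdown does.
	gossiper.Stop()
}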


@@ -20,4 +20,6 @@ type ExternalSender interface {
PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)
Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)
+Gossip(chainID ids.ID, containerID ids.ID, container []byte)
}


@@ -163,3 +163,9 @@ func (s *Sender) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set)
}
s.sender.Chits(validatorID, s.ctx.ChainID, requestID, votes)
}
+// Gossip the provided container
+func (s *Sender) Gossip(containerID ids.ID, container []byte) {
+s.ctx.Log.Verbo("Gossiping %s", containerID)
+s.sender.Gossip(s.ctx.ChainID, containerID, container)
+}


@@ -38,7 +38,7 @@ func TestTimeout(t *testing.T) {
go tm.Dispatch()
router := router.ChainRouter{}
-router.Initialize(logging.NoLog{}, &tm)
+router.Initialize(logging.NoLog{}, &tm, time.Hour)
sender := Sender{}
sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &router, &tm)


@@ -17,7 +17,8 @@ type ExternalSenderTest struct {
CantGetAcceptedFrontier, CantAcceptedFrontier,
CantGetAccepted, CantAccepted,
CantGet, CantPut,
-CantPullQuery, CantPushQuery, CantChits bool
+CantPullQuery, CantPushQuery, CantChits,
+CantGossip bool
GetAcceptedFrontierF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32)
AcceptedFrontierF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set)
@@ -28,6 +29,7 @@ type ExternalSenderTest struct {
PushQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte)
PullQueryF func(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerID ids.ID)
ChitsF func(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set)
+GossipF func(chainID ids.ID, containerID ids.ID, container []byte)
}
// Default set the default callable value to [cant]
@@ -41,6 +43,7 @@ func (s *ExternalSenderTest) Default(cant bool) {
s.CantPullQuery = cant
s.CantPushQuery = cant
s.CantChits = cant
+s.CantGossip = cant
}
// GetAcceptedFrontier calls GetAcceptedFrontierF if it was initialized. If it
@@ -159,3 +162,16 @@ func (s *ExternalSenderTest) Chits(vdr ids.ShortID, chainID ids.ID, requestID ui
s.B.Fatalf("Unexpectedly called Chits")
}
}
+// Gossip calls GossipF if it was initialized. If it wasn't initialized and this
+// function shouldn't be called and testing was initialized, then testing will
+// fail.
+func (s *ExternalSenderTest) Gossip(chainID ids.ID, containerID ids.ID, container []byte) {
+if s.GossipF != nil {
+s.GossipF(chainID, containerID, container)
+} else if s.CantGossip && s.T != nil {
+s.T.Fatalf("Unexpectedly called Gossip")
+} else if s.CantGossip && s.B != nil {
+s.B.Fatalf("Unexpectedly called Gossip")
+}
+}

staking/gen_staker_key.go (new file, 74 lines)
@@ -0,0 +1,74 @@
package staking
import (
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"fmt"
"math/big"
"os"
"path/filepath"
"time"
)
// GenerateStakingKeyCert generates a self-signed TLS key/cert pair to use in staking
// The key and files will be placed at [keyPath] and [certPath], respectively
// If there is already a file at [keyPath], returns nil
func GenerateStakingKeyCert(keyPath, certPath string) error {
// If there is already a file at [keyPath], do nothing
if _, err := os.Stat(keyPath); !os.IsNotExist(err) {
return nil
}
// Create key to sign cert with
key, err := rsa.GenerateKey(rand.Reader, 4096)
if err != nil {
return fmt.Errorf("couldn't generate rsa key: %w", err)
}
// Create self-signed staking cert
certTemplate := &x509.Certificate{
SerialNumber: big.NewInt(0),
NotBefore: time.Date(2000, time.January, 0, 0, 0, 0, 0, time.UTC),
NotAfter: time.Now().AddDate(100, 0, 0),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageDataEncipherment,
BasicConstraintsValid: true,
}
certBytes, err := x509.CreateCertificate(rand.Reader, certTemplate, certTemplate, &key.PublicKey, key)
if err != nil {
return fmt.Errorf("couldn't create certificate: %w", err)
}
// Write cert to disk
if err := os.MkdirAll(filepath.Dir(certPath), 0755); err != nil {
return fmt.Errorf("couldn't create path for key/cert: %w", err)
}
certOut, err := os.Create(certPath)
if err != nil {
return fmt.Errorf("couldn't create cert file: %w", err)
}
if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: certBytes}); err != nil {
return fmt.Errorf("couldn't write cert file: %w", err)
}
if err := certOut.Close(); err != nil {
return fmt.Errorf("couldn't close cert file: %w", err)
}
// Write key to disk
keyOut, err := os.Create(keyPath)
if err != nil {
return fmt.Errorf("couldn't create key file: %w", err)
}
privBytes, err := x509.MarshalPKCS8PrivateKey(key)
if err != nil {
return fmt.Errorf("couldn't marshal private key: %w", err)
}
if err := pem.Encode(keyOut, &pem.Block{Type: "PRIVATE KEY", Bytes: privBytes}); err != nil {
return fmt.Errorf("couldn't write private key: %w", err)
}
if err := keyOut.Close(); err != nil {
return fmt.Errorf("couldn't close key file: %w", err)
}
return nil
}
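For context, a minimal sketch of how this helper is invoked; it mirrors the call added to the CLI flag-parsing changes earlier in this diff, and the paths shown are the defaults that code derives rather than anything required by the function itself.

package main

import (
	"log"
	"os"
	"path/filepath"

	"github.com/ava-labs/gecko/staking"
)

func main() {
	// Default-style paths under $HOME/.gecko/staking, as the CLI parsing code builds them.
	keyPath := os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key"))
	certPath := os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt"))

	// No-op if a key already exists at keyPath; otherwise writes both files.
	if err := staking.GenerateStakingKeyCert(keyPath, certPath); err != nil {
		log.Fatalf("couldn't generate staking key/cert: %s", err)
	}
}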


@@ -50,3 +50,10 @@ func (r *request) Method() (string, error) {
uppercaseRune := string(unicode.ToUpper(firstRune))
return fmt.Sprintf("%s.%s%s", class, string(uppercaseRune), function[runeLen:]), nil
}
+func (r *request) ReadRequest(args interface{}) error {
+if err := r.CodecRequest.ReadRequest(args); err != nil {
+return errors.New("couldn't unmarshal an argument. Ensure arguments are valid and properly formatted. See documentation for example calls")
+}
+return nil
+}

utils/signal.go (new file, 46 lines)
@@ -0,0 +1,46 @@
// (c) 2019-2020, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.
package utils
import (
"os"
"os/signal"
)
// HandleSignals calls f with the received signal if the go runtime receives
// any of the provided signals.
//
// If f is nil or there are no provided signals, then nil will be returned.
// Otherwise, a signal channel will be returned that can be used to clear the
// signals registered by this function by calling ClearSignals.
func HandleSignals(f func(os.Signal), sigs ...os.Signal) chan<- os.Signal {
if f == nil || len(sigs) == 0 {
return nil
}
// register signals
c := make(chan os.Signal, 1)
for _, sig := range sigs {
signal.Notify(c, sig)
}
go func() {
for sig := range c {
f(sig)
}
}()
return c
}
// ClearSignals clears any signals that have been registered on the provided
// channel and closes the channel.
func ClearSignals(c chan<- os.Signal) {
if c == nil {
return
}
signal.Stop(c)
close(c)
}
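A small usage sketch of these helpers, following the way the node wires them up elsewhere in this diff; the callback body here is illustrative.

package main

import (
	"fmt"
	"os"

	"github.com/ava-labs/gecko/utils"
)

func main() {
	// Register the same signals the node uses (os.Interrupt, os.Kill) and run the
	// callback when one arrives; the node uses this to trigger its onTerm handler.
	closer := utils.HandleSignals(func(sig os.Signal) {
		fmt.Println("received signal:", sig)
	}, os.Interrupt, os.Kill)

	// ... do work ...

	// During shutdown, unregister the signals and close the channel.
	utils.ClearSignals(closer)
}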


@@ -623,7 +623,10 @@ func TestBaseTxSyntacticVerifyUninitialized(t *testing.T) {
func TestBaseTxSemanticVerify(t *testing.T) {
genesisBytes, _, vm := GenesisVM(t)
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@@ -687,7 +690,10 @@ func TestBaseTxSemanticVerify(t *testing.T) {
func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) {
genesisBytes, _, vm := GenesisVM(t)
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
vm.codec.RegisterType(&ava.TestVerifiable{})
@@ -736,7 +742,10 @@ func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) {
func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) {
genesisBytes, _, vm := GenesisVM(t)
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
vm.codec.RegisterType(&ava.TestVerifiable{})
@@ -801,14 +810,15 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) {
}
func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) {
-genesisBytes := BuildGenesisTest(t)
-issuer := make(chan common.Message, 1)
-ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
vm := &VM{}
+ctx.Lock.Lock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
+genesisBytes := BuildGenesisTest(t)
+issuer := make(chan common.Message, 1)
err := vm.Initialize(
ctx,
memdb.New(),
@@ -893,7 +903,10 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) {
func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) {
genesisBytes, _, vm := GenesisVM(t)
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@@ -944,7 +957,10 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) {
func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) {
genesisBytes, _, vm := GenesisVM(t)
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@@ -1008,7 +1024,10 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) {
func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) {
genesisBytes, _, vm := GenesisVM(t)
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@@ -1140,7 +1159,10 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) {
<-issuer
ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
vm.PendingTxs()
@@ -1271,7 +1293,10 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) {
<-issuer
ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
vm.PendingTxs()
@@ -1436,7 +1461,10 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) {
<-issuer
ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
vm.PendingTxs()
@@ -1585,7 +1613,10 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) {
<-issuer
ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
vm.PendingTxs()


@@ -216,7 +216,10 @@ func TestIssueExportTx(t *testing.T) {
}
ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
txs := vm.PendingTxs()
if len(txs) != 1 {
@@ -349,7 +352,10 @@ func TestClearForceAcceptedExportTx(t *testing.T) {
}
ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
txs := vm.PendingTxs()
if len(txs) != 1 {


@@ -220,7 +220,6 @@ func TestIssueImportTx(t *testing.T) {
if _, err := vm.IssueTx(tx.Bytes(), nil); err != nil {
t.Fatalf("should have issued the transaction correctly but errored: %s", err)
}
ctx.Lock.Unlock()
msg := <-issuer
@@ -228,6 +227,12 @@
t.Fatalf("Wrong message")
}
+ctx.Lock.Lock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
txs := vm.PendingTxs()
if len(txs) != 1 {
t.Fatalf("Should have returned %d tx(s)", 1)
@@ -261,10 +266,13 @@ func TestForceAcceptImportTx(t *testing.T) {
platformID := ids.Empty.Prefix(0)
-ctx.Lock.Lock()
-defer ctx.Lock.Unlock()
vm := &VM{platform: platformID}
+ctx.Lock.Lock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
err := vm.Initialize(
ctx,
memdb.New(),


@@ -17,7 +17,11 @@ import (
func TestPrefixedSetsAndGets(t *testing.T) {
_, _, vm := GenesisVM(t)
-ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
state := vm.state
vm.codec.RegisterType(&ava.TestVerifiable{})
@@ -112,7 +116,11 @@ func TestPrefixedSetsAndGets(t *testing.T) {
func TestPrefixedFundingNoAddresses(t *testing.T) {
_, _, vm := GenesisVM(t)
-ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
state := vm.state
vm.codec.RegisterType(&ava.TestVerifiable{})
@@ -136,7 +144,11 @@ func TestPrefixedFundingNoAddresses(t *testing.T) {
func TestPrefixedFundingAddresses(t *testing.T) {
_, _, vm := GenesisVM(t)
-ctx.Lock.Unlock()
+defer func() {
+vm.Shutdown()
+ctx.Lock.Unlock()
+}()
state := vm.state
vm.codec.RegisterType(&testAddressable{})


@@ -96,6 +96,36 @@ func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, repl
return nil
}
// GetTxArgs are arguments for passing into GetTx requests
type GetTxArgs struct {
TxID ids.ID `json:"txID"`
}
// GetTxReply defines the GetTxStatus replies returned from the API
type GetTxReply struct {
Tx formatting.CB58 `json:"tx"`
}
// GetTx returns the specified transaction
func (service *Service) GetTx(r *http.Request, args *GetTxArgs, reply *GetTxReply) error {
service.vm.ctx.Log.Verbo("GetTx called with %s", args.TxID)
if args.TxID.IsZero() {
return errNilTxID
}
tx := UniqueTx{
vm: service.vm,
txID: args.TxID,
}
if status := tx.Status(); !status.Fetched() {
return errUnknownTx
}
reply.Tx.Bytes = tx.Bytes()
return nil
}
// GetUTXOsArgs are arguments for passing into GetUTXOs requests // GetUTXOsArgs are arguments for passing into GetUTXOs requests
type GetUTXOsArgs struct { type GetUTXOsArgs struct {
Addresses []string `json:"addresses"` Addresses []string `json:"addresses"`
@ -188,7 +218,8 @@ type GetBalanceArgs struct {
// GetBalanceReply defines the GetBalance replies returned from the API // GetBalanceReply defines the GetBalance replies returned from the API
type GetBalanceReply struct { type GetBalanceReply struct {
Balance json.Uint64 `json:"balance"` Balance json.Uint64 `json:"balance"`
UTXOIDs []ava.UTXOID `json:"utxoIDs"`
} }
// GetBalance returns the amount of an asset that an address at least partially owns // GetBalance returns the amount of an asset that an address at least partially owns
@ -217,18 +248,21 @@ func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply
} }
for _, utxo := range utxos { for _, utxo := range utxos {
if utxo.AssetID().Equals(assetID) { if !utxo.AssetID().Equals(assetID) {
transferable, ok := utxo.Out.(ava.Transferable) continue
if !ok {
continue
}
amt, err := safemath.Add64(transferable.Amount(), uint64(reply.Balance))
if err != nil {
return err
}
reply.Balance = json.Uint64(amt)
} }
transferable, ok := utxo.Out.(ava.Transferable)
if !ok {
continue
}
amt, err := safemath.Add64(transferable.Amount(), uint64(reply.Balance))
if err != nil {
return err
}
reply.Balance = json.Uint64(amt)
reply.UTXOIDs = append(reply.UTXOIDs, utxo.UTXOID)
} }
return nil return nil
} }
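GetBalance now skips non-matching UTXOs with an early continue and, alongside the summed balance, records the UTXOID of every UTXO that contributed to it. A small self-contained sketch of reading the extended reply; utxoID and balanceReply below are local stand-ins for ava.UTXOID and GetBalanceReply, kept just close enough to show the shape a caller sees:

package main

import "fmt"

// Stand-ins mirroring ava.UTXOID (TxID, OutputIndex) and GetBalanceReply
// (Balance plus the new UTXOIDs slice), for illustration only.
type utxoID struct {
	TxID        string
	OutputIndex uint32
}

type balanceReply struct {
	Balance uint64
	UTXOIDs []utxoID
}

func main() {
	reply := balanceReply{
		Balance: 300000,
		UTXOIDs: []utxoID{{TxID: "exampleTx", OutputIndex: 0}, {TxID: "exampleTx", OutputIndex: 1}},
	}
	for _, id := range reply.UTXOIDs {
		// Each TxID:OutputIndex pair identifies one UTXO behind the balance.
		fmt.Printf("utxo %s:%d contributes to balance %d\n", id.TxID, id.OutputIndex, reply.Balance)
	}
}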

View File

@ -7,46 +7,25 @@ import (
"fmt" "fmt"
"testing" "testing"
"github.com/ava-labs/gecko/snow/choices" "github.com/stretchr/testify/assert"
"github.com/ava-labs/gecko/database/memdb"
"github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/ids"
"github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/choices"
"github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/formatting"
"github.com/ava-labs/gecko/vms/secp256k1fx"
) )
func setup(t *testing.T) ([]byte, *VM, *Service) { func setup(t *testing.T) ([]byte, *VM, *Service) {
genesisBytes := BuildGenesisTest(t) genesisBytes, _, vm := GenesisVM(t)
ctx.Lock.Lock()
// This VM initialization is very similar to that done by GenesisVM().
// However, replacing the body of this function with a call to GenesisVM
// causes a timeout while executing the test suite.
// https://github.com/ava-labs/gecko/pull/59#pullrequestreview-392478636
vm := &VM{}
err := vm.Initialize(
ctx,
memdb.New(),
genesisBytes,
make(chan common.Message, 1),
[]*common.Fx{&common.Fx{
ID: ids.Empty,
Fx: &secp256k1fx.Fx{},
}},
)
if err != nil {
t.Fatal(err)
}
s := &Service{vm: vm} s := &Service{vm: vm}
return genesisBytes, vm, s return genesisBytes, vm, s
} }
func TestServiceIssueTx(t *testing.T) { func TestServiceIssueTx(t *testing.T) {
genesisBytes, vm, s := setup(t) genesisBytes, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
txArgs := &IssueTxArgs{} txArgs := &IssueTxArgs{}
txReply := &IssueTxReply{} txReply := &IssueTxReply{}
@ -68,8 +47,10 @@ func TestServiceIssueTx(t *testing.T) {
func TestServiceGetTxStatus(t *testing.T) { func TestServiceGetTxStatus(t *testing.T) {
genesisBytes, vm, s := setup(t) genesisBytes, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
statusArgs := &GetTxStatusArgs{} statusArgs := &GetTxStatusArgs{}
statusReply := &GetTxStatusReply{} statusReply := &GetTxStatusReply{}
@ -107,11 +88,77 @@ func TestServiceGetTxStatus(t *testing.T) {
} }
} }
func TestServiceGetUTXOsInvalidAddress(t *testing.T) { func TestServiceGetBalance(t *testing.T) {
_, vm, s := setup(t) genesisBytes, vm, s := setup(t)
defer ctx.Lock.Unlock() defer ctx.Lock.Unlock()
defer vm.Shutdown() defer vm.Shutdown()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
assetID := genesisTx.ID()
addr := keys[0].PublicKey().Address()
balanceArgs := &GetBalanceArgs{
Address: fmt.Sprintf("%s-%s", vm.ctx.ChainID, addr),
AssetID: assetID.String(),
}
balanceReply := &GetBalanceReply{}
err := s.GetBalance(nil, balanceArgs, balanceReply)
assert.NoError(t, err)
assert.Equal(t, uint64(balanceReply.Balance), uint64(300000))
assert.Len(t, balanceReply.UTXOIDs, 4, "should have only returned four utxoIDs")
}
func TestServiceGetTx(t *testing.T) {
genesisBytes, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
genesisTxBytes := genesisTx.Bytes()
txID := genesisTx.ID()
reply := GetTxReply{}
err := s.GetTx(nil, &GetTxArgs{
TxID: txID,
}, &reply)
assert.NoError(t, err)
assert.Equal(t, genesisTxBytes, reply.Tx.Bytes, "Wrong tx returned from service.GetTx")
}
func TestServiceGetNilTx(t *testing.T) {
_, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
reply := GetTxReply{}
err := s.GetTx(nil, &GetTxArgs{}, &reply)
assert.Error(t, err, "Nil TxID should have returned an error")
}
func TestServiceGetUnknownTx(t *testing.T) {
_, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
reply := GetTxReply{}
err := s.GetTx(nil, &GetTxArgs{TxID: ids.Empty}, &reply)
assert.Error(t, err, "Unknown TxID should have returned an error")
}
func TestServiceGetUTXOsInvalidAddress(t *testing.T) {
_, vm, s := setup(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
addr0 := keys[0].PublicKey().Address() addr0 := keys[0].PublicKey().Address()
tests := []struct { tests := []struct {
label string label string
@ -137,8 +184,10 @@ func TestServiceGetUTXOsInvalidAddress(t *testing.T) {
func TestServiceGetUTXOs(t *testing.T) { func TestServiceGetUTXOs(t *testing.T) {
_, vm, s := setup(t) _, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
addr0 := keys[0].PublicKey().Address() addr0 := keys[0].PublicKey().Address()
tests := []struct { tests := []struct {
@ -187,8 +236,10 @@ func TestServiceGetUTXOs(t *testing.T) {
func TestGetAssetDescription(t *testing.T) { func TestGetAssetDescription(t *testing.T) {
genesisBytes, vm, s := setup(t) genesisBytes, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@ -212,8 +263,10 @@ func TestGetAssetDescription(t *testing.T) {
func TestGetBalance(t *testing.T) { func TestGetBalance(t *testing.T) {
genesisBytes, vm, s := setup(t) genesisBytes, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@ -235,8 +288,10 @@ func TestGetBalance(t *testing.T) {
func TestCreateFixedCapAsset(t *testing.T) { func TestCreateFixedCapAsset(t *testing.T) {
_, vm, s := setup(t) _, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
reply := CreateFixedCapAssetReply{} reply := CreateFixedCapAssetReply{}
err := s.CreateFixedCapAsset(nil, &CreateFixedCapAssetArgs{ err := s.CreateFixedCapAsset(nil, &CreateFixedCapAssetArgs{
@ -259,8 +314,10 @@ func TestCreateFixedCapAsset(t *testing.T) {
func TestCreateVariableCapAsset(t *testing.T) { func TestCreateVariableCapAsset(t *testing.T) {
_, vm, s := setup(t) _, vm, s := setup(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
reply := CreateVariableCapAssetReply{} reply := CreateVariableCapAssetReply{}
err := s.CreateVariableCapAsset(nil, &CreateVariableCapAssetArgs{ err := s.CreateVariableCapAsset(nil, &CreateVariableCapAssetArgs{

View File

@ -16,7 +16,11 @@ import (
func TestStateIDs(t *testing.T) { func TestStateIDs(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
ctx.Lock.Unlock() defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
state := vm.state.state state := vm.state.state
id0 := ids.NewID([32]byte{0xff, 0}) id0 := ids.NewID([32]byte{0xff, 0})
@ -126,7 +130,11 @@ func TestStateIDs(t *testing.T) {
func TestStateStatuses(t *testing.T) { func TestStateStatuses(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
ctx.Lock.Unlock() defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
state := vm.state.state state := vm.state.state
if _, err := state.Status(ids.Empty); err == nil { if _, err := state.Status(ids.Empty); err == nil {
@ -175,7 +183,11 @@ func TestStateStatuses(t *testing.T) {
func TestStateUTXOs(t *testing.T) { func TestStateUTXOs(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
ctx.Lock.Unlock() defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
state := vm.state.state state := vm.state.state
vm.codec.RegisterType(&ava.TestVerifiable{}) vm.codec.RegisterType(&ava.TestVerifiable{})
@ -246,7 +258,11 @@ func TestStateUTXOs(t *testing.T) {
func TestStateTXs(t *testing.T) { func TestStateTXs(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
ctx.Lock.Unlock() defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
state := vm.state.state state := vm.state.state
vm.codec.RegisterType(&ava.TestTransferable{}) vm.codec.RegisterType(&ava.TestTransferable{})

View File

@ -204,7 +204,12 @@ func (vm *VM) Shutdown() {
return return
} }
// There is a potential deadlock if the timer is about to execute a timeout.
// So, the lock must be released before stopping the timer.
vm.ctx.Lock.Unlock()
vm.timer.Stop() vm.timer.Stop()
vm.ctx.Lock.Lock()
if err := vm.baseDB.Close(); err != nil { if err := vm.baseDB.Close(); err != nil {
vm.ctx.Log.Error("Closing the database failed with %s", err) vm.ctx.Log.Error("Closing the database failed with %s", err)
} }
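The comment added here is the key to the change: Shutdown holds vm.ctx.Lock, a timeout handler that is already firing will try to take that same lock before it runs, and timer.Stop waits for any in-flight handler to finish. Stopping the timer with the lock held can therefore deadlock, so the lock is released around the Stop call and re-acquired for the rest of teardown. A toy sketch of that ordering, with illustrative names rather than the VM's real fields:

package example

import "sync"

// toyTimer stands in for the VM's timer: its handler needs the shared lock,
// and Stop waits until any in-flight handler has finished.
type toyTimer struct {
	handlerDone chan struct{} // created when a timeout is scheduled
}

// fire is what the timeout goroutine runs: it takes the shared lock, does its
// work, then signals completion so Stop can return.
func (t *toyTimer) fire(lock *sync.Mutex) {
	lock.Lock()
	defer lock.Unlock()
	close(t.handlerDone)
}

// Stop blocks until the in-flight handler (if any) has finished.
func (t *toyTimer) Stop() { <-t.handlerDone }

// shutdown mirrors the ordering in this diff: unlock so the handler can
// finish, stop the timer, then re-lock for the remaining cleanup.
func shutdown(lock *sync.Mutex, t *toyTimer) {
	lock.Unlock()
	t.Stop()
	lock.Lock()
	// ... close the database and finish teardown while holding the lock ...
}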

View File

@ -392,10 +392,13 @@ func TestTxSerialization(t *testing.T) {
} }
func TestInvalidGenesis(t *testing.T) { func TestInvalidGenesis(t *testing.T) {
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
vm := &VM{} vm := &VM{}
ctx.Lock.Lock()
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
err := vm.Initialize( err := vm.Initialize(
/*context=*/ ctx, /*context=*/ ctx,
/*db=*/ memdb.New(), /*db=*/ memdb.New(),
@ -409,12 +412,14 @@ func TestInvalidGenesis(t *testing.T) {
} }
func TestInvalidFx(t *testing.T) { func TestInvalidFx(t *testing.T) {
genesisBytes := BuildGenesisTest(t)
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
vm := &VM{} vm := &VM{}
ctx.Lock.Lock()
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisBytes := BuildGenesisTest(t)
err := vm.Initialize( err := vm.Initialize(
/*context=*/ ctx, /*context=*/ ctx,
/*db=*/ memdb.New(), /*db=*/ memdb.New(),
@ -430,12 +435,14 @@ func TestInvalidFx(t *testing.T) {
} }
func TestFxInitializationFailure(t *testing.T) { func TestFxInitializationFailure(t *testing.T) {
genesisBytes := BuildGenesisTest(t)
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
vm := &VM{} vm := &VM{}
ctx.Lock.Lock()
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisBytes := BuildGenesisTest(t)
err := vm.Initialize( err := vm.Initialize(
/*context=*/ ctx, /*context=*/ ctx,
/*db=*/ memdb.New(), /*db=*/ memdb.New(),
@ -457,6 +464,10 @@ func (tx *testTxBytes) UnsignedBytes() []byte { return tx.unsignedBytes }
func TestIssueTx(t *testing.T) { func TestIssueTx(t *testing.T) {
genesisBytes, issuer, vm := GenesisVM(t) genesisBytes, issuer, vm := GenesisVM(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
newTx := NewTx(t, genesisBytes, vm) newTx := NewTx(t, genesisBytes, vm)
@ -473,6 +484,7 @@ func TestIssueTx(t *testing.T) {
if msg != common.PendingTxs { if msg != common.PendingTxs {
t.Fatalf("Wrong message") t.Fatalf("Wrong message")
} }
ctx.Lock.Lock()
if txs := vm.PendingTxs(); len(txs) != 1 { if txs := vm.PendingTxs(); len(txs) != 1 {
t.Fatalf("Should have returned %d tx(s)", 1) t.Fatalf("Should have returned %d tx(s)", 1)
@ -481,6 +493,10 @@ func TestIssueTx(t *testing.T) {
func TestGenesisGetUTXOs(t *testing.T) { func TestGenesisGetUTXOs(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
shortAddr := keys[0].PublicKey().Address() shortAddr := keys[0].PublicKey().Address()
addr := ids.NewID(hashing.ComputeHash256Array(shortAddr.Bytes())) addr := ids.NewID(hashing.ComputeHash256Array(shortAddr.Bytes()))
@ -491,8 +507,6 @@ func TestGenesisGetUTXOs(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
vm.Shutdown()
ctx.Lock.Unlock()
if len(utxos) != 7 { if len(utxos) != 7 {
t.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", 7, len(utxos)) t.Fatalf("Wrong number of utxos. Expected (%d) returned (%d)", 7, len(utxos))
@ -503,6 +517,10 @@ func TestGenesisGetUTXOs(t *testing.T) {
// transaction should be issued successfully. // transaction should be issued successfully.
func TestIssueDependentTx(t *testing.T) { func TestIssueDependentTx(t *testing.T) {
genesisBytes, issuer, vm := GenesisVM(t) genesisBytes, issuer, vm := GenesisVM(t)
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t) genesisTx := GetFirstTxFromGenesisTest(genesisBytes, t)
@ -615,13 +633,13 @@ func TestIssueDependentTx(t *testing.T) {
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
ctx.Lock.Unlock() ctx.Lock.Unlock()
msg := <-issuer msg := <-issuer
if msg != common.PendingTxs { if msg != common.PendingTxs {
t.Fatalf("Wrong message") t.Fatalf("Wrong message")
} }
ctx.Lock.Lock()
if txs := vm.PendingTxs(); len(txs) != 2 { if txs := vm.PendingTxs(); len(txs) != 2 {
t.Fatalf("Should have returned %d tx(s)", 2) t.Fatalf("Should have returned %d tx(s)", 2)
@ -630,14 +648,15 @@ func TestIssueDependentTx(t *testing.T) {
// Test issuing a transaction that creates an NFT family // Test issuing a transaction that creates an NFT family
func TestIssueNFT(t *testing.T) { func TestIssueNFT(t *testing.T) {
genesisBytes := BuildGenesisTest(t)
issuer := make(chan common.Message, 1)
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
vm := &VM{} vm := &VM{}
ctx.Lock.Lock()
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisBytes := BuildGenesisTest(t)
issuer := make(chan common.Message, 1)
err := vm.Initialize( err := vm.Initialize(
ctx, ctx,
memdb.New(), memdb.New(),
@ -788,14 +807,15 @@ func TestIssueNFT(t *testing.T) {
// Test issuing a transaction that creates a Property family // Test issuing a transaction that creates a Property family
func TestIssueProperty(t *testing.T) { func TestIssueProperty(t *testing.T) {
genesisBytes := BuildGenesisTest(t)
issuer := make(chan common.Message, 1)
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
vm := &VM{} vm := &VM{}
ctx.Lock.Lock()
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
genesisBytes := BuildGenesisTest(t)
issuer := make(chan common.Message, 1)
err := vm.Initialize( err := vm.Initialize(
ctx, ctx,
memdb.New(), memdb.New(),
@ -937,8 +957,10 @@ func TestIssueProperty(t *testing.T) {
func TestVMFormat(t *testing.T) { func TestVMFormat(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
tests := []struct { tests := []struct {
in string in string
@ -957,8 +979,10 @@ func TestVMFormat(t *testing.T) {
func TestVMFormatAliased(t *testing.T) { func TestVMFormatAliased(t *testing.T) {
_, _, vm := GenesisVM(t) _, _, vm := GenesisVM(t)
defer ctx.Lock.Unlock() defer func() {
defer vm.Shutdown() vm.Shutdown()
ctx.Lock.Unlock()
}()
origAliases := ctx.BCLookup origAliases := ctx.BCLookup
defer func() { ctx.BCLookup = origAliases }() defer func() { ctx.BCLookup = origAliases }()

View File

@ -24,7 +24,7 @@ type UTXOID struct {
OutputIndex uint32 `serialize:"true" json:"outputIndex"` OutputIndex uint32 `serialize:"true" json:"outputIndex"`
// Symbol is false if the UTXO should be part of the DB // Symbol is false if the UTXO should be part of the DB
Symbol bool Symbol bool `json:"-"`
// id is the unique ID of a UTXO, it is calculated from TxID and OutputIndex // id is the unique ID of a UTXO, it is calculated from TxID and OutputIndex
id ids.ID id ids.ID
} }
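Tagging Symbol with json:"-" tells encoding/json to skip the field entirely, so this internal bookkeeping flag no longer appears in API responses such as the utxoIDs list that GetBalance now returns. A standalone sketch of the behavior (the struct here is illustrative, not the real ava.UTXOID):

package main

import (
	"encoding/json"
	"fmt"
)

type utxoRef struct {
	TxID        string `json:"txID"`
	OutputIndex uint32 `json:"outputIndex"`
	Symbol      bool   `json:"-"` // never serialized
}

func main() {
	b, _ := json.Marshal(utxoRef{TxID: "abc", OutputIndex: 1, Symbol: true})
	fmt.Println(string(b)) // {"txID":"abc","outputIndex":1}
}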

View File

@ -13,6 +13,11 @@ import (
func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) { func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: tx is nil // Case 1: tx is nil
var tx *addDefaultSubnetDelegatorTx var tx *addDefaultSubnetDelegatorTx
@ -153,6 +158,11 @@ func TestAddDefaultSubnetDelegatorTxSyntacticVerify(t *testing.T) {
func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: Proposed validator currently validating default subnet // Case 1: Proposed validator currently validating default subnet
// but stops validating the non-default subnet after it stops validating the default subnet // but stops validating the non-default subnet after it stops validating the default subnet

View File

@ -12,6 +12,11 @@ import (
func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: tx is nil // Case 1: tx is nil
var tx *addDefaultSubnetValidatorTx var tx *addDefaultSubnetValidatorTx
@ -216,6 +221,11 @@ func TestAddDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
// Test AddDefaultSubnetValidatorTx.SemanticVerify // Test AddDefaultSubnetValidatorTx.SemanticVerify
func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: Validator's start time too early // Case 1: Validator's start time too early
tx, err := vm.newAddDefaultSubnetValidatorTx( tx, err := vm.newAddDefaultSubnetValidatorTx(
@ -281,9 +291,9 @@ func TestAddDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
} }
startTime := defaultGenesisTime.Add(1 * time.Second) startTime := defaultGenesisTime.Add(1 * time.Second)
tx, err = vm.newAddDefaultSubnetValidatorTx( tx, err = vm.newAddDefaultSubnetValidatorTx(
defaultNonce+1, // nonce defaultNonce+1, // nonce
defaultStakeAmount, // stake amount defaultStakeAmount, // stake amount
uint64(startTime.Unix()), // start time uint64(startTime.Unix()), // start time
uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time uint64(startTime.Add(MinimumStakingDuration).Unix()), // end time
key.PublicKey().Address(), // node ID key.PublicKey().Address(), // node ID
defaultKey.PublicKey().Address(), // destination defaultKey.PublicKey().Address(), // destination

View File

@ -14,6 +14,11 @@ import (
func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) { func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: tx is nil // Case 1: tx is nil
var tx *addNonDefaultSubnetValidatorTx var tx *addNonDefaultSubnetValidatorTx
@ -202,6 +207,11 @@ func TestAddNonDefaultSubnetValidatorTxSyntacticVerify(t *testing.T) {
func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) { func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: Proposed validator currently validating default subnet // Case 1: Proposed validator currently validating default subnet
// but stops validating the non-default subnet after it stops validating the default subnet // but stops validating the non-default subnet after it stops validating the default subnet
@ -590,12 +600,16 @@ func TestAddNonDefaultSubnetValidatorTxSemanticVerify(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet") t.Fatal("should have failed verification because validator already in pending validator set of the specified subnet")
} }
} }
// Test that marshalling/unmarshalling works // Test that marshalling/unmarshalling works
func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) { func TestAddNonDefaultSubnetValidatorMarshal(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// valid tx // valid tx
tx, err := vm.newAddNonDefaultSubnetValidatorTx( tx, err := vm.newAddNonDefaultSubnetValidatorTx(

View File

@ -17,6 +17,12 @@ func TestAdvanceTimeTxSyntacticVerify(t *testing.T) {
// Case 2: Timestamp is ahead of synchrony bound // Case 2: Timestamp is ahead of synchrony bound
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
tx = &advanceTimeTx{ tx = &advanceTimeTx{
Time: uint64(defaultGenesisTime.Add(Delta).Add(1 * time.Second).Unix()), Time: uint64(defaultGenesisTime.Add(Delta).Add(1 * time.Second).Unix()),
vm: vm, vm: vm,
@ -38,6 +44,11 @@ func TestAdvanceTimeTxSyntacticVerify(t *testing.T) {
// Ensure semantic verification fails when proposed timestamp is at or before current timestamp // Ensure semantic verification fails when proposed timestamp is at or before current timestamp
func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) { func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
tx := &advanceTimeTx{ tx := &advanceTimeTx{
Time: uint64(defaultGenesisTime.Unix()), Time: uint64(defaultGenesisTime.Unix()),
@ -52,6 +63,7 @@ func TestAdvanceTimeTxTimestampTooEarly(t *testing.T) {
// Ensure semantic verification fails when proposed timestamp is after next validator set change time // Ensure semantic verification fails when proposed timestamp is after next validator set change time
func TestAdvanceTimeTxTimestampTooLate(t *testing.T) { func TestAdvanceTimeTxTimestampTooLate(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
// Case 1: Timestamp is after next validator start time // Case 1: Timestamp is after next validator start time
// Add a pending validator // Add a pending validator
@ -94,9 +106,16 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) {
if err == nil { if err == nil {
t.Fatal("should've failed verification because proposed timestamp is after pending validator start time") t.Fatal("should've failed verification because proposed timestamp is after pending validator start time")
} }
vm.Shutdown()
vm.Ctx.Lock.Unlock()
// Case 2: Timestamp is after next validator end time // Case 2: Timestamp is after next validator end time
vm = defaultVM() vm = defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// fast forward clock to 10 seconds before genesis validators stop validating // fast forward clock to 10 seconds before genesis validators stop validating
vm.clock.Set(defaultValidateEndTime.Add(-10 * time.Second)) vm.clock.Set(defaultValidateEndTime.Add(-10 * time.Second))
@ -117,6 +136,11 @@ func TestAdvanceTimeTxTimestampTooLate(t *testing.T) {
// Ensure semantic verification updates the current and pending validator sets correctly // Ensure semantic verification updates the current and pending validator sets correctly
func TestAdvanceTimeTxUpdateValidators(t *testing.T) { func TestAdvanceTimeTxUpdateValidators(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: Timestamp is after next validator start time // Case 1: Timestamp is after next validator start time
// Add a pending validator // Add a pending validator
@ -196,6 +220,11 @@ func TestAdvanceTimeTxUpdateValidators(t *testing.T) {
// Test method InitiallyPrefersCommit // Test method InitiallyPrefersCommit
func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) { func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Proposed advancing timestamp to 1 second after current timestamp // Proposed advancing timestamp to 1 second after current timestamp
tx, err := vm.newAdvanceTimeTx(defaultGenesisTime.Add(1 * time.Second)) tx, err := vm.newAdvanceTimeTx(defaultGenesisTime.Add(1 * time.Second))
@ -217,6 +246,11 @@ func TestAdvanceTimeTxInitiallyPrefersCommit(t *testing.T) {
// Ensure marshaling/unmarshaling works // Ensure marshaling/unmarshaling works
func TestAdvanceTimeTxUnmarshal(t *testing.T) { func TestAdvanceTimeTxUnmarshal(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
tx, err := vm.newAdvanceTimeTx(defaultGenesisTime) tx, err := vm.newAdvanceTimeTx(defaultGenesisTime)
if err != nil { if err != nil {

View File

@ -14,6 +14,11 @@ import (
// test method SyntacticVerify // test method SyntacticVerify
func TestCreateChainTxSyntacticVerify(t *testing.T) { func TestCreateChainTxSyntacticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: tx is nil // Case 1: tx is nil
var tx *CreateChainTx var tx *CreateChainTx
@ -142,6 +147,11 @@ func TestCreateChainTxSyntacticVerify(t *testing.T) {
// Ensure SemanticVerify fails when there are not enough control sigs // Ensure SemanticVerify fails when there are not enough control sigs
func TestCreateChainTxInsufficientControlSigs(t *testing.T) { func TestCreateChainTxInsufficientControlSigs(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Case 1: No control sigs (2 are needed) // Case 1: No control sigs (2 are needed)
tx, err := vm.newCreateChainTx( tx, err := vm.newCreateChainTx(
@ -189,6 +199,11 @@ func TestCreateChainTxInsufficientControlSigs(t *testing.T) {
// Ensure SemanticVerify fails when an incorrect control signature is given // Ensure SemanticVerify fails when an incorrect control signature is given
func TestCreateChainTxWrongControlSig(t *testing.T) { func TestCreateChainTxWrongControlSig(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Generate new, random key to sign tx with // Generate new, random key to sign tx with
factory := crypto.FactorySECP256K1R{} factory := crypto.FactorySECP256K1R{}
@ -222,6 +237,11 @@ func TestCreateChainTxWrongControlSig(t *testing.T) {
// its validator set doesn't exist // its validator set doesn't exist
func TestCreateChainTxNoSuchSubnet(t *testing.T) { func TestCreateChainTxNoSuchSubnet(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
tx, err := vm.newCreateChainTx( tx, err := vm.newCreateChainTx(
defaultNonce+1, defaultNonce+1,
@ -245,6 +265,11 @@ func TestCreateChainTxNoSuchSubnet(t *testing.T) {
func TestCreateChainTxAlreadyExists(t *testing.T) { func TestCreateChainTxAlreadyExists(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// create a tx // create a tx
tx, err := vm.newCreateChainTx( tx, err := vm.newCreateChainTx(
@ -276,6 +301,11 @@ func TestCreateChainTxAlreadyExists(t *testing.T) {
// Ensure valid tx passes semanticVerify // Ensure valid tx passes semanticVerify
func TestCreateChainTxValid(t *testing.T) { func TestCreateChainTxValid(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// create a valid tx // create a valid tx
tx, err := vm.newCreateChainTx( tx, err := vm.newCreateChainTx(

View File

@ -11,6 +11,12 @@ import (
func TestTxHeapStart(t *testing.T) { func TestTxHeapStart(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
txHeap := EventHeap{SortByStartTime: true} txHeap := EventHeap{SortByStartTime: true}
validator0, err := vm.newAddDefaultSubnetValidatorTx( validator0, err := vm.newAddDefaultSubnetValidatorTx(
@ -78,6 +84,12 @@ func TestTxHeapStart(t *testing.T) {
func TestTxHeapStop(t *testing.T) { func TestTxHeapStop(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
txHeap := EventHeap{} txHeap := EventHeap{}
validator0, err := vm.newAddDefaultSubnetValidatorTx( validator0, err := vm.newAddDefaultSubnetValidatorTx(
@ -145,6 +157,12 @@ func TestTxHeapStop(t *testing.T) {
func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) { func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
txHeap := EventHeap{SortByStartTime: true} txHeap := EventHeap{SortByStartTime: true}
validator, err := vm.newAddDefaultSubnetValidatorTx( validator, err := vm.newAddDefaultSubnetValidatorTx(
@ -186,6 +204,12 @@ func TestTxHeapStartValidatorVsDelegatorOrdering(t *testing.T) {
func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) { func TestTxHeapStopValidatorVsDelegatorOrdering(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
txHeap := EventHeap{} txHeap := EventHeap{}
validator, err := vm.newAddDefaultSubnetValidatorTx( validator, err := vm.newAddDefaultSubnetValidatorTx(

View File

@ -18,6 +18,12 @@ func TestRewardValidatorTxSyntacticVerify(t *testing.T) {
} }
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
txID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7}) txID := ids.NewID([32]byte{1, 2, 3, 4, 5, 6, 7})
tests := []test{ tests := []test{
@ -54,6 +60,12 @@ func TestRewardValidatorTxSyntacticVerify(t *testing.T) {
func TestRewardValidatorTxSemanticVerify(t *testing.T) { func TestRewardValidatorTxSemanticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
var nextToRemove *addDefaultSubnetValidatorTx var nextToRemove *addDefaultSubnetValidatorTx
currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID) currentValidators, err := vm.getCurrentValidators(vm.DB, DefaultSubnetID)
if err != nil { if err != nil {
@ -130,6 +142,11 @@ func TestRewardValidatorTxSemanticVerify(t *testing.T) {
func TestRewardDelegatorTxSemanticVerify(t *testing.T) { func TestRewardDelegatorTxSemanticVerify(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
keyIntf1, err := vm.factory.NewPrivateKey() keyIntf1, err := vm.factory.NewPrivateKey()
if err != nil { if err != nil {

View File

@ -8,6 +8,7 @@ import (
"errors" "errors"
"fmt" "fmt"
"net/http" "net/http"
"time"
"github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database"
"github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/ids"
@ -34,6 +35,8 @@ var (
errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'") errGetStakeSource = errors.New("couldn't get account specified in 'stakeSource'")
errNoBlockchainWithAlias = errors.New("there is no blockchain with the specified alias") errNoBlockchainWithAlias = errors.New("there is no blockchain with the specified alias")
errDSCantValidate = errors.New("new blockchain can't be validated by default Subnet") errDSCantValidate = errors.New("new blockchain can't be validated by default Subnet")
errNilSigner = errors.New("nil ShortID 'signer' is not valid")
errNilTo = errors.New("nil ShortID 'to' is not valid")
) )
// Service defines the API calls that can be made to the platform chain // Service defines the API calls that can be made to the platform chain
@ -76,7 +79,7 @@ type GetSubnetsResponse struct {
func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error { func (service *Service) GetSubnets(_ *http.Request, args *GetSubnetsArgs, response *GetSubnetsResponse) error {
subnets, err := service.vm.getSubnets(service.vm.DB) // all subnets subnets, err := service.vm.getSubnets(service.vm.DB) // all subnets
if err != nil { if err != nil {
return fmt.Errorf("error getting subnets from database: %v", err) return fmt.Errorf("error getting subnets from database: %w", err)
} }
getAll := len(args.IDs) == 0 getAll := len(args.IDs) == 0
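Throughout this file fmt.Errorf switches from %v to %w, which wraps the underlying error instead of flattening it into text, so callers can still match it with errors.Is or errors.As. Note that %w is only interpreted by fmt.Errorf; ordinary formatters, including the logger's print-style calls, render it as a %!w verb notice rather than wrapping. A minimal sketch using a standard-library sentinel in place of the repository's database.ErrNotFound:

package main

import (
	"database/sql"
	"errors"
	"fmt"
)

func getSubnets() error {
	// Pretend the database lookup failed with a well-known sentinel error.
	return fmt.Errorf("error getting subnets from database: %w", sql.ErrNoRows)
}

func main() {
	err := getSubnets()
	fmt.Println(err)                           // wrapped message, same text as before
	fmt.Println(errors.Is(err, sql.ErrNoRows)) // true: the sentinel survives wrapping
}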
@ -278,7 +281,7 @@ type GetAccountReply struct {
func (service *Service) GetAccount(_ *http.Request, args *GetAccountArgs, reply *GetAccountReply) error { func (service *Service) GetAccount(_ *http.Request, args *GetAccountArgs, reply *GetAccountReply) error {
account, err := service.vm.getAccount(service.vm.DB, args.Address) account, err := service.vm.getAccount(service.vm.DB, args.Address)
if err != nil && err != database.ErrNotFound { if err != nil && err != database.ErrNotFound {
return errGetAccount return fmt.Errorf("couldn't get account: %w", err)
} else if err == database.ErrNotFound { } else if err == database.ErrNotFound {
account = newAccount(args.Address, 0, 0) account = newAccount(args.Address, 0, 0)
} }
@ -308,7 +311,7 @@ func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, re
// db holds the user's info that pertains to the Platform Chain // db holds the user's info that pertains to the Platform Chain
userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil { if err != nil {
return errGetUser return fmt.Errorf("couldn't get user: %w", err)
} }
// The user // The user
@ -319,14 +322,14 @@ func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, re
// IDs of accounts controlled by this user // IDs of accounts controlled by this user
accountIDs, err := user.getAccountIDs() accountIDs, err := user.getAccountIDs()
if err != nil { if err != nil {
return errGetAccounts return fmt.Errorf("couldn't get accounts held by user: %w", err)
} }
reply.Accounts = []APIAccount{} reply.Accounts = []APIAccount{}
for _, accountID := range accountIDs { for _, accountID := range accountIDs {
account, err := service.vm.getAccount(service.vm.DB, accountID) // Get account whose ID is [accountID] account, err := service.vm.getAccount(service.vm.DB, accountID) // Get account whose ID is [accountID]
if err != nil && err != database.ErrNotFound { if err != nil && err != database.ErrNotFound {
service.vm.Ctx.Log.Error("couldn't get account from database: %v", err) service.vm.Ctx.Log.Error("couldn't get account from database: %w", err)
continue continue
} else if err == database.ErrNotFound { } else if err == database.ErrNotFound {
account = newAccount(accountID, 0, 0) account = newAccount(accountID, 0, 0)
@ -370,7 +373,7 @@ func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs,
// userDB holds the user's info that pertains to the Platform Chain // userDB holds the user's info that pertains to the Platform Chain
userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil { if err != nil {
return errGetUser return fmt.Errorf("couldn't get user: %w", err)
} }
// The user creating a new account // The user creating a new account
@ -428,7 +431,7 @@ type CreateTxResponse struct {
type AddDefaultSubnetValidatorArgs struct { type AddDefaultSubnetValidatorArgs struct {
APIDefaultSubnetValidator APIDefaultSubnetValidator
// Next unused nonce of the account the staked $AVA and tx fee are paid from // Next nonce of the sender
PayerNonce json.Uint64 `json:"payerNonce"` PayerNonce json.Uint64 `json:"payerNonce"`
} }
@ -437,8 +440,13 @@ type AddDefaultSubnetValidatorArgs struct {
func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error { func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error {
service.vm.Ctx.Log.Debug("AddDefaultSubnetValidator called") service.vm.Ctx.Log.Debug("AddDefaultSubnetValidator called")
if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID switch {
case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID
args.ID = service.vm.Ctx.NodeID args.ID = service.vm.Ctx.NodeID
case args.PayerNonce == 0:
return fmt.Errorf("sender's next nonce not specified")
case int64(args.StartTime) < time.Now().Unix():
return fmt.Errorf("start time must be in the future")
} }
// Create the transaction // Create the transaction
@ -482,8 +490,13 @@ type AddDefaultSubnetDelegatorArgs struct {
func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error { func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error {
service.vm.Ctx.Log.Debug("AddDefaultSubnetDelegator called") service.vm.Ctx.Log.Debug("AddDefaultSubnetDelegator called")
if args.ID.IsZero() { // If ID unspecified, use this node's ID as validator ID switch {
case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID
args.ID = service.vm.Ctx.NodeID args.ID = service.vm.Ctx.NodeID
case args.PayerNonce == 0:
return fmt.Errorf("sender's next unused nonce not specified")
case int64(args.StartTime) < time.Now().Unix():
return fmt.Errorf("start time must be in the future")
} }
// Create the transaction // Create the transaction
@ -571,6 +584,11 @@ type CreateSubnetArgs struct {
func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error { func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error {
service.vm.Ctx.Log.Debug("platform.createSubnet called") service.vm.Ctx.Log.Debug("platform.createSubnet called")
switch {
case args.PayerNonce == 0:
return fmt.Errorf("sender's next nonce not specified")
}
// Create the transaction // Create the transaction
tx := CreateSubnetTx{ tx := CreateSubnetTx{
UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{ UnsignedCreateSubnetTx: UnsignedCreateSubnetTx{
@ -612,6 +630,13 @@ type ExportAVAArgs struct {
func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, response *CreateTxResponse) error { func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, response *CreateTxResponse) error {
service.vm.Ctx.Log.Debug("platform.ExportAVA called") service.vm.Ctx.Log.Debug("platform.ExportAVA called")
switch {
case args.PayerNonce == 0:
return fmt.Errorf("sender's next nonce not specified")
case uint64(args.Amount) == 0:
return fmt.Errorf("amount must be >0")
}
// Create the transaction // Create the transaction
tx := ExportTx{UnsignedExportTx: UnsignedExportTx{ tx := ExportTx{UnsignedExportTx: UnsignedExportTx{
NetworkID: service.vm.Ctx.NetworkID, NetworkID: service.vm.Ctx.NetworkID,
@ -667,6 +692,10 @@ type SignResponse struct {
func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error {
service.vm.Ctx.Log.Debug("sign called") service.vm.Ctx.Log.Debug("sign called")
if args.Signer.IsZero() {
return errNilSigner
}
// Get the key of the Signer // Get the key of the Signer
db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil { if err != nil {
@ -719,7 +748,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali
unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx)
unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
if err != nil { if err != nil {
return nil, fmt.Errorf("error serializing unsigned tx: %v", err) return nil, fmt.Errorf("error serializing unsigned tx: %w", err)
} }
sig, err := key.Sign(unsignedTxBytes) sig, err := key.Sign(unsignedTxBytes)
@ -742,7 +771,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele
unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx)
unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
if err != nil { if err != nil {
return nil, fmt.Errorf("error serializing unsigned tx: %v", err) return nil, fmt.Errorf("error serializing unsigned tx: %w", err)
} }
sig, err := key.Sign(unsignedTxBytes) sig, err := key.Sign(unsignedTxBytes)
@ -765,7 +794,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva
unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx)
unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
if err != nil { if err != nil {
return nil, fmt.Errorf("error serializing unsigned tx: %v", err) return nil, fmt.Errorf("error serializing unsigned tx: %w", err)
} }
sig, err := key.Sign(unsignedTxBytes) sig, err := key.Sign(unsignedTxBytes)
@ -788,7 +817,7 @@ func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256
unsignedIntf := interface{}(&tx.UnsignedExportTx) unsignedIntf := interface{}(&tx.UnsignedExportTx)
unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
if err != nil { if err != nil {
return nil, fmt.Errorf("error serializing unsigned tx: %v", err) return nil, fmt.Errorf("error serializing unsigned tx: %w", err)
} }
sig, err := key.Sign(unsignedTxBytes) sig, err := key.Sign(unsignedTxBytes)
@ -816,7 +845,7 @@ func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubn
unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx)
unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
if err != nil { if err != nil {
return nil, fmt.Errorf("error serializing unsigned tx: %v", err) return nil, fmt.Errorf("error serializing unsigned tx: %w", err)
} }
sig, err := key.Sign(unsignedTxBytes) sig, err := key.Sign(unsignedTxBytes)
if err != nil { if err != nil {
@ -829,7 +858,7 @@ func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubn
// Get information about the subnet // Get information about the subnet
subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID()) subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID())
if err != nil { if err != nil {
return nil, fmt.Errorf("problem getting subnet information: %v", err) return nil, fmt.Errorf("problem getting subnet information: %w", err)
} }
// Find the location at which [key] should put its signature. // Find the location at which [key] should put its signature.
@ -861,7 +890,7 @@ type ImportAVAArgs struct {
// ID of the account that will receive the imported funds, and pay the transaction fee // ID of the account that will receive the imported funds, and pay the transaction fee
To ids.ShortID `json:"to"` To ids.ShortID `json:"to"`
// Next unused nonce of the account // Next nonce of the sender
PayerNonce json.Uint64 `json:"payerNonce"` PayerNonce json.Uint64 `json:"payerNonce"`
// User that controls the account // User that controls the account
@ -875,10 +904,17 @@ type ImportAVAArgs struct {
func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response *SignResponse) error { func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response *SignResponse) error {
service.vm.Ctx.Log.Debug("platform.ImportAVA called") service.vm.Ctx.Log.Debug("platform.ImportAVA called")
switch {
case args.To.IsZero():
return errNilTo
case args.PayerNonce == 0:
return fmt.Errorf("sender's next nonce not specified")
}
// Get the key of the Signer // Get the key of the Signer
db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) db, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password)
if err != nil { if err != nil {
return fmt.Errorf("couldn't get data for user '%s'. Does user exist?", args.Username) return fmt.Errorf("couldn't get user: %w", err)
} }
user := user{db: db} user := user{db: db}
@ -991,7 +1027,7 @@ func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.Private
unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) unsignedIntf := interface{}(&tx.UnsignedCreateChainTx)
unsignedTxBytes, err := Codec.Marshal(&unsignedIntf) unsignedTxBytes, err := Codec.Marshal(&unsignedIntf)
if err != nil { if err != nil {
return nil, fmt.Errorf("error serializing unsigned tx: %v", err) return nil, fmt.Errorf("error serializing unsigned tx: %w", err)
} }
sig, err := key.Sign(unsignedTxBytes) sig, err := key.Sign(unsignedTxBytes)
if err != nil { if err != nil {
@ -1004,7 +1040,7 @@ func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.Private
// Get information about the subnet // Get information about the subnet
subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID) subnet, err := service.vm.getSubnet(service.vm.DB, tx.SubnetID)
if err != nil { if err != nil {
return nil, fmt.Errorf("problem getting subnet information: %v", err) return nil, fmt.Errorf("problem getting subnet information: %w", err)
} }
// Find the location at which [key] should put its signature. // Find the location at which [key] should put its signature.
@ -1099,7 +1135,7 @@ type CreateBlockchainArgs struct {
// Human-readable name for the new blockchain, not necessarily unique // Human-readable name for the new blockchain, not necessarily unique
Name string `json:"name"` Name string `json:"name"`
// Next unused nonce of the account paying the transaction fee // Next nonce of the sender
PayerNonce json.Uint64 `json:"payerNonce"` PayerNonce json.Uint64 `json:"payerNonce"`
// Genesis state of the blockchain being created // Genesis state of the blockchain being created
@ -1111,6 +1147,15 @@ type CreateBlockchainArgs struct {
func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error { func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error {
service.vm.Ctx.Log.Debug("createBlockchain called") service.vm.Ctx.Log.Debug("createBlockchain called")
switch {
case args.PayerNonce == 0:
return errors.New("sender's next nonce not specified")
case args.VMID == "":
return errors.New("VM not specified")
case args.SubnetID.Equals(ids.Empty):
return errors.New("subnet not specified")
}
vmID, err := service.vm.chainManager.LookupVM(args.VMID) vmID, err := service.vm.chainManager.LookupVM(args.VMID)
if err != nil { if err != nil {
return fmt.Errorf("no VM with ID '%s' found", args.VMID) return fmt.Errorf("no VM with ID '%s' found", args.VMID)
@ -1156,7 +1201,7 @@ func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchain
txBytes, err := Codec.Marshal(genericTx{Tx: &tx}) txBytes, err := Codec.Marshal(genericTx{Tx: &tx})
if err != nil { if err != nil {
service.vm.Ctx.Log.Error("problem marshaling createChainTx: %v", err) service.vm.Ctx.Log.Error("problem marshaling createChainTx: %w", err)
return errCreatingTransaction return errCreatingTransaction
} }
@ -1180,6 +1225,11 @@ type GetBlockchainStatusReply struct {
func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error {
service.vm.Ctx.Log.Debug("getBlockchainStatus called") service.vm.Ctx.Log.Debug("getBlockchainStatus called")
switch {
case args.BlockchainID == "":
return errors.New("'blockchainID' not given")
}
_, err := service.vm.chainManager.Lookup(args.BlockchainID) _, err := service.vm.chainManager.Lookup(args.BlockchainID)
if err == nil { if err == nil {
reply.Status = Validating reply.Status = Validating
@ -1255,6 +1305,11 @@ type ValidatedByResponse struct {
func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error {
service.vm.Ctx.Log.Debug("validatedBy called") service.vm.Ctx.Log.Debug("validatedBy called")
switch {
case args.BlockchainID.Equals(ids.Empty):
return errors.New("'blockchainID' not specified")
}
chain, err := service.vm.getChain(service.vm.DB, args.BlockchainID) chain, err := service.vm.getChain(service.vm.DB, args.BlockchainID)
if err != nil { if err != nil {
return err return err
@ -1277,6 +1332,11 @@ type ValidatesResponse struct {
func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error {
service.vm.Ctx.Log.Debug("validates called") service.vm.Ctx.Log.Debug("validates called")
switch {
case args.SubnetID.Equals(ids.Empty):
return errors.New("'subnetID' not specified")
}
// Verify that the Subnet exists // Verify that the Subnet exists
if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil { if _, err := service.vm.getSubnet(service.vm.DB, args.SubnetID); err != nil {
return err return err
@ -1322,7 +1382,7 @@ func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response
chains, err := service.vm.getChains(service.vm.DB) chains, err := service.vm.getChains(service.vm.DB)
if err != nil { if err != nil {
return fmt.Errorf("couldn't retrieve blockchains: %v", err) return fmt.Errorf("couldn't retrieve blockchains: %w", err)
} }
for _, chain := range chains { for _, chain := range chains {

View File

@ -405,7 +405,12 @@ func (vm *VM) Shutdown() {
return return
} }
// There is a potential deadlock if the timer is about to execute a timeout.
// So, the lock must be released before stopping the timer.
vm.Ctx.Lock.Unlock()
vm.timer.Stop() vm.timer.Stop()
vm.Ctx.Lock.Lock()
if err := vm.DB.Close(); err != nil { if err := vm.DB.Close(); err != nil {
vm.Ctx.Log.Error("Closing the database failed with %s", err) vm.Ctx.Log.Error("Closing the database failed with %s", err)
} }

View File

@ -142,6 +142,8 @@ func defaultVM() *VM {
db := memdb.New() db := memdb.New()
msgChan := make(chan common.Message, 1) msgChan := make(chan common.Message, 1)
ctx := defaultContext() ctx := defaultContext()
ctx.Lock.Lock()
defer ctx.Lock.Unlock()
if err := vm.Initialize(ctx, db, genesisBytes, msgChan, nil); err != nil { if err := vm.Initialize(ctx, db, genesisBytes, msgChan, nil); err != nil {
panic(err) panic(err)
} }
@ -233,6 +235,11 @@ func GenesisCurrentValidators() *EventHeap {
// Ensure genesis state is parsed from bytes and stored correctly // Ensure genesis state is parsed from bytes and stored correctly
func TestGenesis(t *testing.T) { func TestGenesis(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Ensure the genesis block has been accepted and stored // Ensure the genesis block has been accepted and stored
genesisBlockID := vm.LastAccepted() // lastAccepted should be ID of genesis block genesisBlockID := vm.LastAccepted() // lastAccepted should be ID of genesis block
@ -302,6 +309,12 @@ func TestGenesis(t *testing.T) {
// accept proposal to add validator to default subnet // accept proposal to add validator to default subnet
func TestAddDefaultSubnetValidatorCommit(t *testing.T) { func TestAddDefaultSubnetValidatorCommit(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
startTime := defaultGenesisTime.Add(Delta).Add(1 * time.Second) startTime := defaultGenesisTime.Add(Delta).Add(1 * time.Second)
endTime := startTime.Add(MinimumStakingDuration) endTime := startTime.Add(MinimumStakingDuration)
key, _ := vm.factory.NewPrivateKey() key, _ := vm.factory.NewPrivateKey()
@ -325,12 +338,10 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) {
// trigger block creation // trigger block creation
vm.unissuedEvents.Add(tx) vm.unissuedEvents.Add(tx)
vm.Ctx.Lock.Lock()
blk, err := vm.BuildBlock() blk, err := vm.BuildBlock()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
vm.Ctx.Lock.Unlock()
// Assert preferences are correct // Assert preferences are correct
block := blk.(*ProposalBlock) block := blk.(*ProposalBlock)
@ -369,6 +380,12 @@ func TestAddDefaultSubnetValidatorCommit(t *testing.T) {
// Reject proposal to add validator to default subnet // Reject proposal to add validator to default subnet
func TestAddDefaultSubnetValidatorReject(t *testing.T) { func TestAddDefaultSubnetValidatorReject(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
startTime := defaultGenesisTime.Add(Delta).Add(1 * time.Second) startTime := defaultGenesisTime.Add(Delta).Add(1 * time.Second)
endTime := startTime.Add(MinimumStakingDuration) endTime := startTime.Add(MinimumStakingDuration)
key, _ := vm.factory.NewPrivateKey() key, _ := vm.factory.NewPrivateKey()
@ -392,12 +409,10 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) {
// trigger block creation // trigger block creation
vm.unissuedEvents.Add(tx) vm.unissuedEvents.Add(tx)
vm.Ctx.Lock.Lock()
blk, err := vm.BuildBlock() blk, err := vm.BuildBlock()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
vm.Ctx.Lock.Unlock()
// Assert preferences are correct // Assert preferences are correct
block := blk.(*ProposalBlock) block := blk.(*ProposalBlock)
@ -440,6 +455,12 @@ func TestAddDefaultSubnetValidatorReject(t *testing.T) {
// Accept proposal to add validator to non-default subnet // Accept proposal to add validator to non-default subnet
func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) { func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second) startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second)
endTime := startTime.Add(MinimumStakingDuration) endTime := startTime.Add(MinimumStakingDuration)
@ -463,12 +484,10 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) {
// trigger block creation // trigger block creation
vm.unissuedEvents.Add(tx) vm.unissuedEvents.Add(tx)
vm.Ctx.Lock.Lock()
blk, err := vm.BuildBlock() blk, err := vm.BuildBlock()
if err != nil { if err != nil {
t.Fatal(err) t.Fatal(err)
} }
vm.Ctx.Lock.Unlock()
// Assert preferences are correct // Assert preferences are correct
block := blk.(*ProposalBlock) block := blk.(*ProposalBlock)
@ -511,6 +530,12 @@ func TestAddNonDefaultSubnetValidatorAccept(t *testing.T) {
// Reject proposal to add validator to non-default subnet // Reject proposal to add validator to non-default subnet
func TestAddNonDefaultSubnetValidatorReject(t *testing.T) { func TestAddNonDefaultSubnetValidatorReject(t *testing.T) {
vm := defaultVM() vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
startTime := defaultValidateStartTime.Add(Delta).Add(1 * time.Second)
endTime := startTime.Add(MinimumStakingDuration)
key, _ := vm.factory.NewPrivateKey()
@@ -536,12 +561,10 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) {
// trigger block creation
vm.unissuedEvents.Add(tx)
vm.Ctx.Lock.Lock()
blk, err := vm.BuildBlock()
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
block := blk.(*ProposalBlock)
@@ -584,16 +607,19 @@ func TestAddNonDefaultSubnetValidatorReject(t *testing.T) {
// Test case where default subnet validator rewarded
func TestRewardValidatorAccept(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Fast forward clock to time for genesis validators to leave
vm.clock.Set(defaultValidateEndTime)
vm.Ctx.Lock.Lock()
blk, err := vm.BuildBlock() // should contain proposal to advance time
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
block := blk.(*ProposalBlock)
@@ -630,12 +656,10 @@ func TestRewardValidatorAccept(t *testing.T) {
t.Fatal("expected timestamp to have advanced")
}
vm.Ctx.Lock.Lock()
blk, err = vm.BuildBlock() // should contain proposal to reward genesis validator
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
block = blk.(*ProposalBlock)
@@ -676,16 +700,19 @@ func TestRewardValidatorAccept(t *testing.T) {
// Test case where default subnet validator not rewarded
func TestRewardValidatorReject(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
// Fast forward clock to time for genesis validators to leave
vm.clock.Set(defaultValidateEndTime)
vm.Ctx.Lock.Lock()
blk, err := vm.BuildBlock() // should contain proposal to advance time
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
block := blk.(*ProposalBlock)
@@ -722,12 +749,10 @@ func TestRewardValidatorReject(t *testing.T) {
t.Fatal("expected timestamp to have advanced")
}
vm.Ctx.Lock.Lock()
blk, err = vm.BuildBlock() // should contain proposal to reward genesis validator
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
block = blk.(*ProposalBlock)
@@ -768,6 +793,11 @@ func TestRewardValidatorReject(t *testing.T) {
// Ensure BuildBlock errors when there is no block to build
func TestUnneededBuildBlock(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
if _, err := vm.BuildBlock(); err == nil {
t.Fatalf("Should have errored on BuildBlock")
@@ -777,6 +807,11 @@ func TestUnneededBuildBlock(t *testing.T) {
// test acceptance of proposal to create a new chain
func TestCreateChain(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
tx, err := vm.newCreateChainTx(
defaultNonce+1,
@@ -793,13 +828,11 @@ func TestCreateChain(t *testing.T) {
t.Fatal(err)
}
vm.Ctx.Lock.Lock()
vm.unissuedDecisionTxs = append(vm.unissuedDecisionTxs, tx)
blk, err := vm.BuildBlock() // should contain proposal to create chain
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
if err := blk.Verify(); err != nil {
t.Fatal(err)
@@ -839,6 +872,11 @@ func TestCreateChain(t *testing.T) {
// 4) Advance timestamp to validator's end time (removing validator from current)
func TestCreateSubnet(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
createSubnetTx, err := vm.newCreateSubnetTx(
testNetworkID,
@@ -854,13 +892,11 @@ func TestCreateSubnet(t *testing.T) {
t.Fatal(err)
}
vm.Ctx.Lock.Lock()
vm.unissuedDecisionTxs = append(vm.unissuedDecisionTxs, createSubnetTx)
blk, err := vm.BuildBlock() // should contain proposal to create subnet
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
if err := blk.Verify(); err != nil {
t.Fatal(err)
@@ -917,13 +953,11 @@ func TestCreateSubnet(t *testing.T) {
t.Fatal(err)
}
vm.Ctx.Lock.Lock()
vm.unissuedEvents.Push(addValidatorTx)
blk, err = vm.BuildBlock() // should add validator to the new subnet
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
// and accept the proposal/commit
@@ -971,12 +1005,10 @@ func TestCreateSubnet(t *testing.T) {
// from pending to current validator set
vm.clock.Set(startTime)
vm.Ctx.Lock.Lock()
blk, err = vm.BuildBlock() // should be advance time tx
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
// and accept the proposal/commit
@@ -1031,12 +1063,10 @@ func TestCreateSubnet(t *testing.T) {
// fast forward clock to time validator should stop validating
vm.clock.Set(endTime)
vm.Ctx.Lock.Lock()
blk, err = vm.BuildBlock() // should be advance time tx
if err != nil {
t.Fatal(err)
}
vm.Ctx.Lock.Unlock()
// Assert preferences are correct
// and accept the proposal/commit
@@ -1084,6 +1114,11 @@ func TestCreateSubnet(t *testing.T) {
// test asset import
func TestAtomicImport(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
avmID := ids.Empty.Prefix(0)
utxoID := ava.UTXOID{
@@ -1117,9 +1152,6 @@ func TestAtomicImport(t *testing.T) {
t.Fatal(err)
}
vm.Ctx.Lock.Lock()
defer vm.Ctx.Lock.Unlock()
vm.ava = assetID
vm.avm = avmID
@@ -1175,6 +1207,11 @@ func TestAtomicImport(t *testing.T) {
// test optimistic asset import
func TestOptimisticAtomicImport(t *testing.T) {
vm := defaultVM()
vm.Ctx.Lock.Lock()
defer func() {
vm.Shutdown()
vm.Ctx.Lock.Unlock()
}()
avmID := ids.Empty.Prefix(0)
utxoID := ava.UTXOID{
@@ -1208,9 +1245,6 @@ func TestOptimisticAtomicImport(t *testing.T) {
t.Fatal(err)
}
vm.Ctx.Lock.Lock()
defer vm.Ctx.Lock.Unlock()
vm.ava = assetID
vm.avm = avmID
@@ -1271,6 +1305,8 @@ func TestRestartPartiallyAccepted(t *testing.T) {
firstVM.clock.Set(defaultGenesisTime)
firstCtx := defaultContext()
firstCtx.Lock.Lock()
firstMsgChan := make(chan common.Message, 1)
if err := firstVM.Initialize(firstCtx, db, genesisBytes, firstMsgChan, nil); err != nil {
t.Fatal(err)
@@ -1318,6 +1354,7 @@ func TestRestartPartiallyAccepted(t *testing.T) {
}
firstVM.Shutdown()
firstCtx.Lock.Unlock()
secondVM := &VM{
SnowmanVM: &core.SnowmanVM{},
@@ -1330,6 +1367,12 @@ func TestRestartPartiallyAccepted(t *testing.T) {
secondVM.clock.Set(defaultGenesisTime)
secondCtx := defaultContext()
secondCtx.Lock.Lock()
defer func() {
secondVM.Shutdown()
secondCtx.Lock.Unlock()
}()
secondMsgChan := make(chan common.Message, 1)
if err := secondVM.Initialize(secondCtx, db, genesisBytes, secondMsgChan, nil); err != nil {
t.Fatal(err)
@@ -1371,6 +1414,8 @@ func TestRestartFullyAccepted(t *testing.T) {
firstVM.clock.Set(defaultGenesisTime)
firstCtx := defaultContext()
firstCtx.Lock.Lock()
firstMsgChan := make(chan common.Message, 1)
if err := firstVM.Initialize(firstCtx, db, genesisBytes, firstMsgChan, nil); err != nil {
t.Fatal(err)
@@ -1418,6 +1463,7 @@ func TestRestartFullyAccepted(t *testing.T) {
}
firstVM.Shutdown()
firstCtx.Lock.Unlock()
secondVM := &VM{
SnowmanVM: &core.SnowmanVM{},
@@ -1430,6 +1476,12 @@ func TestRestartFullyAccepted(t *testing.T) {
secondVM.clock.Set(defaultGenesisTime)
secondCtx := defaultContext()
secondCtx.Lock.Lock()
defer func() {
secondVM.Shutdown()
secondCtx.Lock.Unlock()
}()
secondMsgChan := make(chan common.Message, 1)
if err := secondVM.Initialize(secondCtx, db, genesisBytes, secondMsgChan, nil); err != nil {
t.Fatal(err)
@@ -1471,7 +1523,6 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
SnowmanVM: &core.SnowmanVM{},
chainManager: chains.MockManager{},
}
defer vm.Shutdown()
defaultSubnet := validators.NewSet()
vm.validators = validators.NewManager()
@@ -1479,9 +1530,9 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
vm.clock.Set(defaultGenesisTime)
ctx := defaultContext()
msgChan := make(chan common.Message, 1)
ctx.Lock.Lock()
msgChan := make(chan common.Message, 1)
if err := vm.Initialize(ctx, vmDB, genesisBytes, msgChan, nil); err != nil {
t.Fatal(err)
}
@@ -1510,7 +1561,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
go timeoutManager.Dispatch()
router := &router.ChainRouter{}
router.Initialize(logging.NoLog{}, &timeoutManager)
router.Initialize(logging.NoLog{}, &timeoutManager, time.Hour)
externalSender := &sender.ExternalSenderTest{T: t}
externalSender.Default(true)
@@ -1581,6 +1632,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) {
externalSender.GetF = nil
externalSender.CantPushQuery = false
externalSender.CantPullQuery = false
engine.Put(ctx.NodeID, *reqID, advanceTimeBlkID, advanceTimeBlkBytes)
@@ -1627,6 +1679,12 @@ func TestUnverifiedParent(t *testing.T) {
vm.clock.Set(defaultGenesisTime)
ctx := defaultContext()
ctx.Lock.Lock()
defer func() {
vm.Shutdown()
ctx.Lock.Unlock()
}()
msgChan := make(chan common.Message, 1)
if err := vm.Initialize(ctx, db, genesisBytes, msgChan, nil); err != nil {
t.Fatal(err)

View File

@@ -62,10 +62,11 @@ func ConsensusLeader(numBlocks, numTxsPerBlock int, b *testing.B) {
go timeoutManager.Dispatch()
router := &router.ChainRouter{}
router.Initialize(logging.NoLog{}, &timeoutManager)
router.Initialize(logging.NoLog{}, &timeoutManager, time.Hour)
// Initialize the VM
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
ctx.Lock.Lock()
if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, nil); err != nil {
b.Fatal(err)
@@ -189,7 +190,7 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) {
go timeoutManager.Dispatch()
router := &router.ChainRouter{}
router.Initialize(logging.NoLog{}, &timeoutManager)
router.Initialize(logging.NoLog{}, &timeoutManager, time.Hour)
wg := sync.WaitGroup{}
wg.Add(numBlocks)
@@ -198,6 +199,7 @@ func ConsensusFollower(numBlocks, numTxsPerBlock int, b *testing.B) {
vm := &VM{
onAccept: func(ids.ID) { wg.Done() },
}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
ctx.Lock.Lock()
if err := vm.Initialize(ctx, vmDB, genesisData, msgChan, nil); err != nil {
b.Fatal(err)

View File

@@ -122,7 +122,11 @@ func (vm *VM) Shutdown() {
return
}
// There is a potential deadlock if the timer is about to execute a timeout.
// So, the lock must be released before stopping the timer.
vm.ctx.Lock.Unlock()
vm.timer.Stop()
vm.ctx.Lock.Lock()
if err := vm.baseDB.Close(); err != nil {
vm.ctx.Log.Error("Closing the database failed with %s", err)
}
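The comment added in this hunk describes a lock-ordering hazard: Shutdown is entered with the context lock held, while the timer's timeout handler also needs that lock, so stopping the timer without first releasing the lock can wait forever on a handler that is itself waiting for the lock. The following self-contained Go sketch (toyTimer and toyVM are hypothetical stand-ins, not the Gecko timer package) reproduces the shape of that interaction and shows why the unlock/Stop/lock sequence is safe; the same pattern appears again in the second Shutdown hunk further below.

package main

import (
	"fmt"
	"sync"
	"time"
)

// toyTimer holds its own mutex while the callback runs, the way a dispatch
// loop would, so Stop blocks until any in-flight callback has finished.
type toyTimer struct {
	mu      sync.Mutex
	stopped bool
	f       func()
}

func (t *toyTimer) fire() {
	t.mu.Lock()
	defer t.mu.Unlock()
	if !t.stopped {
		t.f() // the callback executes while t.mu is held
	}
}

func (t *toyTimer) Stop() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.stopped = true
}

// toyVM models a VM whose timeout callback needs the same context lock that
// Shutdown is called with.
type toyVM struct {
	ctxLock sync.Mutex
	timer   *toyTimer
}

func (v *toyVM) Shutdown() {
	// Entered with ctxLock held. If the callback is blocked waiting for
	// ctxLock, calling Stop() while still holding the lock deadlocks:
	// Stop() waits for the callback, and the callback waits for us.
	v.ctxLock.Unlock()
	v.timer.Stop()
	v.ctxLock.Lock()
}

func main() {
	v := &toyVM{}
	v.timer = &toyTimer{f: func() {
		v.ctxLock.Lock() // what a batch-timeout handler would do
		defer v.ctxLock.Unlock()
		fmt.Println("timeout handled")
	}}

	v.ctxLock.Lock()  // the engine holds the context lock around Shutdown
	go v.timer.fire() // a timeout fires concurrently with shutdown
	time.Sleep(10 * time.Millisecond)

	v.Shutdown() // returns because the lock is released around timer.Stop()
	v.ctxLock.Unlock()
	fmt.Println("shut down without deadlocking")
}

Removing the Unlock/Lock pair around Stop in this sketch makes it hang, which is exactly the situation the comment warns about.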

View File

@@ -73,6 +73,7 @@ func BenchmarkParseBlock(b *testing.B) {
/*testing=*/ b,
)
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(
/*ctx=*/ ctx,
/*db=*/ memdb.New(),
@@ -106,6 +107,7 @@ func BenchmarkParseAndVerify(b *testing.B) {
for n := 0; n < b.N; n++ {
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(
/*ctx=*/ snow.DefaultContextTest(),
/*db=*/ memdb.New(),
@@ -141,6 +143,8 @@ func BenchmarkAccept(b *testing.B) {
for n := 0; n < b.N; n++ {
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(
/*ctx=*/ snow.DefaultContextTest(),
/*db=*/ memdb.New(),
@@ -178,6 +182,7 @@ func ParseAndVerifyAndAccept(numBlocks, numTxsPerBlock int, b *testing.B) {
for n := 0; n < b.N; n++ {
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(
/*ctx=*/ snow.DefaultContextTest(),
/*db=*/ memdb.New(),
@@ -232,6 +237,7 @@ func ParseThenVerifyThenAccept(numBlocks, numTxsPerBlock int, b *testing.B) {
for n := 0; n < b.N; n++ {
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(
/*ctx=*/ snow.DefaultContextTest(),
/*db=*/ memdb.New(),
@@ -292,6 +298,7 @@ func IssueAndVerifyAndAccept(numBlocks, numTxsPerBlock int, b *testing.B) {
for n := 0; n < b.N; n++ {
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(
/*ctx=*/ snow.DefaultContextTest(),
/*db=*/ memdb.New(),

View File

@@ -67,6 +67,7 @@ func TestPayments(t *testing.T) {
blocker, _ := queue.New(bootstrappingDB)
vm := &VM{}
defer func() { ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(ctx, db, genesisData, msgChan, nil)
sender := &common.SenderTest{}

View File

@@ -134,7 +134,11 @@ func (vm *VM) Shutdown() {
return
}
// There is a potential deadlock if the timer is about to execute a timeout.
// So, the lock must be released before stopping the timer.
vm.ctx.Lock.Unlock()
vm.timer.Stop()
vm.ctx.Lock.Lock()
if err := vm.baseDB.Close(); err != nil {
vm.ctx.Log.Error("Closing the database failed with %s", err)
}

View File

@@ -91,6 +91,7 @@ func TestAva(t *testing.T) {
msgChan := make(chan common.Message, 1)
vm := &VM{}
defer func() { vm.ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil)
vm.batchTimeout = 0
@@ -172,6 +173,7 @@ func TestInvalidSpentTx(t *testing.T) {
msgChan := make(chan common.Message, 1)
vm := &VM{}
defer func() { vm.ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
ctx.Lock.Lock()
vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil)
@@ -258,6 +260,7 @@ func TestInvalidTxVerification(t *testing.T) {
msgChan := make(chan common.Message, 1)
vm := &VM{}
defer func() { vm.ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
ctx.Lock.Lock()
vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil)
@@ -319,6 +322,7 @@ func TestRPCAPI(t *testing.T) {
vmDB := memdb.New()
msgChan := make(chan common.Message, 1)
vm := &VM{}
defer func() { vm.ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil)
vm.batchTimeout = 0
@@ -526,6 +530,7 @@ func TestMultipleSend(t *testing.T) {
vmDB := memdb.New()
msgChan := make(chan common.Message, 1)
vm := &VM{}
defer func() { vm.ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil)
// Initialize these data structures
@@ -635,6 +640,7 @@ func TestIssuePendingDependency(t *testing.T) {
ctx.Lock.Lock()
vm := &VM{}
defer func() { vm.ctx.Lock.Lock(); vm.Shutdown(); vm.ctx.Lock.Unlock() }()
vm.Initialize(ctx, vmDB, genesisTx.Bytes(), msgChan, nil)
vm.batchTimeout = 0