Merge branch 'master' into build-success-msg

commit 91f3eb8600
Stephen Buttolph, 2020-04-27 13:28:59 -04:00, committed by GitHub
16 changed files with 166 additions and 85 deletions


@@ -150,9 +150,9 @@ func TestIDUnmarshalJSON(t *testing.T) {
 func TestIDHex(t *testing.T) {
     id := NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'})
-    expected := "617661206c61627300000000000000000000000000000000000000000000000000"
+    expected := "617661206c616273000000000000000000000000000000000000000000000000"
     actual := id.Hex()
-    if actual != actual {
+    if actual != expected {
         t.Fatalf("got %s, expected %s", actual, expected)
     }
 }

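Two separate bugs are fixed in the hunk above: a 32-byte ID hex-encodes to exactly 64 characters, so the old expected string was two characters too long, and the comparison "actual != actual" can never be true, so the assertion was a no-op. A standalone sketch (not the gecko test itself) of the encoding length:

// Sketch only: hex-encoding 32 bytes always yields 64 characters, which is
// why the corrected expected string is shorter than the old one.
package main

import (
    "encoding/hex"
    "fmt"
)

func main() {
    var id [32]byte
    copy(id[:], "ava labs") // same bytes as NewID([32]byte{'a', 'v', 'a', ' ', 'l', 'a', 'b', 's'})
    encoded := hex.EncodeToString(id[:])
    fmt.Println(len(encoded)) // 64
    fmt.Println(encoded)      // 617661206c616273 followed by 48 zeros
}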

@@ -26,6 +26,10 @@ import (
     "github.com/ava-labs/gecko/utils/wrappers"
 )
 
+const (
+    dbVersion = "v0.1.0"
+)
+
 // Results of parsing the CLI
 var (
     Config = node.Config{}
@@ -143,7 +147,7 @@ func init() {
     // DB:
     if *db && err == nil {
         // TODO: Add better params here
-        dbPath := path.Join(*dbDir, genesis.NetworkName(Config.NetworkID))
+        dbPath := path.Join(*dbDir, genesis.NetworkName(Config.NetworkID), dbVersion)
         db, err := leveldb.New(dbPath, 0, 0, 0)
         Config.DB = db
         errs.Add(err)

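The new dbVersion constant layers a version directory under the per-network directory, so a node whose on-disk format changes opens a fresh database instead of an incompatible old one. A minimal sketch of the resulting layout, with an illustrative directory and network name (not gecko defaults):

// Sketch only: shows the path layering introduced above.
package main

import (
    "fmt"
    "path"
)

func main() {
    dbDir := "/home/user/.gecko/db" // hypothetical db-dir value
    networkName := "testnet"        // hypothetical genesis.NetworkName(...) result
    dbVersion := "v0.1.0"

    fmt.Println(path.Join(dbDir, networkName, dbVersion))
    // /home/user/.gecko/db/testnet/v0.1.0
}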

@@ -19,6 +19,8 @@ import (
     "errors"
     "fmt"
     "math"
+    "strconv"
+    "strings"
     "sync"
     "time"
     "unsafe"
@@ -61,9 +63,23 @@ Attempt reconnections
 node isn't connected to after awhile delete the connection.
 */
 
+// Version this avalanche instance is executing.
+var (
+    VersionPrefix    = "avalanche/"
+    VersionSeparator = "."
+    MajorVersion     = 0
+    MinorVersion     = 1
+    PatchVersion     = 0
+    ClientVersion    = fmt.Sprintf("%s%d%s%d%s%d",
+        VersionPrefix,
+        MajorVersion,
+        VersionSeparator,
+        MinorVersion,
+        VersionSeparator,
+        PatchVersion)
+)
+
 const (
-    // CurrentVersion this avalanche instance is executing.
-    CurrentVersion = "avalanche/0.0.1"
     // MaxClockDifference allowed between connected nodes.
     MaxClockDifference = time.Minute
     // PeerListGossipSpacing is the amount of time to wait between pushing this
@@ -356,7 +372,7 @@ func (nm *Handshake) SendGetVersion(peer salticidae.PeerID) {
 // SendVersion to the requested peer
 func (nm *Handshake) SendVersion(peer salticidae.PeerID) error {
     build := Builder{}
-    v, err := build.Version(nm.networkID, nm.clock.Unix(), toIPDesc(nm.myAddr), CurrentVersion)
+    v, err := build.Version(nm.networkID, nm.clock.Unix(), toIPDesc(nm.myAddr), ClientVersion)
     if err != nil {
         return fmt.Errorf("packing Version failed due to %s", err)
     }
@@ -518,6 +534,59 @@ func (nm *Handshake) disconnectedFromPeer(peer salticidae.PeerID) {
     }
 }
 
+// checkCompatibility Check to make sure that the peer and I speak the same language.
+func (nm *Handshake) checkCompatibility(peerVersion string) bool {
+    if !strings.HasPrefix(peerVersion, VersionPrefix) {
+        nm.log.Warn("Peer attempted to connect with an invalid version prefix")
+        return false
+    }
+    peerVersion = peerVersion[len(VersionPrefix):]
+
+    splitPeerVersion := strings.SplitN(peerVersion, VersionSeparator, 3)
+    if len(splitPeerVersion) != 3 {
+        nm.log.Warn("Peer attempted to connect with an invalid number of subversions")
+        return false
+    }
+
+    major, err := strconv.Atoi(splitPeerVersion[0])
+    if err != nil {
+        nm.log.Warn("Peer attempted to connect with an invalid major version")
+        return false
+    }
+    minor, err := strconv.Atoi(splitPeerVersion[1])
+    if err != nil {
+        nm.log.Warn("Peer attempted to connect with an invalid minor version")
+        return false
+    }
+    patch, err := strconv.Atoi(splitPeerVersion[2])
+    if err != nil {
+        nm.log.Warn("Peer attempted to connect with an invalid patch version")
+        return false
+    }
+
+    switch {
+    case major < MajorVersion:
+        // peers major version is too low
+        return false
+    case major > MajorVersion:
+        nm.log.Warn("Peer attempted to connect with a higher major version, this client may need to be updated")
+        return false
+    }
+
+    switch {
+    case minor < MinorVersion:
+        // peers minor version is too low
+        return false
+    case minor > MinorVersion:
+        nm.log.Warn("Peer attempted to connect with a higher minor version, this client may need to be updated")
+        return false
+    }
+
+    if patch > PatchVersion {
+        nm.log.Warn("Peer is connecting with a higher patch version, this client may need to be updated")
+    }
+    return true
+}
+
 // peerHandler notifies a change to the set of connected peers
 // connected is true if a new peer is connected
 // connected is false if a formerly connected peer has disconnected
@@ -645,8 +714,8 @@ func version(_msg *C.struct_msg_t, _conn *C.struct_msgnetwork_conn_t, _ unsafe.Pointer) {
         return
     }
 
-    if peerVersion := pMsg.Get(VersionStr).(string); !checkCompatibility(CurrentVersion, peerVersion) {
-        HandshakeNet.log.Warn("Bad version")
+    if peerVersion := pMsg.Get(VersionStr).(string); !HandshakeNet.checkCompatibility(peerVersion) {
+        HandshakeNet.log.Debug("Dropping connection due to an incompatible version from peer")
         HandshakeNet.net.DelPeer(peer)
         return
@@ -741,12 +810,6 @@ func getCert(cert salticidae.X509) ids.ShortID {
     return certID
 }
 
-// checkCompatibility Check to make sure that the peer and I speak the same language.
-func checkCompatibility(myVersion string, peerVersion string) bool {
-    // At the moment, we are all compatible.
-    return true
-}
-
 func toShortID(ip utils.IPDesc) ids.ShortID {
     return ids.NewShortID(hashing.ComputeHash160Array([]byte(ip.String())))
 }

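The handshake now advertises a structured "avalanche/<major>.<minor>.<patch>" string and rejects any peer whose major or minor version differs in either direction; only a patch difference is tolerated, with a warning when the peer's patch is newer. A self-contained sketch of that comparison, written outside the Handshake type so it can be run directly; compatibleWith is an illustrative name, not gecko API:

// Sketch only: mirrors the parse-and-compare logic of checkCompatibility.
package main

import (
    "fmt"
    "strconv"
    "strings"
)

const (
    versionPrefix    = "avalanche/"
    versionSeparator = "."
    majorVersion     = 0
    minorVersion     = 1
)

func compatibleWith(peerVersion string) bool {
    if !strings.HasPrefix(peerVersion, versionPrefix) {
        return false // wrong prefix
    }
    parts := strings.SplitN(peerVersion[len(versionPrefix):], versionSeparator, 3)
    if len(parts) != 3 {
        return false // wrong number of subversions
    }
    nums := make([]int, 3)
    for i, p := range parts {
        n, err := strconv.Atoi(p)
        if err != nil {
            return false // non-numeric subversion
        }
        nums[i] = n
    }
    // Major and minor must match exactly; the patch may differ
    // (the real code only logs a warning for a newer patch).
    return nums[0] == majorVersion && nums[1] == minorVersion
}

func main() {
    for _, v := range []string{"avalanche/0.1.0", "avalanche/0.1.7", "avalanche/0.2.0", "avalanche/0.0.1", "bad/0.1.0"} {
        fmt.Printf("%-16s -> %v\n", v, compatibleWith(v))
    }
}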

@@ -10,20 +10,21 @@ import (
 )
 
 const (
-    errMsg = "__________ .___\n" +
-        "\\______ \\____________ __| _/__.__.\n" +
-        " | | _/\\_ __ \\__ \\ / __ < | |\n" +
-        " | | \\ | | \\// __ \\_/ /_/ |\\___ |\n" +
-        " |______ / |__| (____ /\\____ |/ ____|\n" +
-        " \\/ \\/ \\/\\/\n" +
-        "\n" +
-        "🏆 🏆 🏆 🏆 🏆 🏆\n" +
-        " ________ ________ ________________\n" +
-        " / _____/ \\_____ \\ / _ \\__ ___/\n" +
-        "/ \\ ___ / | \\ / /_\\ \\| |\n" +
-        "\\ \\_\\ \\/ | \\/ | \\ |\n" +
-        " \\______ /\\_______ /\\____|__ /____|\n" +
-        " \\/ \\/ \\/\n"
+    errMsg = "" +
+        `__________ .___` + "\n" +
+        `\______ \____________ __| _/__.__.` + "\n" +
+        ` | | _/\_ __ \__ \ / __ < | |` + "\n" +
+        ` | | \ | | \// __ \_/ /_/ |\___ |` + "\n" +
+        ` |______ / |__| (____ /\____ |/ ____|` + "\n" +
+        ` \/ \/ \/\/` + "\n" +
+        "\n" +
+        `🏆 🏆 🏆 🏆 🏆 🏆` + "\n" +
+        ` ________ ________ ________________` + "\n" +
+        ` / _____/ \_____ \ / _ \__ ___/` + "\n" +
+        `/ \ ___ / | \ / /_\ \| |` + "\n" +
+        `\ \_\ \/ | \/ | \ |` + "\n" +
+        ` \______ /\_______ /\____|__ /____|` + "\n" +
+        ` \/ \/ \/` + "\n"
 )
 
 // Parameters required for snowball consensus

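The banner rewrite replaces interpreted string literals with back-quoted raw literals, so the backslashes in the ASCII art no longer need doubling; newlines are still concatenated in as "\n" because a raw literal takes every character verbatim. A tiny sketch of the equivalence:

// Sketch only: both literals below produce identical text.
package main

import "fmt"

func main() {
    interpreted := "\\______   \\_____ /\n" // every backslash escaped
    raw := `\______   \_____ /` + "\n"      // backslashes taken verbatim
    fmt.Print(interpreted, raw)
    fmt.Println(interpreted == raw) // true
}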

@@ -177,9 +177,7 @@ func (b *bootstrapper) storeVertex(vtx avalanche.Vertex) {
             }
         }
 
-        for _, parent := range vtx.Parents() {
-            vts = append(vts, parent)
-        }
+        vts = append(vts, vtx.Parents()...)
     case choices.Accepted:
         b.BootstrapConfig.Context.Log.Verbo("Bootstrapping confirmed %s", vtxID)
     case choices.Rejected:


@@ -82,6 +82,10 @@ func (s *Serializer) ParseVertex(b []byte) (avacon.Vertex, error) {
 
 // BuildVertex implements the avalanche.State interface
 func (s *Serializer) BuildVertex(parentSet ids.Set, txs []snowstorm.Tx) (avacon.Vertex, error) {
+    if len(txs) == 0 {
+        return nil, errNoTxs
+    }
+
     parentIDs := parentSet.List()
     ids.SortIDs(parentIDs)
     sortTxs(txs)


@@ -24,6 +24,7 @@ var (
     errExtraSpace     = errors.New("trailing buffer space")
     errInvalidParents = errors.New("vertex contains non-sorted or duplicated parentIDs")
     errInvalidTxs     = errors.New("vertex contains non-sorted or duplicated transactions")
+    errNoTxs          = errors.New("vertex contains no transactions")
 )
 
 type vertex struct {
@@ -45,6 +46,8 @@ func (vtx *vertex) Verify() error {
     switch {
     case !ids.IsSortedAndUniqueIDs(vtx.parentIDs):
         return errInvalidParents
+    case len(vtx.txs) == 0:
+        return errNoTxs
     case !isSortedAndUniqueTxs(vtx.txs):
         return errInvalidTxs
     default:
@@ -55,7 +58,7 @@ func (vtx *vertex) Verify() error {
 /*
  * Vertex:
- * Codec      | 04 Bytes
  * Chain      | 32 Bytes
  * Height     | 08 Bytes
  * NumParents | 04 Bytes
  * Repeated (NumParents):


@@ -316,8 +316,37 @@ func (t *Transitive) batch(txs []snowstorm.Tx, force, empty bool) {
         }
     }
 
-    if len(batch) > 0 || (empty && !issued) {
+    if len(batch) > 0 {
         t.issueBatch(batch)
+    } else if empty && !issued {
+        t.issueRepoll()
+    }
+}
+
+func (t *Transitive) issueRepoll() {
+    preferredIDs := t.Consensus.Preferences().List()
+    numPreferredIDs := len(preferredIDs)
+    if numPreferredIDs == 0 {
+        t.Config.Context.Log.Error("Re-query attempt was dropped due to no pending vertices")
+        return
+    }
+
+    sampler := random.Uniform{N: len(preferredIDs)}
+    vtxID := preferredIDs[sampler.Sample()]
+
+    p := t.Consensus.Parameters()
+    vdrs := t.Config.Validators.Sample(p.K) // Validators to sample
+    vdrSet := ids.ShortSet{}                // Validators to sample repr. as a set
+    for _, vdr := range vdrs {
+        vdrSet.Add(vdr.ID())
+    }
+
+    t.RequestID++
+    if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) {
+        t.Config.Sender.PullQuery(vdrSet, t.RequestID, vtxID)
+    } else if numVdrs < p.K {
+        t.Config.Context.Log.Error("Re-query for %s was dropped due to an insufficient number of validators", vtxID)
     }
 }

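Instead of building an empty vertex, the engine now re-polls: it samples one currently preferred vertex uniformly at random and pull-queries k validators about it, dropping the attempt when there are no preferences or too few validators. A minimal sketch of that selection; gecko's random.Uniform, Validators.Sample, and the polls set are internal types, so math/rand and plain slices stand in for them here:

// Sketch only: stand-in for the issueRepoll selection logic.
package main

import (
    "fmt"
    "math/rand"
)

func issueRepoll(preferred, validators []string, k int) {
    if len(preferred) == 0 {
        fmt.Println("re-query dropped: no preferred vertices")
        return
    }
    vtxID := preferred[rand.Intn(len(preferred))] // vertex to re-query

    if len(validators) < k {
        fmt.Printf("re-query for %s dropped: only %d validators\n", vtxID, len(validators))
        return
    }
    sample := validators[:k] // stand-in for a weighted validator sample
    fmt.Printf("PullQuery(%v, %s)\n", sample, vtxID)
}

func main() {
    issueRepoll([]string{"vtx1", "vtx2"}, []string{"vdr1", "vdr2", "vdr3"}, 2)
}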

@@ -698,23 +698,12 @@ func TestEngineScheduleRepoll(t *testing.T) {
     sender.PushQueryF = nil
 
-    st.buildVertex = func(_ ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) {
-        consumers := []snowstorm.Tx{}
-        for _, tx := range txs {
-            consumers = append(consumers, tx)
-        }
-        return &Vtx{
-            parents: []avalanche.Vertex{gVtx, mVtx},
-            id:      GenerateID(),
-            txs:     consumers,
-            status:  choices.Processing,
-            bytes:   []byte{1},
-        }, nil
-    }
-
     repolled := new(bool)
-    sender.PushQueryF = func(_ ids.ShortSet, _ uint32, _ ids.ID, _ []byte) {
+    sender.PullQueryF = func(_ ids.ShortSet, _ uint32, vtxID ids.ID) {
         *repolled = true
+        if !vtxID.Equals(vtx.ID()) {
+            t.Fatalf("Wrong vertex queried")
+        }
     }
 
     te.QueryFailed(vdr.ID(), *requestID)
@@ -979,31 +968,14 @@ func TestEngineIssueRepoll(t *testing.T) {
     te.Initialize(config)
     te.finishBootstrapping()
 
-    newVtxID := new(ids.ID)
-    st.buildVertex = func(s ids.Set, txs []snowstorm.Tx) (avalanche.Vertex, error) {
-        if len(txs) != 0 {
-            t.Fatalf("Wrong vertex issued")
-        }
-        if s.Len() != 2 || !s.Contains(gVtx.ID()) || !s.Contains(mVtx.ID()) {
-            t.Fatalf("Wrong vertex issued")
-        }
-        vtx := &Vtx{
-            parents: []avalanche.Vertex{gVtx, mVtx},
-            id:      GenerateID(),
-            status:  choices.Processing,
-            bytes:   []byte{1},
-        }
-        *newVtxID = vtx.ID()
-        return vtx, nil
-    }
-
-    sender.PushQueryF = func(vdrs ids.ShortSet, _ uint32, vtxID ids.ID, vtx []byte) {
+    sender.PullQueryF = func(vdrs ids.ShortSet, _ uint32, vtxID ids.ID) {
         vdrSet := ids.ShortSet{}
         vdrSet.Add(vdr.ID())
-        if !vdrs.Equals(vdrSet) || !vtxID.Equals(*newVtxID) {
-            t.Fatalf("Wrong query message")
+        if !vdrs.Equals(vdrSet) {
+            t.Fatalf("Wrong query recipients")
+        }
+        if !vtxID.Equals(gVtx.ID()) && !vtxID.Equals(mVtx.ID()) {
+            t.Fatalf("Unknown re-query")
         }
     }


@@ -91,8 +91,10 @@ func (cr *codecRegistry) RegisterType(val interface{}) error {
     cr.typeToFxIndex[valType] = cr.index
     return cr.codec.RegisterType(val)
 }
 
 func (cr *codecRegistry) Marshal(val interface{}) ([]byte, error) { return cr.codec.Marshal(val) }
-func (cr *codecRegistry) Unmarshal(b []byte, val interface{}) error { return cr.codec.Unmarshal(b, val) }
+func (cr *codecRegistry) Unmarshal(b []byte, val interface{}) error {
+    return cr.codec.Unmarshal(b, val)
+}
 
 /*
 ******************************************************************************
@@ -387,7 +389,7 @@ func (vm *VM) initAliases(genesisBytes []byte) error {
         txID := tx.ID()
 
-        vm.Alias(txID, genesisTx.Alias)
+        if err = vm.Alias(txID, genesisTx.Alias); err != nil {
+            return err
+        }
     }
 
     return nil
@@ -456,7 +460,10 @@ func (vm *VM) parseTx(b []byte) (*UniqueTx, error) {
         if err := vm.state.SetTx(tx.ID(), tx.Tx); err != nil {
             return nil, err
         }
-        tx.setStatus(choices.Processing)
+        if err := tx.setStatus(choices.Processing); err != nil {
+            return nil, err
+        }
     }
 
     return tx, nil


@@ -341,8 +341,5 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error {
 
 // Returns true iff [field] should be serialized
 func shouldSerialize(field reflect.StructField) bool {
-    if field.Tag.Get("serialize") == "true" {
-        return true
-    }
-    return false
+    return field.Tag.Get("serialize") == "true"
 }

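The rewrite returns the tag comparison directly instead of branching on it. For reference, a small sketch of how a serialize:"true" struct tag is read through reflection; the Example struct is illustrative, not a gecko type:

// Sketch only: reading the "serialize" struct tag.
package main

import (
    "fmt"
    "reflect"
)

type Example struct {
    Keep string `serialize:"true"`
    Skip string
}

func shouldSerialize(field reflect.StructField) bool {
    return field.Tag.Get("serialize") == "true"
}

func main() {
    t := reflect.TypeOf(Example{})
    for i := 0; i < t.NumField(); i++ {
        f := t.Field(i)
        fmt.Printf("%s: %v\n", f.Name, shouldSerialize(f)) // Keep: true, Skip: false
    }
}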

@@ -95,10 +95,7 @@ func (svm *SnowmanVM) Shutdown() {
 
 // DBInitialized returns true iff [svm]'s database has values in it already
 func (svm *SnowmanVM) DBInitialized() bool {
     status := svm.State.GetStatus(svm.DB, dbInitializedID)
-    if status == choices.Accepted {
-        return true
-    }
-    return false
+    return status == choices.Accepted
 }
 
 // SetDBInitialized marks the database as initialized


@@ -131,6 +131,8 @@ func (m *manager) addStaticAPIEndpoints(vmID ids.ID) {
     // register the static endpoints
     for extension, service := range staticVM.CreateStaticHandlers() {
         m.log.Verbo("adding static API endpoint: %s", defaultEndpoint+extension)
-        m.apiServer.AddRoute(service, lock, defaultEndpoint, extension, m.log)
+        if err := m.apiServer.AddRoute(service, lock, defaultEndpoint, extension, m.log); err != nil {
+            m.log.Warn("failed to add static API endpoint %s: %v", fmt.Sprintf("%s%s", defaultEndpoint, extension), err)
+        }
     }
 }


@@ -1227,6 +1227,10 @@ func (service *Service) chainExists(blockID ids.ID, chainID ids.ID) (bool, error) {
     db := block.onAccept()
     chains, err := service.vm.getChains(db)
+    if err != nil {
+        return false, err
+    }
+
     for _, chain := range chains {
         if chain.ID().Equals(chainID) {
             return true, nil


@@ -9,7 +9,7 @@ import (
 )
 
 func TestAddDefaultSubnetValidator(t *testing.T) {
-    expectedJSONString := `{"startTime":"0","endtime":"0","id":null,"destination":null,"delegationFeeRate":"0","payerNonce":"0"}`
+    expectedJSONString := `{"startTime":"0","endTime":"0","id":null,"destination":null,"delegationFeeRate":"0","payerNonce":"0"}`
     args := AddDefaultSubnetValidatorArgs{}
     bytes, err := json.Marshal(&args)
     if err != nil {


@@ -44,7 +44,7 @@ type APIAccount struct {
 // is sent when this staker is done staking.
 type APIValidator struct {
     StartTime   json.Uint64  `json:"startTime"`
-    EndTime     json.Uint64  `json:"endtime"`
+    EndTime     json.Uint64  `json:"endTime"`
     Weight      *json.Uint64 `json:"weight,omitempty"`
     StakeAmount *json.Uint64 `json:"stakeAmount,omitempty"`
     ID          ids.ShortID  `json:"id"`
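Correcting the tag to endTime makes the marshaled key camelCase and consistent with startTime, which is also why the expected JSON in the earlier AddDefaultSubnetValidator test changed. A simplified stand-in (plain uint64 rather than gecko's string-encoding json.Uint64, so the values print unquoted) showing that encoding/json emits exactly the tag text:

// Sketch only: the struct tag controls the emitted key verbatim.
package main

import (
    "encoding/json"
    "fmt"
)

type validator struct {
    StartTime uint64 `json:"startTime"`
    EndTime   uint64 `json:"endTime"`
}

func main() {
    b, _ := json.Marshal(validator{})
    fmt.Println(string(b)) // {"startTime":0,"endTime":0}
}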