From 27eb3ca6eece3eb24104b9c397b7ade5a359e0d6 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 10 Mar 2020 16:10:53 -0400 Subject: [PATCH 001/183] fixed bugs added for the bug bounty --- bugs.txt | 9 ++ snow/consensus/avalanche/topological.go | 19 +++ snow/consensus/avalanche/topological_test.go | 72 +++++++++++ snow/consensus/snowball/unary_snowball.go | 2 + .../consensus/snowball/unary_snowball_test.go | 30 ++++- snow/engine/avalanche/bootstrapper.go | 6 + snow/engine/avalanche/bootstrapper_test.go | 108 ++++++++++++++++ snow/engine/avalanche/issuer.go | 7 +- snow/engine/avalanche/polls.go | 22 ++-- snow/engine/avalanche/transitive_test.go | 117 ++++++++++++++++++ snow/engine/snowman/bootstrapper.go | 6 + snow/engine/snowman/bootstrapper_test.go | 110 ++++++++++++++++ snow/engine/snowman/polls.go | 33 +++-- snow/engine/snowman/transitive.go | 14 ++- snow/engine/snowman/transitive_test.go | 112 +++++++++++ 15 files changed, 630 insertions(+), 37 deletions(-) create mode 100644 bugs.txt diff --git a/bugs.txt b/bugs.txt new file mode 100644 index 0000000..984fcef --- /dev/null +++ b/bugs.txt @@ -0,0 +1,9 @@ +Added bugs: + +- Inside of gecko/snow/consensus/avalanche/topological.go#Topological.pushVotes the votes must be properly filtered. Specifically, a byzantine node should not be able to vote for two different transactions that conflict with each other during the same poll. If a node votes for conflicting transactions during the same poll, either all of the node's votes should be dropped, or the votes for the conflicting transactions should be dropped. + +- Inside of gecko/snow/consensus/snowball/unary_snowball.go#unarySnowball.Extend the confidence and bias fields should have been set to the values in the unary snowball instance. + +- Inside of gecko/snow/engine/avalanche/bootstrapper.go#bootstrapper.Put and gecko/snow/engine/snowman/bootstrapper.go#bootstrapper.Put the engine must check that the provided vtx/blk ID is the ID of the parsed container. Otherwise, a byzantine node could send a container and report the wrong container ID for it. This would allow an unintended container to be marked as accepted during bootstrapping. + +- Inside of gecko/snow/engine/avalanche/polls.go#poll and gecko/snow/engine/snowman/polls.go#poll the poll should only allow a validator to vote once per poll. Also, this validator must have been part of the set of validators that was polled during the query.
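To make the last two items concrete, here is a small, self-contained Go sketch. It is hypothetical code, not the engine's actual types: plain strings stand in for ids.ShortID / ids.ID, a count map stands in for ids.UniqueBag, and checkPut is an invented helper. It only illustrates the intended rules: a chit counts only if the responder was actually sampled for the poll and has not already voted, and a container delivered via Put is only used when it parses to the ID that was requested.

// Hypothetical, simplified sketch of the poll and Put fixes described above.
package main

import (
	"errors"
	"fmt"
)

// poll tracks which sampled validators have not responded yet, so a validator
// that was never sampled -- or that already responded -- cannot add votes.
type poll struct {
	polled map[string]bool // validators sampled for this poll, reply pending
	votes  map[string]int  // containerID -> votes received so far
}

func newPoll(sampled []string) *poll {
	p := &poll{polled: map[string]bool{}, votes: map[string]int{}}
	for _, vdr := range sampled {
		p.polled[vdr] = true
	}
	return p
}

// vote drops duplicate or unsolicited responses instead of counting them.
func (p *poll) vote(vdr, containerID string) {
	if !p.polled[vdr] {
		return // not sampled for this poll, or already voted
	}
	delete(p.polled, vdr)
	p.votes[containerID]++
}

func (p *poll) finished() bool { return len(p.polled) == 0 }

// checkPut mirrors the bootstrapper check: a container received in a Put is
// only used if it parses to the ID that was actually requested.
func checkPut(requestedID string, container []byte, parseID func([]byte) string) error {
	if parseID(container) != requestedID {
		return errors.New("container does not match the requested ID")
	}
	return nil
}

func main() {
	p := newPoll([]string{"vdr0", "vdr1"})
	p.vote("vdr0", "vtxA")
	p.vote("vdr0", "vtxB") // second response from vdr0 is ignored
	p.vote("vdr2", "vtxA") // vdr2 was never sampled, ignored
	p.vote("vdr1", "vtxA")
	fmt.Println(p.votes, p.finished()) // map[vtxA:2] true

	fakeParse := func(b []byte) string { return string(b) } // stand-in parser
	fmt.Println(checkPut("vtx0", []byte("vtx1"), fakeParse)) // rejected
}

Tracking the pending validators as a set rather than a simple numPending counter is what lets a poll reject both duplicate and unsolicited responses, which is the same shift the real polls.go changes in this patch make (numPending int becomes polled ids.ShortSet).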
\ No newline at end of file diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index dca8a19..5e6212c 100644 --- a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -257,6 +257,7 @@ func (ta *Topological) pushVotes( kahnNodes map[[32]byte]kahnNode, leaves []ids.ID) ids.Bag { votes := make(ids.UniqueBag) + txConflicts := make(map[[32]byte]ids.Set) for len(leaves) > 0 { newLeavesSize := len(leaves) - 1 @@ -271,6 +272,12 @@ func (ta *Topological) pushVotes( // Give the votes to the consumer txID := tx.ID() votes.UnionSet(txID, kahn.votes) + + // Map txID to set of Conflicts + txKey := txID.Key() + if _, exists := txConflicts[txKey]; !exists { + txConflicts[txKey] = ta.cg.Conflicts(tx) + } } for _, dep := range vtx.Parents() { @@ -291,6 +298,18 @@ func (ta *Topological) pushVotes( } } + // Create bag of votes for conflicting transactions + conflictingVotes := make(ids.UniqueBag) + for txHash, conflicts := range txConflicts { + txID := ids.NewID(txHash) + for conflictTxHash := range conflicts { + conflictTxID := ids.NewID(conflictTxHash) + conflictingVotes.UnionSet(txID, votes.GetSet(conflictTxID)) + } + } + + votes.Difference(&conflictingVotes) + return votes.Bag(ta.params.Alpha) } diff --git a/snow/consensus/avalanche/topological_test.go b/snow/consensus/avalanche/topological_test.go index f43ee5b..c28d567 100644 --- a/snow/consensus/avalanche/topological_test.go +++ b/snow/consensus/avalanche/topological_test.go @@ -103,6 +103,78 @@ func TestAvalancheVoting(t *testing.T) { } } +func TestAvalancheIgnoreInvalidVoting(t *testing.T) { + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + }, + Parents: 2, + BatchSize: 1, + } + + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + ta := Topological{} + ta.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + ta.Add(vtx0) + ta.Add(vtx1) + + sm := make(ids.UniqueBag) + + sm.Add(0, vtx0.id) + sm.Add(1, vtx1.id) + + // Add Illegal Vote cast by Response 2 + sm.Add(2, vtx0.id) + sm.Add(2, vtx1.id) + + ta.RecordPoll(sm) + + if ta.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } +} + func TestAvalancheTransitiveVoting(t *testing.T) { params := Parameters{ Parameters: snowball.Parameters{ diff --git a/snow/consensus/snowball/unary_snowball.go b/snow/consensus/snowball/unary_snowball.go index 6d0db07..3999a74 100644 --- a/snow/consensus/snowball/unary_snowball.go +++ b/snow/consensus/snowball/unary_snowball.go @@ -48,9 +48,11 @@ func (sb *unarySnowball) Extend(beta int, choice int) BinarySnowball { snowflake: binarySnowflake{ beta: beta, preference: choice, + confidence: sb.confidence, finalized: sb.Finalized(), }, } + bs.numSuccessfulPolls[choice] = sb.numSuccessfulPolls return bs } diff --git a/snow/consensus/snowball/unary_snowball_test.go 
b/snow/consensus/snowball/unary_snowball_test.go index 8bf098a..224cd4c 100644 --- a/snow/consensus/snowball/unary_snowball_test.go +++ b/snow/consensus/snowball/unary_snowball_test.go @@ -42,11 +42,32 @@ func TestUnarySnowball(t *testing.T) { binarySnowball := sbClone.Extend(beta, 0) + expected := "SB(Preference = 0, NumSuccessfulPolls[0] = 2, NumSuccessfulPolls[1] = 0, SF = SF(Preference = 0, Confidence = 1, Finalized = false))" + if result := binarySnowball.String(); result != expected { + t.Fatalf("Expected:\n%s\nReturned:\n%s", expected, result) + } + binarySnowball.RecordUnsuccessfulPoll() + for i := 0; i < 3; i++ { + if binarySnowball.Preference() != 0 { + t.Fatalf("Wrong preference") + } else if binarySnowball.Finalized() { + t.Fatalf("Should not have finalized") + } + binarySnowball.RecordSuccessfulPoll(1) + binarySnowball.RecordUnsuccessfulPoll() + } + + if binarySnowball.Preference() != 1 { + t.Fatalf("Wrong preference") + } else if binarySnowball.Finalized() { + t.Fatalf("Should not have finalized") + } binarySnowball.RecordSuccessfulPoll(1) - - if binarySnowball.Finalized() { + if binarySnowball.Preference() != 1 { + t.Fatalf("Wrong preference") + } else if binarySnowball.Finalized() { t.Fatalf("Should not have finalized") } @@ -57,4 +78,9 @@ func TestUnarySnowball(t *testing.T) { } else if !binarySnowball.Finalized() { t.Fatalf("Should have finalized") } + + expected = "SB(NumSuccessfulPolls = 2, Confidence = 1, Finalized = false)" + if str := sb.String(); str != expected { + t.Fatalf("Wrong state. Expected:\n%s\nGot:\n%s", expected, str) + } } diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 7d3d7c8..26b202e 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -103,6 +103,12 @@ func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxB return } + if realVtxID := vtx.ID(); !vtxID.Equals(realVtxID) { + b.BootstrapConfig.Context.Log.Warn("Put called for vertexID %s, but provided vertexID %s", vtxID, realVtxID) + b.GetFailed(vdr, requestID, vtxID) + return + } + b.addVertex(vtx) } diff --git a/snow/engine/avalanche/bootstrapper_test.go b/snow/engine/avalanche/bootstrapper_test.go index d1be936..6f581b5 100644 --- a/snow/engine/avalanche/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrapper_test.go @@ -334,6 +334,114 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { } } +func TestBootstrapperWrongIDByzantineResponse(t *testing.T) { + config, peerID, sender, state, _ := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + + vtxBytes0 := []byte{0} + vtxBytes1 := []byte{1} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Processing, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + id: vtxID1, + height: 0, + status: choices.Processing, + bytes: vtxBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add( + vtxID0, + ) + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + requestID := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested vertex from %s, requested from %s", peerID, vdr) + } + switch { + 
case vtxID.Equals(vtxID0): + default: + t.Fatalf("Requested unknown vertex") + } + + *requestID = reqID + } + + bs.ForceAccepted(acceptedIDs) + + state.getVertex = nil + sender.GetF = nil + + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + return vtx1, nil + } + t.Fatal(errParsedUnknownVertex) + return nil, errParsedUnknownVertex + } + + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + return vtx0, nil + case vtxID.Equals(vtxID1): + return vtx1, nil + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + sender.CantGet = false + + bs.Put(peerID, *requestID, vtxID0, vtxBytes1) + + sender.CantGet = true + + bs.Put(peerID, *requestID, vtxID0, vtxBytes0) + + state.parseVertex = nil + state.edge = nil + bs.onFinished = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if vtx0.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } + if vtx1.Status() != choices.Processing { + t.Fatalf("Vertex should be processing") + } +} + func TestBootstrapperVertexDependencies(t *testing.T) { config, peerID, sender, state, _ := newConfig(t) diff --git a/snow/engine/avalanche/issuer.go b/snow/engine/avalanche/issuer.go index befe973..f953fe6 100644 --- a/snow/engine/avalanche/issuer.go +++ b/snow/engine/avalanche/issuer.go @@ -64,9 +64,12 @@ func (i *issuer) Update() { vdrSet.Add(vdr.ID()) } + toSample := ids.ShortSet{} // Copy to a new variable because we may remove an element in sender.Sender + toSample.Union(vdrSet) // and we don't want that to affect the set of validators we wait for [ie vdrSet] + i.t.RequestID++ - if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet.Len()) { - i.t.Config.Sender.PushQuery(vdrSet, i.t.RequestID, vtxID, i.vtx.Bytes()) + if numVdrs := len(vdrs); numVdrs == p.K && i.t.polls.Add(i.t.RequestID, vdrSet) { + i.t.Config.Sender.PushQuery(toSample, i.t.RequestID, vtxID, i.vtx.Bytes()) } else if numVdrs < p.K { i.t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", vtxID) } diff --git a/snow/engine/avalanche/polls.go b/snow/engine/avalanche/polls.go index 282fe6a..fa1e7df 100644 --- a/snow/engine/avalanche/polls.go +++ b/snow/engine/avalanche/polls.go @@ -38,10 +38,10 @@ type polls struct { // Add to the current set of polls // Returns true if the poll was registered correctly and the network sample // should be made. 
-func (p *polls) Add(requestID uint32, numPolled int) bool { +func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool { poll, exists := p.m[requestID] if !exists { - poll.numPending = numPolled + poll.polled = vdrs p.m[requestID] = poll p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics @@ -59,7 +59,7 @@ func (p *polls) Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.Uni return nil, false } - poll.Vote(votes) + poll.Vote(votes, vdr) if poll.Finished() { p.log.Verbo("Poll is finished") delete(p.m, requestID) @@ -83,19 +83,19 @@ func (p *polls) String() string { // poll represents the current state of a network poll for a vertex type poll struct { - votes ids.UniqueBag - numPending int + votes ids.UniqueBag + polled ids.ShortSet } // Vote registers a vote for this poll -func (p *poll) Vote(votes []ids.ID) { - if p.numPending > 0 { - p.numPending-- - p.votes.Add(uint(p.numPending), votes...) +func (p *poll) Vote(votes []ids.ID, vdr ids.ShortID) { + if p.polled.Contains(vdr) { + p.polled.Remove(vdr) + p.votes.Add(uint(p.polled.Len()), votes...) } } // Finished returns true if the poll has completed, with no more required // responses -func (p poll) Finished() bool { return p.numPending <= 0 } -func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.numPending) } +func (p poll) Finished() bool { return p.polled.Len() == 0 } +func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.polled.Len()) } diff --git a/snow/engine/avalanche/transitive_test.go b/snow/engine/avalanche/transitive_test.go index 6f5b5ed..15b258b 100644 --- a/snow/engine/avalanche/transitive_test.go +++ b/snow/engine/avalanche/transitive_test.go @@ -2363,3 +2363,120 @@ func TestEngineBootstrappingIntoConsensus(t *testing.T) { sender.PushQueryF = nil st.getVertex = nil } + +func TestEngineDoubleChit(t *testing.T) { + config := DefaultConfig() + + config.Params.Alpha = 2 + config.Params.K = 2 + + vdr0 := validators.GenerateRandomValidator(1) + vdr1 := validators.GenerateRandomValidator(1) + vals := validators.NewSet() + vals.Add(vdr0) + vals.Add(vdr1) + config.Validators = vals + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + sender.CantGetAcceptedFrontier = false + + st := &stateTest{t: t} + config.State = st + + st.Default(true) + + gVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + mVtx := &Vtx{ + id: GenerateID(), + status: choices.Accepted, + } + + vts := []avalanche.Vertex{gVtx, mVtx} + utxos := []ids.ID{GenerateID()} + + tx := &TestTx{ + TestTx: snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + }, + } + tx.Ins.Add(utxos[0]) + + vtx := &Vtx{ + parents: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx}, + height: 1, + status: choices.Processing, + bytes: []byte{1, 1, 2, 3}, + } + + st.edge = func() []ids.ID { return []ids.ID{vts[0].ID(), vts[1].ID()} } + st.getVertex = func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(gVtx.ID()): + return gVtx, nil + case id.Equals(mVtx.ID()): + return mVtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + reqID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, vtxID ids.ID, _ []byte) { + *reqID = requestID + if inVdrs.Len() != 2 { + t.Fatalf("Wrong number of validators") + } + if !vtxID.Equals(vtx.ID()) { + t.Fatalf("Wrong vertex requested") + } + } + st.getVertex = 
func(id ids.ID) (avalanche.Vertex, error) { + switch { + case id.Equals(vtx.ID()): + return vtx, nil + } + t.Fatalf("Unknown vertex") + panic("Should have errored") + } + + te.insert(vtx) + + votes := ids.Set{} + votes.Add(vtx.ID()) + + if status := tx.Status(); status != choices.Processing { + t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) + } + + te.Chits(vdr0.ID(), *reqID, votes) + + if status := tx.Status(); status != choices.Processing { + t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) + } + + te.Chits(vdr0.ID(), *reqID, votes) + + if status := tx.Status(); status != choices.Processing { + t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Processing) + } + + te.Chits(vdr1.ID(), *reqID, votes) + + if status := tx.Status(); status != choices.Accepted { + t.Fatalf("Wrong tx status: %s ; expected: %s", status, choices.Accepted) + } +} diff --git a/snow/engine/snowman/bootstrapper.go b/snow/engine/snowman/bootstrapper.go index 88724ed..2c0415e 100644 --- a/snow/engine/snowman/bootstrapper.go +++ b/snow/engine/snowman/bootstrapper.go @@ -97,6 +97,12 @@ func (b *bootstrapper) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkB return } + if realBlkID := blk.ID(); !blkID.Equals(realBlkID) { + b.BootstrapConfig.Context.Log.Warn("Put called for blockID %s, but provided blockID %s", blkID, realBlkID) + b.GetFailed(vdr, requestID, blkID) + return + } + b.addBlock(blk) } diff --git a/snow/engine/snowman/bootstrapper_test.go b/snow/engine/snowman/bootstrapper_test.go index 9cb0968..a972a23 100644 --- a/snow/engine/snowman/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrapper_test.go @@ -252,6 +252,116 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { } } +func TestBootstrapperWrongIDByzantineResponse(t *testing.T) { + config, peerID, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + blkID2 := ids.Empty.Prefix(2) + + blkBytes0 := []byte{0} + blkBytes1 := []byte{1} + blkBytes2 := []byte{2} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + blk1 := &Blk{ + parent: blk0, + id: blkID1, + height: 1, + status: choices.Processing, + bytes: blkBytes1, + } + blk2 := &Blk{ + parent: blk1, + id: blkID2, + height: 2, + status: choices.Processing, + bytes: blkBytes2, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + + acceptedIDs := ids.Set{} + acceptedIDs.Add(blkID1) + + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID1): + return nil, errUnknownBlock + default: + t.Fatal(errUnknownBlock) + panic(errUnknownBlock) + } + } + + requestID := new(uint32) + sender.GetF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) + } + switch { + case vtxID.Equals(blkID1): + default: + t.Fatalf("Requested unknown block") + } + + *requestID = reqID + } + + bs.ForceAccepted(acceptedIDs) + + vm.GetBlockF = nil + sender.GetF = nil + + vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes2): + return blk2, nil + } + t.Fatal(errUnknownBlock) + return nil, errUnknownBlock + } + + sender.CantGet = false + + bs.Put(peerID, *requestID, blkID1, blkBytes2) + + sender.CantGet = true + + vm.ParseBlockF = func(blkBytes 
[]byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes1): + return blk1, nil + } + t.Fatal(errUnknownBlock) + return nil, errUnknownBlock + } + + finished := new(bool) + bs.onFinished = func() { *finished = true } + + bs.Put(peerID, *requestID, blkID1, blkBytes1) + + vm.ParseBlockF = nil + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } + if blk1.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } + if blk2.Status() != choices.Processing { + t.Fatalf("Block should be processing") + } +} + func TestBootstrapperDependency(t *testing.T) { config, peerID, sender, vm := newConfig(t) diff --git a/snow/engine/snowman/polls.go b/snow/engine/snowman/polls.go index 6e666dc..6765ff7 100644 --- a/snow/engine/snowman/polls.go +++ b/snow/engine/snowman/polls.go @@ -22,11 +22,11 @@ type polls struct { // Add to the current set of polls // Returns true if the poll was registered correctly and the network sample // should be made. -func (p *polls) Add(requestID uint32, numPolled int) bool { +func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool { poll, exists := p.m[requestID] if !exists { poll.alpha = p.alpha - poll.numPolled = numPolled + poll.polled = vdrs p.m[requestID] = poll p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics @@ -42,7 +42,7 @@ func (p *polls) Vote(requestID uint32, vdr ids.ShortID, vote ids.ID) (ids.Bag, b if !exists { return ids.Bag{}, false } - poll.Vote(vote) + poll.Vote(vote, vdr) if poll.Finished() { delete(p.m, requestID) p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics @@ -60,7 +60,7 @@ func (p *polls) CancelVote(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { return ids.Bag{}, false } - poll.CancelVote() + poll.CancelVote(vdr) if poll.Finished() { delete(p.m, requestID) p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics @@ -83,22 +83,18 @@ func (p *polls) String() string { // poll represents the current state of a network poll for a block type poll struct { - alpha int - votes ids.Bag - numPolled int + alpha int + votes ids.Bag + polled ids.ShortSet } // Vote registers a vote for this poll -func (p *poll) CancelVote() { - if p.numPolled > 0 { - p.numPolled-- - } -} +func (p *poll) CancelVote(vdr ids.ShortID) { p.polled.Remove(vdr) } // Vote registers a vote for this poll -func (p *poll) Vote(vote ids.ID) { - if p.numPolled > 0 { - p.numPolled-- +func (p *poll) Vote(vote ids.ID, vdr ids.ShortID) { + if p.polled.Contains(vdr) { + p.polled.Remove(vdr) p.votes.Add(vote) } } @@ -106,13 +102,14 @@ func (p *poll) Vote(vote ids.ID) { // Finished returns true if the poll has completed, with no more required // responses func (p poll) Finished() bool { + remaining := p.polled.Len() received := p.votes.Len() _, freq := p.votes.Mode() - return p.numPolled == 0 || // All k nodes responded + return remaining == 0 || // All k nodes responded freq >= p.alpha || // An alpha majority has returned - received+p.numPolled < p.alpha // An alpha majority can never return + received+remaining < p.alpha // An alpha majority can never return } func (p poll) String() string { - return fmt.Sprintf("Waiting on %d chits", p.numPolled) + return fmt.Sprintf("Waiting on %d chits from %s", p.polled.Len(), p.polled) } diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index e023a7d..9e97f0a 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -297,9 +297,12 @@ func (t *Transitive) pullSample(blkID ids.ID) { 
vdrSet.Add(vdr.ID()) } + toSample := ids.ShortSet{} + toSample.Union(vdrSet) + t.RequestID++ - if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) { - t.Config.Sender.PullQuery(vdrSet, t.RequestID, blkID) + if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet) { + t.Config.Sender.PullQuery(toSample, t.RequestID, blkID) } else if numVdrs < p.K { t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blkID) } @@ -314,9 +317,12 @@ func (t *Transitive) pushSample(blk snowman.Block) { vdrSet.Add(vdr.ID()) } + toSample := ids.ShortSet{} + toSample.Union(vdrSet) + t.RequestID++ - if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet.Len()) { - t.Config.Sender.PushQuery(vdrSet, t.RequestID, blk.ID(), blk.Bytes()) + if numVdrs := len(vdrs); numVdrs == p.K && t.polls.Add(t.RequestID, vdrSet) { + t.Config.Sender.PushQuery(toSample, t.RequestID, blk.ID(), blk.Bytes()) } else if numVdrs < p.K { t.Config.Context.Log.Error("Query for %s was dropped due to an insufficient number of validators", blk.ID()) } diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 1920d8c..c97f2f0 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -1076,3 +1076,115 @@ func TestEngineRetryFetch(t *testing.T) { t.Fatalf("Should have requested the block again") } } + +func TestEngineDoubleChit(t *testing.T) { + config := DefaultConfig() + + config.Params = snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + } + + vdr0 := validators.GenerateRandomValidator(1) + vdr1 := validators.GenerateRandomValidator(1) + + vals := validators.NewSet() + config.Validators = vals + + vals.Add(vdr0) + vals.Add(vdr1) + + sender := &common.SenderTest{} + sender.T = t + config.Sender = sender + + sender.Default(true) + + vm := &VMTest{} + vm.T = t + config.VM = vm + + vm.Default(true) + vm.CantSetPreference = false + + gBlk := &Blk{ + id: GenerateID(), + status: choices.Accepted, + } + + vm.LastAcceptedF = func() ids.ID { return gBlk.ID() } + sender.CantGetAcceptedFrontier = false + + te := &Transitive{} + te.Initialize(config) + te.finishBootstrapping() + + vm.LastAcceptedF = nil + sender.CantGetAcceptedFrontier = true + + blk := &Blk{ + parent: gBlk, + id: GenerateID(), + status: choices.Processing, + bytes: []byte{1}, + } + + queried := new(bool) + queryRequestID := new(uint32) + sender.PushQueryF = func(inVdrs ids.ShortSet, requestID uint32, blkID ids.ID, blkBytes []byte) { + if *queried { + t.Fatalf("Asked multiple times") + } + *queried = true + *queryRequestID = requestID + vdrSet := ids.ShortSet{} + vdrSet.Add(vdr0.ID(), vdr1.ID()) + if !inVdrs.Equals(vdrSet) { + t.Fatalf("Asking wrong validator for preference") + } + if !blk.ID().Equals(blkID) { + t.Fatalf("Asking for wrong block") + } + } + + te.insert(blk) + + vm.GetBlockF = func(id ids.ID) (snowman.Block, error) { + switch { + case id.Equals(gBlk.ID()): + return gBlk, nil + case id.Equals(blk.ID()): + return blk, nil + } + t.Fatalf("Unknown block") + panic("Should have errored") + } + + blkSet := ids.Set{} + blkSet.Add(blk.ID()) + + if status := blk.Status(); status != choices.Processing { + t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) + } + + te.Chits(vdr0.ID(), *queryRequestID, blkSet) + + if status := blk.Status(); status != choices.Processing { + t.Fatalf("Wrong status: %s ; expected: %s", 
status, choices.Processing) + } + + te.Chits(vdr0.ID(), *queryRequestID, blkSet) + + if status := blk.Status(); status != choices.Processing { + t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Processing) + } + + te.Chits(vdr1.ID(), *queryRequestID, blkSet) + + if status := blk.Status(); status != choices.Accepted { + t.Fatalf("Wrong status: %s ; expected: %s", status, choices.Accepted) + } +} From 081ab1146d89f07d5d9225a4e9aa78e1d0e30a76 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 10:01:54 -0300 Subject: [PATCH 002/183] defining basic e2e tests ci --- .ci/run_e2e_tests.sh | 34 ++++++++++++++++++++++++++++++++++ 1 file changed, 34 insertions(+) create mode 100644 .ci/run_e2e_tests.sh diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh new file mode 100644 index 0000000..222fd1b --- /dev/null +++ b/.ci/run_e2e_tests.sh @@ -0,0 +1,34 @@ +LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" +LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" +GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" + +#bash "${KURTOSIS_PATH}"/scripts/build_image.sh ${LATEST_KURTOSIS_TAG} +#docker pull ${LATEST_CONTROLLER_TAG} + +(docker run -v /var/run/docker.sock:/var/run/docker.sock \ +--env DEFAULT_GECKO_IMAGE="${DEFAULT_GECKO_IMAGE}" \ +--env TEST_CONTROLLER_IMAGE="${LATEST_CONTROLLER_TAG}" \ +${LATEST_KURTOSIS_TAG}) & + +kurtosis_pid=$! + +sleep 15 +kill ${kurtosis_pid} + +ACTUAL_EXIT_STATUS=$(docker ps -a --latest --filter ancestor=${LATEST_CONTROLLER_TAG} --format="{{.Status}}") +EXPECTED_EXIT_STATUS="Exited \(0\).*" + +echo "${ACTUAL_EXIT_STATUS}" + +# Clear containers. +echo "Clearing kurtosis testnet containers." +docker rm $(docker stop $(docker ps -a -q --filter ancestor="${GECKO_IMAGE}" --format="{{.ID}}")) >/dev/null + +if [[ ${ACTUAL_EXIT_STATUS} =~ ${EXPECTED_EXIT_STATUS} ]] +then + echo "Kurtosis test succeeded." + exit 0 +else + echo "Kurtosis test failed." 
+ exit 1 +fi From 3eb9efded9f2fd4e5cf440b7c7869030c9f88375 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 10:03:02 -0300 Subject: [PATCH 003/183] modifying travisci yaml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index a919fa0..1725f39 100644 --- a/.travis.yml +++ b/.travis.yml @@ -26,7 +26,7 @@ install: script: - if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; fi + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/runscript_linux.sh; .ci/run_e2e_tests.sh; fi #Need to push to docker hub only from one build after_success: From 37b7440788f771a19c58c923eadcab8d4b1503a3 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 10:06:36 -0300 Subject: [PATCH 004/183] pulling images --- .ci/run_e2e_tests.sh | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 222fd1b..d154b99 100644 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -2,8 +2,8 @@ LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" -#bash "${KURTOSIS_PATH}"/scripts/build_image.sh ${LATEST_KURTOSIS_TAG} -#docker pull ${LATEST_CONTROLLER_TAG} +docker pull ${LATEST_CONTROLLER_TAG} +docker pull ${LATEST_KURTOSIS_TAG} (docker run -v /var/run/docker.sock:/var/run/docker.sock \ --env DEFAULT_GECKO_IMAGE="${DEFAULT_GECKO_IMAGE}" \ From d85a01631087d023b9e9155e196d91ded801a84a Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 10:09:38 -0300 Subject: [PATCH 005/183] e2e tests must be runnable --- .ci/run_e2e_tests.sh | 0 1 file changed, 0 insertions(+), 0 deletions(-) mode change 100644 => 100755 .ci/run_e2e_tests.sh diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh old mode 100644 new mode 100755 From 0165827857e700424f51d504bc2dcb56411c3776 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 10:28:21 -0300 Subject: [PATCH 006/183] removing osx build to debug kurtosis runs; --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 1725f39..3b88aaf 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,8 @@ jobs: include: - os: linux dist: bionic - - os: osx - osx_image: xcode11.4 + #- os: osx + # osx_image: xcode11.4 services: - docker env: From 374062aa1ac0ca3e54a6a8b8531e28117ec35345 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 12:56:16 -0300 Subject: [PATCH 007/183] sleeping 90 --- .ci/run_e2e_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index d154b99..84ae038 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -12,7 +12,7 @@ ${LATEST_KURTOSIS_TAG}) & kurtosis_pid=$! 
-sleep 15 +sleep 90 kill ${kurtosis_pid} ACTUAL_EXIT_STATUS=$(docker ps -a --latest --filter ancestor=${LATEST_CONTROLLER_TAG} --format="{{.Status}}") From dea626aea362c2e36e2b758ab56139ce3478eefd Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 13:10:05 -0300 Subject: [PATCH 008/183] putting in debugging statements --- .ci/run_e2e_tests.sh | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 84ae038..ebf9cef 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,3 +1,5 @@ +set -x + LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" @@ -5,8 +7,11 @@ GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" docker pull ${LATEST_CONTROLLER_TAG} docker pull ${LATEST_KURTOSIS_TAG} +docker image ls +echo "MY GECKO IMAGE: ${GECKO_IMAGE}" + (docker run -v /var/run/docker.sock:/var/run/docker.sock \ ---env DEFAULT_GECKO_IMAGE="${DEFAULT_GECKO_IMAGE}" \ +--env DEFAULT_GECKO_IMAGE="${GECKO_IMAGE}" \ --env TEST_CONTROLLER_IMAGE="${LATEST_CONTROLLER_TAG}" \ ${LATEST_KURTOSIS_TAG}) & From 2036c7233afe6e12fd3816af8c99ee35ca222013 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 13:50:32 -0300 Subject: [PATCH 009/183] building with docker deploy in scripts --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 3b88aaf..85a2176 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,7 @@ before_install: - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi install: - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT . 
; fi + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT -f "./scripts/Dockerfile.deploy" ; fi script: - if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi From c6954227eb0235bf4e5f8f97fded70dc735cff5c Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 14:09:29 -0300 Subject: [PATCH 010/183] build image script --- .ci/run_e2e_tests.sh | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index ebf9cef..2091485 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -2,11 +2,20 @@ set -x LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" -GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" +#GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" docker pull ${LATEST_CONTROLLER_TAG} docker pull ${LATEST_KURTOSIS_TAG} +SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) +SRC_PATH=$(dirname "${SCRIPTS_PATH}") + +# build docker image we need +echo $(pwd) +bash ${SRC_PATH}/scripts/build_image.sh +# get docker image label +GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) + docker image ls echo "MY GECKO IMAGE: ${GECKO_IMAGE}" From 6fac7c244625cf01fd5ca465371e5a11b3adf6e6 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 14:13:07 -0300 Subject: [PATCH 011/183] fixing travis yml --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 85a2176..bb57fc9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,7 +22,7 @@ before_install: - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi install: - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT -f "./scripts/Dockerfile.deploy" ; fi + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT .; fi script: - if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi From f8cea1d2999601accfa4786c8d95c01d7db36806 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 14:25:11 -0300 Subject: [PATCH 012/183] travisyml now minimally modified to include kurtosis tests --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index bb57fc9..f596a65 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,8 @@ jobs: include: - os: linux dist: bionic - #- os: osx - # osx_image: xcode11.4 + - os: osx + osx_image: xcode11.4 services: - docker env: From 72330d494ce537e8cb5d26c02d0a08c0ea6a3ddd Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 14:25:47 -0300 Subject: [PATCH 013/183] removing debugging statements --- .ci/run_e2e_tests.sh | 9 --------- 1 file changed, 9 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 2091485..56d778d 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,8 +1,5 @@ -set -x - LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" -#GECKO_IMAGE="${DOCKERHUB_REPO}":"$COMMIT" docker pull ${LATEST_CONTROLLER_TAG} docker pull ${LATEST_KURTOSIS_TAG} @@ -11,14 +8,10 @@ SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) SRC_PATH=$(dirname "${SCRIPTS_PATH}") # build docker image we need -echo $(pwd) bash ${SRC_PATH}/scripts/build_image.sh # get docker image label GECKO_IMAGE=$(docker image ls 
--format="{{.Repository}}" | head -n 1) -docker image ls -echo "MY GECKO IMAGE: ${GECKO_IMAGE}" - (docker run -v /var/run/docker.sock:/var/run/docker.sock \ --env DEFAULT_GECKO_IMAGE="${GECKO_IMAGE}" \ --env TEST_CONTROLLER_IMAGE="${LATEST_CONTROLLER_TAG}" \ @@ -32,8 +25,6 @@ kill ${kurtosis_pid} ACTUAL_EXIT_STATUS=$(docker ps -a --latest --filter ancestor=${LATEST_CONTROLLER_TAG} --format="{{.Status}}") EXPECTED_EXIT_STATUS="Exited \(0\).*" -echo "${ACTUAL_EXIT_STATUS}" - # Clear containers. echo "Clearing kurtosis testnet containers." docker rm $(docker stop $(docker ps -a -q --filter ancestor="${GECKO_IMAGE}" --format="{{.ID}}")) >/dev/null From 1ae9c76c5344a135bc3d4f2cbcbe0223aeeb8789 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 31 May 2020 14:48:33 -0300 Subject: [PATCH 014/183] isolating the kurtosis testing --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index f596a65..bb57fc9 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,8 @@ jobs: include: - os: linux dist: bionic - - os: osx - osx_image: xcode11.4 + #- os: osx + # osx_image: xcode11.4 services: - docker env: From 8e8dd7529b730f346f2cf0cf8e5659911a962017 Mon Sep 17 00:00:00 2001 From: Shashank Date: Tue, 2 Jun 2020 22:47:02 +0530 Subject: [PATCH 015/183] Fix for KeyStore DoS vulnerability https://github.com/ava-labs/gecko/issues/195 --- api/keystore/service.go | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index 5575083..c1e1c56 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -148,9 +148,17 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre return fmt.Errorf("user already exists: %s", args.Username) } - if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore { - return errWeakPassword - } + if len(args.Password) < 50 { + if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore { + return errWeakPassword + } + } + + if len(args.Password) >= 50 { + if zxcvbn.PasswordStrength(args.Password[:50], nil).Score < requiredPassScore { + return errWeakPassword + } + } usr := &User{} if err := usr.Initialize(args.Password); err != nil { From 6dc67bbf7024535ee130388a81e34302d93411ec Mon Sep 17 00:00:00 2001 From: Shashank Date: Wed, 3 Jun 2020 14:08:57 +0530 Subject: [PATCH 016/183] Updated fix for issue 195 https://github.com/ava-labs/gecko/issues/195 --- api/keystore/service.go | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index c1e1c56..72c7c24 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -148,17 +148,17 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre return fmt.Errorf("user already exists: %s", args.Username) } - if len(args.Password) < 50 { - if zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore { - return errWeakPassword - } - } +// As per issue https://github.com/ava-labs/gecko/issues/195 it was found the longer the length of password the slower zxcvbn.PasswordStrength() performs. +// To avoid performance issues and DOS vector we only check the first 50 characters of the password. 
+ checkPass := args.Password - if len(args.Password) >= 50 { - if zxcvbn.PasswordStrength(args.Password[:50], nil).Score < requiredPassScore { - return errWeakPassword - } - } + if len(args.Password) > 50 { + checkPass = args.Password[:50] + } + + if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore { + return errWeakPassword + } usr := &User{} if err := usr.Initialize(args.Password); err != nil { From 311ce90977dc2d87fe3094f4c3e8171aace99d4e Mon Sep 17 00:00:00 2001 From: Shashank Date: Wed, 3 Jun 2020 19:47:39 +0530 Subject: [PATCH 017/183] Fixed go format --- api/keystore/service.go | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index 72c7c24..e80b4d9 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -148,16 +148,16 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre return fmt.Errorf("user already exists: %s", args.Username) } -// As per issue https://github.com/ava-labs/gecko/issues/195 it was found the longer the length of password the slower zxcvbn.PasswordStrength() performs. -// To avoid performance issues and DOS vector we only check the first 50 characters of the password. + // As per issue https://github.com/ava-labs/gecko/issues/195 it was found the longer the length of password the slower zxcvbn.PasswordStrength() performs. + // To avoid performance issues and DOS vector we only check the first 50 characters of the password. checkPass := args.Password if len(args.Password) > 50 { - checkPass = args.Password[:50] + checkPass = args.Password[:50] } if zxcvbn.PasswordStrength(checkPass, nil).Score < requiredPassScore { - return errWeakPassword + return errWeakPassword } usr := &User{} From 8c42f14a49f566983d059ddc8ce832eb027194d4 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 4 Jun 2020 01:57:43 -0400 Subject: [PATCH 018/183] Added ping pong messages --- network/builder.go | 6 ++++ network/commands.go | 9 +++++ network/metrics.go | 7 ++++ network/network.go | 10 ++++++ network/peer.go | 86 ++++++++++++++++++++++++++++++++++++++------- 5 files changed, 105 insertions(+), 13 deletions(-) diff --git a/network/builder.go b/network/builder.go index 20c7e4a..c7ba4aa 100644 --- a/network/builder.go +++ b/network/builder.go @@ -33,6 +33,12 @@ func (m Builder) PeerList(ipDescs []utils.IPDesc) (Msg, error) { return m.Pack(PeerList, map[Field]interface{}{Peers: ipDescs}) } +// Ping message +func (m Builder) Ping() (Msg, error) { return m.Pack(Ping, nil) } + +// Pong message +func (m Builder) Pong() (Msg, error) { return m.Pack(Pong, nil) } + // GetAcceptedFrontier message func (m Builder) GetAcceptedFrontier(chainID ids.ID, requestID uint32) (Msg, error) { return m.Pack(GetAcceptedFrontier, map[Field]interface{}{ diff --git a/network/commands.go b/network/commands.go index 177f58b..ab58a8d 100644 --- a/network/commands.go +++ b/network/commands.go @@ -125,6 +125,10 @@ func (op Op) String() string { return "get_peerlist" case PeerList: return "peerlist" + case Ping: + return "ping" + case Pong: + return "pong" case GetAcceptedFrontier: return "get_accepted_frontier" case AcceptedFrontier: @@ -166,6 +170,9 @@ const ( PushQuery PullQuery Chits + // Handshake: + Ping + Pong ) // Defines the messages that can be sent/received with this network @@ -176,6 +183,8 @@ var ( Version: []Field{NetworkID, NodeID, MyTime, IP, VersionStr}, GetPeerList: []Field{}, PeerList: []Field{Peers}, + Ping: []Field{}, + Pong: []Field{}, // Bootstrapping: 
GetAcceptedFrontier: []Field{ChainID, RequestID}, AcceptedFrontier: []Field{ChainID, RequestID, ContainerIDs}, diff --git a/network/metrics.go b/network/metrics.go index 3afda5b..400e27a 100644 --- a/network/metrics.go +++ b/network/metrics.go @@ -54,6 +54,7 @@ type metrics struct { getVersion, version, getPeerlist, peerlist, + ping, pong, getAcceptedFrontier, acceptedFrontier, getAccepted, accepted, get, put, @@ -78,6 +79,8 @@ func (m *metrics) initialize(registerer prometheus.Registerer) error { errs.Add(m.version.initialize(Version, registerer)) errs.Add(m.getPeerlist.initialize(GetPeerList, registerer)) errs.Add(m.peerlist.initialize(PeerList, registerer)) + errs.Add(m.ping.initialize(Ping, registerer)) + errs.Add(m.pong.initialize(Pong, registerer)) errs.Add(m.getAcceptedFrontier.initialize(GetAcceptedFrontier, registerer)) errs.Add(m.acceptedFrontier.initialize(AcceptedFrontier, registerer)) errs.Add(m.getAccepted.initialize(GetAccepted, registerer)) @@ -101,6 +104,10 @@ func (m *metrics) message(msgType Op) *messageMetrics { return &m.getPeerlist case PeerList: return &m.peerlist + case Ping: + return &m.ping + case Pong: + return &m.pong case GetAcceptedFrontier: return &m.getAcceptedFrontier case AcceptedFrontier: diff --git a/network/network.go b/network/network.go index 50471eb..85ce09d 100644 --- a/network/network.go +++ b/network/network.go @@ -39,6 +39,8 @@ const ( defaultGetVersionTimeout = 2 * time.Second defaultAllowPrivateIPs = true defaultGossipSize = 50 + defaultPingPongTimeout = time.Minute + defaultPingFrequency = 3 * defaultPingPongTimeout / 4 ) // Network defines the functionality of the networking library. @@ -113,6 +115,8 @@ type network struct { getVersionTimeout time.Duration allowPrivateIPs bool gossipSize int + pingPongTimeout time.Duration + pingFrequency time.Duration executor timer.Executor @@ -171,6 +175,8 @@ func NewDefaultNetwork( defaultGetVersionTimeout, defaultAllowPrivateIPs, defaultGossipSize, + defaultPingPongTimeout, + defaultPingFrequency, ) } @@ -200,6 +206,8 @@ func NewNetwork( getVersionTimeout time.Duration, allowPrivateIPs bool, gossipSize int, + pingPongTimeout time.Duration, + pingFrequency time.Duration, ) Network { net := &network{ log: log, @@ -226,6 +234,8 @@ func NewNetwork( getVersionTimeout: getVersionTimeout, allowPrivateIPs: allowPrivateIPs, gossipSize: gossipSize, + pingPongTimeout: pingPongTimeout, + pingFrequency: pingFrequency, disconnectedIPs: make(map[string]struct{}), connectedIPs: make(map[string]struct{}), diff --git a/network/peer.go b/network/peer.go index 5bb7601..394b838 100644 --- a/network/peer.go +++ b/network/peer.go @@ -60,6 +60,24 @@ func (p *peer) Start() { // Initially send the version to the peer go p.Version() go p.requestVersion() + go p.sendPings() +} + +func (p *peer) sendPings() { + t := time.NewTicker(p.net.pingFrequency) + defer t.Stop() + + for range t.C { + p.net.stateLock.Lock() + closed := p.closed + p.net.stateLock.Unlock() + + if closed { + return + } + + p.Ping() + } } // request the version from the peer until we get the version from them @@ -76,6 +94,7 @@ func (p *peer) requestVersion() { if connected || closed { return } + p.GetVersion() } } @@ -84,6 +103,11 @@ func (p *peer) requestVersion() { func (p *peer) ReadMessages() { defer p.Close() + if err := p.conn.SetReadDeadline(p.net.clock.Time().Add(p.net.pingPongTimeout)); err != nil { + p.net.log.Verbo("error on setting the connection read timeout %s", err) + return + } + pendingBuffer := wrappers.Packer{} readBuffer := make([]byte, 1<<10) 
for { @@ -196,7 +220,15 @@ func (p *peer) send(msg Msg) bool { // assumes the stateLock is not held func (p *peer) handle(msg Msg) { p.net.heartbeat() - atomic.StoreInt64(&p.lastReceived, p.net.clock.Time().Unix()) + + currentTime := p.net.clock.Time() + atomic.StoreInt64(&p.lastReceived, currentTime.Unix()) + + if err := p.conn.SetReadDeadline(currentTime.Add(p.net.pingPongTimeout)); err != nil { + p.net.log.Verbo("error on setting the connection read timeout %s, closing the connection", err) + p.Close() + return + } op := msg.Op() msgMetrics := p.net.message(op) @@ -213,6 +245,12 @@ func (p *peer) handle(msg Msg) { case GetVersion: p.getVersion(msg) return + case Ping: + p.ping(msg) + return + case Pong: + p.pong(msg) + return } if !p.connected { p.net.log.Debug("dropping message from %s because the connection hasn't been established yet", p.id) @@ -290,6 +328,12 @@ func (p *peer) GetPeerList() { p.Send(msg) } +// assumes the stateLock is not held +func (p *peer) SendPeerList() { + ips := p.net.validatorIPs() + p.PeerList(ips) +} + // assumes the stateLock is not held func (p *peer) PeerList(peers []utils.IPDesc) { msg, err := p.net.b.PeerList(peers) @@ -298,7 +342,28 @@ func (p *peer) PeerList(peers []utils.IPDesc) { return } p.Send(msg) - return +} + +// assumes the stateLock is not held +func (p *peer) Ping() { + msg, err := p.net.b.Ping() + p.net.log.AssertNoError(err) + if p.Send(msg) { + p.net.ping.numSent.Inc() + } else { + p.net.ping.numFailed.Inc() + } +} + +// assumes the stateLock is not held +func (p *peer) Pong() { + msg, err := p.net.b.Pong() + p.net.log.AssertNoError(err) + if p.Send(msg) { + p.net.pong.numSent.Inc() + } else { + p.net.pong.numFailed.Inc() + } } // assumes the stateLock is not held @@ -430,17 +495,6 @@ func (p *peer) version(msg Msg) { p.net.connected(p) } -// assumes the stateLock is not held -func (p *peer) SendPeerList() { - ips := p.net.validatorIPs() - reply, err := p.net.b.PeerList(ips) - if err != nil { - p.net.log.Warn("failed to send PeerList message due to %s", err) - return - } - p.Send(reply) -} - // assumes the stateLock is not held func (p *peer) getPeerList(_ Msg) { p.SendPeerList() } @@ -460,6 +514,12 @@ func (p *peer) peerList(msg Msg) { p.net.stateLock.Unlock() } +// assumes the stateLock is not held +func (p *peer) ping(_ Msg) { p.Pong() } + +// assumes the stateLock is not held +func (p *peer) pong(_ Msg) {} + // assumes the stateLock is not held func (p *peer) getAcceptedFrontier(msg Msg) { chainID, err := ids.ToID(msg.Get(ChainID).([]byte)) From 9e74fdf15dcda89679115ca10a67bf03283af5bd Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sat, 6 Jun 2020 11:48:13 -0400 Subject: [PATCH 019/183] improve network logging --- network/network.go | 94 +++++++++++++++++++++---- snow/networking/router/subnet_router.go | 32 +++++---- 2 files changed, 97 insertions(+), 29 deletions(-) diff --git a/network/network.go b/network/network.go index 50471eb..83a9e99 100644 --- a/network/network.go +++ b/network/network.go @@ -12,6 +12,8 @@ import ( "sync/atomic" "time" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/api/health" @@ -266,8 +268,11 @@ func (n *network) GetAcceptedFrontier(validatorIDs ids.ShortSet, chainID ids.ID, func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { msg, err := n.b.AcceptedFrontier(chainID, requestID, containerIDs) if err != nil { - n.log.Error("attempted to pack too large of an 
AcceptedFrontier message.\nNumber of containerIDs: %d", - containerIDs.Len()) + n.log.Error("failed to build AcceptedFrontier(%s, %d, %s): %s", + chainID, + requestID, + containerIDs, + err) return // Packing message failed } @@ -279,7 +284,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ sent = peer.send(msg) } if !sent { - n.log.Debug("failed to send an AcceptedFrontier message to: %s", validatorID) + n.log.Debug("failed to send AcceptedFrontier(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerIDs) n.acceptedFrontier.numFailed.Inc() } else { n.acceptedFrontier.numSent.Inc() @@ -290,6 +299,11 @@ func (n *network) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, requ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, requestID uint32, containerIDs ids.Set) { msg, err := n.b.GetAccepted(chainID, requestID, containerIDs) if err != nil { + n.log.Error("failed to build GetAccepted(%s, %d, %s): %s", + chainID, + requestID, + containerIDs, + err) for _, validatorID := range validatorIDs.List() { vID := validatorID n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) }) @@ -307,6 +321,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request sent = peer.send(msg) } if !sent { + n.log.Debug("failed to send GetAccepted(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerIDs) n.executor.Add(func() { n.router.GetAcceptedFailed(vID, chainID, requestID) }) n.getAccepted.numFailed.Inc() } else { @@ -319,8 +338,11 @@ func (n *network) GetAccepted(validatorIDs ids.ShortSet, chainID ids.ID, request func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerIDs ids.Set) { msg, err := n.b.Accepted(chainID, requestID, containerIDs) if err != nil { - n.log.Error("attempted to pack too large of an Accepted message.\nNumber of containerIDs: %d", - containerIDs.Len()) + n.log.Error("failed to build Accepted(%s, %d, %s): %s", + chainID, + requestID, + containerIDs, + err) return // Packing message failed } @@ -332,7 +354,11 @@ func (n *network) Accepted(validatorID ids.ShortID, chainID ids.ID, requestID ui sent = peer.send(msg) } if !sent { - n.log.Debug("failed to send an Accepted message to: %s", validatorID) + n.log.Debug("failed to send Accepted(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerIDs) n.accepted.numFailed.Inc() } else { n.accepted.numSent.Inc() @@ -352,7 +378,11 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, sent = peer.send(msg) } if !sent { - n.log.Debug("failed to send a Get message to: %s", validatorID) + n.log.Debug("failed to send Get(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerID) n.get.numFailed.Inc() } else { n.get.numSent.Inc() @@ -363,7 +393,12 @@ func (n *network) Get(validatorID ids.ShortID, chainID ids.ID, requestID uint32, func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, containerID ids.ID, container []byte) { msg, err := n.b.Put(chainID, requestID, containerID, container) if err != nil { - n.log.Error("failed to build Put message because of container of size %d", len(container)) + n.log.Error("failed to build Put(%s, %d, %s): %s. 
len(container) : %d", + chainID, + requestID, + containerID, + err, + len(container)) return } @@ -375,7 +410,12 @@ func (n *network) Put(validatorID ids.ShortID, chainID ids.ID, requestID uint32, sent = peer.send(msg) } if !sent { - n.log.Debug("failed to send a Put message to: %s", validatorID) + n.log.Debug("failed to send Put(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerID) + n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container}) n.put.numFailed.Inc() } else { n.put.numSent.Inc() @@ -390,7 +430,13 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID vID := validatorID n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) }) } - n.log.Error("attempted to pack too large of a PushQuery message.\nContainer length: %d", len(container)) + n.log.Error("failed to build PushQuery(%s, %d, %s): %s. len(container): %d", + chainID, + requestID, + containerID, + err, + len(container)) + n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container}) return // Packing message failed } @@ -404,7 +450,12 @@ func (n *network) PushQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID sent = peer.send(msg) } if !sent { - n.log.Debug("failed sending a PushQuery message to: %s", vID) + n.log.Debug("failed to send PushQuery(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerID) + n.log.Verbo("container: %s", formatting.DumpBytes{Bytes: container}) n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) }) n.pushQuery.numFailed.Inc() } else { @@ -428,7 +479,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID sent = peer.send(msg) } if !sent { - n.log.Debug("failed sending a PullQuery message to: %s", vID) + n.log.Debug("failed to send PullQuery(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + containerID) n.executor.Add(func() { n.router.QueryFailed(vID, chainID, requestID) }) n.pullQuery.numFailed.Inc() } else { @@ -441,7 +496,11 @@ func (n *network) PullQuery(validatorIDs ids.ShortSet, chainID ids.ID, requestID func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint32, votes ids.Set) { msg, err := n.b.Chits(chainID, requestID, votes) if err != nil { - n.log.Error("failed to build Chits message because of %d votes", votes.Len()) + n.log.Error("failed to build Chits(%s, %d, %s): %s", + chainID, + requestID, + votes, + err) return } @@ -453,7 +512,11 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3 sent = peer.send(msg) } if !sent { - n.log.Debug("failed to send a Chits message to: %s", validatorID) + n.log.Debug("failed to send Chits(%s, %s, %d, %s)", + validatorID, + chainID, + requestID, + votes) n.chits.numFailed.Inc() } else { n.chits.numSent.Inc() @@ -463,7 +526,8 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3 // Gossip attempts to gossip the container to the network func (n *network) Gossip(chainID, containerID ids.ID, container []byte) { if err := n.gossipContainer(chainID, containerID, container); err != nil { - n.log.Error("error gossiping container %s to %s: %s", containerID, chainID, err) + n.log.Error("failed to Gossip(%s, %s): %s", chainID, containerID, err) + n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container}) } } diff --git a/snow/networking/router/subnet_router.go b/snow/networking/router/subnet_router.go index 36187dc..5ac7c80 100644 --- a/snow/networking/router/subnet_router.go +++ 
b/snow/networking/router/subnet_router.go @@ -7,6 +7,8 @@ import ( "sync" "time" + "github.com/ava-labs/gecko/utils/formatting" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/utils/logging" @@ -81,7 +83,7 @@ func (sr *ChainRouter) RemoveChain(chainID ids.ID) { delete(sr.chains, chainID.Key()) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("can't remove unknown chain %s", chainID) } } @@ -95,7 +97,7 @@ func (sr *ChainRouter) GetAcceptedFrontier(validatorID ids.ShortID, chainID ids. if chain, exists := sr.chains[chainID.Key()]; exists { chain.GetAcceptedFrontier(validatorID, requestID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("GetAcceptedFrontier(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } } @@ -110,7 +112,7 @@ func (sr *ChainRouter) AcceptedFrontier(validatorID ids.ShortID, chainID ids.ID, if chain, exists := sr.chains[chainID.Key()]; exists { chain.AcceptedFrontier(validatorID, requestID, containerIDs) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("AcceptedFrontier(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs) } } @@ -125,7 +127,7 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI if chain, exists := sr.chains[chainID.Key()]; exists { chain.GetAcceptedFrontierFailed(validatorID, requestID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Error("GetAcceptedFrontierFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } } @@ -139,7 +141,7 @@ func (sr *ChainRouter) GetAccepted(validatorID ids.ShortID, chainID ids.ID, requ if chain, exists := sr.chains[chainID.Key()]; exists { chain.GetAccepted(validatorID, requestID, containerIDs) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("GetAccepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs) } } @@ -154,7 +156,7 @@ func (sr *ChainRouter) Accepted(validatorID ids.ShortID, chainID ids.ID, request if chain, exists := sr.chains[chainID.Key()]; exists { chain.Accepted(validatorID, requestID, containerIDs) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("Accepted(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerIDs) } } @@ -169,7 +171,7 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID if chain, exists := sr.chains[chainID.Key()]; exists { chain.GetAcceptedFailed(validatorID, requestID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } } @@ -182,7 +184,7 @@ func (sr *ChainRouter) Get(validatorID ids.ShortID, chainID ids.ID, requestID ui if chain, exists := sr.chains[chainID.Key()]; exists { chain.Get(validatorID, requestID, containerID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("Get(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID) } } @@ -198,7 +200,8 @@ func (sr *ChainRouter) Put(validatorID 
ids.ShortID, chainID ids.ID, requestID ui if chain, exists := sr.chains[chainID.Key()]; exists { chain.Put(validatorID, requestID, containerID, container) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("Put(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID) + sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container}) } } @@ -212,7 +215,7 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques if chain, exists := sr.chains[chainID.Key()]; exists { chain.GetFailed(validatorID, requestID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Error("GetFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } } @@ -225,7 +228,8 @@ func (sr *ChainRouter) PushQuery(validatorID ids.ShortID, chainID ids.ID, reques if chain, exists := sr.chains[chainID.Key()]; exists { chain.PushQuery(validatorID, requestID, containerID, container) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("PushQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID) + sr.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container}) } } @@ -238,7 +242,7 @@ func (sr *ChainRouter) PullQuery(validatorID ids.ShortID, chainID ids.ID, reques if chain, exists := sr.chains[chainID.Key()]; exists { chain.PullQuery(validatorID, requestID, containerID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("PullQuery(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, containerID) } } @@ -253,7 +257,7 @@ func (sr *ChainRouter) Chits(validatorID ids.ShortID, chainID ids.ID, requestID if chain, exists := sr.chains[chainID.Key()]; exists { chain.Chits(validatorID, requestID, votes) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("Chits(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID, votes) } } @@ -267,7 +271,7 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ if chain, exists := sr.chains[chainID.Key()]; exists { chain.QueryFailed(validatorID, requestID) } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Error("QueryFailed(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID) } } From c560eeab37a8fc906fd55c7a03bcf9e40c930dcb Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sat, 6 Jun 2020 11:50:46 -0400 Subject: [PATCH 020/183] lower/improve gossip logging --- network/network.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/network/network.go b/network/network.go index 83a9e99..d2d81fb 100644 --- a/network/network.go +++ b/network/network.go @@ -526,7 +526,7 @@ func (n *network) Chits(validatorID ids.ShortID, chainID ids.ID, requestID uint3 // Gossip attempts to gossip the container to the network func (n *network) Gossip(chainID, containerID ids.ID, container []byte) { if err := n.gossipContainer(chainID, containerID, container); err != nil { - n.log.Error("failed to Gossip(%s, %s): %s", chainID, containerID, err) + n.log.Debug("failed to Gossip(%s, %s): %s", chainID, containerID, err) n.log.Verbo("container:\n%s", formatting.DumpBytes{Bytes: container}) } } @@ -701,7 +701,9 @@ func (n *network) gossip() 
{ } msg, err := n.b.PeerList(ips) if err != nil { - n.log.Warn("failed to gossip PeerList message due to %s", err) + n.log.Error("failed to build peer list to gossip: %s. len(ips): %d", + err, + len(ips)) continue } From 3a4ffb48505cce59b9a0e5465bca1526c5fd7766 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 8 Jun 2020 20:30:03 -0400 Subject: [PATCH 021/183] lower log level for gossiped put messages --- network/network.go | 6 +++++- snow/engine/avalanche/transitive.go | 6 +++++- snow/engine/snowman/transitive.go | 6 +++++- 3 files changed, 15 insertions(+), 3 deletions(-) diff --git a/network/network.go b/network/network.go index 9780c07..7bfc4f7 100644 --- a/network/network.go +++ b/network/network.go @@ -39,6 +39,10 @@ const ( defaultGetVersionTimeout = 2 * time.Second defaultAllowPrivateIPs = true defaultGossipSize = 50 + + // Request ID used when sending a Put message to gossip an accepted container + // (ie not sent in response to a Get) + GossipMsgRequestID = math.MaxUint32 ) // Network defines the functionality of the networking library. @@ -620,7 +624,7 @@ func (n *network) Track(ip utils.IPDesc) { // assumes the stateLock is not held. func (n *network) gossipContainer(chainID, containerID ids.ID, container []byte) error { - msg, err := n.b.Put(chainID, math.MaxUint32, containerID, container) + msg, err := n.b.Put(chainID, GossipMsgRequestID, containerID, container) if err != nil { return fmt.Errorf("attempted to pack too large of a Put message.\nContainer length: %d", len(container)) } diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index e48b167..37f0c9a 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -169,7 +169,11 @@ func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, vtxID ids.ID, vtxByt t.Config.Context.Log.Verbo("Put(%s, %d, %s) called", vdr, requestID, vtxID) if !t.bootstrapped { // Bootstrapping unfinished --> didn't call Get --> this message is invalid - t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID) + if requestID == network.GossipMsgRequestID { + t.Config.Context.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID) + } else { + t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, vtxID) + } return nil } diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index f155f5e..5461c44 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -185,7 +185,11 @@ func (t *Transitive) GetAncestors(vdr ids.ShortID, requestID uint32, blkID ids.I func (t *Transitive) Put(vdr ids.ShortID, requestID uint32, blkID ids.ID, blkBytes []byte) error { // bootstrapping isn't done --> we didn't send any gets --> this put is invalid if !t.bootstrapped { - t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID) + if requestID == network.GossipMsgRequestID { + t.Config.Context.Log.Verbo("dropping gossip Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID) + } else { + t.Config.Context.Log.Debug("dropping Put(%s, %d, %s) due to bootstrapping", vdr, requestID, blkID) + } return nil } From b576f273976b86f6e3ea84e55f3af6463c2bd893 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 10 Jun 2020 16:20:40 -0400 Subject: [PATCH 022/183] comments/reorganize --- vms/components/codec/codec.go | 95 ++++++++++++++++++------------ vms/components/codec/codec_test.go | 4 +- 2 files 
changed, 60 insertions(+), 39 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 72192cb..5896464 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -4,8 +4,10 @@ package codec import ( + "encoding/binary" "errors" "fmt" + "math" "reflect" "unicode" @@ -15,14 +17,14 @@ import ( const ( defaultMaxSize = 1 << 18 // default max size, in bytes, of something being marshalled by Marshal() defaultMaxSliceLength = 1 << 18 // default max length of a slice being marshalled by Marshal() + maxStringLen = math.MaxInt16 ) // ErrBadCodec is returned when one tries to perform an operation // using an unknown codec var ( errBadCodec = errors.New("wrong or unknown codec used") - errNil = errors.New("can't marshal nil value") - errUnmarshalNil = errors.New("can't unmarshal into nil") + errNil = errors.New("can't marshal/unmarshal nil value") errNeedPointer = errors.New("must unmarshal into a pointer") errMarshalUnregisteredType = errors.New("can't marshal an unregistered type") errUnmarshalUnregisteredType = errors.New("can't unmarshal an unregistered type") @@ -62,7 +64,7 @@ func New(maxSize, maxSliceLen int) Codec { // NewDefault returns a new codec with reasonable default values func NewDefault() Codec { return New(defaultMaxSize, defaultMaxSliceLength) } -// RegisterType is used to register types that may be unmarshaled into an interface typed value +// RegisterType is used to register types that may be unmarshaled into an interface // [val] is a value of the type being registered func (c codec) RegisterType(val interface{}) error { valType := reflect.TypeOf(val) @@ -79,23 +81,20 @@ func (c codec) RegisterType(val interface{}) error { // 2) We use "marshal" and "serialize" interchangeably, and "unmarshal" and "deserialize" interchangeably // 3) To include a field of a struct in the serialized form, add the tag `serialize:"true"` to it // 4) These typed members of a struct may be serialized: -// bool, string, uint[8,16,32,64, int[8,16,32,64], +// bool, string, uint[8,16,32,64], int[8,16,32,64], // structs, slices, arrays, interface. -// structs, slices and arrays can only be serialized if their constituent parts can be. -// 5) To marshal an interface typed value, you must pass a _pointer_ to the value -// 6) If you want to be able to unmarshal into an interface typed value, -// you must call codec.RegisterType([instance of the type that fulfills the interface]). +// structs, slices and arrays can only be serialized if their constituent values can be. +// 5) To marshal an interface, you must pass a pointer to the value +// 6) To unmarshal an interface, you must call codec.RegisterType([instance of the type that fulfills the interface]). 
// 7) nil slices will be unmarshaled as an empty slice of the appropriate type // 8) Serialized fields must be exported // Marshal returns the byte representation of [value] -// If you want to marshal an interface, [value] must be a pointer -// to the interface +// To marshal an interface, [value] must be a pointer to the interface func (c codec) Marshal(value interface{}) ([]byte, error) { if value == nil { return nil, errNil } - return c.marshal(reflect.ValueOf(value)) } @@ -105,46 +104,69 @@ func (c codec) marshal(value reflect.Value) ([]byte, error) { t := value.Type() valueKind := value.Kind() + + // Case: Value can't be marshalled switch valueKind { - case reflect.Interface, reflect.Ptr, reflect.Slice: - if value.IsNil() { + case reflect.Interface, reflect.Ptr, reflect.Slice, reflect.Invalid: + if value.IsNil() { // Can't marshal nil return nil, errNil } } + // Case: Value is of known size; return its byte repr. switch valueKind { case reflect.Uint8: - p.PackByte(uint8(value.Uint())) - return p.Bytes, p.Err + return []byte{byte(value.Uint())}, nil case reflect.Int8: - p.PackByte(uint8(value.Int())) - return p.Bytes, p.Err + return []byte{byte(value.Int())}, nil case reflect.Uint16: - p.PackShort(uint16(value.Uint())) - return p.Bytes, p.Err + bytes := make([]byte, 2, 2) + binary.BigEndian.PutUint16(bytes, uint16(value.Uint())) + return bytes, nil case reflect.Int16: - p.PackShort(uint16(value.Int())) - return p.Bytes, p.Err + bytes := make([]byte, 2, 2) + binary.BigEndian.PutUint16(bytes, uint16(value.Int())) + return bytes, nil case reflect.Uint32: - p.PackInt(uint32(value.Uint())) - return p.Bytes, p.Err + bytes := make([]byte, 4, 4) + binary.BigEndian.PutUint32(bytes, uint32(value.Uint())) + return bytes, nil case reflect.Int32: - p.PackInt(uint32(value.Int())) - return p.Bytes, p.Err + bytes := make([]byte, 4, 4) + binary.BigEndian.PutUint32(bytes, uint32(value.Int())) + return bytes, nil case reflect.Uint64: - p.PackLong(value.Uint()) - return p.Bytes, p.Err + bytes := make([]byte, 8, 8) + binary.BigEndian.PutUint64(bytes, uint64(value.Uint())) + return bytes, nil case reflect.Int64: - p.PackLong(uint64(value.Int())) - return p.Bytes, p.Err + bytes := make([]byte, 8, 8) + binary.BigEndian.PutUint64(bytes, uint64(value.Int())) + return bytes, nil case reflect.Uintptr, reflect.Ptr: return c.marshal(value.Elem()) case reflect.String: - p.PackStr(value.String()) - return p.Bytes, p.Err + asStr := value.String() + strSize := len(asStr) + if strSize > maxStringLen { + return nil, errSliceTooLarge + } + bytes := make([]byte, 2+strSize, 2+strSize) + binary.BigEndian.PutUint16(bytes[0:2], uint16(strSize)) + if strSize == 0 { + return bytes, nil + } + copy(bytes[2:], []byte(asStr)) + return bytes, nil case reflect.Bool: - p.PackBool(value.Bool()) - return p.Bytes, p.Err + if value.Bool() { + return []byte{1}, nil + } + return []byte{0}, nil + } + + // Case: Value is of unknown size. Calculate its size and fill byte array. 
+ switch valueKind { case reflect.Interface: typeID, ok := c.typeToTypeID[reflect.TypeOf(value.Interface())] // Get the type ID of the value being marshaled if !ok { @@ -181,7 +203,7 @@ func (c codec) marshal(value reflect.Value) ([]byte, error) { continue } if unicode.IsLower(rune(field.Name[0])) { // Can only marshal exported fields - return nil, errMarshalUnexportedField + return nil, fmt.Errorf("can't marshal exported field %s", field.Name) } fieldVal := value.Field(i) // The field we're serializing if fieldVal.Kind() == reflect.Slice && fieldVal.IsNil() { @@ -195,8 +217,7 @@ func (c codec) marshal(value reflect.Value) ([]byte, error) { p.PackFixedBytes(fieldBytes) } return p.Bytes, p.Err - case reflect.Invalid: - return nil, errUnmarshalNil + default: return nil, errUnknownType } @@ -332,7 +353,7 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { // Assign to the top-level struct's member field.Set(underlyingValue) case reflect.Invalid: - return errUnmarshalNil + return errNil default: return errUnknownType } diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index 6fdfeba..a58f685 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -407,8 +407,8 @@ func TestSerializeUnexportedField(t *testing.T) { } codec := NewDefault() - if _, err := codec.Marshal(myS); err != errMarshalUnexportedField { - t.Fatalf("expected err to be errUnexportedField but was %v", err) + if _, err := codec.Marshal(myS); err == nil { + t.Fatalf("expected err but got none") } } From 64b2df39b5f182b86aa5ded461c2222adb1d15a0 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Wed, 10 Jun 2020 16:47:31 -0400 Subject: [PATCH 023/183] Split admin api into admin and info apis --- api/admin/service.go | 96 +------------------------------- api/info/service.go | 130 +++++++++++++++++++++++++++++++++++++++++++ main/params.go | 1 + node/config.go | 1 + node/node.go | 12 +++- 5 files changed, 144 insertions(+), 96 deletions(-) create mode 100644 api/info/service.go diff --git a/api/admin/service.go b/api/admin/service.go index e05a440..baf8fe7 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -10,129 +10,35 @@ import ( "github.com/ava-labs/gecko/api" "github.com/ava-labs/gecko/chains" - "github.com/ava-labs/gecko/genesis" - "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/network" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/version" cjson "github.com/ava-labs/gecko/utils/json" ) // Admin is the API service for node admin management type Admin struct { - version version.Version - nodeID ids.ShortID - networkID uint32 log logging.Logger - networking network.Network performance Performance chainManager chains.Manager httpServer *api.Server } // NewService returns a new admin API service -func NewService(version version.Version, nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler { +func NewService(log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler { newServer := rpc.NewServer() codec := cjson.NewCodec() newServer.RegisterCodec(codec, "application/json") newServer.RegisterCodec(codec, "application/json;charset=UTF-8") newServer.RegisterService(&Admin{ - version: version, - nodeID: nodeID, - networkID: networkID, log: log, chainManager: chainManager, - networking: peers, 
httpServer: httpServer, }, "admin") return &common.HTTPHandler{Handler: newServer} } -// GetNodeVersionReply are the results from calling GetNodeVersion -type GetNodeVersionReply struct { - Version string `json:"version"` -} - -// GetNodeVersion returns the version this node is running -func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error { - service.log.Debug("Admin: GetNodeVersion called") - - reply.Version = service.version.String() - return nil -} - -// GetNodeIDReply are the results from calling GetNodeID -type GetNodeIDReply struct { - NodeID ids.ShortID `json:"nodeID"` -} - -// GetNodeID returns the node ID of this node -func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error { - service.log.Debug("Admin: GetNodeID called") - - reply.NodeID = service.nodeID - return nil -} - -// GetNetworkIDReply are the results from calling GetNetworkID -type GetNetworkIDReply struct { - NetworkID cjson.Uint32 `json:"networkID"` -} - -// GetNetworkID returns the network ID this node is running on -func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { - service.log.Debug("Admin: GetNetworkID called") - - reply.NetworkID = cjson.Uint32(service.networkID) - return nil -} - -// GetNetworkNameReply is the result from calling GetNetworkName -type GetNetworkNameReply struct { - NetworkName string `json:"networkName"` -} - -// GetNetworkName returns the network name this node is running on -func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error { - service.log.Debug("Admin: GetNetworkName called") - - reply.NetworkName = genesis.NetworkName(service.networkID) - return nil -} - -// GetBlockchainIDArgs are the arguments for calling GetBlockchainID -type GetBlockchainIDArgs struct { - Alias string `json:"alias"` -} - -// GetBlockchainIDReply are the results from calling GetBlockchainID -type GetBlockchainIDReply struct { - BlockchainID string `json:"blockchainID"` -} - -// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied -func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { - service.log.Debug("Admin: GetBlockchainID called") - - bID, err := service.chainManager.Lookup(args.Alias) - reply.BlockchainID = bID.String() - return err -} - -// PeersReply are the results from calling Peers -type PeersReply struct { - Peers []network.PeerID `json:"peers"` -} - -// Peers returns the list of current validators -func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error { - service.log.Debug("Admin: Peers called") - reply.Peers = service.networking.Peers() - return nil -} - // StartCPUProfilerArgs are the arguments for calling StartCPUProfiler type StartCPUProfilerArgs struct { Filename string `json:"filename"` diff --git a/api/info/service.go b/api/info/service.go new file mode 100644 index 0000000..a1e1937 --- /dev/null +++ b/api/info/service.go @@ -0,0 +1,130 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package info + +import ( + "net/http" + + "github.com/gorilla/rpc/v2" + + "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/genesis" + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/network" + "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/version" + + cjson "github.com/ava-labs/gecko/utils/json" +) + +// Info is the API service for unprivileged info on a node +type Info struct { + version version.Version + nodeID ids.ShortID + networkID uint32 + log logging.Logger + networking network.Network + chainManager chains.Manager +} + +// NewService returns a new admin API service +func NewService(log logging.Logger, version version.Version, nodeID ids.ShortID, networkID uint32, chainManager chains.Manager, peers network.Network) *common.HTTPHandler { + newServer := rpc.NewServer() + codec := cjson.NewCodec() + newServer.RegisterCodec(codec, "application/json") + newServer.RegisterCodec(codec, "application/json;charset=UTF-8") + newServer.RegisterService(&Info{ + version: version, + nodeID: nodeID, + networkID: networkID, + log: log, + chainManager: chainManager, + networking: peers, + }, "info") + return &common.HTTPHandler{Handler: newServer} +} + +// GetNodeVersionReply are the results from calling GetNodeVersion +type GetNodeVersionReply struct { + Version string `json:"version"` +} + +// GetNodeVersion returns the version this node is running +func (service *Info) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error { + service.log.Debug("Info: GetNodeVersion called") + + reply.Version = service.version.String() + return nil +} + +// GetNodeIDReply are the results from calling GetNodeID +type GetNodeIDReply struct { + NodeID ids.ShortID `json:"nodeID"` +} + +// GetNodeID returns the node ID of this node +func (service *Info) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error { + service.log.Debug("Info: GetNodeID called") + + reply.NodeID = service.nodeID + return nil +} + +// GetNetworkIDReply are the results from calling GetNetworkID +type GetNetworkIDReply struct { + NetworkID cjson.Uint32 `json:"networkID"` +} + +// GetNetworkID returns the network ID this node is running on +func (service *Info) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { + service.log.Debug("Info: GetNetworkID called") + + reply.NetworkID = cjson.Uint32(service.networkID) + return nil +} + +// GetNetworkNameReply is the result from calling GetNetworkName +type GetNetworkNameReply struct { + NetworkName string `json:"networkName"` +} + +// GetNetworkName returns the network name this node is running on +func (service *Info) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error { + service.log.Debug("Info: GetNetworkName called") + + reply.NetworkName = genesis.NetworkName(service.networkID) + return nil +} + +// GetBlockchainIDArgs are the arguments for calling GetBlockchainID +type GetBlockchainIDArgs struct { + Alias string `json:"alias"` +} + +// GetBlockchainIDReply are the results from calling GetBlockchainID +type GetBlockchainIDReply struct { + BlockchainID string `json:"blockchainID"` +} + +// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied +func (service *Info) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { + service.log.Debug("Info: GetBlockchainID called") + + bID, err := service.chainManager.Lookup(args.Alias) + 
reply.BlockchainID = bID.String() + return err +} + +// PeersReply are the results from calling Peers +type PeersReply struct { + Peers []network.PeerID `json:"peers"` +} + +// Peers returns the list of current validators +func (service *Info) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error { + service.log.Debug("Info: Peers called") + reply.Peers = service.networking.Peers() + return nil +} diff --git a/main/params.go b/main/params.go index eef8e60..74383bd 100644 --- a/main/params.go +++ b/main/params.go @@ -222,6 +222,7 @@ func init() { // Enable/Disable APIs: fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") + fs.BoolVar(&Config.InfoAPIEnabled, "api-info-enabled", true, "If true, this node exposes the Info API") fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") fs.BoolVar(&Config.HealthAPIEnabled, "api-health-enabled", true, "If true, this node exposes the Health API") diff --git a/node/config.go b/node/config.go index 74ff491..29e79b9 100644 --- a/node/config.go +++ b/node/config.go @@ -50,6 +50,7 @@ type Config struct { // Enable/Disable APIs AdminAPIEnabled bool + InfoAPIEnabled bool KeystoreAPIEnabled bool MetricsAPIEnabled bool HealthAPIEnabled bool diff --git a/node/node.go b/node/node.go index ea0e8fc..0bb2c24 100644 --- a/node/node.go +++ b/node/node.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/gecko/api" "github.com/ava-labs/gecko/api/admin" "github.com/ava-labs/gecko/api/health" + "github.com/ava-labs/gecko/api/info" "github.com/ava-labs/gecko/api/ipcs" "github.com/ava-labs/gecko/api/keystore" "github.com/ava-labs/gecko/api/metrics" @@ -461,11 +462,19 @@ func (n *Node) initMetricsAPI() { func (n *Node) initAdminAPI() { if n.Config.AdminAPIEnabled { n.Log.Info("initializing Admin API") - service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer) + service := admin.NewService(n.Log, n.chainManager, n.Net, &n.APIServer) n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog) } } +func (n *Node) initInfoAPI() { + if n.Config.InfoAPIEnabled { + n.Log.Info("initializing Info API") + service := info.NewService(n.Log, Version, n.ID, n.Config.NetworkID, n.chainManager, n.Net) + n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "", n.HTTPLog) + } +} + // initHealthAPI initializes the Health API service // Assumes n.Log, n.ConsensusAPI, and n.ValidatorAPI already initialized func (n *Node) initHealthAPI() { @@ -562,6 +571,7 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg n.initChainManager() // Set up the chain manager n.initAdminAPI() // Start the Admin API + n.initInfoAPI() // Start the Info API n.initHealthAPI() // Start the Health API n.initIPCAPI() // Start the IPC API From 3d60db3a05a7f9e5dcd8ff821c7f16270d2d9848 Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Thu, 11 Jun 2020 13:14:02 -0700 Subject: [PATCH 024/183] Subtract from balance when adding a default subnet delegator. 
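The one-line change below replaces a hard-coded 0 with tx.Wght so that the delegated amount, and not just the transaction fee, is deducted from the sender's balance. As a minimal, illustrative sketch of the intended accounting (a simplified stand-in, not the actual platformvm code; only Remove's deduct-amount-plus-fee behavior, described by the comment in the diff, is assumed):

    // Illustrative sketch only -- a simplified stand-in for the platformvm account logic.
    // Assumption: Remove(amount, nonce) deducts [amount] plus the tx fee and advances the nonce.
    package main

    import (
        "errors"
        "fmt"
    )

    const txFee = uint64(0) // the real fee is a chain parameter; zero keeps the sketch simple

    type account struct {
        balance uint64
        nonce   uint64
    }

    // remove returns a copy of the account with [amount] + txFee deducted and the nonce advanced.
    func (a account) remove(amount, nonce uint64) (account, error) {
        if a.balance < amount+txFee {
            return account{}, errors.New("insufficient balance to lock stake and pay fee")
        }
        return account{balance: a.balance - amount - txFee, nonce: nonce}, nil
    }

    func main() {
        acct := account{balance: 1000, nonce: 4}
        weight := uint64(600) // stands in for tx.Wght: the amount being delegated

        // Before the fix: passing 0 only charged the fee, so the stake was never deducted.
        buggy, _ := acct.remove(0, acct.nonce+1)
        // After the fix: the delegated weight comes out of the balance as well.
        fixed, _ := acct.remove(weight, acct.nonce+1)

        fmt.Println(buggy.balance, fixed.balance) // 1000 vs 400 (with a zero fee)
    }

Without the weight in the call, a delegator's stake was never actually removed from its balance; passing tx.Wght makes the balance check and the deduction cover the staked amount as well as the fee.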
--- vms/platformvm/add_default_subnet_delegator_tx.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_default_subnet_delegator_tx.go index 3012d84..9881652 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx.go +++ b/vms/platformvm/add_default_subnet_delegator_tx.go @@ -128,7 +128,7 @@ func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*ve // The account if this block's proposal is committed and the validator is // added to the pending validator set. (Increase the account's nonce; // decrease its balance.) - newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee + newAccount, err := account.Remove(tx.Wght, tx.Nonce) // Remove also removes the fee if err != nil { return nil, nil, nil, nil, permError{err} } From e073b4e8ad99cc026e32d9ce34f320bf9d647892 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Thu, 11 Jun 2020 17:01:28 -0400 Subject: [PATCH 025/183] Prevent duplicated addresses in avm import key --- vms/avm/service.go | 22 +++++++++++++++++----- 1 file changed, 17 insertions(+), 5 deletions(-) diff --git a/vms/avm/service.go b/vms/avm/service.go index f71d607..1dce66d 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -665,14 +665,26 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I return fmt.Errorf("problem saving key %w", err) } - addresses, _ := user.Addresses(db) - addresses = append(addresses, sk.PublicKey().Address()) + addresses, err := user.Addresses(db) + if err != nil { + return fmt.Errorf("problem saving key while getting existing addresses: %w", err) + } + newAddress := sk.PublicKey().Address() + exists := false + for _, address := range addresses { + if newAddress.Equals(address) { + exists = true + } + } + if !exists { + addresses = append(addresses, newAddress) - if err := user.SetAddresses(db, addresses); err != nil { - return fmt.Errorf("problem saving addresses: %w", err) + if err := user.SetAddresses(db, addresses); err != nil { + return fmt.Errorf("problem saving addresses: %w", err) + } } - reply.Address = service.vm.Format(sk.PublicKey().Address().Bytes()) + reply.Address = service.vm.Format(newAddress.Bytes()) return nil } From f52d0c29bd35574714f5e5a132a4c77215c208fe Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 11 Jun 2020 18:00:21 -0400 Subject: [PATCH 026/183] Register a timeout for querying ourselves to ensure we never drop a query --- go.mod | 1 + go.sum | 4 + snow/engine/snowman/transitive.go | 1 - snow/networking/router/handler.go | 166 ++++++++++++++---------- snow/networking/router/subnet_router.go | 51 ++------ snow/networking/sender/sender.go | 73 ++++++----- snow/networking/sender/sender_test.go | 126 ++++++++++++++++++ 7 files changed, 280 insertions(+), 142 deletions(-) diff --git a/go.mod b/go.mod index 4636c8c..a8a8f39 100644 --- a/go.mod +++ b/go.mod @@ -33,6 +33,7 @@ require ( github.com/olekukonko/tablewriter v0.0.4 // indirect github.com/pborman/uuid v1.2.0 // indirect github.com/prometheus/client_golang v1.6.0 + github.com/prometheus/common v0.9.1 github.com/prometheus/tsdb v0.10.0 // indirect github.com/rjeczalik/notify v0.9.2 // indirect github.com/rs/cors v1.7.0 diff --git a/go.sum b/go.sum index 774be35..4bb3270 100644 --- a/go.sum +++ b/go.sum @@ -7,8 +7,10 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= 
github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/allegro/bigcache v1.2.1 h1:hg1sY1raCwic3Vnsvje6TT7/pnZba83LeFck5NrFKSc= github.com/allegro/bigcache v1.2.1/go.mod h1:Cb/ax3seSYIx7SuZdm2G2xzfwmv3TPSk2ucNfQESPXM= @@ -217,6 +219,7 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/status-im/keycard-go v0.0.0-20200402102358-957c09536969 h1:Oo2KZNP70KE0+IUJSidPj/BFS/RXNHmKIJOdckzml2E= @@ -336,6 +339,7 @@ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miE google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index f155f5e..2c6267c 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -579,7 +579,6 @@ func (t *Transitive) pushSample(blk snowman.Block) { } t.Config.Sender.PushQuery(vdrSet, t.RequestID, blkID, blk.Bytes()) - return } func (t *Transitive) deliver(blk snowman.Block) error { diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 8b40223..3f1e21f 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -4,6 +4,7 @@ package router import ( + "sync" "time" "github.com/ava-labs/gecko/ids" @@ -17,12 +18,18 @@ import ( type Handler struct { metrics - msgs chan message - closed chan struct{} - engine common.Engine - msgChan <-chan common.Message + msgs chan message + reliableMsgsSema chan struct{} + reliableMsgsLock sync.Mutex + reliableMsgs []message + closed chan struct{} + msgChan <-chan common.Message + + ctx *snow.Context + engine 
common.Engine toClose func() + closing bool } // Initialize this consensus handler @@ -35,9 +42,12 @@ func (h *Handler) Initialize( ) { h.metrics.Initialize(namespace, metrics) h.msgs = make(chan message, bufferSize) + h.reliableMsgsSema = make(chan struct{}, 1) h.closed = make(chan struct{}) - h.engine = engine h.msgChan = msgChan + + h.ctx = engine.Context() + h.engine = engine } // Context of this Handler @@ -46,37 +56,38 @@ func (h *Handler) Context() *snow.Context { return h.engine.Context() } // Dispatch waits for incoming messages from the network // and, when they arrive, sends them to the consensus engine func (h *Handler) Dispatch() { - log := h.Context().Log defer func() { - log.Info("finished shutting down chain") + h.ctx.Log.Info("finished shutting down chain") close(h.closed) }() - closing := false for { select { case msg, ok := <-h.msgs: if !ok { + // the msgs channel has been closed, so this dispatcher should exit return } + h.metrics.pending.Dec() - if closing { - log.Debug("dropping message due to closing:\n%s", msg) - continue - } - if h.dispatchMsg(msg) { - closing = true + h.dispatchMsg(msg) + case <-h.reliableMsgsSema: + // get all the reliable messages + h.reliableMsgsLock.Lock() + msgs := h.reliableMsgs + h.reliableMsgs = nil + h.reliableMsgsLock.Unlock() + + // fire all the reliable messages + for _, msg := range msgs { + h.metrics.pending.Dec() + h.dispatchMsg(msg) } case msg := <-h.msgChan: - if closing { - log.Debug("dropping internal message due to closing:\n%s", msg) - continue - } - if h.dispatchMsg(message{messageType: notifyMsg, notification: msg}) { - closing = true - } + // handle a message from the VM + h.dispatchMsg(message{messageType: notifyMsg, notification: msg}) } - if closing && h.toClose != nil { + if h.closing && h.toClose != nil { go h.toClose() } } @@ -85,14 +96,19 @@ func (h *Handler) Dispatch() { // Dispatch a message to the consensus engine. // Returns true iff this consensus handler (and its associated engine) should shutdown // (due to receipt of a shutdown message) -func (h *Handler) dispatchMsg(msg message) bool { +func (h *Handler) dispatchMsg(msg message) { + if h.closing { + h.ctx.Log.Debug("dropping message due to closing:\n%s", msg) + h.metrics.dropped.Inc() + return + } + startTime := time.Now() - ctx := h.engine.Context() - ctx.Lock.Lock() - defer ctx.Lock.Unlock() + h.ctx.Lock.Lock() + defer h.ctx.Lock.Unlock() - ctx.Log.Verbo("Forwarding message to consensus: %s", msg) + h.ctx.Log.Verbo("Forwarding message to consensus: %s", msg) var ( err error done bool @@ -159,9 +175,10 @@ func (h *Handler) dispatchMsg(msg message) bool { } if err != nil { - ctx.Log.Fatal("forcing chain to shutdown due to %s", err) + h.ctx.Log.Fatal("forcing chain to shutdown due to %s", err) } - return done || err != nil + + h.closing = done || err != nil } // GetAcceptedFrontier passes a GetAcceptedFrontier message received from the @@ -187,8 +204,8 @@ func (h *Handler) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, co // GetAcceptedFrontierFailed passes a GetAcceptedFrontierFailed message received // from the network to the consensus engine. 
-func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) bool { - return h.sendMsg(message{ +func (h *Handler) GetAcceptedFrontierFailed(validatorID ids.ShortID, requestID uint32) { + h.sendReliableMsg(message{ messageType: getAcceptedFrontierFailedMsg, validatorID: validatorID, requestID: requestID, @@ -219,14 +236,43 @@ func (h *Handler) Accepted(validatorID ids.ShortID, requestID uint32, containerI // GetAcceptedFailed passes a GetAcceptedFailed message received from the // network to the consensus engine. -func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) bool { - return h.sendMsg(message{ +func (h *Handler) GetAcceptedFailed(validatorID ids.ShortID, requestID uint32) { + h.sendReliableMsg(message{ messageType: getAcceptedFailedMsg, validatorID: validatorID, requestID: requestID, }) } +// GetAncestors passes a GetAncestors message received from the network to the consensus engine. +func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool { + return h.sendMsg(message{ + messageType: getAncestorsMsg, + validatorID: validatorID, + requestID: requestID, + containerID: containerID, + }) +} + +// MultiPut passes a MultiPut message received from the network to the consensus engine. +func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) bool { + return h.sendMsg(message{ + messageType: multiPutMsg, + validatorID: validatorID, + requestID: requestID, + containers: containers, + }) +} + +// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine. +func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) { + h.sendReliableMsg(message{ + messageType: getAncestorsFailedMsg, + validatorID: validatorID, + requestID: requestID, + }) +} + // Get passes a Get message received from the network to the consensus engine. func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool { return h.sendMsg(message{ @@ -237,16 +283,6 @@ func (h *Handler) Get(validatorID ids.ShortID, requestID uint32, containerID ids }) } -// GetAncestors passes a GetAncestors message received from the network to the consensus engine. -func (h *Handler) GetAncestors(validatorID ids.ShortID, requestID uint32, containerID ids.ID) bool { - return h.sendMsg(message{ - messageType: getAncestorsMsg, - validatorID: validatorID, - requestID: requestID, - containerID: containerID, - }) -} - // Put passes a Put message received from the network to the consensus engine. func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids.ID, container []byte) bool { return h.sendMsg(message{ @@ -258,34 +294,15 @@ func (h *Handler) Put(validatorID ids.ShortID, requestID uint32, containerID ids }) } -// MultiPut passes a MultiPut message received from the network to the consensus engine. -func (h *Handler) MultiPut(validatorID ids.ShortID, requestID uint32, containers [][]byte) bool { - return h.sendMsg(message{ - messageType: multiPutMsg, - validatorID: validatorID, - requestID: requestID, - containers: containers, - }) -} - // GetFailed passes a GetFailed message to the consensus engine. 
-func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) bool { - return h.sendMsg(message{ +func (h *Handler) GetFailed(validatorID ids.ShortID, requestID uint32) { + h.sendReliableMsg(message{ messageType: getFailedMsg, validatorID: validatorID, requestID: requestID, }) } -// GetAncestorsFailed passes a GetAncestorsFailed message to the consensus engine. -func (h *Handler) GetAncestorsFailed(validatorID ids.ShortID, requestID uint32) bool { - return h.sendMsg(message{ - messageType: getAncestorsFailedMsg, - validatorID: validatorID, - requestID: requestID, - }) -} - // PushQuery passes a PushQuery message received from the network to the consensus engine. func (h *Handler) PushQuery(validatorID ids.ShortID, requestID uint32, blockID ids.ID, block []byte) bool { return h.sendMsg(message{ @@ -318,8 +335,8 @@ func (h *Handler) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set } // QueryFailed passes a QueryFailed message received from the network to the consensus engine. -func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) bool { - return h.sendMsg(message{ +func (h *Handler) QueryFailed(validatorID ids.ShortID, requestID uint32) { + h.sendReliableMsg(message{ messageType: queryFailedMsg, validatorID: validatorID, requestID: requestID, @@ -341,8 +358,9 @@ func (h *Handler) Notify(msg common.Message) bool { // Shutdown shuts down the dispatcher func (h *Handler) Shutdown() { - h.metrics.pending.Inc() - h.msgs <- message{messageType: shutdownMsg} + h.sendReliableMsg(message{ + messageType: shutdownMsg, + }) } func (h *Handler) sendMsg(msg message) bool { @@ -355,3 +373,15 @@ func (h *Handler) sendMsg(msg message) bool { return false } } + +func (h *Handler) sendReliableMsg(msg message) { + h.reliableMsgsLock.Lock() + defer h.reliableMsgsLock.Unlock() + + h.metrics.pending.Inc() + h.reliableMsgs = append(h.reliableMsgs, msg) + select { + case h.reliableMsgsSema <- struct{}{}: + default: + } +} diff --git a/snow/networking/router/subnet_router.go b/snow/networking/router/subnet_router.go index 5bf977c..731dcd6 100644 --- a/snow/networking/router/subnet_router.go +++ b/snow/networking/router/subnet_router.go @@ -122,19 +122,12 @@ func (sr *ChainRouter) GetAcceptedFrontierFailed(validatorID ids.ShortID, chainI sr.lock.RLock() defer sr.lock.RUnlock() + sr.timeouts.Cancel(validatorID, chainID, requestID) if chain, exists := sr.chains[chainID.Key()]; exists { - if !chain.GetAcceptedFrontierFailed(validatorID, requestID) { - sr.log.Debug("deferring GetAcceptedFrontier timeout due to a full queue on %s", chainID) - // Defer this call to later - sr.timeouts.Register(validatorID, chainID, requestID, func() { - sr.GetAcceptedFrontierFailed(validatorID, chainID, requestID) - }) - return - } + chain.GetAcceptedFrontierFailed(validatorID, requestID) } else { sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) } - sr.timeouts.Cancel(validatorID, chainID, requestID) } // GetAccepted routes an incoming GetAccepted request from the @@ -174,18 +167,12 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID sr.lock.RLock() defer sr.lock.RUnlock() + sr.timeouts.Cancel(validatorID, chainID, requestID) if chain, exists := sr.chains[chainID.Key()]; exists { - if !chain.GetAcceptedFailed(validatorID, requestID) { - sr.timeouts.Register(validatorID, chainID, requestID, func() { - sr.log.Debug("deferring GetAccepted timeout due to a full queue on %s", chainID) - sr.GetAcceptedFailed(validatorID, chainID, 
requestID) - }) - return - } + chain.GetAcceptedFailed(validatorID, requestID) } else { sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) } - sr.timeouts.Cancel(validatorID, chainID, requestID) } // GetAncestors routes an incoming GetAncestors message from the validator with ID [validatorID] @@ -225,18 +212,12 @@ func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.I sr.lock.RLock() defer sr.lock.RUnlock() + sr.timeouts.Cancel(validatorID, chainID, requestID) if chain, exists := sr.chains[chainID.Key()]; exists { - if !chain.GetAncestorsFailed(validatorID, requestID) { - sr.timeouts.Register(validatorID, chainID, requestID, func() { - sr.log.Debug("deferring GetAncestors timeout due to a full queue on %s", chainID) - sr.GetAncestorsFailed(validatorID, chainID, requestID) - }) - return - } + chain.GetAncestorsFailed(validatorID, requestID) } else { sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) } - sr.timeouts.Cancel(validatorID, chainID, requestID) } // Get routes an incoming Get request from the validator with ID [validatorID] @@ -275,18 +256,12 @@ func (sr *ChainRouter) GetFailed(validatorID ids.ShortID, chainID ids.ID, reques sr.lock.RLock() defer sr.lock.RUnlock() + sr.timeouts.Cancel(validatorID, chainID, requestID) if chain, exists := sr.chains[chainID.Key()]; exists { - if !chain.GetFailed(validatorID, requestID) { - sr.timeouts.Register(validatorID, chainID, requestID, func() { - sr.log.Debug("deferring Get timeout due to a full queue on %s", chainID) - sr.GetFailed(validatorID, chainID, requestID) - }) - return - } + chain.GetFailed(validatorID, requestID) } else { sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) } - sr.timeouts.Cancel(validatorID, chainID, requestID) } // PushQuery routes an incoming PushQuery request from the validator with ID [validatorID] @@ -337,18 +312,12 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ sr.lock.RLock() defer sr.lock.RUnlock() + sr.timeouts.Cancel(validatorID, chainID, requestID) if chain, exists := sr.chains[chainID.Key()]; exists { - if !chain.QueryFailed(validatorID, requestID) { - sr.timeouts.Register(validatorID, chainID, requestID, func() { - sr.log.Debug("deferring Query timeout due to a full queue on %s", chainID) - sr.QueryFailed(validatorID, chainID, requestID) - }) - return - } + chain.QueryFailed(validatorID, requestID) } else { sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) } - sr.timeouts.Cancel(validatorID, chainID, requestID) } // Shutdown shuts down this router diff --git a/snow/networking/sender/sender.go b/snow/networking/sender/sender.go index e81c5c0..92c02b8 100644 --- a/snow/networking/sender/sender.go +++ b/snow/networking/sender/sender.go @@ -31,17 +31,16 @@ func (s *Sender) Context() *snow.Context { return s.ctx } // GetAcceptedFrontier ... 
func (s *Sender) GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32) { - if validatorIDs.Contains(s.ctx.NodeID) { - validatorIDs.Remove(s.ctx.NodeID) - go s.router.GetAcceptedFrontier(s.ctx.NodeID, s.ctx.ChainID, requestID) - } - validatorList := validatorIDs.List() - for _, validatorID := range validatorList { + for _, validatorID := range validatorIDs.List() { vID := validatorID s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { s.router.GetAcceptedFrontierFailed(vID, s.ctx.ChainID, requestID) }) } + if validatorIDs.Contains(s.ctx.NodeID) { + validatorIDs.Remove(s.ctx.NodeID) + go s.router.GetAcceptedFrontier(s.ctx.NodeID, s.ctx.ChainID, requestID) + } s.sender.GetAcceptedFrontier(validatorIDs, s.ctx.ChainID, requestID) } @@ -49,24 +48,23 @@ func (s *Sender) GetAcceptedFrontier(validatorIDs ids.ShortSet, requestID uint32 func (s *Sender) AcceptedFrontier(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { if validatorID.Equals(s.ctx.NodeID) { go s.router.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs) - return + } else { + s.sender.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs) } - s.sender.AcceptedFrontier(validatorID, s.ctx.ChainID, requestID, containerIDs) } // GetAccepted ... func (s *Sender) GetAccepted(validatorIDs ids.ShortSet, requestID uint32, containerIDs ids.Set) { - if validatorIDs.Contains(s.ctx.NodeID) { - validatorIDs.Remove(s.ctx.NodeID) - go s.router.GetAccepted(s.ctx.NodeID, s.ctx.ChainID, requestID, containerIDs) - } - validatorList := validatorIDs.List() - for _, validatorID := range validatorList { + for _, validatorID := range validatorIDs.List() { vID := validatorID s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { s.router.GetAcceptedFailed(vID, s.ctx.ChainID, requestID) }) } + if validatorIDs.Contains(s.ctx.NodeID) { + validatorIDs.Remove(s.ctx.NodeID) + go s.router.GetAccepted(s.ctx.NodeID, s.ctx.ChainID, requestID, containerIDs) + } s.sender.GetAccepted(validatorIDs, s.ctx.ChainID, requestID, containerIDs) } @@ -74,9 +72,9 @@ func (s *Sender) GetAccepted(validatorIDs ids.ShortSet, requestID uint32, contai func (s *Sender) Accepted(validatorID ids.ShortID, requestID uint32, containerIDs ids.Set) { if validatorID.Equals(s.ctx.NodeID) { go s.router.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs) - return + } else { + s.sender.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs) } - s.sender.Accepted(validatorID, s.ctx.ChainID, requestID, containerIDs) } // Get sends a Get message to the consensus engine running on the specified @@ -85,6 +83,13 @@ func (s *Sender) Accepted(validatorID ids.ShortID, requestID uint32, containerID // specified container. func (s *Sender) Get(validatorID ids.ShortID, requestID uint32, containerID ids.ID) { s.ctx.Log.Verbo("Sending Get to validator %s. RequestID: %d. 
ContainerID: %s", validatorID, requestID, containerID) + + // Sending a Get to myself will always fail + if validatorID.Equals(s.ctx.NodeID) { + go s.router.GetFailed(validatorID, s.ctx.ChainID, requestID) + return + } + // Add a timeout -- if we don't get a response before the timeout expires, // send this consensus engine a GetFailed message s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { @@ -101,6 +106,7 @@ func (s *Sender) GetAncestors(validatorID ids.ShortID, requestID uint32, contain go s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID) return } + s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { s.router.GetAncestorsFailed(validatorID, s.ctx.ChainID, requestID) }) @@ -130,6 +136,13 @@ func (s *Sender) MultiPut(validatorID ids.ShortID, requestID uint32, containers // their preferred frontier given the existence of the specified container. func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID, container []byte) { s.ctx.Log.Verbo("Sending PushQuery to validators %v. RequestID: %d. ContainerID: %s", validatorIDs, requestID, containerID) + for _, validatorID := range validatorIDs.List() { + vID := validatorID + s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { + s.router.QueryFailed(vID, s.ctx.ChainID, requestID) + }) + } + // If one of the validators in [validatorIDs] is myself, send this message directly // to my own router rather than sending it over the network if validatorIDs.Contains(s.ctx.NodeID) { // One of the validators in [validatorIDs] was myself @@ -139,13 +152,7 @@ func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containe // If this were not a goroutine, then we would deadlock here when [handler].msgs is full go s.router.PushQuery(s.ctx.NodeID, s.ctx.ChainID, requestID, containerID, container) } - validatorList := validatorIDs.List() // Convert set to list for easier iteration - for _, validatorID := range validatorList { - vID := validatorID - s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { - s.router.QueryFailed(vID, s.ctx.ChainID, requestID) - }) - } + s.sender.PushQuery(validatorIDs, s.ctx.ChainID, requestID, containerID, container) } @@ -155,6 +162,14 @@ func (s *Sender) PushQuery(validatorIDs ids.ShortSet, requestID uint32, containe // their preferred frontier. func (s *Sender) PullQuery(validatorIDs ids.ShortSet, requestID uint32, containerID ids.ID) { s.ctx.Log.Verbo("Sending PullQuery. RequestID: %d. 
ContainerID: %s", requestID, containerID) + + for _, validatorID := range validatorIDs.List() { + vID := validatorID + s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { + s.router.QueryFailed(vID, s.ctx.ChainID, requestID) + }) + } + // If one of the validators in [validatorIDs] is myself, send this message directly // to my own router rather than sending it over the network if validatorIDs.Contains(s.ctx.NodeID) { // One of the validators in [validatorIDs] was myself @@ -164,13 +179,7 @@ func (s *Sender) PullQuery(validatorIDs ids.ShortSet, requestID uint32, containe // If this were not a goroutine, then we would deadlock when [handler].msgs is full go s.router.PullQuery(s.ctx.NodeID, s.ctx.ChainID, requestID, containerID) } - validatorList := validatorIDs.List() // Convert set to list for easier iteration - for _, validatorID := range validatorList { - vID := validatorID - s.timeouts.Register(validatorID, s.ctx.ChainID, requestID, func() { - s.router.QueryFailed(vID, s.ctx.ChainID, requestID) - }) - } + s.sender.PullQuery(validatorIDs, s.ctx.ChainID, requestID, containerID) } @@ -181,9 +190,9 @@ func (s *Sender) Chits(validatorID ids.ShortID, requestID uint32, votes ids.Set) // to my own router rather than sending it over the network if validatorID.Equals(s.ctx.NodeID) { go s.router.Chits(validatorID, s.ctx.ChainID, requestID, votes) - return + } else { + s.sender.Chits(validatorID, s.ctx.ChainID, requestID, votes) } - s.sender.Chits(validatorID, s.ctx.ChainID, requestID, votes) } // Gossip the provided container diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 7760307..7c7cabc 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -4,6 +4,7 @@ package sender import ( + "math/rand" "reflect" "sync" "testing" @@ -82,3 +83,128 @@ func TestTimeout(t *testing.T) { t.Fatalf("Timeouts should have fired") } } + +func TestReliableMessages(t *testing.T) { + tm := timeout.Manager{} + tm.Initialize(50 * time.Millisecond) + go tm.Dispatch() + + chainRouter := router.ChainRouter{} + chainRouter.Initialize(logging.NoLog{}, &tm, time.Hour, time.Second) + + sender := Sender{} + sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &chainRouter, &tm) + + engine := common.EngineTest{T: t} + engine.Default(true) + + engine.ContextF = snow.DefaultContextTest + engine.GossipF = func() error { return nil } + + queriesToSend := 1000 + awaiting := make([]chan struct{}, queriesToSend) + for i := 0; i < queriesToSend; i++ { + awaiting[i] = make(chan struct{}, 1) + } + + engine.QueryFailedF = func(validatorID ids.ShortID, reqID uint32) error { + close(awaiting[int(reqID)]) + return nil + } + + handler := router.Handler{} + handler.Initialize( + &engine, + nil, + 1, + "", + prometheus.NewRegistry(), + ) + go handler.Dispatch() + + chainRouter.AddChain(&handler) + + go func() { + for i := 0; i < queriesToSend; i++ { + vdrIDs := ids.ShortSet{} + vdrIDs.Add(ids.NewShortID([20]byte{1})) + + sender.PullQuery(vdrIDs, uint32(i), ids.Empty) + time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) + } + }() + + go func() { + for { + chainRouter.Gossip() + time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) + } + }() + + for _, await := range awaiting { + _, _ = <-await + } +} + +func TestReliableMessagesToMyself(t *testing.T) { + tm := timeout.Manager{} + tm.Initialize(50 * time.Millisecond) + go tm.Dispatch() + + chainRouter := router.ChainRouter{} + 
chainRouter.Initialize(logging.NoLog{}, &tm, time.Hour, time.Second) + + sender := Sender{} + sender.Initialize(snow.DefaultContextTest(), &ExternalSenderTest{}, &chainRouter, &tm) + + engine := common.EngineTest{T: t} + engine.Default(false) + + engine.ContextF = snow.DefaultContextTest + engine.GossipF = func() error { return nil } + engine.CantPullQuery = false + + queriesToSend := 2 + awaiting := make([]chan struct{}, queriesToSend) + for i := 0; i < queriesToSend; i++ { + awaiting[i] = make(chan struct{}, 1) + } + + engine.QueryFailedF = func(validatorID ids.ShortID, reqID uint32) error { + close(awaiting[int(reqID)]) + return nil + } + + handler := router.Handler{} + handler.Initialize( + &engine, + nil, + 1, + "", + prometheus.NewRegistry(), + ) + go handler.Dispatch() + + chainRouter.AddChain(&handler) + + go func() { + for i := 0; i < queriesToSend; i++ { + vdrIDs := ids.ShortSet{} + vdrIDs.Add(engine.Context().NodeID) + + sender.PullQuery(vdrIDs, uint32(i), ids.Empty) + time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) + } + }() + + go func() { + for { + chainRouter.Gossip() + time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) + } + }() + + for _, await := range awaiting { + _, _ = <-await + } +} From 960377e2b4227df17bdfdfca62c934c8598a1f38 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 11 Jun 2020 18:08:42 -0400 Subject: [PATCH 027/183] cleaned up imports --- snow/networking/router/handler.go | 3 ++- snow/networking/sender/sender_test.go | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 3f1e21f..9d45baf 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -7,10 +7,11 @@ import ( "sync" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" - "github.com/prometheus/client_golang/prometheus" ) // Handler passes incoming messages from the network to the consensus engine diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 7c7cabc..8be5e99 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -10,13 +10,14 @@ import ( "testing" "time" + "github.com/prometheus/client_golang/prometheus" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/networking/router" "github.com/ava-labs/gecko/snow/networking/timeout" "github.com/ava-labs/gecko/utils/logging" - "github.com/prometheus/client_golang/prometheus" ) func TestSenderContext(t *testing.T) { From 7879dd17688872a6260a60b6ace13e4e7dc702d6 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Thu, 11 Jun 2020 18:16:21 -0400 Subject: [PATCH 028/183] upgrade codec to be more efficient. Passes all codec tests. 
Failing some other tests due to new format --- vms/components/codec/codec.go | 541 ++++++++++++++++++++++------- vms/components/codec/codec_test.go | 72 ++-- 2 files changed, 460 insertions(+), 153 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 5896464..ad09964 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -10,8 +10,6 @@ import ( "math" "reflect" "unicode" - - "github.com/ava-labs/gecko/utils/wrappers" ) const ( @@ -89,275 +87,552 @@ func (c codec) RegisterType(val interface{}) error { // 7) nil slices will be unmarshaled as an empty slice of the appropriate type // 8) Serialized fields must be exported -// Marshal returns the byte representation of [value] // To marshal an interface, [value] must be a pointer to the interface func (c codec) Marshal(value interface{}) ([]byte, error) { if value == nil { return nil, errNil } - return c.marshal(reflect.ValueOf(value)) + size, f, err := c.marshal(reflect.ValueOf(value)) + if err != nil { + return nil, err + } + + bytes := make([]byte, size, size) + if err := f(bytes); err != nil { + return nil, err + } + return bytes, nil } -// Marshal [value] to bytes -func (c codec) marshal(value reflect.Value) ([]byte, error) { - p := wrappers.Packer{MaxSize: c.maxSize, Bytes: []byte{}} - t := value.Type() - +// marshal returns: +// 1) The size, in bytes, of the byte representation of [value] +// 2) A slice of functions, where each function writes bytes to its argument +// and returns the number of bytes it wrote. +// When these functions are called in order, they write [value] to a byte slice. +// 3) An error +func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err error) { valueKind := value.Kind() // Case: Value can't be marshalled switch valueKind { - case reflect.Interface, reflect.Ptr, reflect.Slice, reflect.Invalid: - if value.IsNil() { // Can't marshal nil - return nil, errNil + case reflect.Interface, reflect.Ptr, reflect.Invalid: + if value.IsNil() { // Can't marshal nil or nil pointers + return 0, nil, errNil } } // Case: Value is of known size; return its byte repr. 
switch valueKind { case reflect.Uint8: - return []byte{byte(value.Uint())}, nil + size = 1 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 1 { + return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + copy(b, []byte{byte(value.Uint())}) + return nil + } + return case reflect.Int8: - return []byte{byte(value.Int())}, nil + size = 1 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 1 { + return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + copy(b, []byte{byte(value.Int())}) + return nil + } + return case reflect.Uint16: - bytes := make([]byte, 2, 2) - binary.BigEndian.PutUint16(bytes, uint16(value.Uint())) - return bytes, nil + size = 2 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 2 { + return fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + } + binary.BigEndian.PutUint16(b, uint16(value.Uint())) + return nil + } + return case reflect.Int16: - bytes := make([]byte, 2, 2) - binary.BigEndian.PutUint16(bytes, uint16(value.Int())) - return bytes, nil + size = 2 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 2 { + return fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + } + binary.BigEndian.PutUint16(b, uint16(value.Int())) + return nil + } + return case reflect.Uint32: - bytes := make([]byte, 4, 4) - binary.BigEndian.PutUint32(bytes, uint32(value.Uint())) - return bytes, nil + size = 4 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 4 { + return fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + } + binary.BigEndian.PutUint32(b, uint32(value.Uint())) + return nil + } + return case reflect.Int32: - bytes := make([]byte, 4, 4) - binary.BigEndian.PutUint32(bytes, uint32(value.Int())) - return bytes, nil + size = 4 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 4 { + return fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + } + binary.BigEndian.PutUint32(b, uint32(value.Int())) + return nil + } + return case reflect.Uint64: - bytes := make([]byte, 8, 8) - binary.BigEndian.PutUint64(bytes, uint64(value.Uint())) - return bytes, nil + size = 8 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 8 { + return fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) + } + binary.BigEndian.PutUint64(b, uint64(value.Uint())) + return nil + } + return case reflect.Int64: - bytes := make([]byte, 8, 8) - binary.BigEndian.PutUint64(bytes, uint64(value.Int())) - return bytes, nil - case reflect.Uintptr, reflect.Ptr: - return c.marshal(value.Elem()) + size = 8 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 8 { + return fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) + } + binary.BigEndian.PutUint64(b, uint64(value.Int())) + return nil + } + return case reflect.String: asStr := value.String() strSize := len(asStr) if strSize > maxStringLen { - return nil, errSliceTooLarge + return 0, nil, errSliceTooLarge } - bytes := make([]byte, 2+strSize, 2+strSize) - binary.BigEndian.PutUint16(bytes[0:2], uint16(strSize)) - if strSize == 0 { - return bytes, nil - } - copy(bytes[2:], []byte(asStr)) - return bytes, nil - case reflect.Bool: - if value.Bool() { - return []byte{1}, nil - } - return []byte{0}, nil - } - // Case: Value is of unknown size. Calculate its size and fill byte array. 
- switch valueKind { + size = strSize + 2 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < size { + return fmt.Errorf("expected len(bytes) to be at least %d but is %d", size, bytesLen) + } + binary.BigEndian.PutUint16(b, uint16(strSize)) + if strSize == 0 { + return nil + } + copy(b[2:], []byte(asStr)) + return nil + } + return + case reflect.Bool: + size = 1 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 1 { + return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + if value.Bool() { + copy(b, []byte{1}) + } else { + copy(b, []byte{0}) + } + return nil + } + return + case reflect.Uintptr, reflect.Ptr: + return c.marshal(value.Elem()) case reflect.Interface: typeID, ok := c.typeToTypeID[reflect.TypeOf(value.Interface())] // Get the type ID of the value being marshaled if !ok { - return nil, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(value.Interface()).String()) + return 0, nil, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(value.Interface()).String()) } - p.PackInt(typeID) - bytes, err := c.Marshal(value.Interface()) - if err != nil { - return nil, err + + subsize, subfunc, subErr := c.marshal(reflect.ValueOf(value.Interface())) // TODO: Is this right? + if subErr != nil { + return 0, nil, subErr } - p.PackFixedBytes(bytes) - if p.Errored() { - return nil, p.Err - } - return p.Bytes, err - case reflect.Array, reflect.Slice: - numElts := value.Len() // # elements in the slice/array (assumed to be <= 2^31 - 1) - // If this is a slice, pack the number of elements in the slice - if valueKind == reflect.Slice { - p.PackInt(uint32(numElts)) - } - for i := 0; i < numElts; i++ { // Pack each element in the slice/array - eltBytes, err := c.marshal(value.Index(i)) - if err != nil { - return nil, err + + size = 4 + subsize // 4 because we pack the type ID, a uint32 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 4+subsize { + return fmt.Errorf("expected len(bytes) to be at least %d but is %d", 4+subsize, bytesLen) } - p.PackFixedBytes(eltBytes) + binary.BigEndian.PutUint32(b, uint32(typeID)) + if len(b) == 4 { + return nil + } + return subfunc(b[4:]) } - return p.Bytes, p.Err + return + case reflect.Slice: + if value.IsNil() { + size = 1 + f = func(b []byte) error { + if bytesLen := len(b); bytesLen < 1 { + return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + b[0] = 1 // slice is nil; set isNil flag to 1 + return nil + } + return + } + + numElts := value.Len() // # elements in the slice/array (assumed to be <= 2^31 - 1) + if numElts > c.maxSliceLen { + return 0, nil, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, math.MaxUint32) + } + + size = 5 // 1 for the isNil flag. 0 --> this slice isn't nil. 1--> it is nil. 
+ // 4 for the size of the slice (uint32) + + // offsets[i] is the index in the byte array that subFuncs[i] will start writing at + offsets := make([]int, numElts+1, numElts+1) + if numElts != 0 { + offsets[1] = 5 // 1 for nil flag, 4 for slice size + } + subFuncs := make([]func([]byte) error, numElts+1, numElts+1) + subFuncs[0] = func(b []byte) error { // write the nil flag and number of elements + if bytesLen := len(b); bytesLen < 5 { + return fmt.Errorf("expected len(bytes) to be at least 5 but is %d", bytesLen) + } + b[0] = 0 // slice is non-nil; set isNil flag to 0 + binary.BigEndian.PutUint32(b[1:], uint32(numElts)) + return nil + } + for i := 1; i < numElts+1; i++ { // Process each element in the slice + subSize, subFunc, subErr := c.marshal(value.Index(i - 1)) + if subErr != nil { + return 0, nil, subErr + } + size += subSize + if i != numElts { // set offset for next function unless this is last iteration + offsets[i+1] = offsets[i] + subSize + } + subFuncs[i] = subFunc + } + + if subFuncsLen := len(subFuncs); subFuncsLen != len(offsets) { + return 0, nil, fmt.Errorf("expected len(subFuncs) = %d. len(offsets) = %d. Should be same", subFuncsLen, len(offsets)) + } + + f = func(b []byte) error { + bytesLen := len(b) + for i, f := range subFuncs { + offset := offsets[i] + if offset > bytesLen { + return fmt.Errorf("attempted out of bounds slice. offset: %d. bytesLen: %d", offset, bytesLen) + } + if err := f(b[offset:]); err != nil { + return err + } + } + return nil + } + return + case reflect.Array: + numElts := value.Len() // # elements in the slice/array (assumed to be <= 2^31 - 1) + if numElts > math.MaxUint32 { + return 0, nil, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, math.MaxUint32) + } + + size = 0 + // offsets[i] is the index in the byte array that subFuncs[i] will start writing at + offsets := make([]int, numElts, numElts) + offsets[1] = 4 // 4 for slice size + subFuncs := make([]func([]byte) error, numElts, numElts) + for i := 0; i < numElts; i++ { // Process each element in the array + subSize, subFunc, subErr := c.marshal(value.Index(i)) + if subErr != nil { + return 0, nil, subErr + } + size += subSize + if i != numElts-1 { // set offset for next function unless this is last iteration + offsets[i+1] = offsets[i] + subSize + } + subFuncs[i] = subFunc + } + + if subFuncsLen := len(subFuncs); subFuncsLen != len(offsets) { + return 0, nil, fmt.Errorf("expected len(subFuncs) = %d. len(offsets) = %d.
Should be same", subFuncsLen, len(offsets)) + } + + f = func(b []byte) error { + bytesLen := len(b) + for i, f := range subFuncs { + offset := offsets[i] + if offset > bytesLen { + return fmt.Errorf("attempted out of bounds slice") + } + if err := f(b[offset:]); err != nil { + return err + } + } + return nil + } + return case reflect.Struct: - for i := 0; i < t.NumField(); i++ { // Go through all fields of this struct + t := value.Type() + numFields := t.NumField() + size = 0 + // offsets[i] is the index in the byte array that subFuncs[i] will start writing at + offsets := make([]int, 0, numFields) + offsets = append(offsets, 0) + subFuncs := make([]func([]byte) error, 0, numFields) + for i := 0; i < numFields; i++ { // Go through all fields of this struct field := t.Field(i) if !shouldSerialize(field) { // Skip fields we don't need to serialize continue } if unicode.IsLower(rune(field.Name[0])) { // Can only marshal exported fields - return nil, fmt.Errorf("can't marshal exported field %s", field.Name) + return 0, nil, fmt.Errorf("can't marshal unexported field %s", field.Name) } - fieldVal := value.Field(i) // The field we're serializing - if fieldVal.Kind() == reflect.Slice && fieldVal.IsNil() { - p.PackInt(0) - continue - } - fieldBytes, err := c.marshal(fieldVal) // Serialize the field + fieldVal := value.Field(i) // The field we're serializing + subSize, subfunc, err := c.marshal(fieldVal) // Serialize the field if err != nil { - return nil, err + return 0, nil, err + } + size += subSize + subFuncs = append(subFuncs, subfunc) + if i != numFields-1 { // set offset for next function if not last iteration + offsets = append(offsets, offsets[len(offsets)-1]+subSize) } - p.PackFixedBytes(fieldBytes) } - return p.Bytes, p.Err + f = func(b []byte) error { + bytesLen := len(b) + for i, f := range subFuncs { + offset := offsets[i] + if offset > bytesLen { + return fmt.Errorf("attempted out of bounds slice") + } + if err := f(b[offset:]); err != nil { + return err + } + } + return nil + } + return default: - return nil, errUnknownType + return 0, nil, errUnknownType } } // Unmarshal unmarshals [bytes] into [dest], where // [dest] must be a pointer or interface func (c codec) Unmarshal(bytes []byte, dest interface{}) error { - p := &wrappers.Packer{Bytes: bytes} - - if len(bytes) > c.maxSize { + switch { + case len(bytes) > c.maxSize: return errSliceTooLarge - } - - if dest == nil { + case dest == nil: return errNil } destPtr := reflect.ValueOf(dest) - if destPtr.Kind() != reflect.Ptr { return errNeedPointer } destVal := destPtr.Elem() - - err := c.unmarshal(p, destVal) + bytesRead, err := c.unmarshal(bytes, destVal) if err != nil { return err } - if p.Offset != len(p.Bytes) { - return fmt.Errorf("has %d leftover bytes after unmarshalling", len(p.Bytes)-p.Offset) + if l := len(bytes); l != bytesRead { + return fmt.Errorf("%d leftover bytes after unmarshalling", l-bytesRead) } return nil } -// Unmarshal bytes from [p] into [field] +// Unmarshal bytes from [bytes] into [field] // [field] must be addressable -func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { +// Returns the number of bytes read from [bytes] +func (c codec) unmarshal(bytes []byte, field reflect.Value) (int, error) { + bytesLen := len(bytes) kind := field.Kind() switch kind { case reflect.Uint8: - field.SetUint(uint64(p.UnpackByte())) + if bytesLen < 1 { + return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + field.SetUint(uint64(bytes[0])) + return 1, nil case reflect.Int8: - 
field.SetInt(int64(p.UnpackByte())) + if bytesLen < 1 { + return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + field.SetInt(int64(bytes[0])) + return 1, nil case reflect.Uint16: - field.SetUint(uint64(p.UnpackShort())) + if bytesLen < 2 { + return 0, fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + } + field.SetUint(uint64(binary.BigEndian.Uint16(bytes))) + return 2, nil case reflect.Int16: - field.SetInt(int64(p.UnpackShort())) + if bytesLen < 2 { + return 0, fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + } + field.SetInt(int64(binary.BigEndian.Uint16(bytes))) + return 2, nil case reflect.Uint32: - field.SetUint(uint64(p.UnpackInt())) + if bytesLen < 4 { + return 0, fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + } + field.SetUint(uint64(binary.BigEndian.Uint32(bytes))) + return 4, nil case reflect.Int32: - field.SetInt(int64(p.UnpackInt())) + if bytesLen < 4 { + return 0, fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + } + field.SetInt(int64(binary.BigEndian.Uint32(bytes))) + return 4, nil case reflect.Uint64: - field.SetUint(p.UnpackLong()) + if bytesLen < 4 { + return 0, fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) + } + field.SetUint(uint64(binary.BigEndian.Uint64(bytes))) + return 8, nil case reflect.Int64: - field.SetInt(int64(p.UnpackLong())) + if bytesLen < 4 { + return 0, fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) + } + field.SetInt(int64(binary.BigEndian.Uint64(bytes))) + return 8, nil case reflect.Bool: - field.SetBool(p.UnpackBool()) + if bytesLen < 1 { + return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + if bytes[0] == 0 { + field.SetBool(false) + } else { + field.SetBool(true) + } + return 1, nil case reflect.Slice: - sliceLen := int(p.UnpackInt()) // number of elements in the slice - if sliceLen < 0 || sliceLen > c.maxSliceLen { - return errSliceTooLarge + if bytesLen < 1 { + return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + } + if bytes[0] == 1 { // isNil flag is 1 --> this slice is nil + return 1, nil } - // First set [field] to be a slice of the appropriate type/capacity (right now [field] is nil) - slice := reflect.MakeSlice(field.Type(), sliceLen, sliceLen) + numElts := int(binary.BigEndian.Uint32(bytes[1:])) // number of elements in the slice + if numElts > c.maxSliceLen { + return 0, fmt.Errorf("slice length, %d, exceeds maximum, %d", numElts, c.maxSliceLen) + } + + // set [field] to be a slice of the appropriate type/capacity (right now [field] is nil) + slice := reflect.MakeSlice(field.Type(), numElts, numElts) field.Set(slice) + // Unmarshal each element into the appropriate index of the slice - for i := 0; i < sliceLen; i++ { - if err := c.unmarshal(p, field.Index(i)); err != nil { - return err + bytesRead := 5 // 1 for isNil flag, 4 for numElts + for i := 0; i < numElts; i++ { + if bytesRead > bytesLen { + return 0, fmt.Errorf("attempted out of bounds slice") } + n, err := c.unmarshal(bytes[bytesRead:], field.Index(i)) + if err != nil { + return 0, err + } + bytesRead += n } + return bytesRead, nil case reflect.Array: + bytesRead := 0 for i := 0; i < field.Len(); i++ { - if err := c.unmarshal(p, field.Index(i)); err != nil { - return err + if bytesRead > bytesLen { + return 0, fmt.Errorf("attempted out of bounds slice") } + n, err := c.unmarshal(bytes[bytesRead:], field.Index(i)) + if err != nil { + return 0, 
err + } + bytesRead += n } + return bytesRead, nil case reflect.String: - field.SetString(p.UnpackStr()) + if bytesLen < 2 { + return 0, fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + } + strLen := int(binary.BigEndian.Uint16(bytes)) + if bytesLen < 2+strLen { + return 0, fmt.Errorf("expected len(bytes) to be at least %d but is %d", 2+strLen, bytesLen) + } + if strLen > 0 { + field.SetString(string(bytes[2 : 2+strLen])) + } else { + field.SetString("") + } + return strLen + 2, nil case reflect.Interface: + if bytesLen < 4 { + return 0, fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + } + // Get the type ID - typeID := p.UnpackInt() + typeID := binary.BigEndian.Uint32(bytes) // Get a struct that implements the interface typ, ok := c.typeIDToType[typeID] if !ok { - return errUnmarshalUnregisteredType + return 0, errUnmarshalUnregisteredType } // Ensure struct actually does implement the interface fieldType := field.Type() if !typ.Implements(fieldType) { - return fmt.Errorf("%s does not implement interface %s", typ, fieldType) + return 0, fmt.Errorf("%s does not implement interface %s", typ, fieldType) } concreteInstancePtr := reflect.New(typ) // instance of the proper type // Unmarshal into the struct - if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { - return err + + n, err := c.unmarshal(bytes[4:], concreteInstancePtr.Elem()) + if err != nil { + return 0, err } // And assign the filled struct to the field field.Set(concreteInstancePtr.Elem()) + return n + 4, nil case reflect.Struct: // Type of this struct structType := reflect.TypeOf(field.Interface()) // Go through all the fields and umarshal into each + bytesRead := 0 for i := 0; i < structType.NumField(); i++ { structField := structType.Field(i) if !shouldSerialize(structField) { // Skip fields we don't need to unmarshal continue } if unicode.IsLower(rune(structField.Name[0])) { // Only unmarshal into exported field - return errUnmarshalUnexportedField + return 0, errUnmarshalUnexportedField } - field := field.Field(i) // Get the field - if err := c.unmarshal(p, field); err != nil { // Unmarshal into the field - return err + field := field.Field(i) // Get the field + if bytesRead > bytesLen { + return 0, fmt.Errorf("attempted out of bounds slice") } - if p.Errored() { // If there was an error just return immediately - return p.Err + n, err := c.unmarshal(bytes[bytesRead:], field) // Unmarshal into the field + if err != nil { + return 0, err } + bytesRead += n } + return bytesRead, nil case reflect.Ptr: // Get the type this pointer points to underlyingType := field.Type().Elem() // Create a new pointer to a new value of the underlying type underlyingValue := reflect.New(underlyingType) // Fill the value - if err := c.unmarshal(p, underlyingValue.Elem()); err != nil { - return err + n, err := c.unmarshal(bytes, underlyingValue.Elem()) + if err != nil { + return 0, err } // Assign to the top-level struct's member field.Set(underlyingValue) + return n, nil case reflect.Invalid: - return errNil + return 0, errNil default: - return errUnknownType + return 0, errUnknownType } - return p.Err } // Returns true iff [field] should be serialized diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index a58f685..7100a79 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -97,6 +97,7 @@ func TestStruct(t *testing.T) { if err != nil { t.Fatal(err) } + t.Logf("myStructBytes: %v", myStructBytes) myStructUnmarshaled 
:= &myStruct{} err = codec.Unmarshal(myStructBytes, myStructUnmarshaled) @@ -370,7 +371,7 @@ func TestString(t *testing.T) { } } -// Ensure a nil slice is unmarshaled as an empty slice +// Ensure a nil slice is marshaled/unmarshaled correctly func TestNilSlice(t *testing.T) { type structWithSlice struct { Slice []byte `serialize:"true"` @@ -388,13 +389,13 @@ func TestNilSlice(t *testing.T) { t.Fatal(err) } - if structUnmarshaled.Slice == nil || len(structUnmarshaled.Slice) != 0 { - t.Fatal("expected slice to be empty slice") + if structUnmarshaled.Slice != nil { + t.Fatal("expected slice to be nil") } } // Ensure that trying to serialize a struct with an unexported member -// that has `serialize:"true"` returns errUnexportedField +// that has `serialize:"true"` returns error func TestSerializeUnexportedField(t *testing.T) { type s struct { ExportedField string `serialize:"true"` @@ -426,12 +427,12 @@ func TestSerializeOfNoSerializeField(t *testing.T) { codec := NewDefault() marshalled, err := codec.Marshal(myS) if err != nil { - t.Fatalf("Unexpected error %q", err) + t.Fatal(err) } unmarshalled := s{} err = codec.Unmarshal(marshalled, &unmarshalled) if err != nil { - t.Fatalf("Unexpected error %q", err) + t.Fatal(err) } expectedUnmarshalled := s{SerializedField: "Serialize me"} if !reflect.DeepEqual(unmarshalled, expectedUnmarshalled) { @@ -443,11 +444,12 @@ type simpleSliceStruct struct { Arr []uint32 `serialize:"true"` } -func TestEmptySliceSerialization(t *testing.T) { +// Test marshalling of nil slice +func TestNilSliceSerialization(t *testing.T) { codec := NewDefault() val := &simpleSliceStruct{} - expected := []byte{0, 0, 0, 0} + expected := []byte{1} // 1 for isNil result, err := codec.Marshal(val) if err != nil { t.Fatal(err) @@ -456,6 +458,40 @@ func TestEmptySliceSerialization(t *testing.T) { if !bytes.Equal(expected, result) { t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) } + + valUnmarshaled := &simpleSliceStruct{} + if err = codec.Unmarshal(result, &valUnmarshaled); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(valUnmarshaled, val) { + t.Logf("val: %v\n", val) + t.Logf("valUnmarshaled: %v\n", valUnmarshaled) + t.Fatal("should be same") + } +} + +// Test marshaling a slice that has 0 elements (but isn't nil) +func TestEmptySliceSerialization(t *testing.T) { + codec := NewDefault() + + val := &simpleSliceStruct{Arr: make([]uint32, 0, 1)} + expected := []byte{0, 0, 0, 0, 0} // 0 for isNil flag, 0 for size + result, err := codec.Marshal(val) + if err != nil { + t.Fatal(err) + } + + if !bytes.Equal(expected, result) { + t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + } + + valUnmarshaled := &simpleSliceStruct{} + if err = codec.Unmarshal(result, &valUnmarshaled); err != nil { + t.Fatal(err) + } else if !reflect.DeepEqual(valUnmarshaled, val) { + t.Logf("val: %v\n", val) + t.Logf("valUnmarshaled: %v\n", valUnmarshaled) + t.Fatal("should be same") + } } type emptyStruct struct{} @@ -464,13 +500,14 @@ type nestedSliceStruct struct { Arr []emptyStruct `serialize:"true"` } +// Test marshaling slice that is not nil and not empty func TestSliceWithEmptySerialization(t *testing.T) { codec := NewDefault() val := &nestedSliceStruct{ Arr: make([]emptyStruct, 1000), } - expected := []byte{0x00, 0x00, 0x03, 0xE8} + expected := []byte{0x00, 0x00, 0x00, 0x03, 0xE8} // 0 for isNil flag, then 1000 for numElts result, err := codec.Marshal(val) if err != nil { t.Fatal(err) @@ -485,7 +522,7 @@ func TestSliceWithEmptySerialization(t *testing.T) { t.Fatal(err) 
} if len(unmarshaled.Arr) != 1000 { - t.Fatalf("Should have created an array of length %d", 1000) + t.Fatalf("Should have created a slice of length %d", 1000) } } @@ -493,20 +530,15 @@ func TestSliceWithEmptySerializationOutOfMemory(t *testing.T) { codec := NewDefault() val := &nestedSliceStruct{ - Arr: make([]emptyStruct, 1000000), + Arr: make([]emptyStruct, defaultMaxSliceLength+1), } - expected := []byte{0x00, 0x0f, 0x42, 0x40} // 1,000,000 in hex - result, err := codec.Marshal(val) - if err != nil { - t.Fatal(err) - } - - if !bytes.Equal(expected, result) { - t.Fatalf("\nExpected: 0x%x\nResult: 0x%x", expected, result) + bytes, err := codec.Marshal(val) + if err == nil { + t.Fatal("should have failed due to slice length too large") } unmarshaled := nestedSliceStruct{} - if err := codec.Unmarshal(expected, &unmarshaled); err == nil { + if err := codec.Unmarshal(bytes, &unmarshaled); err == nil { t.Fatalf("Should have errored due to excess memory requested") } } From 617a1580974e84f32a285e2fe7c03d7756cab5f9 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 12 Jun 2020 10:41:02 -0400 Subject: [PATCH 029/183] use wrappers.packer instead of byte array --- vms/components/codec/codec.go | 401 +++++++++++------------------ vms/components/codec/codec_test.go | 4 +- 2 files changed, 154 insertions(+), 251 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index ad09964..873aac2 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -4,18 +4,19 @@ package codec import ( - "encoding/binary" "errors" "fmt" "math" "reflect" "unicode" + + "github.com/ava-labs/gecko/utils/wrappers" ) const ( defaultMaxSize = 1 << 18 // default max size, in bytes, of something being marshalled by Marshal() defaultMaxSliceLength = 1 << 18 // default max length of a slice being marshalled by Marshal() - maxStringLen = math.MaxInt16 + maxStringLen = math.MaxUint16 ) // ErrBadCodec is returned when one tries to perform an operation @@ -84,8 +85,7 @@ func (c codec) RegisterType(val interface{}) error { // structs, slices and arrays can only be serialized if their constituent values can be. // 5) To marshal an interface, you must pass a pointer to the value // 6) To unmarshal an interface, you must call codec.RegisterType([instance of the type that fulfills the interface]). -// 7) nil slices will be unmarshaled as an empty slice of the appropriate type -// 8) Serialized fields must be exported +// 7) Serialized fields must be exported // To marshal an interface, [value] must be a pointer to the interface func (c codec) Marshal(value interface{}) ([]byte, error) { @@ -97,11 +97,11 @@ func (c codec) Marshal(value interface{}) ([]byte, error) { return nil, err } - bytes := make([]byte, size, size) - if err := f(bytes); err != nil { + p := &wrappers.Packer{MaxSize: size, Bytes: make([]byte, 0, size)} + if err := f(p); err != nil { return nil, err } - return bytes, nil + return p.Bytes, nil } // marshal returns: @@ -110,7 +110,7 @@ func (c codec) Marshal(value interface{}) ([]byte, error) { // and returns the number of bytes it wrote. // When these functions are called in order, they write [value] to a byte slice. 
// 3) An error -func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err error) { +func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) error, err error) { valueKind := value.Kind() // Case: Value can't be marshalled @@ -125,116 +125,73 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err switch valueKind { case reflect.Uint8: size = 1 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 1 { - return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) - } - copy(b, []byte{byte(value.Uint())}) - return nil + f = func(p *wrappers.Packer) error { + p.PackByte(byte(value.Uint())) + return p.Err } return case reflect.Int8: size = 1 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 1 { - return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) - } - copy(b, []byte{byte(value.Int())}) - return nil + f = func(p *wrappers.Packer) error { + p.PackByte(byte(value.Int())) + return p.Err } return case reflect.Uint16: size = 2 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 2 { - return fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) - } - binary.BigEndian.PutUint16(b, uint16(value.Uint())) - return nil + f = func(p *wrappers.Packer) error { + p.PackShort(uint16(value.Uint())) + return p.Err } return case reflect.Int16: size = 2 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 2 { - return fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) - } - binary.BigEndian.PutUint16(b, uint16(value.Int())) - return nil + f = func(p *wrappers.Packer) error { + p.PackShort(uint16(value.Int())) + return p.Err } return case reflect.Uint32: size = 4 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 4 { - return fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) - } - binary.BigEndian.PutUint32(b, uint32(value.Uint())) - return nil + f = func(p *wrappers.Packer) error { + p.PackInt(uint32(value.Uint())) + return p.Err } return case reflect.Int32: size = 4 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 4 { - return fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) - } - binary.BigEndian.PutUint32(b, uint32(value.Int())) - return nil + f = func(p *wrappers.Packer) error { + p.PackInt(uint32(value.Int())) + return p.Err } return case reflect.Uint64: size = 8 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 8 { - return fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) - } - binary.BigEndian.PutUint64(b, uint64(value.Uint())) - return nil + f = func(p *wrappers.Packer) error { + p.PackLong(uint64(value.Uint())) + return p.Err } return case reflect.Int64: size = 8 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 8 { - return fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) - } - binary.BigEndian.PutUint64(b, uint64(value.Int())) - return nil + f = func(p *wrappers.Packer) error { + p.PackLong(uint64(value.Int())) + return p.Err } return case reflect.String: asStr := value.String() - strSize := len(asStr) - if strSize > maxStringLen { - return 0, nil, errSliceTooLarge - } - - size = strSize + 2 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < size { - return fmt.Errorf("expected len(bytes) to be at least %d but is %d", size, bytesLen) - } - binary.BigEndian.PutUint16(b, uint16(strSize)) - if strSize == 0 { - return nil - } - copy(b[2:], 
[]byte(asStr)) - return nil + size = len(asStr) + 2 + f = func(p *wrappers.Packer) error { + p.PackStr(asStr) + return p.Err } return case reflect.Bool: size = 1 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 1 { - return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) - } - if value.Bool() { - copy(b, []byte{1}) - } else { - copy(b, []byte{0}) - } - return nil + f = func(p *wrappers.Packer) error { + p.PackBool(value.Bool()) + return p.Err } return case reflect.Uintptr, reflect.Ptr: @@ -251,51 +208,41 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err } size = 4 + subsize // 4 because we pack the type ID, a uint32 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 4+subsize { - return fmt.Errorf("expected len(bytes) to be at least %d but is %d", 4+subsize, bytesLen) + f = func(p *wrappers.Packer) error { + p.PackInt(typeID) + if p.Err != nil { + return p.Err } - binary.BigEndian.PutUint32(b, uint32(typeID)) - if len(b) == 4 { - return nil - } - return subfunc(b[4:]) + return subfunc(p) } return case reflect.Slice: if value.IsNil() { size = 1 - f = func(b []byte) error { - if bytesLen := len(b); bytesLen < 1 { - return fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) - } - b[0] = 1 // slice is nil; set isNil flag to 1 - return nil + f = func(p *wrappers.Packer) error { + p.PackBool(true) // slice is nil; set isNil flag to 1 + return p.Err } return } - numElts := value.Len() // # elements in the slice/array (assumed to be <= 2^31 - 1) + numElts := value.Len() // # elements in the slice/array (assumed to be <= math.MaxUint16) if numElts > c.maxSliceLen { return 0, nil, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, math.MaxUint32) } - size = 5 // 1 for the isNil flag. 0 --> this slice isn't nil. 1--> it is nil. - // 4 for the size of the slice (uint32) + size = 3 // 1 for the isNil flag. 2 for the size of the slice (uint16) // offsets[i] is the index in the byte array that subFuncs[i] will start writing at offsets := make([]int, numElts+1, numElts+1) if numElts != 0 { - offsets[1] = 5 // 1 for nil flag, 4 for slice size + offsets[1] = 3 } - subFuncs := make([]func([]byte) error, numElts+1, numElts+1) - subFuncs[0] = func(b []byte) error { // write the nil flag and number of elements - if bytesLen := len(b); bytesLen < 5 { - return fmt.Errorf("expected len(bytes) to be at least 5 but is %d", bytesLen) - } - b[0] = 0 // slice is non-nil; set isNil flag to 0 - binary.BigEndian.PutUint32(b[1:], uint32(numElts)) - return nil + subFuncs := make([]func(*wrappers.Packer) error, numElts+1, numElts+1) + subFuncs[0] = func(p *wrappers.Packer) error { // write the nil flag and number of elements + p.PackBool(false) + p.PackShort(uint16(numElts)) + return p.Err } for i := 1; i < numElts+1; i++ { // Process each element in the slice subSize, subFunc, subErr := c.marshal(value.Index(i - 1)) @@ -313,14 +260,9 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err return 0, nil, fmt.Errorf("expected len(subFuncs) = %d. len(offsets) = %d. Should be same", subFuncsLen, len(offsets)) } - f = func(b []byte) error { - bytesLen := len(b) - for i, f := range subFuncs { - offset := offsets[i] - if offset > bytesLen { - return fmt.Errorf("attempted out of bounds slice. offset: %d. 
bytesLen: %d", offset, bytesLen) - } - if err := f(b[offset:]); err != nil { + f = func(p *wrappers.Packer) error { + for _, f := range subFuncs { + if err := f(p); err != nil { return err } } @@ -328,7 +270,7 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err } return case reflect.Array: - numElts := value.Len() // # elements in the slice/array (assumed to be <= 2^31 - 1) + numElts := value.Len() // # elements in the slice/array (assumed to be <= math.MaxUint16) if numElts > math.MaxUint32 { return 0, nil, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, math.MaxUint32) } @@ -337,7 +279,7 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err // offsets[i] is the index in the byte array that subFuncs[i] will start writing at offsets := make([]int, numElts, numElts) offsets[1] = 4 // 4 for slice size - subFuncs := make([]func([]byte) error, numElts, numElts) + subFuncs := make([]func(*wrappers.Packer) error, numElts, numElts) for i := 0; i < numElts; i++ { // Process each element in the array subSize, subFunc, subErr := c.marshal(value.Index(i)) if subErr != nil { @@ -354,14 +296,9 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err return 0, nil, fmt.Errorf("expected len(subFuncs) = %d. len(offsets) = %d. Should be same", subFuncsLen, len(offsets)) } - f = func(b []byte) error { - bytesLen := len(b) - for i, f := range subFuncs { - offset := offsets[i] - if offset > bytesLen { - return fmt.Errorf("attempted out of bounds slice") - } - if err := f(b[offset:]); err != nil { + f = func(p *wrappers.Packer) error { + for _, f := range subFuncs { + if err := f(p); err != nil { return err } } @@ -375,7 +312,7 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err // offsets[i] is the index in the byte array that subFuncs[i] will start writing at offsets := make([]int, 0, numFields) offsets = append(offsets, 0) - subFuncs := make([]func([]byte) error, 0, numFields) + subFuncs := make([]func(*wrappers.Packer) error, 0, numFields) for i := 0; i < numFields; i++ { // Go through all fields of this struct field := t.Field(i) if !shouldSerialize(field) { // Skip fields we don't need to serialize @@ -396,14 +333,9 @@ func (c codec) marshal(value reflect.Value) (size int, f func([]byte) error, err } } - f = func(b []byte) error { - bytesLen := len(b) - for i, f := range subFuncs { - offset := offsets[i] - if offset > bytesLen { - return fmt.Errorf("attempted out of bounds slice") - } - if err := f(b[offset:]); err != nil { + f = func(p *wrappers.Packer) error { + for _, f := range subFuncs { + if err := f(p); err != nil { return err } } @@ -430,94 +362,95 @@ func (c codec) Unmarshal(bytes []byte, dest interface{}) error { return errNeedPointer } + p := &wrappers.Packer{MaxSize: c.maxSize, Bytes: bytes} destVal := destPtr.Elem() - bytesRead, err := c.unmarshal(bytes, destVal) - if err != nil { + if err := c.unmarshal(p, destVal); err != nil { return err } - if l := len(bytes); l != bytesRead { - return fmt.Errorf("%d leftover bytes after unmarshalling", l-bytesRead) - } return nil } // Unmarshal bytes from [bytes] into [field] // [field] must be addressable -// Returns the number of bytes read from [bytes] -func (c codec) unmarshal(bytes []byte, field reflect.Value) (int, error) { - bytesLen := len(bytes) +func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { kind := field.Kind() switch kind { case reflect.Uint8: - if bytesLen < 1 { - return 0, 
fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + b := p.UnpackByte() + if p.Err != nil { + return p.Err } - field.SetUint(uint64(bytes[0])) - return 1, nil + field.SetUint(uint64(b)) + return nil case reflect.Int8: - if bytesLen < 1 { - return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + b := p.UnpackByte() + if p.Err != nil { + return p.Err } - field.SetInt(int64(bytes[0])) - return 1, nil + field.SetInt(int64(b)) + return nil case reflect.Uint16: - if bytesLen < 2 { - return 0, fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + b := p.UnpackShort() + if p.Err != nil { + return p.Err } - field.SetUint(uint64(binary.BigEndian.Uint16(bytes))) - return 2, nil + field.SetUint(uint64(b)) + return nil case reflect.Int16: - if bytesLen < 2 { - return 0, fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + b := p.UnpackShort() + if p.Err != nil { + return p.Err } - field.SetInt(int64(binary.BigEndian.Uint16(bytes))) - return 2, nil + field.SetInt(int64(b)) + return nil case reflect.Uint32: - if bytesLen < 4 { - return 0, fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + b := p.UnpackInt() + if p.Err != nil { + return p.Err } - field.SetUint(uint64(binary.BigEndian.Uint32(bytes))) - return 4, nil + field.SetUint(uint64(b)) + return nil case reflect.Int32: - if bytesLen < 4 { - return 0, fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + b := p.UnpackInt() + if p.Err != nil { + return p.Err } - field.SetInt(int64(binary.BigEndian.Uint32(bytes))) - return 4, nil + field.SetInt(int64(b)) + return nil case reflect.Uint64: - if bytesLen < 4 { - return 0, fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) + b := p.UnpackLong() + if p.Err != nil { + return p.Err } - field.SetUint(uint64(binary.BigEndian.Uint64(bytes))) - return 8, nil + field.SetUint(uint64(b)) + return nil case reflect.Int64: - if bytesLen < 4 { - return 0, fmt.Errorf("expected len(bytes) to be at least 8 but is %d", bytesLen) + b := p.UnpackLong() + if p.Err != nil { + return p.Err } - field.SetInt(int64(binary.BigEndian.Uint64(bytes))) - return 8, nil + field.SetInt(int64(b)) + return nil case reflect.Bool: - if bytesLen < 1 { - return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + b := p.UnpackBool() + if p.Err != nil { + return p.Err } - if bytes[0] == 0 { - field.SetBool(false) - } else { - field.SetBool(true) - } - return 1, nil + field.SetBool(b) + return nil case reflect.Slice: - if bytesLen < 1 { - return 0, fmt.Errorf("expected len(bytes) to be at least 1 but is %d", bytesLen) + isNil := p.UnpackBool() + if p.Err != nil { + return p.Err } - if bytes[0] == 1 { // isNil flag is 1 --> this slice is nil - return 1, nil + if isNil { // slice is nil + return nil } - numElts := int(binary.BigEndian.Uint32(bytes[1:])) // number of elements in the slice - if numElts > c.maxSliceLen { - return 0, fmt.Errorf("slice length, %d, exceeds maximum, %d", numElts, c.maxSliceLen) + numElts := int(p.UnpackShort()) + if p.Err != nil { + return p.Err } // set [field] to be a slice of the appropriate type/capacity (right now [field] is nil) @@ -525,113 +458,83 @@ func (c codec) unmarshal(bytes []byte, field reflect.Value) (int, error) { field.Set(slice) // Unmarshal each element into the appropriate index of the slice - bytesRead := 5 // 1 for isNil flag, 4 for numElts for i := 0; i < numElts; i++ { - if bytesRead > bytesLen { - return 0, fmt.Errorf("attempted 
out of bounds slice") + if err := c.unmarshal(p, field.Index(i)); err != nil { + return err } - n, err := c.unmarshal(bytes[bytesRead:], field.Index(i)) - if err != nil { - return 0, err - } - bytesRead += n } - return bytesRead, nil + return nil case reflect.Array: - bytesRead := 0 for i := 0; i < field.Len(); i++ { - if bytesRead > bytesLen { - return 0, fmt.Errorf("attempted out of bounds slice") + if err := c.unmarshal(p, field.Index(i)); err != nil { + return err } - n, err := c.unmarshal(bytes[bytesRead:], field.Index(i)) - if err != nil { - return 0, err - } - bytesRead += n } - return bytesRead, nil + return nil case reflect.String: - if bytesLen < 2 { - return 0, fmt.Errorf("expected len(bytes) to be at least 2 but is %d", bytesLen) + str := p.UnpackStr() + if p.Err != nil { + return p.Err } - strLen := int(binary.BigEndian.Uint16(bytes)) - if bytesLen < 2+strLen { - return 0, fmt.Errorf("expected len(bytes) to be at least %d but is %d", 2+strLen, bytesLen) - } - if strLen > 0 { - field.SetString(string(bytes[2 : 2+strLen])) - } else { - field.SetString("") - } - return strLen + 2, nil + field.SetString(str) + return nil case reflect.Interface: - if bytesLen < 4 { - return 0, fmt.Errorf("expected len(bytes) to be at least 4 but is %d", bytesLen) + typeID := p.UnpackInt() // Get the type ID + if p.Err != nil { + return p.Err } - - // Get the type ID - typeID := binary.BigEndian.Uint32(bytes) // Get a struct that implements the interface typ, ok := c.typeIDToType[typeID] if !ok { - return 0, errUnmarshalUnregisteredType + return errUnmarshalUnregisteredType } // Ensure struct actually does implement the interface fieldType := field.Type() if !typ.Implements(fieldType) { - return 0, fmt.Errorf("%s does not implement interface %s", typ, fieldType) + return fmt.Errorf("%s does not implement interface %s", typ, fieldType) } concreteInstancePtr := reflect.New(typ) // instance of the proper type // Unmarshal into the struct - - n, err := c.unmarshal(bytes[4:], concreteInstancePtr.Elem()) - if err != nil { - return 0, err + if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { + return err } // And assign the filled struct to the field field.Set(concreteInstancePtr.Elem()) - return n + 4, nil + return nil case reflect.Struct: // Type of this struct structType := reflect.TypeOf(field.Interface()) // Go through all the fields and umarshal into each - bytesRead := 0 for i := 0; i < structType.NumField(); i++ { structField := structType.Field(i) if !shouldSerialize(structField) { // Skip fields we don't need to unmarshal continue } if unicode.IsLower(rune(structField.Name[0])) { // Only unmarshal into exported field - return 0, errUnmarshalUnexportedField + return errUnmarshalUnexportedField } - field := field.Field(i) // Get the field - if bytesRead > bytesLen { - return 0, fmt.Errorf("attempted out of bounds slice") + field := field.Field(i) // Get the field + if err := c.unmarshal(p, field); err != nil { // Unmarshal into the field + return err } - n, err := c.unmarshal(bytes[bytesRead:], field) // Unmarshal into the field - if err != nil { - return 0, err - } - bytesRead += n } - return bytesRead, nil + return nil case reflect.Ptr: // Get the type this pointer points to underlyingType := field.Type().Elem() // Create a new pointer to a new value of the underlying type underlyingValue := reflect.New(underlyingType) // Fill the value - n, err := c.unmarshal(bytes, underlyingValue.Elem()) - if err != nil { - return 0, err + if err := c.unmarshal(p, underlyingValue.Elem()); err != 
nil { + return err } // Assign to the top-level struct's member field.Set(underlyingValue) - return n, nil + return nil case reflect.Invalid: - return 0, errNil + return errNil default: - return 0, errUnknownType + return errUnknownType } } diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index 7100a79..8bf22a1 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -474,7 +474,7 @@ func TestEmptySliceSerialization(t *testing.T) { codec := NewDefault() val := &simpleSliceStruct{Arr: make([]uint32, 0, 1)} - expected := []byte{0, 0, 0, 0, 0} // 0 for isNil flag, 0 for size + expected := []byte{0, 0, 0} // 0 for isNil flag, 0 for size result, err := codec.Marshal(val) if err != nil { t.Fatal(err) @@ -507,7 +507,7 @@ func TestSliceWithEmptySerialization(t *testing.T) { val := &nestedSliceStruct{ Arr: make([]emptyStruct, 1000), } - expected := []byte{0x00, 0x00, 0x00, 0x03, 0xE8} // 0 for isNil flag, then 1000 for numElts + expected := []byte{0x00, 0x03, 0xE8} // 0 for isNil flag, then 1000 for numElts result, err := codec.Marshal(val) if err != nil { t.Fatal(err) From e15c1bad8cef1d6496497a20177df4a4b263a3cb Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Tue, 9 Jun 2020 13:51:11 -0400 Subject: [PATCH 030/183] Use Add instead of Push to correctly order timed txs in event heap --- vms/platformvm/service.go | 2 +- vms/platformvm/static_service.go | 7 ++- vms/platformvm/static_service_test.go | 69 +++++++++++++++++++++++++++ vms/platformvm/vm.go | 7 ++- vms/platformvm/vm_test.go | 5 +- 5 files changed, 78 insertions(+), 12 deletions(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index de2d41b..05723dd 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -1275,7 +1275,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is if err := tx.initialize(service.vm); err != nil { return fmt.Errorf("error initializing tx: %s", err) } - service.vm.unissuedEvents.Push(tx) + service.vm.unissuedEvents.Add(tx) response.TxID = tx.ID() case DecisionTx: if err := tx.initialize(service.vm); err != nil { diff --git a/vms/platformvm/static_service.go b/vms/platformvm/static_service.go index 8acc0a9..1cdeeca 100644 --- a/vms/platformvm/static_service.go +++ b/vms/platformvm/static_service.go @@ -4,7 +4,6 @@ package platformvm import ( - "container/heap" "errors" "net/http" @@ -174,8 +173,8 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl return errAccountHasNoValue } accounts = append(accounts, newAccount( - account.Address, // ID - 0, // nonce + account.Address, // ID + 0, // nonce uint64(account.Balance), // balance )) } @@ -210,7 +209,7 @@ func (*StaticService) BuildGenesis(_ *http.Request, args *BuildGenesisArgs, repl return err } - heap.Push(validators, tx) + validators.Add(tx) } // Specify the chains that exist at genesis. 
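The Add-versus-Push distinction in the hunks above is the crux of this patch: a heap type's Push/Pop methods exist only to satisfy container/heap's Interface and do not re-establish ordering on their own, while a wrapper Add method routes every insertion through heap.Push so the min-heap invariant holds. The sketch below illustrates that pattern under assumed names (timedTx, timedHeap, and their fields are hypothetical stand-ins, not the repository's TimedTx/EventHeap types); it is a minimal example of the general Go idiom, not the actual platformvm implementation.

// Illustrative sketch only -- timedTx and timedHeap are hypothetical, not gecko types.
package main

import (
	"container/heap"
	"fmt"
	"time"
)

type timedTx struct{ start time.Time }

// timedHeap satisfies heap.Interface; these five methods are plumbing and
// are meant to be called only by the container/heap package.
type timedHeap struct{ txs []timedTx }

func (h *timedHeap) Len() int           { return len(h.txs) }
func (h *timedHeap) Less(i, j int) bool { return h.txs[i].start.Before(h.txs[j].start) }
func (h *timedHeap) Swap(i, j int)      { h.txs[i], h.txs[j] = h.txs[j], h.txs[i] }
func (h *timedHeap) Push(x interface{}) { h.txs = append(h.txs, x.(timedTx)) } // append only; no sifting
func (h *timedHeap) Pop() interface{} {
	last := h.txs[len(h.txs)-1]
	h.txs = h.txs[:len(h.txs)-1]
	return last
}

// Add and Remove are the entry points callers should use; they go through
// heap.Push/heap.Pop, which sift elements to keep the earliest start time on top.
func (h *timedHeap) Add(tx timedTx)  { heap.Push(h, tx) }
func (h *timedHeap) Remove() timedTx { return heap.Pop(h).(timedTx) }

func main() {
	h := &timedHeap{}
	h.Add(timedTx{start: time.Unix(3, 0)})
	h.Add(timedTx{start: time.Unix(1, 0)})
	h.Add(timedTx{start: time.Unix(2, 0)})
	for h.Len() > 0 {
		fmt.Println(h.Remove().start.Unix()) // prints 1, 2, 3: earliest start time first
	}
}

Calling the interface method directly (h.Push(tx)) would merely append, which is the kind of ordering bug the TestBuildGenesisReturnsSortedValidators test added below checks for.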
diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go index 04433ff..b0cb494 100644 --- a/vms/platformvm/static_service_test.go +++ b/vms/platformvm/static_service_test.go @@ -111,3 +111,72 @@ func TestBuildGenesisInvalidEndtime(t *testing.T) { t.Fatalf("Should have errored due to an invalid end time") } } + +func TestBuildGenesisReturnsSortedValidators(t *testing.T) { + id := ids.NewShortID([20]byte{1}) + account := APIAccount{ + Address: id, + Balance: 123456789, + } + + weight := json.Uint64(987654321) + validator1 := APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + StartTime: 0, + EndTime: 20, + Weight: &weight, + ID: id, + }, + Destination: id, + } + + validator2 := APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + StartTime: 3, + EndTime: 15, + Weight: &weight, + ID: id, + }, + Destination: id, + } + + validator3 := APIDefaultSubnetValidator{ + APIValidator: APIValidator{ + StartTime: 1, + EndTime: 10, + Weight: &weight, + ID: id, + }, + Destination: id, + } + + args := BuildGenesisArgs{ + Accounts: []APIAccount{ + account, + }, + Validators: []APIDefaultSubnetValidator{ + validator1, + validator2, + validator3, + }, + Time: 5, + } + reply := BuildGenesisReply{} + + ss := StaticService{} + if err := ss.BuildGenesis(nil, &args, &reply); err != nil { + t.Fatalf("BuildGenesis should not have errored") + } + + genesis := &Genesis{} + Codec.Unmarshal(reply.Bytes.Bytes, genesis) + validators := genesis.Validators + currentValidator := validators.Remove() + for validators.Len() > 0 { + nextValidator := validators.Remove() + if currentValidator.EndTime().Unix() > nextValidator.EndTime().Unix() { + t.Fatalf("Validators returned by genesis should be a min heap sorted by end time") + } + currentValidator = nextValidator + } +} diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 9f1ce53..8d5a71c 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -4,7 +4,6 @@ package platformvm import ( - "container/heap" "errors" "fmt" "time" @@ -698,7 +697,7 @@ func (vm *VM) resetTimer() { vm.SnowmanVM.NotifyBlockReady() // Should issue a ProposeAddValidator return } - // If the tx doesn't meet the syncrony bound, drop it + // If the tx doesn't meet the synchrony bound, drop it vm.unissuedEvents.Remove() vm.Ctx.Log.Debug("dropping tx to add validator because its start time has passed") } @@ -780,8 +779,8 @@ func (vm *VM) calculateValidators(db database.Database, timestamp time.Time, sub if timestamp.Before(nextTx.StartTime()) { break } - heap.Push(current, nextTx) - heap.Pop(pending) + current.Add(nextTx) + pending.Remove() started.Add(nextTx.Vdr().ID()) } return current, pending, started, stopped, nil diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index b8bb47c..182c44a 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -5,7 +5,6 @@ package platformvm import ( "bytes" - "container/heap" "errors" "testing" "time" @@ -226,7 +225,7 @@ func GenesisCurrentValidators() *EventHeap { testNetworkID, // network ID key, // key paying tx fee and stake ) - heap.Push(validators, validator) + validators.Add(validator) } return validators } @@ -1011,7 +1010,7 @@ func TestCreateSubnet(t *testing.T) { t.Fatal(err) } - vm.unissuedEvents.Push(addValidatorTx) + vm.unissuedEvents.Add(addValidatorTx) blk, err = vm.BuildBlock() // should add validator to the new subnet if err != nil { t.Fatal(err) From 26f5503a432e5eb02b803c70c5de2f2beb25d362 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Wed, 10 Jun 2020 
22:10:19 -0400 Subject: [PATCH 031/183] Add test to ensure IssueTx maintains ordering of unissued events heap --- vms/platformvm/service.go | 2 +- vms/platformvm/service_test.go | 184 ++++++++++++++++++++++++++ vms/platformvm/static_service_test.go | 7 +- vms/platformvm/vm_test.go | 2 + 4 files changed, 193 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 05723dd..78316c5 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -1290,7 +1290,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is service.vm.unissuedAtomicTxs = append(service.vm.unissuedAtomicTxs, tx) response.TxID = tx.ID() default: - return errors.New("Could not parse given tx. Must be a TimedTx, DecisionTx, or AtomicTx") + return errors.New("Could not parse given tx. Must not be a TimedTx, DecisionTx, or AtomicTx") } service.vm.resetTimer() diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 9ac4a6c..b6e4a31 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -6,6 +6,9 @@ package platformvm import ( "encoding/json" "testing" + "time" + + "github.com/ava-labs/gecko/utils/formatting" ) func TestAddDefaultSubnetValidator(t *testing.T) { @@ -50,3 +53,184 @@ func TestImportKey(t *testing.T) { t.Fatal(err) } } + +func TestIssueTxKeepsTimedEventsSorted(t *testing.T) { + vm := defaultVM() + vm.Ctx.Lock.Lock() + defer func() { + vm.Shutdown() + vm.Ctx.Lock.Unlock() + }() + + service := Service{vm: vm} + + pendingValidatorStartTime1 := defaultGenesisTime.Add(3 * time.Second) + pendingValidatorEndTime1 := pendingValidatorStartTime1.Add(MinimumStakingDuration) + nodeIDKey1, _ := vm.factory.NewPrivateKey() + nodeID1 := nodeIDKey1.PublicKey().Address() + addPendingValidatorTx1, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime1.Unix()), + uint64(pendingValidatorEndTime1.Unix()), + nodeID1, + nodeID1, + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + txBytes1, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx1}) + if err != nil { + t.Fatal(err) + } + + args1 := &IssueTxArgs{} + args1.Tx = formatting.CB58{Bytes: txBytes1} + reply1 := IssueTxResponse{} + + err = service.IssueTx(nil, args1, &reply1) + if err != nil { + t.Fatal(err) + } + + pendingValidatorStartTime2 := defaultGenesisTime.Add(2 * time.Second) + pendingValidatorEndTime2 := pendingValidatorStartTime2.Add(MinimumStakingDuration) + nodeIDKey2, _ := vm.factory.NewPrivateKey() + nodeID2 := nodeIDKey2.PublicKey().Address() + addPendingValidatorTx2, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime2.Unix()), + uint64(pendingValidatorEndTime2.Unix()), + nodeID2, + nodeID2, + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + txBytes2, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx2}) + if err != nil { + t.Fatal(err) + } + + args2 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes2}} + reply2 := IssueTxResponse{} + + err = service.IssueTx(nil, &args2, &reply2) + if err != nil { + t.Fatal(err) + } + + pendingValidatorStartTime3 := defaultGenesisTime.Add(10 * time.Second) + pendingValidatorEndTime3 := pendingValidatorStartTime3.Add(MinimumStakingDuration) + nodeIDKey3, _ := vm.factory.NewPrivateKey() + nodeID3 := nodeIDKey3.PublicKey().Address() + addPendingValidatorTx3, err := 
vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime3.Unix()), + uint64(pendingValidatorEndTime3.Unix()), + nodeID3, + nodeID3, + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + txBytes3, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx3}) + if err != nil { + t.Fatal(err) + } + + args3 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes3}} + reply3 := IssueTxResponse{} + + err = service.IssueTx(nil, &args3, &reply3) + if err != nil { + t.Fatal(err) + } + + pendingValidatorStartTime4 := defaultGenesisTime.Add(1 * time.Second) + pendingValidatorEndTime4 := pendingValidatorStartTime4.Add(MinimumStakingDuration) + nodeIDKey4, _ := vm.factory.NewPrivateKey() + nodeID4 := nodeIDKey4.PublicKey().Address() + addPendingValidatorTx4, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime4.Unix()), + uint64(pendingValidatorEndTime4.Unix()), + nodeID4, + nodeID4, + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + txBytes4, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx4}) + if err != nil { + t.Fatal(err) + } + + args4 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes4}} + reply4 := IssueTxResponse{} + + err = service.IssueTx(nil, &args4, &reply4) + if err != nil { + t.Fatal(err) + } + + pendingValidatorStartTime5 := defaultGenesisTime.Add(50 * time.Second) + pendingValidatorEndTime5 := pendingValidatorStartTime5.Add(MinimumStakingDuration) + nodeIDKey5, _ := vm.factory.NewPrivateKey() + nodeID5 := nodeIDKey5.PublicKey().Address() + addPendingValidatorTx5, err := vm.newAddDefaultSubnetValidatorTx( + defaultNonce+1, + defaultStakeAmount, + uint64(pendingValidatorStartTime5.Unix()), + uint64(pendingValidatorEndTime5.Unix()), + nodeID5, + nodeID5, + NumberOfShares, + testNetworkID, + defaultKey, + ) + if err != nil { + t.Fatal(err) + } + + txBytes5, err := Codec.Marshal(genericTx{Tx: addPendingValidatorTx5}) + if err != nil { + t.Fatal(err) + } + + args5 := IssueTxArgs{Tx: formatting.CB58{Bytes: txBytes5}} + reply5 := IssueTxResponse{} + + err = service.IssueTx(nil, &args5, &reply5) + if err != nil { + t.Fatal(err) + } + + currentEvent := vm.unissuedEvents.Remove() + for vm.unissuedEvents.Len() > 0 { + nextEvent := vm.unissuedEvents.Remove() + if !currentEvent.StartTime().Before(nextEvent.StartTime()) { + t.Fatal("IssueTx does not keep event heap ordered") + } + currentEvent = nextEvent + } +} diff --git a/vms/platformvm/static_service_test.go b/vms/platformvm/static_service_test.go index b0cb494..3f64a9b 100644 --- a/vms/platformvm/static_service_test.go +++ b/vms/platformvm/static_service_test.go @@ -169,8 +169,13 @@ func TestBuildGenesisReturnsSortedValidators(t *testing.T) { } genesis := &Genesis{} - Codec.Unmarshal(reply.Bytes.Bytes, genesis) + if err := Codec.Unmarshal(reply.Bytes.Bytes, genesis); err != nil { + t.Fatal(err) + } validators := genesis.Validators + if validators.Len() == 0 { + t.Fatal("Validators should contain 3 validators") + } currentValidator := validators.Remove() for validators.Len() > 0 { nextValidator := validators.Remove() diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 182c44a..dcee89a 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -192,6 +192,8 @@ func defaultVM() *VM { panic("no subnets found") } // end delete + vm.registerDBTypes() + return vm } From 750f7b212074879e941d4049478dd22b6759f612 Mon Sep 17 
00:00:00 2001 From: Aaron Buchwald Date: Fri, 12 Jun 2020 13:48:45 -0400 Subject: [PATCH 032/183] Improve error message in platform API IssueTx call --- vms/platformvm/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 78316c5..91d18c4 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -1290,7 +1290,7 @@ func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *Is service.vm.unissuedAtomicTxs = append(service.vm.unissuedAtomicTxs, tx) response.TxID = tx.ID() default: - return errors.New("Could not parse given tx. Must not be a TimedTx, DecisionTx, or AtomicTx") + return errors.New("Could not parse given tx. Provided tx needs to be a TimedTx, DecisionTx, or AtomicTx") } service.vm.resetTimer() From 1572b1bd97ad11be54837b2e3fcfecf3bace9bfe Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 12 Jun 2020 14:09:45 -0400 Subject: [PATCH 033/183] Return early when finding address already exists --- vms/avm/service.go | 12 +++++------- 1 file changed, 5 insertions(+), 7 deletions(-) diff --git a/vms/avm/service.go b/vms/avm/service.go index 1dce66d..56e5599 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -670,18 +670,16 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I return fmt.Errorf("problem saving key while getting existing addresses: %w", err) } newAddress := sk.PublicKey().Address() - exists := false for _, address := range addresses { if newAddress.Equals(address) { - exists = true + reply.Address = service.vm.Format(newAddress.Bytes()) + return nil } } - if !exists { - addresses = append(addresses, newAddress) - if err := user.SetAddresses(db, addresses); err != nil { - return fmt.Errorf("problem saving addresses: %w", err) - } + addresses = append(addresses, newAddress) + if err := user.SetAddresses(db, addresses); err != nil { + return fmt.Errorf("problem saving addresses: %w", err) } reply.Address = service.vm.Format(newAddress.Bytes()) From 2fb88906cc15ca8734fb520202e774141f41bdfc Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 12 Jun 2020 15:10:39 -0400 Subject: [PATCH 034/183] Add testing for avm API ImportKey --- api/keystore/service.go | 70 +++++++++++++--------- api/keystore/service_test.go | 34 ++++------- vms/avm/service.go | 6 +- vms/avm/service_test.go | 112 +++++++++++++++++++++++++++++++++++ 4 files changed, 168 insertions(+), 54 deletions(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index 16aca06..5c0d4e4 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -8,12 +8,14 @@ import ( "fmt" "net/http" "sync" + "testing" "github.com/gorilla/rpc/v2" "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/encdb" + "github.com/ava-labs/gecko/database/memdb" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/engine/common" @@ -137,35 +139,9 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username) - if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen { - return errUserPassMaxLength - } - - if args.Username == "" { - return errEmptyUsername - } - if usr, err := ks.getUser(args.Username); err == nil || usr != nil { - return fmt.Errorf("user already exists: %s", args.Username) - } - - if 
zxcvbn.PasswordStrength(args.Password, nil).Score < requiredPassScore { - return errWeakPassword - } - - usr := &User{} - if err := usr.Initialize(args.Password); err != nil { + if err := ks.AddUser(args.Username, args.Password); err != nil { return err } - - usrBytes, err := ks.codec.Marshal(usr) - if err != nil { - return err - } - - if err := ks.userDB.Put([]byte(args.Username), usrBytes); err != nil { - return err - } - ks.users[args.Username] = usr reply.Success = true return nil } @@ -403,3 +379,43 @@ func (ks *Keystore) GetDatabase(bID ids.ID, username, password string) (database return encDB, nil } + +func (ks *Keystore) AddUser(username, password string) error { + if len(username) > maxUserPassLen || len(password) > maxUserPassLen { + return errUserPassMaxLength + } + + if username == "" { + return errEmptyUsername + } + if usr, err := ks.getUser(username); err == nil || usr != nil { + return fmt.Errorf("user already exists: %s", username) + } + + if zxcvbn.PasswordStrength(password, nil).Score < requiredPassScore { + return errWeakPassword + } + + usr := &User{} + if err := usr.Initialize(password); err != nil { + return err + } + + usrBytes, err := ks.codec.Marshal(usr) + if err != nil { + return err + } + + if err := ks.userDB.Put([]byte(username), usrBytes); err != nil { + return err + } + ks.users[username] = usr + + return nil +} + +func CreateTestKeystore(t *testing.T) *Keystore { + ks := &Keystore{} + ks.Initialize(logging.NoLog{}, memdb.New()) + return ks +} diff --git a/api/keystore/service_test.go b/api/keystore/service_test.go index 9ec5cfa..3e0b18f 100644 --- a/api/keystore/service_test.go +++ b/api/keystore/service_test.go @@ -10,9 +10,7 @@ import ( "reflect" "testing" - "github.com/ava-labs/gecko/database/memdb" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/logging" ) var ( @@ -22,8 +20,7 @@ var ( ) func TestServiceListNoUsers(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) reply := ListUsersReply{} if err := ks.ListUsers(nil, &ListUsersArgs{}, &reply); err != nil { @@ -35,8 +32,7 @@ func TestServiceListNoUsers(t *testing.T) { } func TestServiceCreateUser(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) { reply := CreateUserReply{} @@ -75,8 +71,7 @@ func genStr(n int) string { // TestServiceCreateUserArgsChecks generates excessively long usernames or // passwords to assure the santity checks on string length are not exceeded func TestServiceCreateUserArgsCheck(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) { reply := CreateUserReply{} @@ -117,8 +112,7 @@ func TestServiceCreateUserArgsCheck(t *testing.T) { // TestServiceCreateUserWeakPassword tests creating a new user with a weak // password to ensure the password strength check is working func TestServiceCreateUserWeakPassword(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) { reply := CreateUserReply{} @@ -138,8 +132,7 @@ func TestServiceCreateUserWeakPassword(t *testing.T) { } func TestServiceCreateDuplicate(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) { reply := CreateUserReply{} @@ -166,8 +159,7 @@ func TestServiceCreateDuplicate(t *testing.T) { } func TestServiceCreateUserNoName(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := 
CreateTestKeystore(t) reply := CreateUserReply{} if err := ks.CreateUser(nil, &CreateUserArgs{ @@ -178,8 +170,7 @@ func TestServiceCreateUserNoName(t *testing.T) { } func TestServiceUseBlockchainDB(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) { reply := CreateUserReply{} @@ -218,8 +209,7 @@ func TestServiceUseBlockchainDB(t *testing.T) { } func TestServiceExportImport(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) { reply := CreateUserReply{} @@ -252,8 +242,7 @@ func TestServiceExportImport(t *testing.T) { t.Fatal(err) } - newKS := Keystore{} - newKS.Initialize(logging.NoLog{}, memdb.New()) + newKS := CreateTestKeystore(t) { reply := ImportUserReply{} @@ -358,11 +347,10 @@ func TestServiceDeleteUser(t *testing.T) { for _, tt := range tests { t.Run(tt.desc, func(t *testing.T) { - ks := Keystore{} - ks.Initialize(logging.NoLog{}, memdb.New()) + ks := CreateTestKeystore(t) if tt.setup != nil { - if err := tt.setup(&ks); err != nil { + if err := tt.setup(ks); err != nil { t.Fatalf("failed to create user setup in keystore: %v", err) } } diff --git a/vms/avm/service.go b/vms/avm/service.go index 56e5599..37a37a3 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -665,10 +665,8 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I return fmt.Errorf("problem saving key %w", err) } - addresses, err := user.Addresses(db) - if err != nil { - return fmt.Errorf("problem saving key while getting existing addresses: %w", err) - } + addresses, _ := user.Addresses(db) + newAddress := sk.PublicKey().Address() for _, address := range addresses { if newAddress.Equals(address) { diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index fdd8053..6e1d387 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -9,8 +9,10 @@ import ( "github.com/stretchr/testify/assert" + "github.com/ava-labs/gecko/api/keystore" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" ) @@ -340,3 +342,113 @@ func TestCreateVariableCapAsset(t *testing.T) { t.Fatalf("Wrong assetID returned from CreateFixedCapAsset %s", reply.AssetID) } } + +func TestImportAvmKey(t *testing.T) { + _, vm, s := setup(t) + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + userKeystore := keystore.CreateTestKeystore(t) + + username := "bobby" + password := "StrnasfqewiurPasswdn56d" + if err := userKeystore.AddUser(username, password); err != nil { + t.Fatal(err) + } + + vm.ctx.Keystore = userKeystore.NewBlockchainKeyStore(vm.ctx.ChainID) + _, err := vm.ctx.Keystore.GetDatabase(username, password) + if err != nil { + t.Fatal(err) + } + + factory := crypto.FactorySECP256K1R{} + skIntf, err := factory.NewPrivateKey() + if err != nil { + t.Fatalf("problem generating private key: %w", err) + } + sk := skIntf.(*crypto.PrivateKeySECP256K1R) + + args := ImportKeyArgs{ + Username: username, + Password: password, + PrivateKey: formatting.CB58{Bytes: sk.Bytes()}, + } + reply := ImportKeyReply{} + if err = s.ImportKey(nil, &args, &reply); err != nil { + t.Fatal(err) + } +} + +func TestImportAvmKeyNoDuplicates(t *testing.T) { + _, vm, s := setup(t) + defer func() { + vm.Shutdown() + ctx.Lock.Unlock() + }() + + userKeystore := keystore.CreateTestKeystore(t) + + username := "bobby" + password := "StrnasfqewiurPasswdn56d" + if err := userKeystore.AddUser(username, 
password); err != nil { + t.Fatal(err) + } + + vm.ctx.Keystore = userKeystore.NewBlockchainKeyStore(vm.ctx.ChainID) + _, err := vm.ctx.Keystore.GetDatabase(username, password) + if err != nil { + t.Fatal(err) + } + + factory := crypto.FactorySECP256K1R{} + skIntf, err := factory.NewPrivateKey() + if err != nil { + t.Fatalf("problem generating private key: %w", err) + } + sk := skIntf.(*crypto.PrivateKeySECP256K1R) + + args := ImportKeyArgs{ + Username: username, + Password: password, + PrivateKey: formatting.CB58{Bytes: sk.Bytes()}, + } + reply := ImportKeyReply{} + if err = s.ImportKey(nil, &args, &reply); err != nil { + t.Fatal(err) + } + + expectedAddress := vm.Format(sk.PublicKey().Address().Bytes()) + + if reply.Address != expectedAddress { + t.Fatalf("Reply address: %s did not match expected address: %s", reply.Address, expectedAddress) + } + + reply2 := ImportKeyReply{} + if err = s.ImportKey(nil, &args, &reply2); err != nil { + t.Fatal(err) + } + + if reply2.Address != expectedAddress { + t.Fatalf("Reply address: %s did not match expected address: %s", reply2.Address, expectedAddress) + } + + addrsArgs := ListAddressesArgs{ + Username: username, + Password: password, + } + addrsReply := ListAddressesResponse{} + if err := s.ListAddresses(nil, &addrsArgs, &addrsReply); err != nil { + t.Fatal(err) + } + + if len(addrsReply.Addresses) != 1 { + t.Fatal("Importing the same key twice created duplicate addresses") + } + + if addrsReply.Addresses[0] != expectedAddress { + t.Fatal("List addresses returned an incorrect address") + } +} From a895b691a2f7cfbcd57abc2f8f7e7f04e7a39e0f Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 12 Jun 2020 16:52:58 -0400 Subject: [PATCH 035/183] change wire format to be exact same as it was before --- vms/components/codec/codec.go | 81 +++++++----------------------- vms/components/codec/codec_test.go | 20 +++----- 2 files changed, 25 insertions(+), 76 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 873aac2..182d8d2 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -15,7 +15,7 @@ import ( const ( defaultMaxSize = 1 << 18 // default max size, in bytes, of something being marshalled by Marshal() - defaultMaxSliceLength = 1 << 18 // default max length of a slice being marshalled by Marshal() + defaultMaxSliceLength = 1 << 18 // default max length of a slice being marshalled by Marshal(). Should be <= math.MaxUint32. maxStringLen = math.MaxUint16 ) @@ -86,6 +86,7 @@ func (c codec) RegisterType(val interface{}) error { // 5) To marshal an interface, you must pass a pointer to the value // 6) To unmarshal an interface, you must call codec.RegisterType([instance of the type that fulfills the interface]). // 7) Serialized fields must be exported +// 8) nil slices are marshaled as empty slices // To marshal an interface, [value] must be a pointer to the interface func (c codec) Marshal(value interface{}) ([]byte, error) { @@ -217,50 +218,27 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) } return case reflect.Slice: - if value.IsNil() { - size = 1 - f = func(p *wrappers.Packer) error { - p.PackBool(true) // slice is nil; set isNil flag to 1 - return p.Err - } - return - } - - numElts := value.Len() // # elements in the slice/array (assumed to be <= math.MaxUint16) + numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil. 
if numElts > c.maxSliceLen { - return 0, nil, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, math.MaxUint32) + return 0, nil, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } - size = 3 // 1 for the isNil flag. 2 for the size of the slice (uint16) - - // offsets[i] is the index in the byte array that subFuncs[i] will start writing at - offsets := make([]int, numElts+1, numElts+1) - if numElts != 0 { - offsets[1] = 3 - } - subFuncs := make([]func(*wrappers.Packer) error, numElts+1, numElts+1) - subFuncs[0] = func(p *wrappers.Packer) error { // write the nil flag and number of elements - p.PackBool(false) - p.PackShort(uint16(numElts)) - return p.Err - } - for i := 1; i < numElts+1; i++ { // Process each element in the slice - subSize, subFunc, subErr := c.marshal(value.Index(i - 1)) + subFuncs := make([]func(*wrappers.Packer) error, numElts, numElts) + size = wrappers.IntLen // for # elements + for i := 0; i < numElts; i++ { // Process each element in the slice + subSize, subFunc, subErr := c.marshal(value.Index(i)) if subErr != nil { return 0, nil, subErr } size += subSize - if i != numElts { // set offest for next function unless this is last ieration - offsets[i+1] = offsets[i] + subSize - } subFuncs[i] = subFunc } - if subFuncsLen := len(subFuncs); subFuncsLen != len(offsets) { - return 0, nil, fmt.Errorf("expected len(subFuncs) = %d. len(offsets) = %d. Should be same", subFuncsLen, len(offsets)) - } - f = func(p *wrappers.Packer) error { + p.PackInt(uint32(numElts)) // pack # elements + if p.Err != nil { + return p.Err + } for _, f := range subFuncs { if err := f(p); err != nil { return err @@ -270,15 +248,12 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) } return case reflect.Array: - numElts := value.Len() // # elements in the slice/array (assumed to be <= math.MaxUint16) - if numElts > math.MaxUint32 { - return 0, nil, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, math.MaxUint32) + numElts := value.Len() + if numElts > c.maxSliceLen { + return 0, nil, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } size = 0 - // offsets[i] is the index in the byte array that subFuncs[i] will start writing at - offsets := make([]int, numElts, numElts) - offsets[1] = 4 // 4 for slice size subFuncs := make([]func(*wrappers.Packer) error, numElts, numElts) for i := 0; i < numElts; i++ { // Process each element in the array subSize, subFunc, subErr := c.marshal(value.Index(i)) @@ -286,16 +261,9 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) return 0, nil, subErr } size += subSize - if i != numElts-1 { // set offest for next function unless this is last ieration - offsets[i+1] = offsets[i] + subSize - } subFuncs[i] = subFunc } - if subFuncsLen := len(subFuncs); subFuncsLen != len(offsets) { - return 0, nil, fmt.Errorf("expected len(subFuncs) = %d. len(offsets) = %d. 
Should be same", subFuncsLen, len(offsets)) - } - f = func(p *wrappers.Packer) error { for _, f := range subFuncs { if err := f(p); err != nil { @@ -308,10 +276,8 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) case reflect.Struct: t := value.Type() numFields := t.NumField() + size = 0 - // offsets[i] is the index in the byte array that subFuncs[i] will start writing at - offsets := make([]int, 0, numFields) - offsets = append(offsets, 0) subFuncs := make([]func(*wrappers.Packer) error, 0, numFields) for i := 0; i < numFields; i++ { // Go through all fields of this struct field := t.Field(i) @@ -328,9 +294,6 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) } size += subSize subFuncs = append(subFuncs, subfunc) - if i != numFields-1 { // set offset for next function if not last iteration - offsets = append(offsets, offsets[len(offsets)-1]+subSize) - } } f = func(p *wrappers.Packer) error { @@ -440,23 +403,13 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { field.SetBool(b) return nil case reflect.Slice: - isNil := p.UnpackBool() + numElts := int(p.UnpackInt()) if p.Err != nil { return p.Err } - if isNil { // slice is nil - return nil - } - - numElts := int(p.UnpackShort()) - if p.Err != nil { - return p.Err - } - // set [field] to be a slice of the appropriate type/capacity (right now [field] is nil) slice := reflect.MakeSlice(field.Type(), numElts, numElts) field.Set(slice) - // Unmarshal each element into the appropriate index of the slice for i := 0; i < numElts; i++ { if err := c.unmarshal(p, field.Index(i)); err != nil { diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index 8bf22a1..f42e827 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -371,7 +371,7 @@ func TestString(t *testing.T) { } } -// Ensure a nil slice is marshaled/unmarshaled correctly +// Ensure a nil slice is unmarshaled to slice with length 0 func TestNilSlice(t *testing.T) { type structWithSlice struct { Slice []byte `serialize:"true"` @@ -389,8 +389,8 @@ func TestNilSlice(t *testing.T) { t.Fatal(err) } - if structUnmarshaled.Slice != nil { - t.Fatal("expected slice to be nil") + if structUnmarshaled.Slice == nil || len(structUnmarshaled.Slice) != 0 { + t.Fatal("expected slice to be non-nil and length 0") } } @@ -449,7 +449,7 @@ func TestNilSliceSerialization(t *testing.T) { codec := NewDefault() val := &simpleSliceStruct{} - expected := []byte{1} // 1 for isNil + expected := []byte{0, 0, 0, 0} // nil slice marshaled as 0 length slice result, err := codec.Marshal(val) if err != nil { t.Fatal(err) @@ -462,10 +462,8 @@ func TestNilSliceSerialization(t *testing.T) { valUnmarshaled := &simpleSliceStruct{} if err = codec.Unmarshal(result, &valUnmarshaled); err != nil { t.Fatal(err) - } else if !reflect.DeepEqual(valUnmarshaled, val) { - t.Logf("val: %v\n", val) - t.Logf("valUnmarshaled: %v\n", valUnmarshaled) - t.Fatal("should be same") + } else if len(valUnmarshaled.Arr) != 0 { + t.Fatal("should be 0 length") } } @@ -474,7 +472,7 @@ func TestEmptySliceSerialization(t *testing.T) { codec := NewDefault() val := &simpleSliceStruct{Arr: make([]uint32, 0, 1)} - expected := []byte{0, 0, 0} // 0 for isNil flag, 0 for size + expected := []byte{0, 0, 0, 0} // 0 for size result, err := codec.Marshal(val) if err != nil { t.Fatal(err) @@ -488,8 +486,6 @@ func TestEmptySliceSerialization(t *testing.T) { if err = codec.Unmarshal(result, &valUnmarshaled); err != 
nil { t.Fatal(err) } else if !reflect.DeepEqual(valUnmarshaled, val) { - t.Logf("val: %v\n", val) - t.Logf("valUnmarshaled: %v\n", valUnmarshaled) t.Fatal("should be same") } } @@ -507,7 +503,7 @@ func TestSliceWithEmptySerialization(t *testing.T) { val := &nestedSliceStruct{ Arr: make([]emptyStruct, 1000), } - expected := []byte{0x00, 0x03, 0xE8} // 0 for isNil flag, then 1000 for numElts + expected := []byte{0x00, 0x00, 0x03, 0xE8} //1000 for numElts result, err := codec.Marshal(val) if err != nil { t.Fatal(err) From 42deac45e926dd5992973cfc89bc120d59a63ed8 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 12 Jun 2020 19:03:08 -0400 Subject: [PATCH 036/183] use pre-allocated array of functions --- vms/components/codec/codec.go | 127 +++++++++---------- vms/components/codec/codec_benchmark_test.go | 9 ++ vms/components/codec/codec_test.go | 1 - 3 files changed, 72 insertions(+), 65 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 182d8d2..560f2b7 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -93,15 +93,22 @@ func (c codec) Marshal(value interface{}) ([]byte, error) { if value == nil { return nil, errNil } - size, f, err := c.marshal(reflect.ValueOf(value)) + + funcs := make([]func(*wrappers.Packer) error, 512, 512) + size, _, err := c.marshal(reflect.ValueOf(value), 0, &funcs) if err != nil { return nil, err } p := &wrappers.Packer{MaxSize: size, Bytes: make([]byte, 0, size)} - if err := f(p); err != nil { - return nil, err + for _, f := range funcs { + if f == nil { + break + } else if err := f(p); err != nil { + return nil, err + } } + return p.Bytes, nil } @@ -111,14 +118,14 @@ func (c codec) Marshal(value interface{}) ([]byte, error) { // and returns the number of bytes it wrote. // When these functions are called in order, they write [value] to a byte slice. 
// 3) An error -func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) error, err error) { +func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.Packer) error) (size int, funcsWritten int, err error) { valueKind := value.Kind() // Case: Value can't be marshalled switch valueKind { case reflect.Interface, reflect.Ptr, reflect.Invalid: if value.IsNil() { // Can't marshal nil or nil pointers - return 0, nil, errNil + return 0, 0, errNil } } @@ -126,151 +133,150 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) switch valueKind { case reflect.Uint8: size = 1 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackByte(byte(value.Uint())) return p.Err } return case reflect.Int8: size = 1 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackByte(byte(value.Int())) return p.Err } return case reflect.Uint16: size = 2 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackShort(uint16(value.Uint())) return p.Err } return case reflect.Int16: size = 2 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackShort(uint16(value.Int())) return p.Err } return case reflect.Uint32: size = 4 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(uint32(value.Uint())) return p.Err } return case reflect.Int32: size = 4 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(uint32(value.Int())) return p.Err } return case reflect.Uint64: size = 8 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackLong(uint64(value.Uint())) return p.Err } return case reflect.Int64: size = 8 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackLong(uint64(value.Int())) return p.Err } return case reflect.String: + funcsWritten = 1 asStr := value.String() - size = len(asStr) + 2 - f = func(p *wrappers.Packer) error { + size = len(asStr) + wrappers.ShortLen + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackStr(asStr) return p.Err } return case reflect.Bool: size = 1 - f = func(p *wrappers.Packer) error { + funcsWritten = 1 + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackBool(value.Bool()) return p.Err } return case reflect.Uintptr, reflect.Ptr: - return c.marshal(value.Elem()) + return c.marshal(value.Elem(), index, funcs) case reflect.Interface: typeID, ok := c.typeToTypeID[reflect.TypeOf(value.Interface())] // Get the type ID of the value being marshaled if !ok { - return 0, nil, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(value.Interface()).String()) + return 0, 0, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(value.Interface()).String()) } - subsize, subfunc, subErr := c.marshal(reflect.ValueOf(value.Interface())) // TODO: Is this right? 
+ (*funcs)[index] = nil + subsize, subFuncsWritten, subErr := c.marshal(reflect.ValueOf(value.Interface()), index+1, funcs) if subErr != nil { - return 0, nil, subErr + return 0, 0, subErr } size = 4 + subsize // 4 because we pack the type ID, a uint32 - f = func(p *wrappers.Packer) error { + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(typeID) if p.Err != nil { return p.Err } - return subfunc(p) + return nil } + funcsWritten = 1 + subFuncsWritten return case reflect.Slice: numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil. if numElts > c.maxSliceLen { - return 0, nil, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) + return 0, 0, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } - subFuncs := make([]func(*wrappers.Packer) error, numElts, numElts) - size = wrappers.IntLen // for # elements + size = wrappers.IntLen // for # elements + subFuncsWritten := 0 for i := 0; i < numElts; i++ { // Process each element in the slice - subSize, subFunc, subErr := c.marshal(value.Index(i)) + subSize, n, subErr := c.marshal(value.Index(i), index+subFuncsWritten+1, funcs) if subErr != nil { - return 0, nil, subErr + return 0, 0, subErr } size += subSize - subFuncs[i] = subFunc + subFuncsWritten += n } - f = func(p *wrappers.Packer) error { + (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(uint32(numElts)) // pack # elements if p.Err != nil { return p.Err } - for _, f := range subFuncs { - if err := f(p); err != nil { - return err - } - } return nil } + funcsWritten = subFuncsWritten + 1 return case reflect.Array: numElts := value.Len() if numElts > c.maxSliceLen { - return 0, nil, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) + return 0, 0, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } size = 0 - subFuncs := make([]func(*wrappers.Packer) error, numElts, numElts) + funcsWritten = 0 for i := 0; i < numElts; i++ { // Process each element in the array - subSize, subFunc, subErr := c.marshal(value.Index(i)) + subSize, n, subErr := c.marshal(value.Index(i), index+funcsWritten, funcs) if subErr != nil { - return 0, nil, subErr + return 0, 0, subErr } size += subSize - subFuncs[i] = subFunc - } - - f = func(p *wrappers.Packer) error { - for _, f := range subFuncs { - if err := f(p); err != nil { - return err - } - } - return nil + funcsWritten += n } return case reflect.Struct: @@ -278,35 +284,28 @@ func (c codec) marshal(value reflect.Value) (size int, f func(*wrappers.Packer) numFields := t.NumField() size = 0 - subFuncs := make([]func(*wrappers.Packer) error, 0, numFields) + fieldsMarshalled := 0 + funcsWritten = 0 for i := 0; i < numFields; i++ { // Go through all fields of this struct field := t.Field(i) if !shouldSerialize(field) { // Skip fields we don't need to serialize continue } if unicode.IsLower(rune(field.Name[0])) { // Can only marshal exported fields - return 0, nil, fmt.Errorf("can't marshal unexported field %s", field.Name) + return 0, 0, fmt.Errorf("can't marshal unexported field %s", field.Name) } - fieldVal := value.Field(i) // The field we're serializing - subSize, subfunc, err := c.marshal(fieldVal) // Serialize the field + fieldVal := value.Field(i) // The field we're serializing + subSize, n, err := c.marshal(fieldVal, index+funcsWritten, funcs) // Serialize the field if err != nil { - return 0, nil, err + return 0, 0, err } + fieldsMarshalled++ size += subSize - subFuncs = 
append(subFuncs, subfunc) - } - - f = func(p *wrappers.Packer) error { - for _, f := range subFuncs { - if err := f(p); err != nil { - return err - } - } - return nil + funcsWritten += n } return default: - return 0, nil, errUnknownType + return 0, 0, errUnknownType } } diff --git a/vms/components/codec/codec_benchmark_test.go b/vms/components/codec/codec_benchmark_test.go index 8e6f9f7..ec4cc8c 100644 --- a/vms/components/codec/codec_benchmark_test.go +++ b/vms/components/codec/codec_benchmark_test.go @@ -53,3 +53,12 @@ func BenchmarkMarshalNonCodec(b *testing.B) { } } } + +func BenchmarkFoo(b *testing.B) { + arr := make([]int, 10000, 10000) + for n := 0; n < b.N; n++ { + for i := 0; i < 10000; i++ { + arr[i] = i + } + } +} diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index f42e827..74209eb 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -97,7 +97,6 @@ func TestStruct(t *testing.T) { if err != nil { t.Fatal(err) } - t.Logf("myStructBytes: %v", myStructBytes) myStructUnmarshaled := &myStruct{} err = codec.Unmarshal(myStructBytes, myStructUnmarshaled) From 954074abcc1e3a92d4b2b1c42f4e0265e4ecc9ca Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 12 Jun 2020 19:26:03 -0400 Subject: [PATCH 037/183] optimize by reducing amount of data stored on heap --- vms/components/codec/codec.go | 30 ++++++++++++++++++++---------- 1 file changed, 20 insertions(+), 10 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 560f2b7..be5acc1 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -134,64 +134,72 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P case reflect.Uint8: size = 1 funcsWritten = 1 + asByte := byte(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackByte(byte(value.Uint())) + p.PackByte(asByte) return p.Err } return case reflect.Int8: size = 1 funcsWritten = 1 + asByte := byte(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackByte(byte(value.Int())) + p.PackByte(asByte) return p.Err } return case reflect.Uint16: size = 2 funcsWritten = 1 + asShort := uint16(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackShort(uint16(value.Uint())) + p.PackShort(asShort) return p.Err } return case reflect.Int16: size = 2 funcsWritten = 1 + asShort := uint16(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackShort(uint16(value.Int())) + p.PackShort(asShort) return p.Err } return case reflect.Uint32: size = 4 funcsWritten = 1 + asInt := uint32(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(uint32(value.Uint())) + p.PackInt(asInt) return p.Err } return case reflect.Int32: size = 4 funcsWritten = 1 + asInt := uint32(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(uint32(value.Int())) + p.PackInt(asInt) return p.Err } return case reflect.Uint64: size = 8 funcsWritten = 1 + asInt := uint64(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackLong(uint64(value.Uint())) + p.PackLong(asInt) return p.Err } return case reflect.Int64: size = 8 funcsWritten = 1 + asInt := uint64(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackLong(uint64(value.Int())) + p.PackLong(asInt) return p.Err } return @@ -207,8 +215,9 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P case reflect.Bool: size = 1 funcsWritten = 1 + asBool 
:= value.Bool() (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackBool(value.Bool()) + p.PackBool(asBool) return p.Err } return @@ -253,8 +262,9 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P subFuncsWritten += n } + numEltsAsUint32 := uint32(numElts) (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(uint32(numElts)) // pack # elements + p.PackInt(numEltsAsUint32) // pack # elements if p.Err != nil { return p.Err } From 760c32c4ac86bfe555657d431ad9b0c52d7d4eb5 Mon Sep 17 00:00:00 2001 From: Alex Willmer Date: Thu, 11 Jun 2020 20:50:13 +0100 Subject: [PATCH 038/183] main: Added database version & default network to -version --- main/params.go | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/main/params.go b/main/params.go index eef8e60..e285bc7 100644 --- a/main/params.go +++ b/main/params.go @@ -37,6 +37,7 @@ const ( var ( Config = node.Config{} Err error + defaultNetworkName = genesis.TestnetName defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db")) defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key")) defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt")) @@ -169,7 +170,7 @@ func init() { version := fs.Bool("version", false, "If true, print version and quit") // NetworkID: - networkName := fs.String("network-id", genesis.TestnetName, "Network ID this node will connect to") + networkName := fs.String("network-id", defaultNetworkName, "Network ID this node will connect to") // Ava fees: fs.Uint64Var(&Config.AvaTxFee, "ava-tx-fee", 0, "Ava transaction fee, in $nAva") @@ -234,7 +235,15 @@ func init() { ferr := fs.Parse(os.Args[1:]) if *version { // If --version used, print version and exit - fmt.Println(node.Version.String()) + networkID, err := genesis.NetworkID(defaultNetworkName) + if errs.Add(err); err != nil { + return + } + networkGeneration := genesis.NetworkName(networkID) + fmt.Printf( + "%s [database=%s, network=%s/%s]\n", + node.Version, dbVersion, defaultNetworkName, networkGeneration, + ) os.Exit(0) } From d85ef87695ccc593dcca5c8081a88748c833d9d3 Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Sat, 13 Jun 2020 11:06:32 -0700 Subject: [PATCH 039/183] * Use Debug log level for all RPC calls * Use convention: [vm/api: function_name called...]. Ex: "Platform: SampleValidators called... 
--- api/ipcs/server.go | 2 ++ api/keystore/service.go | 10 ++++----- vms/avm/service.go | 34 ++++++++++++++--------------- vms/platformvm/service.go | 46 +++++++++++++++++++-------------------- 4 files changed, 47 insertions(+), 45 deletions(-) diff --git a/api/ipcs/server.go b/api/ipcs/server.go index 30bcc5d..f4be11b 100644 --- a/api/ipcs/server.go +++ b/api/ipcs/server.go @@ -61,6 +61,7 @@ type PublishBlockchainReply struct { // PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC func (ipc *IPCs) PublishBlockchain(r *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { + ipc.log.Debug("IPCs: PublishBlockchain called with BlockchainID: %s", args.BlockchainID) chainID, err := ipc.chainManager.Lookup(args.BlockchainID) if err != nil { ipc.log.Error("unknown blockchainID: %s", err) @@ -116,6 +117,7 @@ type UnpublishBlockchainReply struct { // UnpublishBlockchain closes publishing of a blockchainID func (ipc *IPCs) UnpublishBlockchain(r *http.Request, args *UnpublishBlockchainArgs, reply *UnpublishBlockchainReply) error { + ipc.log.Debug("IPCs: UnpublishBlockchain called with BlockchainID: %s", args.BlockchainID) chainID, err := ipc.chainManager.Lookup(args.BlockchainID) if err != nil { ipc.log.Error("unknown blockchainID %s: %s", args.BlockchainID, err) diff --git a/api/keystore/service.go b/api/keystore/service.go index 16aca06..7006073 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -135,7 +135,7 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username) + ks.log.Debug("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username) if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen { return errUserPassMaxLength @@ -183,7 +183,7 @@ func (ks *Keystore) ListUsers(_ *http.Request, args *ListUsersArgs, reply *ListU ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("ListUsers called") + ks.log.Debug("Keystore: ListUsers called") reply.Users = []string{} @@ -211,7 +211,7 @@ func (ks *Keystore) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Exp ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("ExportUser called for %s", args.Username) + ks.log.Debug("Keystore: ExportUser called for %s", args.Username) usr, err := ks.getUser(args.Username) if err != nil { @@ -264,7 +264,7 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("ImportUser called for %s", args.Username) + ks.log.Debug("Keystore: ImportUser called for %s", args.Username) if args.Username == "" { return errEmptyUsername @@ -324,7 +324,7 @@ func (ks *Keystore) DeleteUser(_ *http.Request, args *DeleteUserArgs, reply *Del ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("DeleteUser called with %s", args.Username) + ks.log.Debug("Keystore: DeleteUser called with %s", args.Username) if args.Username == "" { return errEmptyUsername diff --git a/vms/avm/service.go b/vms/avm/service.go index f71d607..4033be4 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -56,7 +56,7 @@ type IssueTxReply struct { // IssueTx attempts to issue a transaction into consensus func (service *Service) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { - service.vm.ctx.Log.Verbo("IssueTx called with %s", args.Tx) + service.vm.ctx.Log.Debug("AVM: IssueTx called with 
%s", args.Tx) txID, err := service.vm.IssueTx(args.Tx.Bytes, nil) if err != nil { @@ -79,7 +79,7 @@ type GetTxStatusReply struct { // GetTxStatus returns the status of the specified transaction func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { - service.vm.ctx.Log.Verbo("GetTxStatus called with %s", args.TxID) + service.vm.ctx.Log.Debug("AVM: GetTxStatus called with %s", args.TxID) if args.TxID.IsZero() { return errNilTxID @@ -106,7 +106,7 @@ type GetTxReply struct { // GetTx returns the specified transaction func (service *Service) GetTx(r *http.Request, args *GetTxArgs, reply *GetTxReply) error { - service.vm.ctx.Log.Verbo("GetTx called with %s", args.TxID) + service.vm.ctx.Log.Debug("AVM: GetTx called with %s", args.TxID) if args.TxID.IsZero() { return errNilTxID @@ -136,7 +136,7 @@ type GetUTXOsReply struct { // GetUTXOs creates an empty account with the name passed in func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *GetUTXOsReply) error { - service.vm.ctx.Log.Verbo("GetUTXOs called with %s", args.Addresses) + service.vm.ctx.Log.Debug("AVM: GetUTXOs called with %s", args.Addresses) addrSet := ids.Set{} for _, addr := range args.Addresses { @@ -178,7 +178,7 @@ type GetAssetDescriptionReply struct { // GetAssetDescription creates an empty account with the name passed in func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescriptionArgs, reply *GetAssetDescriptionReply) error { - service.vm.ctx.Log.Verbo("GetAssetDescription called with %s", args.AssetID) + service.vm.ctx.Log.Debug("AVM: GetAssetDescription called with %s", args.AssetID) assetID, err := service.vm.Lookup(args.AssetID) if err != nil { @@ -222,7 +222,7 @@ type GetBalanceReply struct { // GetBalance returns the amount of an asset that an address at least partially owns func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply *GetBalanceReply) error { - service.vm.ctx.Log.Verbo("GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) + service.vm.ctx.Log.Debug("AVM: GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) address, err := service.vm.Parse(args.Address) if err != nil { @@ -287,7 +287,7 @@ type GetAllBalancesReply struct { // Note that balances include assets that the address only _partially_ owns // (ie is one of several addresses specified in a multi-sig) func (service *Service) GetAllBalances(r *http.Request, args *GetAllBalancesArgs, reply *GetAllBalancesReply) error { - service.vm.ctx.Log.Verbo("GetAllBalances called with address: %s", args.Address) + service.vm.ctx.Log.Debug("AVM: GetAllBalances called with address: %s", args.Address) address, err := service.vm.Parse(args.Address) if err != nil { @@ -360,7 +360,7 @@ type CreateFixedCapAssetReply struct { // CreateFixedCapAsset returns ID of the newly created asset func (service *Service) CreateFixedCapAsset(r *http.Request, args *CreateFixedCapAssetArgs, reply *CreateFixedCapAssetReply) error { - service.vm.ctx.Log.Verbo("CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", + service.vm.ctx.Log.Debug("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", args.Name, args.Symbol, len(args.InitialHolders), @@ -445,7 +445,7 @@ type CreateVariableCapAssetReply struct { // CreateVariableCapAsset returns ID of the newly created asset func (service *Service) CreateVariableCapAsset(r *http.Request, args *CreateVariableCapAssetArgs, reply 
*CreateVariableCapAssetReply) error { - service.vm.ctx.Log.Verbo("CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", + service.vm.ctx.Log.Debug("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", args.Name, args.Symbol, len(args.MinterSets), @@ -523,7 +523,7 @@ type CreateAddressReply struct { // CreateAddress creates an address for the user [args.Username] func (service *Service) CreateAddress(r *http.Request, args *CreateAddressArgs, reply *CreateAddressReply) error { - service.vm.ctx.Log.Verbo("CreateAddress called for user '%s'", args.Username) + service.vm.ctx.Log.Debug("AVM: CreateAddress called for user '%s'", args.Username) db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -603,7 +603,7 @@ type ExportKeyReply struct { // ExportKey returns a private key from the provided user func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { - service.vm.ctx.Log.Verbo("ExportKey called for user '%s'", args.Username) + service.vm.ctx.Log.Debug("AVM: ExportKey called for user '%s'", args.Username) address, err := service.vm.Parse(args.Address) if err != nil { @@ -645,7 +645,7 @@ type ImportKeyReply struct { // ImportKey adds a private key to the provided user func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *ImportKeyReply) error { - service.vm.ctx.Log.Verbo("ImportKey called for user '%s'", args.Username) + service.vm.ctx.Log.Debug("AVM: ImportKey called for user '%s'", args.Username) db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -692,7 +692,7 @@ type SendReply struct { // Send returns the ID of the newly created transaction func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) error { - service.vm.ctx.Log.Verbo("Send called with username: %s", args.Username) + service.vm.ctx.Log.Debug("AVM: Send called with username: %s", args.Username) if args.Amount == 0 { return errInvalidAmount @@ -873,7 +873,7 @@ type CreateMintTxReply struct { // CreateMintTx returns the newly created unsigned transaction func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, reply *CreateMintTxReply) error { - service.vm.ctx.Log.Verbo("CreateMintTx called") + service.vm.ctx.Log.Debug("AVM: CreateMintTx called") if args.Amount == 0 { return errInvalidMintAmount @@ -990,7 +990,7 @@ type SignMintTxReply struct { // SignMintTx returns the newly signed transaction func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply *SignMintTxReply) error { - service.vm.ctx.Log.Verbo("SignMintTx called") + service.vm.ctx.Log.Debug("AVM: SignMintTx called") minter, err := service.vm.Parse(args.Minter) if err != nil { @@ -1116,7 +1116,7 @@ type ImportAVAReply struct { // The AVA must have already been exported from the P-Chain. // Returns the ID of the newly created atomic transaction func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, reply *ImportAVAReply) error { - service.vm.ctx.Log.Verbo("ImportAVA called with username: %s", args.Username) + service.vm.ctx.Log.Debug("AVM: ImportAVA called with username: %s", args.Username) toBytes, err := service.vm.Parse(args.To) if err != nil { @@ -1268,7 +1268,7 @@ type ExportAVAReply struct { // After this tx is accepted, the AVA must be imported to the P-chain with an importTx. 
// Returns the ID of the newly created atomic transaction func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, reply *ExportAVAReply) error { - service.vm.ctx.Log.Verbo("ExportAVA called with username: %s", args.Username) + service.vm.ctx.Log.Debug("AVM: ExportAVA called with username: %s", args.Username) if args.Amount == 0 { return errInvalidAmount diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index de2d41b..52010d1 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -234,7 +234,7 @@ type GetCurrentValidatorsReply struct { // GetCurrentValidators returns the list of current validators func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { - service.vm.Ctx.Log.Debug("GetCurrentValidators called") + service.vm.Ctx.Log.Debug("Platform: GetCurrentValidators called") if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -298,7 +298,7 @@ type GetPendingValidatorsReply struct { // GetPendingValidators returns the list of current validators func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { - service.vm.Ctx.Log.Debug("GetPendingValidators called") + service.vm.Ctx.Log.Debug("Platform: GetPendingValidators called") if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -360,7 +360,7 @@ type SampleValidatorsReply struct { // SampleValidators returns a sampling of the list of current validators func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { - service.vm.Ctx.Log.Debug("Sample called with {Size = %d}", args.Size) + service.vm.Ctx.Log.Debug("Platform: SampleValidators called with {Size = %d}", args.Size) if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -437,7 +437,7 @@ type ListAccountsReply struct { // ListAccounts lists all of the accounts controlled by [args.Username] func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, reply *ListAccountsReply) error { - service.vm.Ctx.Log.Debug("listAccounts called for user '%s'", args.Username) + service.vm.Ctx.Log.Debug("Platform: ListAccounts called for user '%s'", args.Username) // db holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -499,7 +499,7 @@ type CreateAccountReply struct { // The account's ID is [privKey].PublicKey().Address(), where [privKey] is a // private key controlled by the user. 
func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs, reply *CreateAccountReply) error { - service.vm.Ctx.Log.Debug("createAccount called for user '%s'", args.Username) + service.vm.Ctx.Log.Debug("Platform: CreateAccount called for user '%s'", args.Username) // userDB holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -569,7 +569,7 @@ type AddDefaultSubnetValidatorArgs struct { // AddDefaultSubnetValidator returns an unsigned transaction to add a validator to the default subnet // The returned unsigned transaction should be signed using Sign() func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("AddDefaultSubnetValidator called") + service.vm.Ctx.Log.Debug("Platform: AddDefaultSubnetValidator called") switch { case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID @@ -626,7 +626,7 @@ type AddDefaultSubnetDelegatorArgs struct { // to the default subnet // The returned unsigned transaction should be signed using Sign() func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("AddDefaultSubnetDelegator called") + service.vm.Ctx.Log.Debug("Platform: AddDefaultSubnetDelegator called") switch { case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID @@ -741,7 +741,7 @@ type CreateSubnetArgs struct { // CreateSubnet returns an unsigned transaction to create a new subnet. // The unsigned transaction must be signed with the key of [args.Payer] func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("platform.createSubnet called") + service.vm.Ctx.Log.Debug("Platform: CreateSubnet called") switch { case args.PayerNonce == 0: @@ -796,7 +796,7 @@ type ExportAVAArgs struct { // The unsigned transaction must be signed with the key of the account exporting the AVA // and paying the transaction fee func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, response *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("platform.ExportAVA called") + service.vm.Ctx.Log.Debug("Platform: ExportAVA called") switch { case args.PayerNonce == 0: @@ -858,7 +858,7 @@ type SignResponse struct { // Sign [args.bytes] func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { - service.vm.Ctx.Log.Debug("sign called") + service.vm.Ctx.Log.Debug("Platform: Sign called") if args.Signer == "" { return errNilSigner @@ -915,7 +915,7 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("Platform: signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? 
unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) @@ -938,7 +938,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { - service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("Platform: signAddDefaultSubnetDelegatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) @@ -961,7 +961,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele // Sign [xt] with [key] func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { - service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("Platform: signCreateSubnetTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) @@ -984,7 +984,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva // Sign [tx] with [key] func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { - service.vm.Ctx.Log.Debug("platform.signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("Platform: signExportTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedExportTx) @@ -1012,7 +1012,7 @@ func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256 // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("signAddNonDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("Platform: signAddNonDefaultSubnetValidatorTx called") // Compute the byte repr. of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) @@ -1075,7 +1075,7 @@ type ImportAVAArgs struct { // The AVA must have already been exported from the X-Chain. // The unsigned transaction must be signed with the key of the tx fee payer. func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response *SignResponse) error { - service.vm.Ctx.Log.Debug("platform.ImportAVA called") + service.vm.Ctx.Log.Debug("Platform: ImportAVA called") switch { case args.To == "": @@ -1203,7 +1203,7 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { - service.vm.Ctx.Log.Debug("signCreateChainTx called") + service.vm.Ctx.Log.Debug("Platform: signCreateChainTx called") // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) @@ -1263,7 +1263,7 @@ type IssueTxResponse struct { // IssueTx issues the transaction [args.Tx] to the network func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *IssueTxResponse) error { - service.vm.Ctx.Log.Debug("issueTx called") + service.vm.Ctx.Log.Debug("Platform: IssueTx called") genTx := genericTx{} if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { @@ -1327,7 +1327,7 @@ type CreateBlockchainArgs struct { // CreateBlockchain returns an unsigned transaction to create a new blockchain // Must be signed with the Subnet's control keys and with a key that pays the transaction fee before issuance func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("createBlockchain called") + service.vm.Ctx.Log.Debug("Platform: CreateBlockchain called") switch { case args.PayerNonce == 0: @@ -1410,7 +1410,7 @@ type GetBlockchainStatusReply struct { // GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - service.vm.Ctx.Log.Debug("getBlockchainStatus called") + service.vm.Ctx.Log.Debug("Platform: GetBlockchainStatus called") switch { case args.BlockchainID == "": @@ -1490,7 +1490,7 @@ type ValidatedByResponse struct { // ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { - service.vm.Ctx.Log.Debug("validatedBy called") + service.vm.Ctx.Log.Debug("Platform: ValidatedBy called") switch { case args.BlockchainID == "": @@ -1522,7 +1522,7 @@ type ValidatesResponse struct { // Validates returns the IDs of the blockchains validated by [args.SubnetID] func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { - service.vm.Ctx.Log.Debug("validates called") + service.vm.Ctx.Log.Debug("Platform: Validates called") switch { case args.SubnetID == "": @@ -1576,7 +1576,7 @@ type GetBlockchainsResponse struct { // GetBlockchains returns all of the blockchains that exist func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response *GetBlockchainsResponse) error { - service.vm.Ctx.Log.Debug("getBlockchains called") + service.vm.Ctx.Log.Debug("Platform: GetBlockchains called") chains, err := service.vm.getChains(service.vm.DB) if err != nil { From f6cabee51b2cf9e897798de8bd8edc585ff49e30 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sun, 14 Jun 2020 10:56:43 -0400 Subject: [PATCH 040/183] cache serializable fields of struct types; change codec methods to be on pointer type; change variable names; change benchmark toinclude both marshaling and unmarshaling --- vms/components/codec/codec.go | 136 ++++++++++--------- vms/components/codec/codec_benchmark_test.go | 11 +- 2 files changed, 82 insertions(+), 65 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index be5acc1..29cfaef 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -41,6 +41,8 @@ type codec struct { typeIDToType map[uint32]reflect.Type typeToTypeID map[reflect.Type]uint32 + + serializedFields map[reflect.Type][]int } // Codec marshals and unmarshals @@ -52,7 +54,7 @@ type Codec interface 
{ // New returns a new codec func New(maxSize, maxSliceLen int) Codec { - return codec{ + return &codec{ maxSize: maxSize, maxSliceLen: maxSliceLen, typeIDToType: map[uint32]reflect.Type{}, @@ -65,7 +67,7 @@ func NewDefault() Codec { return New(defaultMaxSize, defaultMaxSliceLength) } // RegisterType is used to register types that may be unmarshaled into an interface // [val] is a value of the type being registered -func (c codec) RegisterType(val interface{}) error { +func (c *codec) RegisterType(val interface{}) error { valType := reflect.TypeOf(val) if _, exists := c.typeToTypeID[valType]; exists { return fmt.Errorf("type %v has already been registered", valType) @@ -89,7 +91,7 @@ func (c codec) RegisterType(val interface{}) error { // 8) nil slices are marshaled as empty slices // To marshal an interface, [value] must be a pointer to the interface -func (c codec) Marshal(value interface{}) ([]byte, error) { +func (c *codec) Marshal(value interface{}) ([]byte, error) { if value == nil { return nil, errNil } @@ -118,7 +120,7 @@ func (c codec) Marshal(value interface{}) ([]byte, error) { // and returns the number of bytes it wrote. // When these functions are called in order, they write [value] to a byte slice. // 3) An error -func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.Packer) error) (size int, funcsWritten int, err error) { +func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.Packer) error) (size int, funcsWritten int, err error) { valueKind := value.Kind() // Case: Value can't be marshalled @@ -238,10 +240,7 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P size = 4 + subsize // 4 because we pack the type ID, a uint32 (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(typeID) - if p.Err != nil { - return p.Err - } - return nil + return p.Err } funcsWritten = 1 + subFuncsWritten return @@ -265,10 +264,7 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P numEltsAsUint32 := uint32(numElts) (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(numEltsAsUint32) // pack # elements - if p.Err != nil { - return p.Err - } - return nil + return p.Err } funcsWritten = subFuncsWritten + 1 return @@ -291,20 +287,17 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P return case reflect.Struct: t := value.Type() - numFields := t.NumField() size = 0 fieldsMarshalled := 0 funcsWritten = 0 - for i := 0; i < numFields; i++ { // Go through all fields of this struct - field := t.Field(i) - if !shouldSerialize(field) { // Skip fields we don't need to serialize - continue - } - if unicode.IsLower(rune(field.Name[0])) { // Can only marshal exported fields - return 0, 0, fmt.Errorf("can't marshal unexported field %s", field.Name) - } - fieldVal := value.Field(i) // The field we're serializing + serializedFields, subErr := c.getSerializedFieldIndices(t) + if subErr != nil { + return 0, 0, subErr + } + + for _, f := range serializedFields { // Go through all fields of this struct + fieldVal := value.Field(f) // The field we're serializing subSize, n, err := c.marshal(fieldVal, index+funcsWritten, funcs) // Serialize the field if err != nil { return 0, 0, err @@ -321,7 +314,7 @@ func (c codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.P // Unmarshal unmarshals [bytes] into [dest], where // [dest] must be a pointer or interface -func (c codec) Unmarshal(bytes []byte, dest interface{}) error { +func (c *codec) 
Unmarshal(bytes []byte, dest interface{}) error { switch { case len(bytes) > c.maxSize: return errSliceTooLarge @@ -343,92 +336,90 @@ func (c codec) Unmarshal(bytes []byte, dest interface{}) error { return nil } -// Unmarshal bytes from [bytes] into [field] -// [field] must be addressable -func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { - kind := field.Kind() - switch kind { +// Unmarshal from [bytes] into [value]. [value] must be addressable +func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { + switch value.Kind() { case reflect.Uint8: b := p.UnpackByte() if p.Err != nil { return p.Err } - field.SetUint(uint64(b)) + value.SetUint(uint64(b)) return nil case reflect.Int8: b := p.UnpackByte() if p.Err != nil { return p.Err } - field.SetInt(int64(b)) + value.SetInt(int64(b)) return nil case reflect.Uint16: b := p.UnpackShort() if p.Err != nil { return p.Err } - field.SetUint(uint64(b)) + value.SetUint(uint64(b)) return nil case reflect.Int16: b := p.UnpackShort() if p.Err != nil { return p.Err } - field.SetInt(int64(b)) + value.SetInt(int64(b)) return nil case reflect.Uint32: b := p.UnpackInt() if p.Err != nil { return p.Err } - field.SetUint(uint64(b)) + value.SetUint(uint64(b)) return nil case reflect.Int32: b := p.UnpackInt() if p.Err != nil { return p.Err } - field.SetInt(int64(b)) + value.SetInt(int64(b)) return nil case reflect.Uint64: b := p.UnpackLong() if p.Err != nil { return p.Err } - field.SetUint(uint64(b)) + value.SetUint(uint64(b)) return nil case reflect.Int64: b := p.UnpackLong() if p.Err != nil { return p.Err } - field.SetInt(int64(b)) + value.SetInt(int64(b)) return nil case reflect.Bool: b := p.UnpackBool() if p.Err != nil { return p.Err } - field.SetBool(b) + value.SetBool(b) return nil case reflect.Slice: numElts := int(p.UnpackInt()) if p.Err != nil { return p.Err } - // set [field] to be a slice of the appropriate type/capacity (right now [field] is nil) - slice := reflect.MakeSlice(field.Type(), numElts, numElts) - field.Set(slice) + // set [value] to be a slice of the appropriate type/capacity (right now [value] is nil) + slice := reflect.MakeSlice(value.Type(), numElts, numElts) + value.Set(slice) // Unmarshal each element into the appropriate index of the slice for i := 0; i < numElts; i++ { - if err := c.unmarshal(p, field.Index(i)); err != nil { + if err := c.unmarshal(p, value.Index(i)); err != nil { return err } } return nil case reflect.Array: - for i := 0; i < field.Len(); i++ { - if err := c.unmarshal(p, field.Index(i)); err != nil { + for i := 0; i < value.Len(); i++ { + if err := c.unmarshal(p, value.Index(i)); err != nil { return err } } @@ -438,7 +429,7 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { if p.Err != nil { return p.Err } - field.SetString(str) + value.SetString(str) return nil case reflect.Interface: typeID := p.UnpackInt() // Get the type ID @@ -451,31 +442,28 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { return errUnmarshalUnregisteredType } // Ensure struct actually does implement the interface - fieldType := field.Type() - if !typ.Implements(fieldType) { - return fmt.Errorf("%s does not implement interface %s", typ, fieldType) + valueType := value.Type() + if !typ.Implements(valueType) { + return fmt.Errorf("%s does not implement interface %s", typ, valueType) } concreteInstancePtr := reflect.New(typ) // instance of the proper type // Unmarshal into the struct if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { 
return err } - // And assign the filled struct to the field - field.Set(concreteInstancePtr.Elem()) + // And assign the filled struct to the value + value.Set(concreteInstancePtr.Elem()) return nil case reflect.Struct: // Type of this struct - structType := reflect.TypeOf(field.Interface()) + t := reflect.TypeOf(value.Interface()) + serializedFieldIndices, err := c.getSerializedFieldIndices(t) + if err != nil { + return err + } // Go through all the fields and umarshal into each - for i := 0; i < structType.NumField(); i++ { - structField := structType.Field(i) - if !shouldSerialize(structField) { // Skip fields we don't need to unmarshal - continue - } - if unicode.IsLower(rune(structField.Name[0])) { // Only unmarshal into exported field - return errUnmarshalUnexportedField - } - field := field.Field(i) // Get the field + for _, index := range serializedFieldIndices { + field := value.Field(index) // Get the field if err := c.unmarshal(p, field); err != nil { // Unmarshal into the field return err } @@ -483,7 +471,7 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { return nil case reflect.Ptr: // Get the type this pointer points to - underlyingType := field.Type().Elem() + underlyingType := value.Type().Elem() // Create a new pointer to a new value of the underlying type underlyingValue := reflect.New(underlyingType) // Fill the value @@ -491,7 +479,7 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { return err } // Assign to the top-level struct's member - field.Set(underlyingValue) + value.Set(underlyingValue) return nil case reflect.Invalid: return errNil @@ -500,7 +488,27 @@ func (c codec) unmarshal(p *wrappers.Packer, field reflect.Value) error { } } -// Returns true iff [field] should be serialized -func shouldSerialize(field reflect.StructField) bool { - return field.Tag.Get("serialize") == "true" +// Returns the indices of the serializable fields of [t], which is a struct type +// Returns an error if a field has tag "serialize: true" but the field is unexported +func (c *codec) getSerializedFieldIndices(t reflect.Type) ([]int, error) { + if c.serializedFields == nil { + c.serializedFields = make(map[reflect.Type][]int) + } + if serializedFields, ok := c.serializedFields[t]; ok { + return serializedFields, nil + } + numFields := t.NumField() + serializedFields := make([]int, 0, numFields) + for i := 0; i < numFields; i++ { // Go through all fields of this struct + field := t.Field(i) + if field.Tag.Get("serialize") != "true" { // Skip fields we don't need to serialize + continue + } + if unicode.IsLower(rune(field.Name[0])) { // Can only marshal exported fields + return []int{}, fmt.Errorf("can't marshal unexported field %s", field.Name) + } + serializedFields = append(serializedFields, i) + } + c.serializedFields[t] = serializedFields + return serializedFields, nil } diff --git a/vms/components/codec/codec_benchmark_test.go b/vms/components/codec/codec_benchmark_test.go index ec4cc8c..25af563 100644 --- a/vms/components/codec/codec_benchmark_test.go +++ b/vms/components/codec/codec_benchmark_test.go @@ -35,13 +35,22 @@ func BenchmarkMarshal(b *testing.B) { }, MyPointer: &temp, } + var unmarshaledMyStructInstance myStruct codec := NewDefault() codec.RegisterType(&MyInnerStruct{}) // Register the types that may be unmarshaled into interfaces codec.RegisterType(&MyInnerStruct2{}) + codec.Marshal(myStructInstance) // warm up serializedFields cache b.ResetTimer() for n := 0; n < b.N; n++ { - codec.Marshal(myStructInstance) + bytes, 
err := codec.Marshal(myStructInstance) + if err != nil { + b.Fatal(err) + } + if err := codec.Unmarshal(bytes, &unmarshaledMyStructInstance); err != nil { + b.Fatal(err) + } + } } From 7b5b3d1f1cf879c1e6a991b95a7deafde0b958d7 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sun, 14 Jun 2020 11:53:19 -0400 Subject: [PATCH 041/183] more optimizations/cleanup --- vms/components/codec/codec.go | 42 ++++++++++++++++------------------- 1 file changed, 19 insertions(+), 23 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 29cfaef..5000a6a 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -42,7 +42,7 @@ type codec struct { typeIDToType map[uint32]reflect.Type typeToTypeID map[reflect.Type]uint32 - serializedFields map[reflect.Type][]int + serializedFieldIndices map[reflect.Type][]int } // Codec marshals and unmarshals @@ -55,10 +55,11 @@ type Codec interface { // New returns a new codec func New(maxSize, maxSliceLen int) Codec { return &codec{ - maxSize: maxSize, - maxSliceLen: maxSliceLen, - typeIDToType: map[uint32]reflect.Type{}, - typeToTypeID: map[reflect.Type]uint32{}, + maxSize: maxSize, + maxSliceLen: maxSliceLen, + typeIDToType: map[uint32]reflect.Type{}, + typeToTypeID: map[reflect.Type]uint32{}, + serializedFieldIndices: map[reflect.Type][]int{}, } } @@ -226,13 +227,14 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. case reflect.Uintptr, reflect.Ptr: return c.marshal(value.Elem(), index, funcs) case reflect.Interface: - typeID, ok := c.typeToTypeID[reflect.TypeOf(value.Interface())] // Get the type ID of the value being marshaled + underlyingValue := value.Interface() + typeID, ok := c.typeToTypeID[reflect.TypeOf(underlyingValue)] // Get the type ID of the value being marshaled if !ok { - return 0, 0, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(value.Interface()).String()) + return 0, 0, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(underlyingValue).String()) } (*funcs)[index] = nil - subsize, subFuncsWritten, subErr := c.marshal(reflect.ValueOf(value.Interface()), index+1, funcs) + subsize, subFuncsWritten, subErr := c.marshal(value.Elem(), index+1, funcs) if subErr != nil { return 0, 0, subErr } @@ -408,8 +410,7 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { return p.Err } // set [value] to be a slice of the appropriate type/capacity (right now [value] is nil) - slice := reflect.MakeSlice(value.Type(), numElts, numElts) - value.Set(slice) + value.Set(reflect.MakeSlice(value.Type(), numElts, numElts)) // Unmarshal each element into the appropriate index of the slice for i := 0; i < numElts; i++ { if err := c.unmarshal(p, value.Index(i)); err != nil { @@ -425,12 +426,8 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { } return nil case reflect.String: - str := p.UnpackStr() - if p.Err != nil { - return p.Err - } - value.SetString(str) - return nil + value.SetString(p.UnpackStr()) + return p.Err case reflect.Interface: typeID := p.UnpackInt() // Get the type ID if p.Err != nil { @@ -456,15 +453,14 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { return nil case reflect.Struct: // Type of this struct - t := reflect.TypeOf(value.Interface()) + t := value.Type() serializedFieldIndices, err := c.getSerializedFieldIndices(t) if err != nil { return err } // Go through all the fields and umarshal into each for _, index := range serializedFieldIndices { - 
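The serializedFieldIndices iterated here come from the cache these patches introduce: one reflection pass per struct type to find the fields tagged serialize:"true", then a map lookup on every later marshal or unmarshal of that type. Pulled out of the diff, the pattern is small enough to show on its own (type and method names below are invented for illustration):

package main

import (
	"fmt"
	"reflect"
	"unicode"
)

// fieldIndexCache mimics codec.serializedFieldIndices: one reflection walk
// per struct type, then a cheap map lookup on every subsequent (un)marshal.
type fieldIndexCache struct {
	indices map[reflect.Type][]int
}

func (c *fieldIndexCache) serializedFields(t reflect.Type) ([]int, error) {
	if cached, ok := c.indices[t]; ok {
		return cached, nil
	}
	fields := make([]int, 0, t.NumField())
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		if f.Tag.Get("serialize") != "true" { // only opted-in fields are encoded
			continue
		}
		if unicode.IsLower(rune(f.Name[0])) { // unexported fields can't be set via reflection
			return nil, fmt.Errorf("can't serialize unexported field %s", f.Name)
		}
		fields = append(fields, i)
	}
	c.indices[t] = fields
	return fields, nil
}

type example struct {
	Keep uint32 `serialize:"true"`
	Skip uint32
}

func main() {
	cache := &fieldIndexCache{indices: map[reflect.Type][]int{}}
	idx, _ := cache.serializedFields(reflect.TypeOf(example{}))
	fmt.Println(idx) // [0]: only the tagged field is visited by marshal/unmarshal
}

Caching by reflect.Type is safe because the set of serialized fields for a type never changes at runtime, so the map only grows and, as the later patch comment notes, stays small.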
field := value.Field(index) // Get the field - if err := c.unmarshal(p, field); err != nil { // Unmarshal into the field + if err := c.unmarshal(p, value.Field(index)); err != nil { // Unmarshal into the field return err } } @@ -491,10 +487,10 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { // Returns the indices of the serializable fields of [t], which is a struct type // Returns an error if a field has tag "serialize: true" but the field is unexported func (c *codec) getSerializedFieldIndices(t reflect.Type) ([]int, error) { - if c.serializedFields == nil { - c.serializedFields = make(map[reflect.Type][]int) + if c.serializedFieldIndices == nil { + c.serializedFieldIndices = make(map[reflect.Type][]int) } - if serializedFields, ok := c.serializedFields[t]; ok { + if serializedFields, ok := c.serializedFieldIndices[t]; ok { return serializedFields, nil } numFields := t.NumField() @@ -509,6 +505,6 @@ func (c *codec) getSerializedFieldIndices(t reflect.Type) ([]int, error) { } serializedFields = append(serializedFields, i) } - c.serializedFields[t] = serializedFields + c.serializedFieldIndices[t] = serializedFields return serializedFields, nil } From ee1cf620a18c1c12c6284969cb77c99d4fb4fe58 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sun, 14 Jun 2020 12:06:39 -0400 Subject: [PATCH 042/183] cleanup --- vms/components/codec/codec.go | 88 ++++++++++------------------------- 1 file changed, 25 insertions(+), 63 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 5000a6a..cda5fba 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -342,68 +342,32 @@ func (c *codec) Unmarshal(bytes []byte, dest interface{}) error { func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { switch value.Kind() { case reflect.Uint8: - b := p.UnpackByte() - if p.Err != nil { - return p.Err - } - value.SetUint(uint64(b)) - return nil + value.SetUint(uint64(p.UnpackByte())) + return p.Err case reflect.Int8: - b := p.UnpackByte() - if p.Err != nil { - return p.Err - } - value.SetInt(int64(b)) - return nil + value.SetInt(int64(p.UnpackByte())) + return p.Err case reflect.Uint16: - b := p.UnpackShort() - if p.Err != nil { - return p.Err - } - value.SetUint(uint64(b)) - return nil + value.SetUint(uint64(p.UnpackShort())) + return p.Err case reflect.Int16: - b := p.UnpackShort() - if p.Err != nil { - return p.Err - } - value.SetInt(int64(b)) - return nil + value.SetInt(int64(p.UnpackShort())) + return p.Err case reflect.Uint32: - b := p.UnpackInt() - if p.Err != nil { - return p.Err - } - value.SetUint(uint64(b)) - return nil + value.SetUint(uint64(p.UnpackInt())) + return p.Err case reflect.Int32: - b := p.UnpackInt() - if p.Err != nil { - return p.Err - } - value.SetInt(int64(b)) - return nil + value.SetInt(int64(p.UnpackInt())) + return p.Err case reflect.Uint64: - b := p.UnpackLong() - if p.Err != nil { - return p.Err - } - value.SetUint(uint64(b)) - return nil + value.SetUint(uint64(p.UnpackLong())) + return p.Err case reflect.Int64: - b := p.UnpackLong() - if p.Err != nil { - return p.Err - } - value.SetInt(int64(b)) - return nil + value.SetInt(int64(p.UnpackLong())) + return p.Err case reflect.Bool: - b := p.UnpackBool() - if p.Err != nil { - return p.Err - } - value.SetBool(b) - return nil + value.SetBool(p.UnpackBool()) + return p.Err case reflect.Slice: numElts := int(p.UnpackInt()) if p.Err != nil { @@ -433,28 +397,26 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error 
{ if p.Err != nil { return p.Err } - // Get a struct that implements the interface + // Get a type that implements the interface typ, ok := c.typeIDToType[typeID] if !ok { return errUnmarshalUnregisteredType } - // Ensure struct actually does implement the interface - valueType := value.Type() - if !typ.Implements(valueType) { + // Ensure type actually does implement the interface + if valueType := value.Type(); !typ.Implements(valueType) { return fmt.Errorf("%s does not implement interface %s", typ, valueType) } - concreteInstancePtr := reflect.New(typ) // instance of the proper type + concreteInstance := reflect.New(typ).Elem() // instance of the proper type // Unmarshal into the struct - if err := c.unmarshal(p, concreteInstancePtr.Elem()); err != nil { + if err := c.unmarshal(p, concreteInstance); err != nil { return err } // And assign the filled struct to the value - value.Set(concreteInstancePtr.Elem()) + value.Set(concreteInstance) return nil case reflect.Struct: // Type of this struct - t := value.Type() - serializedFieldIndices, err := c.getSerializedFieldIndices(t) + serializedFieldIndices, err := c.getSerializedFieldIndices(value.Type()) if err != nil { return err } From 9c4cfecf4e5bdd1b0119bd28bf2c360f9cd1c07b Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sun, 14 Jun 2020 12:23:05 -0400 Subject: [PATCH 043/183] pack pointer to string instead of string...halves memory footprint --- utils/wrappers/packing.go | 10 ++++++++++ vms/components/codec/codec.go | 5 ++--- 2 files changed, 12 insertions(+), 3 deletions(-) diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index 1038852..3759c63 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -284,6 +284,16 @@ func (p *Packer) PackStr(str string) { p.PackFixedBytes([]byte(str)) } +// PackStrPtr appends a string to the byte array +func (p *Packer) PackStrPtr(str *string) { + strSize := len(*str) + if strSize > MaxStringLen { + p.Add(errInvalidInput) + } + p.PackShort(uint16(strSize)) + p.PackFixedBytes([]byte(*str)) +} + // UnpackStr unpacks a string from the byte array func (p *Packer) UnpackStr() string { strSize := p.UnpackShort() diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index cda5fba..094f9e1 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -127,12 +127,11 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. // Case: Value can't be marshalled switch valueKind { case reflect.Interface, reflect.Ptr, reflect.Invalid: - if value.IsNil() { // Can't marshal nil or nil pointers + if value.IsNil() { // Can't marshal nil (except nil slices) return 0, 0, errNil } } - // Case: Value is of known size; return its byte repr. switch valueKind { case reflect.Uint8: size = 1 @@ -211,7 +210,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. 
asStr := value.String() size = len(asStr) + wrappers.ShortLen (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackStr(asStr) + p.PackStrPtr(&asStr) return p.Err } return From d1796c8b0bd0bb7f9122b50c371442c561df7eb3 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sun, 14 Jun 2020 18:15:44 -0400 Subject: [PATCH 044/183] cleanup; revert string packing method --- utils/wrappers/packing.go | 10 -------- vms/components/codec/codec.go | 46 +++++++++++++++-------------------- 2 files changed, 20 insertions(+), 36 deletions(-) diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index 3759c63..1038852 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -284,16 +284,6 @@ func (p *Packer) PackStr(str string) { p.PackFixedBytes([]byte(str)) } -// PackStrPtr appends a string to the byte array -func (p *Packer) PackStrPtr(str *string) { - strSize := len(*str) - if strSize > MaxStringLen { - p.Add(errInvalidInput) - } - p.PackShort(uint16(strSize)) - p.PackFixedBytes([]byte(*str)) -} - // UnpackStr unpacks a string from the byte array func (p *Packer) UnpackStr() string { strSize := p.UnpackShort() diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 094f9e1..ce488e7 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -97,7 +97,7 @@ func (c *codec) Marshal(value interface{}) ([]byte, error) { return nil, errNil } - funcs := make([]func(*wrappers.Packer) error, 512, 512) + funcs := make([]func(*wrappers.Packer) error, 256, 256) size, _, err := c.marshal(reflect.ValueOf(value), 0, &funcs) if err != nil { return nil, err @@ -134,7 +134,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. switch valueKind { case reflect.Uint8: - size = 1 + size = wrappers.ByteLen funcsWritten = 1 asByte := byte(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -143,7 +143,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Int8: - size = 1 + size = wrappers.ByteLen funcsWritten = 1 asByte := byte(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -152,7 +152,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Uint16: - size = 2 + size = wrappers.ShortLen funcsWritten = 1 asShort := uint16(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -161,7 +161,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Int16: - size = 2 + size = wrappers.ShortLen funcsWritten = 1 asShort := uint16(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -170,7 +170,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Uint32: - size = 4 + size = wrappers.IntLen funcsWritten = 1 asInt := uint32(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -179,7 +179,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Int32: - size = 4 + size = wrappers.IntLen funcsWritten = 1 asInt := uint32(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -188,7 +188,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. 
} return case reflect.Uint64: - size = 8 + size = wrappers.LongLen funcsWritten = 1 asInt := uint64(value.Uint()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -197,7 +197,7 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Int64: - size = 8 + size = wrappers.LongLen funcsWritten = 1 asInt := uint64(value.Int()) (*funcs)[index] = func(p *wrappers.Packer) error { @@ -206,16 +206,17 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.String: + // Note: it actually saves memory allocations to not do s := value.String() + // and use s in place of value.String(). Hence we don't do that. funcsWritten = 1 - asStr := value.String() - size = len(asStr) + wrappers.ShortLen + size = len(value.String()) + wrappers.ShortLen (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackStrPtr(&asStr) + p.PackStr(value.String()) return p.Err } return case reflect.Bool: - size = 1 + size = wrappers.BoolLen funcsWritten = 1 asBool := value.Bool() (*funcs)[index] = func(p *wrappers.Packer) error { @@ -232,13 +233,11 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. return 0, 0, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(underlyingValue).String()) } - (*funcs)[index] = nil subsize, subFuncsWritten, subErr := c.marshal(value.Elem(), index+1, funcs) if subErr != nil { return 0, 0, subErr } - - size = 4 + subsize // 4 because we pack the type ID, a uint32 + size = wrappers.IntLen + subsize (*funcs)[index] = func(p *wrappers.Packer) error { p.PackInt(typeID) return p.Err @@ -287,23 +286,18 @@ func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers. } return case reflect.Struct: - t := value.Type() - - size = 0 - fieldsMarshalled := 0 - funcsWritten = 0 - serializedFields, subErr := c.getSerializedFieldIndices(t) + serializedFields, subErr := c.getSerializedFieldIndices(value.Type()) if subErr != nil { return 0, 0, subErr } - for _, f := range serializedFields { // Go through all fields of this struct - fieldVal := value.Field(f) // The field we're serializing - subSize, n, err := c.marshal(fieldVal, index+funcsWritten, funcs) // Serialize the field + size = 0 + funcsWritten = 0 + for _, fieldIndex := range serializedFields { // Go through all fields of this struct + subSize, n, err := c.marshal(value.Field(fieldIndex), index+funcsWritten, funcs) // Serialize the field if err != nil { return 0, 0, err } - fieldsMarshalled++ size += subSize funcsWritten += n } From b269f8cfb0b7a3a66ca31570cf27108eb1cbf1a2 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Sun, 14 Jun 2020 21:38:07 -0400 Subject: [PATCH 045/183] marshal writes directly to packer rather than creating array of functions --- vms/components/codec/codec.go | 196 +++++++++------------------------- 1 file changed, 52 insertions(+), 144 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index ce488e7..8e223f9 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -97,21 +97,11 @@ func (c *codec) Marshal(value interface{}) ([]byte, error) { return nil, errNil } - funcs := make([]func(*wrappers.Packer) error, 256, 256) - size, _, err := c.marshal(reflect.ValueOf(value), 0, &funcs) - if err != nil { + p := &wrappers.Packer{MaxSize: 512, Bytes: make([]byte, 0, 512)} + if err := c.marshal(reflect.ValueOf(value), p); err != nil { return nil, err } - p := &wrappers.Packer{MaxSize: size, Bytes: make([]byte, 0, 
size)} - for _, f := range funcs { - if f == nil { - break - } else if err := f(p); err != nil { - return nil, err - } - } - return p.Bytes, nil } @@ -121,189 +111,107 @@ func (c *codec) Marshal(value interface{}) ([]byte, error) { // and returns the number of bytes it wrote. // When these functions are called in order, they write [value] to a byte slice. // 3) An error -func (c *codec) marshal(value reflect.Value, index int, funcs *[]func(*wrappers.Packer) error) (size int, funcsWritten int, err error) { +func (c *codec) marshal(value reflect.Value, p *wrappers.Packer) error { valueKind := value.Kind() // Case: Value can't be marshalled switch valueKind { case reflect.Interface, reflect.Ptr, reflect.Invalid: if value.IsNil() { // Can't marshal nil (except nil slices) - return 0, 0, errNil + return errNil } } switch valueKind { case reflect.Uint8: - size = wrappers.ByteLen - funcsWritten = 1 - asByte := byte(value.Uint()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackByte(asByte) - return p.Err - } - return + p.PackByte(uint8(value.Uint())) + return p.Err case reflect.Int8: - size = wrappers.ByteLen - funcsWritten = 1 - asByte := byte(value.Int()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackByte(asByte) - return p.Err - } - return + p.PackByte(uint8(value.Int())) + return p.Err case reflect.Uint16: - size = wrappers.ShortLen - funcsWritten = 1 - asShort := uint16(value.Uint()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackShort(asShort) - return p.Err - } - return + p.PackShort(uint16(value.Uint())) + return p.Err case reflect.Int16: - size = wrappers.ShortLen - funcsWritten = 1 - asShort := uint16(value.Int()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackShort(asShort) - return p.Err - } - return + p.PackShort(uint16(value.Int())) + return p.Err case reflect.Uint32: - size = wrappers.IntLen - funcsWritten = 1 - asInt := uint32(value.Uint()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(asInt) - return p.Err - } - return + p.PackInt(uint32(value.Uint())) + return p.Err case reflect.Int32: - size = wrappers.IntLen - funcsWritten = 1 - asInt := uint32(value.Int()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(asInt) - return p.Err - } - return + p.PackInt(uint32(value.Int())) + return p.Err case reflect.Uint64: - size = wrappers.LongLen - funcsWritten = 1 - asInt := uint64(value.Uint()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackLong(asInt) - return p.Err - } - return + p.PackLong(value.Uint()) + return p.Err case reflect.Int64: - size = wrappers.LongLen - funcsWritten = 1 - asInt := uint64(value.Int()) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackLong(asInt) - return p.Err - } - return + p.PackLong(uint64(value.Int())) + return p.Err case reflect.String: - // Note: it actually saves memory allocations to not do s := value.String() - // and use s in place of value.String(). Hence we don't do that. 
- funcsWritten = 1 - size = len(value.String()) + wrappers.ShortLen - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackStr(value.String()) - return p.Err - } - return + p.PackStr(value.String()) + return p.Err case reflect.Bool: - size = wrappers.BoolLen - funcsWritten = 1 - asBool := value.Bool() - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackBool(asBool) - return p.Err - } - return + p.PackBool(value.Bool()) + return p.Err case reflect.Uintptr, reflect.Ptr: - return c.marshal(value.Elem(), index, funcs) + return c.marshal(value.Elem(), p) case reflect.Interface: underlyingValue := value.Interface() typeID, ok := c.typeToTypeID[reflect.TypeOf(underlyingValue)] // Get the type ID of the value being marshaled if !ok { - return 0, 0, fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(underlyingValue).String()) + return fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(underlyingValue).String()) } - - subsize, subFuncsWritten, subErr := c.marshal(value.Elem(), index+1, funcs) - if subErr != nil { - return 0, 0, subErr - } - size = wrappers.IntLen + subsize - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(typeID) + p.PackInt(typeID) + if p.Err != nil { return p.Err } - funcsWritten = 1 + subFuncsWritten - return + if err := c.marshal(value.Elem(), p); err != nil { + return err + } + return p.Err case reflect.Slice: numElts := value.Len() // # elements in the slice/array. 0 if this slice is nil. if numElts > c.maxSliceLen { - return 0, 0, fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) + return fmt.Errorf("slice length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } - - size = wrappers.IntLen // for # elements - subFuncsWritten := 0 - for i := 0; i < numElts; i++ { // Process each element in the slice - subSize, n, subErr := c.marshal(value.Index(i), index+subFuncsWritten+1, funcs) - if subErr != nil { - return 0, 0, subErr - } - size += subSize - subFuncsWritten += n - } - - numEltsAsUint32 := uint32(numElts) - (*funcs)[index] = func(p *wrappers.Packer) error { - p.PackInt(numEltsAsUint32) // pack # elements + p.PackInt(uint32(numElts)) // pack # elements + if p.Err != nil { return p.Err } - funcsWritten = subFuncsWritten + 1 - return + + for i := 0; i < numElts; i++ { // Process each element in the slice + if err := c.marshal(value.Index(i), p); err != nil { + return err + } + } + + return nil case reflect.Array: numElts := value.Len() if numElts > c.maxSliceLen { - return 0, 0, fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) + return fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } - size = 0 - funcsWritten = 0 for i := 0; i < numElts; i++ { // Process each element in the array - subSize, n, subErr := c.marshal(value.Index(i), index+funcsWritten, funcs) - if subErr != nil { - return 0, 0, subErr + if err := c.marshal(value.Index(i), p); err != nil { + return err } - size += subSize - funcsWritten += n } - return + return nil case reflect.Struct: serializedFields, subErr := c.getSerializedFieldIndices(value.Type()) if subErr != nil { - return 0, 0, subErr + return subErr } - size = 0 - funcsWritten = 0 for _, fieldIndex := range serializedFields { // Go through all fields of this struct - subSize, n, err := c.marshal(value.Field(fieldIndex), index+funcsWritten, funcs) // Serialize the field - if err != nil { - return 0, 0, err + if err := c.marshal(value.Field(fieldIndex), p); err != nil { // Serialize the 
field + return err } - size += subSize - funcsWritten += n } - return + return nil default: - return 0, 0, errUnknownType + return errUnknownType } } From ba2b214b587310f074dfb62bc3dfce054faa0fa5 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 12 Jun 2020 22:20:50 -0400 Subject: [PATCH 046/183] Decouple staking and tls encryption for internode communication --- main/main.go | 5 ++++- main/params.go | 15 ++++++++++++--- node/config.go | 1 + node/node.go | 4 ++-- 4 files changed, 19 insertions(+), 6 deletions(-) diff --git a/main/main.go b/main/main.go index 5aca025..98cb581 100644 --- a/main/main.go +++ b/main/main.go @@ -45,7 +45,10 @@ func main() { } // Track if sybil control is enforced - if !Config.EnableStaking { + if !Config.EnableStaking && Config.EnableP2PTLS { + log.Warn("Staking is disabled. Sybil control is not enforced.") + } + if !Config.EnableStaking && !Config.EnableP2PTLS { log.Warn("Staking and p2p encryption are disabled. Packet spoofing is possible.") } diff --git a/main/params.go b/main/params.go index eef8e60..150c0e8 100644 --- a/main/params.go +++ b/main/params.go @@ -49,7 +49,8 @@ var ( ) var ( - errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs") + errBootstrapMismatch = errors.New("more bootstrap IDs provided than bootstrap IPs") + errStakingRequiresTLS = errors.New("if staking is enabled, network TLS must also be enabled") ) // GetIPs returns the default IPs for each network @@ -200,7 +201,9 @@ func init() { // Staking: consensusPort := fs.Uint("staking-port", 9651, "Port of the consensus server") - fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Require TLS to authenticate staking connections") + // TODO - keeping same flag for backwards compatibility, should be changed to "staking-enabled" + fs.BoolVar(&Config.EnableStaking, "staking-tls-enabled", true, "Enable staking. 
If enabled, Network TLS is required.") + fs.BoolVar(&Config.EnableP2PTLS, "p2p-tls-enabled", true, "Require TLS to authenticate network communication") fs.StringVar(&Config.StakingKeyFile, "staking-tls-key-file", defaultStakingKeyPath, "TLS private key for staking") fs.StringVar(&Config.StakingCertFile, "staking-tls-cert-file", defaultStakingCertPath, "TLS certificate for staking") @@ -318,7 +321,13 @@ func init() { *bootstrapIDs = strings.Join(defaultBootstrapIDs, ",") } } - if Config.EnableStaking { + + if Config.EnableStaking && !Config.EnableP2PTLS { + errs.Add(errStakingRequiresTLS) + return + } + + if Config.EnableP2PTLS { i := 0 cb58 := formatting.CB58{} for _, id := range strings.Split(*bootstrapIDs, ",") { diff --git a/node/config.go b/node/config.go index 74ff491..2504276 100644 --- a/node/config.go +++ b/node/config.go @@ -34,6 +34,7 @@ type Config struct { // Staking configuration StakingIP utils.IPDesc + EnableP2PTLS bool EnableStaking bool StakingKeyFile string StakingCertFile string diff --git a/node/node.go b/node/node.go index ea0e8fc..5e817fa 100644 --- a/node/node.go +++ b/node/node.go @@ -119,7 +119,7 @@ func (n *Node) initNetworking() error { dialer := network.NewDialer(TCP) var serverUpgrader, clientUpgrader network.Upgrader - if n.Config.EnableStaking { + if n.Config.EnableP2PTLS { cert, err := tls.LoadX509KeyPair(n.Config.StakingCertFile, n.Config.StakingKeyFile) if err != nil { return err @@ -253,7 +253,7 @@ func (n *Node) initDatabase() error { // Otherwise, it is a hash of the TLS certificate that this node // uses for P2P communication func (n *Node) initNodeID() error { - if !n.Config.EnableStaking { + if !n.Config.EnableP2PTLS { n.ID = ids.NewShortID(hashing.ComputeHash160Array([]byte(n.Config.StakingIP.String()))) n.Log.Info("Set the node's ID to %s", n.ID) return nil From f28b69b81940ca08d0567bad854cc73db7fffabb Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 15 Jun 2020 10:06:40 -0400 Subject: [PATCH 047/183] set initial slice capacity for packer; packer pre-allocates capacity when possible --- utils/wrappers/packing.go | 35 ++++--- vms/components/codec/codec.go | 167 +++++++++++++++++++--------------- 2 files changed, 118 insertions(+), 84 deletions(-) diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index 1038852..22c7464 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -16,6 +16,11 @@ const ( // MaxStringLen ... MaxStringLen = math.MaxUint16 + // When the byte array is expanded, this many extra bytes + // are added to capacity of the array. + // Higher value --> need to expand byte array less --> less memory allocations + expansionBoost = 256 + // ByteLen is the number of bytes per byte... ByteLen = 1 // ShortLen is the number of bytes per short @@ -61,27 +66,35 @@ func (p *Packer) CheckSpace(bytes int) { } } -// Expand ensures that there is [bytes] bytes left of space in the byte array. -// If this is not allowed due to the maximum size, an error is added to the -// packer +// Expand ensures that there is [bytes] bytes left of space in the byte slice. +// If this is not allowed due to the maximum size, an error is added to the packer +// In order to understand this code, its important to understand the difference +// between a slice's length and its capacity. 
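Length versus capacity is the whole trick in Expand, and it is easy to see in isolation. A minimal example, independent of the Packer type:

package main

import "fmt"

func main() {
	b := make([]byte, 2, 8) // length 2, capacity 8: 6 spare bytes already allocated

	b = b[:6] // still within capacity: re-slicing just lengthens b, no allocation
	fmt.Println(len(b), cap(b)) // 6 8

	grown := append(b, make([]byte, 10)...) // needs 16 > cap(b): append allocates a larger backing array
	fmt.Println(len(grown), cap(grown) >= 16) // 16 true
}

Expand's cheap path is the first case: when the needed size already fits within cap(p.Bytes) it only re-slices. Only when the capacity is exhausted does it fall back to append, and then it pads the requested growth by expansionBoost bytes so a run of small packs doesn't trigger an allocation on every call.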
func (p *Packer) Expand(bytes int) { p.CheckSpace(0) if p.Errored() { return } - neededSize := bytes + p.Offset - if neededSize <= len(p.Bytes) { + neededSize := bytes + p.Offset // Need byte slice's length to be at least [neededSize] + if neededSize <= len(p.Bytes) { // Byte slice has sufficient length already + return + } else if neededSize > p.MaxSize { // Lengthening the byte slice would cause it to grow too large + p.Add(errBadLength) + return + } else if neededSize <= cap(p.Bytes) { // Byte slice has sufficient capacity to lengthen it without mem alloc + p.Bytes = p.Bytes[:neededSize] return } - if neededSize > p.MaxSize { - p.Add(errBadLength) - } else if neededSize > cap(p.Bytes) { - p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes))...) - } else { - p.Bytes = p.Bytes[:neededSize] + // See if we can expand the byte slice an extra [expansionBoost] bytes in order to + // prevent need for future expansions (and therefore memory allocations) + capToAdd := neededSize - cap(p.Bytes) + expansionBoost + if capToAdd > p.MaxSize { + capToAdd = neededSize - cap(p.Bytes) } + // increase slice's length and capacity + p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes), capToAdd)...) } // PackByte append a byte to the byte array diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 8e223f9..2f974a9 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -6,7 +6,6 @@ package codec import ( "errors" "fmt" - "math" "reflect" "unicode" @@ -16,22 +15,16 @@ import ( const ( defaultMaxSize = 1 << 18 // default max size, in bytes, of something being marshalled by Marshal() defaultMaxSliceLength = 1 << 18 // default max length of a slice being marshalled by Marshal(). Should be <= math.MaxUint32. - maxStringLen = math.MaxUint16 + + // initial capacity of byte slice that values are marshaled into. + // Larger value --> need less memory allocations but possibly have allocated but unused memory + // Smaller value --> need more memory allocations but more efficient use of allocated memory + initialSliceCap = 2048 ) -// ErrBadCodec is returned when one tries to perform an operation -// using an unknown codec var ( - errBadCodec = errors.New("wrong or unknown codec used") - errNil = errors.New("can't marshal/unmarshal nil value") - errNeedPointer = errors.New("must unmarshal into a pointer") - errMarshalUnregisteredType = errors.New("can't marshal an unregistered type") - errUnmarshalUnregisteredType = errors.New("can't unmarshal an unregistered type") - errUnknownType = errors.New("don't know how to marshal/unmarshal this type") - errMarshalUnexportedField = errors.New("can't serialize an unexported field") - errUnmarshalUnexportedField = errors.New("can't deserialize into an unexported field") - errOutOfMemory = errors.New("out of memory") - errSliceTooLarge = errors.New("slice too large") + errNil = errors.New("can't marshal/unmarshal nil pointer or interface") + errNeedPointer = errors.New("argument to unmarshal should be a pointer") ) // Codec handles marshaling and unmarshaling of structs @@ -42,6 +35,12 @@ type codec struct { typeIDToType map[uint32]reflect.Type typeToTypeID map[reflect.Type]uint32 + // Key: a struct type + // Value: Slice where each element is index in the struct type + // of a field that is serialized/deserialized + // e.g. Foo --> [1,5,8] means Foo.Field(1), etc. 
are to be serialized/deserialized + // We assume this cache is pretty small (a few hundred keys at most) + // and doesn't take up much memory serializedFieldIndices map[reflect.Type][]int } @@ -94,10 +93,10 @@ func (c *codec) RegisterType(val interface{}) error { // To marshal an interface, [value] must be a pointer to the interface func (c *codec) Marshal(value interface{}) ([]byte, error) { if value == nil { - return nil, errNil + return nil, errNil // can't marshal nil } - p := &wrappers.Packer{MaxSize: 512, Bytes: make([]byte, 0, 512)} + p := &wrappers.Packer{MaxSize: c.maxSize, Bytes: make([]byte, 0, initialSliceCap)} if err := c.marshal(reflect.ValueOf(value), p); err != nil { return nil, err } @@ -105,16 +104,10 @@ func (c *codec) Marshal(value interface{}) ([]byte, error) { return p.Bytes, nil } -// marshal returns: -// 1) The size, in bytes, of the byte representation of [value] -// 2) A slice of functions, where each function writes bytes to its argument -// and returns the number of bytes it wrote. -// When these functions are called in order, they write [value] to a byte slice. -// 3) An error +// marshal writes the byte representation of [value] to [p] +// [value]'s underlying value must not be a nil pointer or interface func (c *codec) marshal(value reflect.Value, p *wrappers.Packer) error { valueKind := value.Kind() - - // Case: Value can't be marshalled switch valueKind { case reflect.Interface, reflect.Ptr, reflect.Invalid: if value.IsNil() { // Can't marshal nil (except nil slices) @@ -161,7 +154,7 @@ func (c *codec) marshal(value reflect.Value, p *wrappers.Packer) error { if !ok { return fmt.Errorf("can't marshal unregistered type '%v'", reflect.TypeOf(underlyingValue).String()) } - p.PackInt(typeID) + p.PackInt(typeID) // Pack type ID so we know what to unmarshal this into if p.Err != nil { return p.Err } @@ -178,20 +171,17 @@ func (c *codec) marshal(value reflect.Value, p *wrappers.Packer) error { if p.Err != nil { return p.Err } - for i := 0; i < numElts; i++ { // Process each element in the slice if err := c.marshal(value.Index(i), p); err != nil { return err } } - return nil case reflect.Array: numElts := value.Len() if numElts > c.maxSliceLen { return fmt.Errorf("array length, %d, exceeds maximum length, %d", numElts, c.maxSliceLen) } - for i := 0; i < numElts; i++ { // Process each element in the array if err := c.marshal(value.Index(i), p); err != nil { return err @@ -199,19 +189,18 @@ func (c *codec) marshal(value reflect.Value, p *wrappers.Packer) error { } return nil case reflect.Struct: - serializedFields, subErr := c.getSerializedFieldIndices(value.Type()) - if subErr != nil { - return subErr + serializedFields, err := c.getSerializedFieldIndices(value.Type()) + if err != nil { + return err } - - for _, fieldIndex := range serializedFields { // Go through all fields of this struct - if err := c.marshal(value.Field(fieldIndex), p); err != nil { // Serialize the field + for _, fieldIndex := range serializedFields { // Go through all fields of this struct that are serialized + if err := c.marshal(value.Field(fieldIndex), p); err != nil { // Serialize the field and write to byte array return err } } return nil default: - return errUnknownType + return fmt.Errorf("can't marshal unknown kind %s", valueKind) } } @@ -220,7 +209,7 @@ func (c *codec) marshal(value reflect.Value, p *wrappers.Packer) error { func (c *codec) Unmarshal(bytes []byte, dest interface{}) error { switch { case len(bytes) > c.maxSize: - return errSliceTooLarge + return fmt.Errorf("byte array 
exceeds maximum length, %d", c.maxSize) case dest == nil: return errNil } @@ -239,121 +228,153 @@ func (c *codec) Unmarshal(bytes []byte, dest interface{}) error { return nil } -// Unmarshal from [bytes] into [value]. [value] must be addressable +// Unmarshal from p.Bytes into [value]. [value] must be addressable. func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { switch value.Kind() { case reflect.Uint8: value.SetUint(uint64(p.UnpackByte())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal uint8: %s", p.Err) + } + return nil case reflect.Int8: value.SetInt(int64(p.UnpackByte())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal int8: %s", p.Err) + } + return nil case reflect.Uint16: value.SetUint(uint64(p.UnpackShort())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal uint16: %s", p.Err) + } + return nil case reflect.Int16: value.SetInt(int64(p.UnpackShort())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal int16: %s", p.Err) + } + return nil case reflect.Uint32: value.SetUint(uint64(p.UnpackInt())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal uint32: %s", p.Err) + } + return nil case reflect.Int32: value.SetInt(int64(p.UnpackInt())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal int32: %s", p.Err) + } + return nil case reflect.Uint64: value.SetUint(uint64(p.UnpackLong())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal uint64: %s", p.Err) + } + return nil case reflect.Int64: value.SetInt(int64(p.UnpackLong())) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal int64: %s", p.Err) + } + return nil case reflect.Bool: value.SetBool(p.UnpackBool()) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal bool: %s", p.Err) + } + return nil case reflect.Slice: numElts := int(p.UnpackInt()) if p.Err != nil { - return p.Err + return fmt.Errorf("couldn't marshal slice: %s", p.Err) } - // set [value] to be a slice of the appropriate type/capacity (right now [value] is nil) + // set [value] to be a slice of the appropriate type/capacity (right now it is nil) value.Set(reflect.MakeSlice(value.Type(), numElts, numElts)) // Unmarshal each element into the appropriate index of the slice for i := 0; i < numElts; i++ { if err := c.unmarshal(p, value.Index(i)); err != nil { - return err + return fmt.Errorf("couldn't marshal slice element: %s", err) } } return nil case reflect.Array: for i := 0; i < value.Len(); i++ { if err := c.unmarshal(p, value.Index(i)); err != nil { - return err + return fmt.Errorf("couldn't marshal array element: %s", err) } } return nil case reflect.String: value.SetString(p.UnpackStr()) - return p.Err + if p.Err != nil { + return fmt.Errorf("couldn't marshal string: %s", p.Err) + } + return nil case reflect.Interface: typeID := p.UnpackInt() // Get the type ID if p.Err != nil { - return p.Err + return fmt.Errorf("couldn't marshal interface: %s", p.Err) } // Get a type that implements the interface - typ, ok := c.typeIDToType[typeID] + implementingType, ok := c.typeIDToType[typeID] if !ok { - return errUnmarshalUnregisteredType + return fmt.Errorf("couldn't marshal interface: unknown type ID %d", typeID) } // Ensure type actually does implement the interface - if valueType := value.Type(); !typ.Implements(valueType) { - return fmt.Errorf("%s does not implement interface %s", typ, valueType) + if valueType := value.Type(); 
!implementingType.Implements(valueType) { + return fmt.Errorf("couldn't marshal interface: %s does not implement interface %s", implementingType, valueType) } - concreteInstance := reflect.New(typ).Elem() // instance of the proper type + intfImplementor := reflect.New(implementingType).Elem() // instance of the proper type // Unmarshal into the struct - if err := c.unmarshal(p, concreteInstance); err != nil { - return err + if err := c.unmarshal(p, intfImplementor); err != nil { + return fmt.Errorf("couldn't marshal interface: %s", err) } // And assign the filled struct to the value - value.Set(concreteInstance) + value.Set(intfImplementor) return nil case reflect.Struct: - // Type of this struct + // Get indices of fields that will be unmarshaled into serializedFieldIndices, err := c.getSerializedFieldIndices(value.Type()) if err != nil { - return err + return fmt.Errorf("couldn't marshal struct: %s", err) } - // Go through all the fields and umarshal into each + // Go through the fields and umarshal into them for _, index := range serializedFieldIndices { - if err := c.unmarshal(p, value.Field(index)); err != nil { // Unmarshal into the field - return err + if err := c.unmarshal(p, value.Field(index)); err != nil { + return fmt.Errorf("couldn't marshal struct: %s", err) } } return nil case reflect.Ptr: // Get the type this pointer points to - underlyingType := value.Type().Elem() + t := value.Type().Elem() // Create a new pointer to a new value of the underlying type - underlyingValue := reflect.New(underlyingType) + v := reflect.New(t) // Fill the value - if err := c.unmarshal(p, underlyingValue.Elem()); err != nil { - return err + if err := c.unmarshal(p, v.Elem()); err != nil { + return fmt.Errorf("couldn't marshal pointer: %s", err) } // Assign to the top-level struct's member - value.Set(underlyingValue) + value.Set(v) return nil case reflect.Invalid: return errNil default: - return errUnknownType + return fmt.Errorf("can't unmarshal unknown type %s", value.Kind().String()) } } // Returns the indices of the serializable fields of [t], which is a struct type // Returns an error if a field has tag "serialize: true" but the field is unexported +// e.g. 
getSerializedFieldIndices(Foo) --> [1,5,8] means Foo.Field(1), Foo.Field(5), Foo.Field(8) +// are to be serialized/deserialized func (c *codec) getSerializedFieldIndices(t reflect.Type) ([]int, error) { if c.serializedFieldIndices == nil { c.serializedFieldIndices = make(map[reflect.Type][]int) } - if serializedFields, ok := c.serializedFieldIndices[t]; ok { + if serializedFields, ok := c.serializedFieldIndices[t]; ok { // use pre-computed result return serializedFields, nil } numFields := t.NumField() @@ -368,6 +389,6 @@ func (c *codec) getSerializedFieldIndices(t reflect.Type) ([]int, error) { } serializedFields = append(serializedFields, i) } - c.serializedFieldIndices[t] = serializedFields + c.serializedFieldIndices[t] = serializedFields // cache result return serializedFields, nil } From cea79f66b29739ec5934232a2bc4cdd7f60571cd Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 15 Jun 2020 10:33:08 -0400 Subject: [PATCH 048/183] add to tests; comment them --- vms/components/codec/codec_test.go | 82 +++++++++++++++++++----------- 1 file changed, 52 insertions(+), 30 deletions(-) diff --git a/vms/components/codec/codec_test.go b/vms/components/codec/codec_test.go index 74209eb..edd3f85 100644 --- a/vms/components/codec/codec_test.go +++ b/vms/components/codec/codec_test.go @@ -5,6 +5,7 @@ package codec import ( "bytes" + "math" "reflect" "testing" ) @@ -104,36 +105,8 @@ func TestStruct(t *testing.T) { t.Fatal(err) } - if !reflect.DeepEqual(myStructUnmarshaled.Member1, myStructInstance.Member1) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !bytes.Equal(myStructUnmarshaled.MySlice, myStructInstance.MySlice) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice2, myStructInstance.MySlice2) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice3, myStructInstance.MySlice3) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice3, myStructInstance.MySlice3) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice4, myStructInstance.MySlice4) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.InnerStruct, myStructInstance.InnerStruct) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.InnerStruct2, myStructInstance.InnerStruct2) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MyArray2, myStructInstance.MyArray2) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MyArray3, myStructInstance.MyArray3) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MyArray4, myStructInstance.MyArray4) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MyInterface, myStructInstance.MyInterface) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MySlice5, myStructInstance.MySlice5) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if 
!reflect.DeepEqual(myStructUnmarshaled.InnerStruct3, myStructInstance.InnerStruct3) { - t.Fatal("expected unmarshaled struct to be same as original struct") - } else if !reflect.DeepEqual(myStructUnmarshaled.MyPointer, myStructInstance.MyPointer) { - t.Fatal("expected unmarshaled struct to be same as original struct") + if !reflect.DeepEqual(*myStructUnmarshaled, myStructInstance) { + t.Fatal("should be same") } } @@ -173,6 +146,28 @@ func TestSlice(t *testing.T) { } } +// Test marshalling/unmarshalling largest possible slice +func TestMaxSizeSlice(t *testing.T) { + mySlice := make([]string, math.MaxUint16, math.MaxUint16) + mySlice[0] = "first!" + mySlice[math.MaxUint16-1] = "last!" + codec := NewDefault() + bytes, err := codec.Marshal(mySlice) + if err != nil { + t.Fatal(err) + } + + var sliceUnmarshaled []string + if err := codec.Unmarshal(bytes, &sliceUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(mySlice, sliceUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +// Test marshalling a bool func TestBool(t *testing.T) { myBool := true codec := NewDefault() @@ -191,6 +186,7 @@ func TestBool(t *testing.T) { } } +// Test marshalling an array func TestArray(t *testing.T) { myArr := [5]uint64{5, 6, 7, 8, 9} codec := NewDefault() @@ -209,6 +205,26 @@ func TestArray(t *testing.T) { } } +// Test marshalling a really big array +func TestBigArray(t *testing.T) { + myArr := [30000]uint64{5, 6, 7, 8, 9} + codec := NewDefault() + bytes, err := codec.Marshal(myArr) + if err != nil { + t.Fatal(err) + } + + var myArrUnmarshaled [30000]uint64 + if err := codec.Unmarshal(bytes, &myArrUnmarshaled); err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(myArr, myArrUnmarshaled) { + t.Fatal("expected marshaled and unmarshaled values to match") + } +} + +// Test marshalling a pointer to a struct func TestPointerToStruct(t *testing.T) { myPtr := &MyInnerStruct{Str: "Hello!"} codec := NewDefault() @@ -227,6 +243,7 @@ func TestPointerToStruct(t *testing.T) { } } +// Test marshalling a slice of structs func TestSliceOfStruct(t *testing.T) { mySlice := []MyInnerStruct3{ MyInnerStruct3{ @@ -257,6 +274,7 @@ func TestSliceOfStruct(t *testing.T) { } } +// Test marshalling an interface func TestInterface(t *testing.T) { codec := NewDefault() codec.RegisterType(&MyInnerStruct2{}) @@ -278,6 +296,7 @@ func TestInterface(t *testing.T) { } } +// Test marshalling a slice of interfaces func TestSliceOfInterface(t *testing.T) { mySlice := []Foo{ &MyInnerStruct{ @@ -304,6 +323,7 @@ func TestSliceOfInterface(t *testing.T) { } } +// Test marshalling an array of interfaces func TestArrayOfInterface(t *testing.T) { myArray := [2]Foo{ &MyInnerStruct{ @@ -330,6 +350,7 @@ func TestArrayOfInterface(t *testing.T) { } } +// Test marshalling a pointer to an interface func TestPointerToInterface(t *testing.T) { var myinnerStruct Foo = &MyInnerStruct{Str: "Hello!"} var myPtr *Foo = &myinnerStruct @@ -352,6 +373,7 @@ func TestPointerToInterface(t *testing.T) { } } +// Test marshalling a string func TestString(t *testing.T) { myString := "Ayy" codec := NewDefault() From 979477d68f4102df8c3fe0bcbfdd6d3a4d758b36 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 15 Jun 2020 10:46:22 -0400 Subject: [PATCH 049/183] change initialSliceCap --> 1024 --- vms/components/codec/codec.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 2f974a9..d644ad5 100644 --- a/vms/components/codec/codec.go +++ 
b/vms/components/codec/codec.go @@ -19,7 +19,7 @@ const ( // initial capacity of byte slice that values are marshaled into. // Larger value --> need less memory allocations but possibly have allocated but unused memory // Smaller value --> need more memory allocations but more efficient use of allocated memory - initialSliceCap = 2048 + initialSliceCap = 1024 ) var ( From a84abacea5fa07ae44276f6bccc30b34673816f1 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 15 Jun 2020 10:55:09 -0400 Subject: [PATCH 050/183] fix typos and removed useless benchmark --- vms/components/codec/codec.go | 40 ++++++++++---------- vms/components/codec/codec_benchmark_test.go | 9 ----- 2 files changed, 20 insertions(+), 29 deletions(-) diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index d644ad5..53852a9 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -234,102 +234,102 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { case reflect.Uint8: value.SetUint(uint64(p.UnpackByte())) if p.Err != nil { - return fmt.Errorf("couldn't marshal uint8: %s", p.Err) + return fmt.Errorf("couldn't unmarshal uint8: %s", p.Err) } return nil case reflect.Int8: value.SetInt(int64(p.UnpackByte())) if p.Err != nil { - return fmt.Errorf("couldn't marshal int8: %s", p.Err) + return fmt.Errorf("couldn't unmarshal int8: %s", p.Err) } return nil case reflect.Uint16: value.SetUint(uint64(p.UnpackShort())) if p.Err != nil { - return fmt.Errorf("couldn't marshal uint16: %s", p.Err) + return fmt.Errorf("couldn't unmarshal uint16: %s", p.Err) } return nil case reflect.Int16: value.SetInt(int64(p.UnpackShort())) if p.Err != nil { - return fmt.Errorf("couldn't marshal int16: %s", p.Err) + return fmt.Errorf("couldn't unmarshal int16: %s", p.Err) } return nil case reflect.Uint32: value.SetUint(uint64(p.UnpackInt())) if p.Err != nil { - return fmt.Errorf("couldn't marshal uint32: %s", p.Err) + return fmt.Errorf("couldn't unmarshal uint32: %s", p.Err) } return nil case reflect.Int32: value.SetInt(int64(p.UnpackInt())) if p.Err != nil { - return fmt.Errorf("couldn't marshal int32: %s", p.Err) + return fmt.Errorf("couldn't unmarshal int32: %s", p.Err) } return nil case reflect.Uint64: value.SetUint(uint64(p.UnpackLong())) if p.Err != nil { - return fmt.Errorf("couldn't marshal uint64: %s", p.Err) + return fmt.Errorf("couldn't unmarshal uint64: %s", p.Err) } return nil case reflect.Int64: value.SetInt(int64(p.UnpackLong())) if p.Err != nil { - return fmt.Errorf("couldn't marshal int64: %s", p.Err) + return fmt.Errorf("couldn't unmarshal int64: %s", p.Err) } return nil case reflect.Bool: value.SetBool(p.UnpackBool()) if p.Err != nil { - return fmt.Errorf("couldn't marshal bool: %s", p.Err) + return fmt.Errorf("couldn't unmarshal bool: %s", p.Err) } return nil case reflect.Slice: numElts := int(p.UnpackInt()) if p.Err != nil { - return fmt.Errorf("couldn't marshal slice: %s", p.Err) + return fmt.Errorf("couldn't unmarshal slice: %s", p.Err) } // set [value] to be a slice of the appropriate type/capacity (right now it is nil) value.Set(reflect.MakeSlice(value.Type(), numElts, numElts)) // Unmarshal each element into the appropriate index of the slice for i := 0; i < numElts; i++ { if err := c.unmarshal(p, value.Index(i)); err != nil { - return fmt.Errorf("couldn't marshal slice element: %s", err) + return fmt.Errorf("couldn't unmarshal slice element: %s", err) } } return nil case reflect.Array: for i := 0; i < value.Len(); i++ { if err := c.unmarshal(p, 
value.Index(i)); err != nil { - return fmt.Errorf("couldn't marshal array element: %s", err) + return fmt.Errorf("couldn't unmarshal array element: %s", err) } } return nil case reflect.String: value.SetString(p.UnpackStr()) if p.Err != nil { - return fmt.Errorf("couldn't marshal string: %s", p.Err) + return fmt.Errorf("couldn't unmarshal string: %s", p.Err) } return nil case reflect.Interface: typeID := p.UnpackInt() // Get the type ID if p.Err != nil { - return fmt.Errorf("couldn't marshal interface: %s", p.Err) + return fmt.Errorf("couldn't unmarshal interface: %s", p.Err) } // Get a type that implements the interface implementingType, ok := c.typeIDToType[typeID] if !ok { - return fmt.Errorf("couldn't marshal interface: unknown type ID %d", typeID) + return fmt.Errorf("couldn't unmarshal interface: unknown type ID %d", typeID) } // Ensure type actually does implement the interface if valueType := value.Type(); !implementingType.Implements(valueType) { - return fmt.Errorf("couldn't marshal interface: %s does not implement interface %s", implementingType, valueType) + return fmt.Errorf("couldn't unmarshal interface: %s does not implement interface %s", implementingType, valueType) } intfImplementor := reflect.New(implementingType).Elem() // instance of the proper type // Unmarshal into the struct if err := c.unmarshal(p, intfImplementor); err != nil { - return fmt.Errorf("couldn't marshal interface: %s", err) + return fmt.Errorf("couldn't unmarshal interface: %s", err) } // And assign the filled struct to the value value.Set(intfImplementor) @@ -338,12 +338,12 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { // Get indices of fields that will be unmarshaled into serializedFieldIndices, err := c.getSerializedFieldIndices(value.Type()) if err != nil { - return fmt.Errorf("couldn't marshal struct: %s", err) + return fmt.Errorf("couldn't unmarshal struct: %s", err) } // Go through the fields and umarshal into them for _, index := range serializedFieldIndices { if err := c.unmarshal(p, value.Field(index)); err != nil { - return fmt.Errorf("couldn't marshal struct: %s", err) + return fmt.Errorf("couldn't unmarshal struct: %s", err) } } return nil @@ -354,7 +354,7 @@ func (c *codec) unmarshal(p *wrappers.Packer, value reflect.Value) error { v := reflect.New(t) // Fill the value if err := c.unmarshal(p, v.Elem()); err != nil { - return fmt.Errorf("couldn't marshal pointer: %s", err) + return fmt.Errorf("couldn't unmarshal pointer: %s", err) } // Assign to the top-level struct's member value.Set(v) diff --git a/vms/components/codec/codec_benchmark_test.go b/vms/components/codec/codec_benchmark_test.go index 25af563..4adfa52 100644 --- a/vms/components/codec/codec_benchmark_test.go +++ b/vms/components/codec/codec_benchmark_test.go @@ -62,12 +62,3 @@ func BenchmarkMarshalNonCodec(b *testing.B) { } } } - -func BenchmarkFoo(b *testing.B) { - arr := make([]int, 10000, 10000) - for n := 0; n < b.N; n++ { - for i := 0; i < 10000; i++ { - arr[i] = i - } - } -} From f59f45a20fb8eaee395e2a2e269847e79d43bad0 Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Mon, 15 Jun 2020 09:35:41 -0700 Subject: [PATCH 051/183] Make all RPC call logs `Info` level. 
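Every hunk in this patch applies the same one-line change to an RPC entry point: the "<Service>: <Method> called" trace moves from the Debug level to Info, presumably so the calls appear at the default log level. A minimal, self-contained sketch of that pattern, using a hypothetical leveled logger and a trimmed-down handler signature rather than the project's own types:

    package main

    import "log"

    // Logger is a hypothetical leveled logger, standing in for the node's logger.
    type Logger struct{}

    func (Logger) Debug(format string, args ...interface{}) { log.Printf("DEBUG "+format, args...) }
    func (Logger) Info(format string, args ...interface{})  { log.Printf("INFO  "+format, args...) }

    // Admin loosely mirrors the shape of the services touched by this patch.
    type Admin struct{ log Logger }

    func (service *Admin) GetNodeVersion() error {
        // Before this patch: service.log.Debug("Admin: GetNodeVersion called")
        service.log.Info("Admin: GetNodeVersion called")
        return nil
    }

    func main() {
        _ = (&Admin{}).GetNodeVersion()
    }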
--- api/admin/service.go | 24 ++++++++++---------- api/health/service.go | 2 +- api/ipcs/server.go | 4 ++-- api/keystore/service.go | 10 ++++----- vms/avm/service.go | 34 ++++++++++++++--------------- vms/platformvm/service.go | 46 +++++++++++++++++++-------------------- 6 files changed, 60 insertions(+), 60 deletions(-) diff --git a/api/admin/service.go b/api/admin/service.go index e05a440..0718dfd 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -57,7 +57,7 @@ type GetNodeVersionReply struct { // GetNodeVersion returns the version this node is running func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error { - service.log.Debug("Admin: GetNodeVersion called") + service.log.Info("Admin: GetNodeVersion called") reply.Version = service.version.String() return nil @@ -70,7 +70,7 @@ type GetNodeIDReply struct { // GetNodeID returns the node ID of this node func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error { - service.log.Debug("Admin: GetNodeID called") + service.log.Info("Admin: GetNodeID called") reply.NodeID = service.nodeID return nil @@ -83,7 +83,7 @@ type GetNetworkIDReply struct { // GetNetworkID returns the network ID this node is running on func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { - service.log.Debug("Admin: GetNetworkID called") + service.log.Info("Admin: GetNetworkID called") reply.NetworkID = cjson.Uint32(service.networkID) return nil @@ -96,7 +96,7 @@ type GetNetworkNameReply struct { // GetNetworkName returns the network name this node is running on func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error { - service.log.Debug("Admin: GetNetworkName called") + service.log.Info("Admin: GetNetworkName called") reply.NetworkName = genesis.NetworkName(service.networkID) return nil @@ -114,7 +114,7 @@ type GetBlockchainIDReply struct { // GetBlockchainID returns the blockchain ID that resolves the alias that was supplied func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { - service.log.Debug("Admin: GetBlockchainID called") + service.log.Info("Admin: GetBlockchainID called") bID, err := service.chainManager.Lookup(args.Alias) reply.BlockchainID = bID.String() @@ -128,7 +128,7 @@ type PeersReply struct { // Peers returns the list of current validators func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error { - service.log.Debug("Admin: Peers called") + service.log.Info("Admin: Peers called") reply.Peers = service.networking.Peers() return nil } @@ -145,7 +145,7 @@ type StartCPUProfilerReply struct { // StartCPUProfiler starts a cpu profile writing to the specified file func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error { - service.log.Debug("Admin: StartCPUProfiler called with %s", args.Filename) + service.log.Info("Admin: StartCPUProfiler called with %s", args.Filename) reply.Success = true return service.performance.StartCPUProfiler(args.Filename) } @@ -157,7 +157,7 @@ type StopCPUProfilerReply struct { // StopCPUProfiler stops the cpu profile func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopCPUProfilerReply) error { - service.log.Debug("Admin: StopCPUProfiler called") + service.log.Info("Admin: StopCPUProfiler called") reply.Success = true return service.performance.StopCPUProfiler() } @@ 
-174,7 +174,7 @@ type MemoryProfileReply struct { // MemoryProfile runs a memory profile writing to the specified file func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error { - service.log.Debug("Admin: MemoryProfile called with %s", args.Filename) + service.log.Info("Admin: MemoryProfile called with %s", args.Filename) reply.Success = true return service.performance.MemoryProfile(args.Filename) } @@ -191,7 +191,7 @@ type LockProfileReply struct { // LockProfile runs a mutex profile writing to the specified file func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error { - service.log.Debug("Admin: LockProfile called with %s", args.Filename) + service.log.Info("Admin: LockProfile called with %s", args.Filename) reply.Success = true return service.performance.LockProfile(args.Filename) } @@ -209,7 +209,7 @@ type AliasReply struct { // Alias attempts to alias an HTTP endpoint to a new name func (service *Admin) Alias(_ *http.Request, args *AliasArgs, reply *AliasReply) error { - service.log.Debug("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias) + service.log.Info("Admin: Alias called with URL: %s, Alias: %s", args.Endpoint, args.Alias) reply.Success = true return service.httpServer.AddAliasesWithReadLock(args.Endpoint, args.Alias) } @@ -227,7 +227,7 @@ type AliasChainReply struct { // AliasChain attempts to alias a chain to a new name func (service *Admin) AliasChain(_ *http.Request, args *AliasChainArgs, reply *AliasChainReply) error { - service.log.Debug("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias) + service.log.Info("Admin: AliasChain called with Chain: %s, Alias: %s", args.Chain, args.Alias) chainID, err := service.chainManager.Lookup(args.Chain) if err != nil { diff --git a/api/health/service.go b/api/health/service.go index db33640..fdd405b 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -74,7 +74,7 @@ type GetLivenessReply struct { // GetLiveness returns a summation of the health of the node func (h *Health) GetLiveness(_ *http.Request, _ *GetLivenessArgs, reply *GetLivenessReply) error { - h.log.Debug("Health: GetLiveness called") + h.log.Info("Health: GetLiveness called") reply.Checks, reply.Healthy = h.health.Results() return nil } diff --git a/api/ipcs/server.go b/api/ipcs/server.go index f4be11b..72a78c1 100644 --- a/api/ipcs/server.go +++ b/api/ipcs/server.go @@ -61,7 +61,7 @@ type PublishBlockchainReply struct { // PublishBlockchain publishes the finalized accepted transactions from the blockchainID over the IPC func (ipc *IPCs) PublishBlockchain(r *http.Request, args *PublishBlockchainArgs, reply *PublishBlockchainReply) error { - ipc.log.Debug("IPCs: PublishBlockchain called with BlockchainID: %s", args.BlockchainID) + ipc.log.Info("IPCs: PublishBlockchain called with BlockchainID: %s", args.BlockchainID) chainID, err := ipc.chainManager.Lookup(args.BlockchainID) if err != nil { ipc.log.Error("unknown blockchainID: %s", err) @@ -117,7 +117,7 @@ type UnpublishBlockchainReply struct { // UnpublishBlockchain closes publishing of a blockchainID func (ipc *IPCs) UnpublishBlockchain(r *http.Request, args *UnpublishBlockchainArgs, reply *UnpublishBlockchainReply) error { - ipc.log.Debug("IPCs: UnpublishBlockchain called with BlockchainID: %s", args.BlockchainID) + ipc.log.Info("IPCs: UnpublishBlockchain called with BlockchainID: %s", args.BlockchainID) chainID, err := 
ipc.chainManager.Lookup(args.BlockchainID) if err != nil { ipc.log.Error("unknown blockchainID %s: %s", args.BlockchainID, err) diff --git a/api/keystore/service.go b/api/keystore/service.go index 7006073..bf2e1fc 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -135,7 +135,7 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Debug("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username) + ks.log.Info("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username) if len(args.Username) > maxUserPassLen || len(args.Password) > maxUserPassLen { return errUserPassMaxLength @@ -183,7 +183,7 @@ func (ks *Keystore) ListUsers(_ *http.Request, args *ListUsersArgs, reply *ListU ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Debug("Keystore: ListUsers called") + ks.log.Info("Keystore: ListUsers called") reply.Users = []string{} @@ -211,7 +211,7 @@ func (ks *Keystore) ExportUser(_ *http.Request, args *ExportUserArgs, reply *Exp ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Debug("Keystore: ExportUser called for %s", args.Username) + ks.log.Info("Keystore: ExportUser called for %s", args.Username) usr, err := ks.getUser(args.Username) if err != nil { @@ -264,7 +264,7 @@ func (ks *Keystore) ImportUser(r *http.Request, args *ImportUserArgs, reply *Imp ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Debug("Keystore: ImportUser called for %s", args.Username) + ks.log.Info("Keystore: ImportUser called for %s", args.Username) if args.Username == "" { return errEmptyUsername @@ -324,7 +324,7 @@ func (ks *Keystore) DeleteUser(_ *http.Request, args *DeleteUserArgs, reply *Del ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Debug("Keystore: DeleteUser called with %s", args.Username) + ks.log.Info("Keystore: DeleteUser called with %s", args.Username) if args.Username == "" { return errEmptyUsername diff --git a/vms/avm/service.go b/vms/avm/service.go index 4033be4..02f0216 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -56,7 +56,7 @@ type IssueTxReply struct { // IssueTx attempts to issue a transaction into consensus func (service *Service) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { - service.vm.ctx.Log.Debug("AVM: IssueTx called with %s", args.Tx) + service.vm.ctx.log.Info("AVM: IssueTx called with %s", args.Tx) txID, err := service.vm.IssueTx(args.Tx.Bytes, nil) if err != nil { @@ -79,7 +79,7 @@ type GetTxStatusReply struct { // GetTxStatus returns the status of the specified transaction func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { - service.vm.ctx.Log.Debug("AVM: GetTxStatus called with %s", args.TxID) + service.vm.ctx.log.Info("AVM: GetTxStatus called with %s", args.TxID) if args.TxID.IsZero() { return errNilTxID @@ -106,7 +106,7 @@ type GetTxReply struct { // GetTx returns the specified transaction func (service *Service) GetTx(r *http.Request, args *GetTxArgs, reply *GetTxReply) error { - service.vm.ctx.Log.Debug("AVM: GetTx called with %s", args.TxID) + service.vm.ctx.log.Info("AVM: GetTx called with %s", args.TxID) if args.TxID.IsZero() { return errNilTxID @@ -136,7 +136,7 @@ type GetUTXOsReply struct { // GetUTXOs creates an empty account with the name passed in func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *GetUTXOsReply) error { - service.vm.ctx.Log.Debug("AVM: GetUTXOs called with %s", args.Addresses) + service.vm.ctx.log.Info("AVM: GetUTXOs 
called with %s", args.Addresses) addrSet := ids.Set{} for _, addr := range args.Addresses { @@ -178,7 +178,7 @@ type GetAssetDescriptionReply struct { // GetAssetDescription creates an empty account with the name passed in func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescriptionArgs, reply *GetAssetDescriptionReply) error { - service.vm.ctx.Log.Debug("AVM: GetAssetDescription called with %s", args.AssetID) + service.vm.ctx.log.Info("AVM: GetAssetDescription called with %s", args.AssetID) assetID, err := service.vm.Lookup(args.AssetID) if err != nil { @@ -222,7 +222,7 @@ type GetBalanceReply struct { // GetBalance returns the amount of an asset that an address at least partially owns func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply *GetBalanceReply) error { - service.vm.ctx.Log.Debug("AVM: GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) + service.vm.ctx.log.Info("AVM: GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) address, err := service.vm.Parse(args.Address) if err != nil { @@ -287,7 +287,7 @@ type GetAllBalancesReply struct { // Note that balances include assets that the address only _partially_ owns // (ie is one of several addresses specified in a multi-sig) func (service *Service) GetAllBalances(r *http.Request, args *GetAllBalancesArgs, reply *GetAllBalancesReply) error { - service.vm.ctx.Log.Debug("AVM: GetAllBalances called with address: %s", args.Address) + service.vm.ctx.log.Info("AVM: GetAllBalances called with address: %s", args.Address) address, err := service.vm.Parse(args.Address) if err != nil { @@ -360,7 +360,7 @@ type CreateFixedCapAssetReply struct { // CreateFixedCapAsset returns ID of the newly created asset func (service *Service) CreateFixedCapAsset(r *http.Request, args *CreateFixedCapAssetArgs, reply *CreateFixedCapAssetReply) error { - service.vm.ctx.Log.Debug("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", + service.vm.ctx.log.Info("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", args.Name, args.Symbol, len(args.InitialHolders), @@ -445,7 +445,7 @@ type CreateVariableCapAssetReply struct { // CreateVariableCapAsset returns ID of the newly created asset func (service *Service) CreateVariableCapAsset(r *http.Request, args *CreateVariableCapAssetArgs, reply *CreateVariableCapAssetReply) error { - service.vm.ctx.Log.Debug("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", + service.vm.ctx.log.Info("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", args.Name, args.Symbol, len(args.MinterSets), @@ -523,7 +523,7 @@ type CreateAddressReply struct { // CreateAddress creates an address for the user [args.Username] func (service *Service) CreateAddress(r *http.Request, args *CreateAddressArgs, reply *CreateAddressReply) error { - service.vm.ctx.Log.Debug("AVM: CreateAddress called for user '%s'", args.Username) + service.vm.ctx.log.Info("AVM: CreateAddress called for user '%s'", args.Username) db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -603,7 +603,7 @@ type ExportKeyReply struct { // ExportKey returns a private key from the provided user func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { - service.vm.ctx.Log.Debug("AVM: ExportKey called for user '%s'", args.Username) + service.vm.ctx.log.Info("AVM: ExportKey called for user 
'%s'", args.Username) address, err := service.vm.Parse(args.Address) if err != nil { @@ -645,7 +645,7 @@ type ImportKeyReply struct { // ImportKey adds a private key to the provided user func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *ImportKeyReply) error { - service.vm.ctx.Log.Debug("AVM: ImportKey called for user '%s'", args.Username) + service.vm.ctx.log.Info("AVM: ImportKey called for user '%s'", args.Username) db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -692,7 +692,7 @@ type SendReply struct { // Send returns the ID of the newly created transaction func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) error { - service.vm.ctx.Log.Debug("AVM: Send called with username: %s", args.Username) + service.vm.ctx.log.Info("AVM: Send called with username: %s", args.Username) if args.Amount == 0 { return errInvalidAmount @@ -873,7 +873,7 @@ type CreateMintTxReply struct { // CreateMintTx returns the newly created unsigned transaction func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, reply *CreateMintTxReply) error { - service.vm.ctx.Log.Debug("AVM: CreateMintTx called") + service.vm.ctx.log.Info("AVM: CreateMintTx called") if args.Amount == 0 { return errInvalidMintAmount @@ -990,7 +990,7 @@ type SignMintTxReply struct { // SignMintTx returns the newly signed transaction func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply *SignMintTxReply) error { - service.vm.ctx.Log.Debug("AVM: SignMintTx called") + service.vm.ctx.log.Info("AVM: SignMintTx called") minter, err := service.vm.Parse(args.Minter) if err != nil { @@ -1116,7 +1116,7 @@ type ImportAVAReply struct { // The AVA must have already been exported from the P-Chain. // Returns the ID of the newly created atomic transaction func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, reply *ImportAVAReply) error { - service.vm.ctx.Log.Debug("AVM: ImportAVA called with username: %s", args.Username) + service.vm.ctx.log.Info("AVM: ImportAVA called with username: %s", args.Username) toBytes, err := service.vm.Parse(args.To) if err != nil { @@ -1268,7 +1268,7 @@ type ExportAVAReply struct { // After this tx is accepted, the AVA must be imported to the P-chain with an importTx. 
// Returns the ID of the newly created atomic transaction func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, reply *ExportAVAReply) error { - service.vm.ctx.Log.Debug("AVM: ExportAVA called with username: %s", args.Username) + service.vm.ctx.log.Info("AVM: ExportAVA called with username: %s", args.Username) if args.Amount == 0 { return errInvalidAmount diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 52010d1..809ef8a 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -234,7 +234,7 @@ type GetCurrentValidatorsReply struct { // GetCurrentValidators returns the list of current validators func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { - service.vm.Ctx.Log.Debug("Platform: GetCurrentValidators called") + service.vm.Ctx.log.Info("Platform: GetCurrentValidators called") if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -298,7 +298,7 @@ type GetPendingValidatorsReply struct { // GetPendingValidators returns the list of current validators func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { - service.vm.Ctx.Log.Debug("Platform: GetPendingValidators called") + service.vm.Ctx.log.Info("Platform: GetPendingValidators called") if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -360,7 +360,7 @@ type SampleValidatorsReply struct { // SampleValidators returns a sampling of the list of current validators func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { - service.vm.Ctx.Log.Debug("Platform: SampleValidators called with {Size = %d}", args.Size) + service.vm.Ctx.log.Info("Platform: SampleValidators called with {Size = %d}", args.Size) if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -437,7 +437,7 @@ type ListAccountsReply struct { // ListAccounts lists all of the accounts controlled by [args.Username] func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, reply *ListAccountsReply) error { - service.vm.Ctx.Log.Debug("Platform: ListAccounts called for user '%s'", args.Username) + service.vm.Ctx.log.Info("Platform: ListAccounts called for user '%s'", args.Username) // db holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -499,7 +499,7 @@ type CreateAccountReply struct { // The account's ID is [privKey].PublicKey().Address(), where [privKey] is a // private key controlled by the user. 
func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs, reply *CreateAccountReply) error { - service.vm.Ctx.Log.Debug("Platform: CreateAccount called for user '%s'", args.Username) + service.vm.Ctx.log.Info("Platform: CreateAccount called for user '%s'", args.Username) // userDB holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -569,7 +569,7 @@ type AddDefaultSubnetValidatorArgs struct { // AddDefaultSubnetValidator returns an unsigned transaction to add a validator to the default subnet // The returned unsigned transaction should be signed using Sign() func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("Platform: AddDefaultSubnetValidator called") + service.vm.Ctx.log.Info("Platform: AddDefaultSubnetValidator called") switch { case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID @@ -626,7 +626,7 @@ type AddDefaultSubnetDelegatorArgs struct { // to the default subnet // The returned unsigned transaction should be signed using Sign() func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("Platform: AddDefaultSubnetDelegator called") + service.vm.Ctx.log.Info("Platform: AddDefaultSubnetDelegator called") switch { case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID @@ -741,7 +741,7 @@ type CreateSubnetArgs struct { // CreateSubnet returns an unsigned transaction to create a new subnet. // The unsigned transaction must be signed with the key of [args.Payer] func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("Platform: CreateSubnet called") + service.vm.Ctx.log.Info("Platform: CreateSubnet called") switch { case args.PayerNonce == 0: @@ -796,7 +796,7 @@ type ExportAVAArgs struct { // The unsigned transaction must be signed with the key of the account exporting the AVA // and paying the transaction fee func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, response *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("Platform: ExportAVA called") + service.vm.Ctx.log.Info("Platform: ExportAVA called") switch { case args.PayerNonce == 0: @@ -858,7 +858,7 @@ type SignResponse struct { // Sign [args.bytes] func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { - service.vm.Ctx.Log.Debug("Platform: Sign called") + service.vm.Ctx.log.Info("Platform: Sign called") if args.Signer == "" { return errNilSigner @@ -915,7 +915,7 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("Platform: signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.log.Info("Platform: signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? 
unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) @@ -938,7 +938,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { - service.vm.Ctx.Log.Debug("Platform: signAddDefaultSubnetDelegatorTx called") + service.vm.Ctx.log.Info("Platform: signAddDefaultSubnetDelegatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) @@ -961,7 +961,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele // Sign [xt] with [key] func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { - service.vm.Ctx.Log.Debug("Platform: signCreateSubnetTx called") + service.vm.Ctx.log.Info("Platform: signCreateSubnetTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) @@ -984,7 +984,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva // Sign [tx] with [key] func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { - service.vm.Ctx.Log.Debug("Platform: signExportTx called") + service.vm.Ctx.log.Info("Platform: signExportTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedExportTx) @@ -1012,7 +1012,7 @@ func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256 // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Debug("Platform: signAddNonDefaultSubnetValidatorTx called") + service.vm.Ctx.log.Info("Platform: signAddNonDefaultSubnetValidatorTx called") // Compute the byte repr. of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) @@ -1075,7 +1075,7 @@ type ImportAVAArgs struct { // The AVA must have already been exported from the X-Chain. // The unsigned transaction must be signed with the key of the tx fee payer. func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response *SignResponse) error { - service.vm.Ctx.Log.Debug("Platform: ImportAVA called") + service.vm.Ctx.log.Info("Platform: ImportAVA called") switch { case args.To == "": @@ -1203,7 +1203,7 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { - service.vm.Ctx.Log.Debug("Platform: signCreateChainTx called") + service.vm.Ctx.log.Info("Platform: signCreateChainTx called") // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) @@ -1263,7 +1263,7 @@ type IssueTxResponse struct { // IssueTx issues the transaction [args.Tx] to the network func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *IssueTxResponse) error { - service.vm.Ctx.Log.Debug("Platform: IssueTx called") + service.vm.Ctx.log.Info("Platform: IssueTx called") genTx := genericTx{} if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { @@ -1327,7 +1327,7 @@ type CreateBlockchainArgs struct { // CreateBlockchain returns an unsigned transaction to create a new blockchain // Must be signed with the Subnet's control keys and with a key that pays the transaction fee before issuance func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error { - service.vm.Ctx.Log.Debug("Platform: CreateBlockchain called") + service.vm.Ctx.log.Info("Platform: CreateBlockchain called") switch { case args.PayerNonce == 0: @@ -1410,7 +1410,7 @@ type GetBlockchainStatusReply struct { // GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - service.vm.Ctx.Log.Debug("Platform: GetBlockchainStatus called") + service.vm.Ctx.log.Info("Platform: GetBlockchainStatus called") switch { case args.BlockchainID == "": @@ -1490,7 +1490,7 @@ type ValidatedByResponse struct { // ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { - service.vm.Ctx.Log.Debug("Platform: ValidatedBy called") + service.vm.Ctx.log.Info("Platform: ValidatedBy called") switch { case args.BlockchainID == "": @@ -1522,7 +1522,7 @@ type ValidatesResponse struct { // Validates returns the IDs of the blockchains validated by [args.SubnetID] func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { - service.vm.Ctx.Log.Debug("Platform: Validates called") + service.vm.Ctx.log.Info("Platform: Validates called") switch { case args.SubnetID == "": @@ -1576,7 +1576,7 @@ type GetBlockchainsResponse struct { // GetBlockchains returns all of the blockchains that exist func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response *GetBlockchainsResponse) error { - service.vm.Ctx.Log.Debug("Platform: GetBlockchains called") + service.vm.Ctx.log.Info("Platform: GetBlockchains called") chains, err := service.vm.getChains(service.vm.DB) if err != nil { From cdac10c23b64e6c331f7d8d2bd98ae2ae2578481 Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Mon, 15 Jun 2020 09:45:21 -0700 Subject: [PATCH 052/183] Fix typos. 
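The typos fixed here are the service.vm.ctx.log / service.vm.Ctx.log spellings introduced by the previous patch: the surrounding diffs show the field is the exported Log, and Go only exposes identifiers that begin with an uppercase letter outside their defining package, so the lowercase spelling cannot work from these service packages (assuming, as the diffs suggest, that the context type lives in a separate package). A minimal sketch of the corrected pattern, using a hypothetical Context stand-in rather than the project's own type:

    package main

    import "log"

    // Context is a hypothetical stand-in whose exported logger field is named Log,
    // matching the capitalization restored by this patch.
    type Context struct{ Log *log.Logger }

    func main() {
        ctx := Context{Log: log.Default()}
        // ctx.log.Printf(...) would reference a nonexistent (unexported) field;
        // the exported spelling below is the one this patch restores.
        ctx.Log.Printf("AVM: IssueTx called with %s", "<tx>")
    }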
--- vms/avm/service.go | 34 ++++++++++++++--------------- vms/platformvm/service.go | 46 +++++++++++++++++++-------------------- 2 files changed, 40 insertions(+), 40 deletions(-) diff --git a/vms/avm/service.go b/vms/avm/service.go index 02f0216..a9fab92 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -56,7 +56,7 @@ type IssueTxReply struct { // IssueTx attempts to issue a transaction into consensus func (service *Service) IssueTx(r *http.Request, args *IssueTxArgs, reply *IssueTxReply) error { - service.vm.ctx.log.Info("AVM: IssueTx called with %s", args.Tx) + service.vm.ctx.Log.Info("AVM: IssueTx called with %s", args.Tx) txID, err := service.vm.IssueTx(args.Tx.Bytes, nil) if err != nil { @@ -79,7 +79,7 @@ type GetTxStatusReply struct { // GetTxStatus returns the status of the specified transaction func (service *Service) GetTxStatus(r *http.Request, args *GetTxStatusArgs, reply *GetTxStatusReply) error { - service.vm.ctx.log.Info("AVM: GetTxStatus called with %s", args.TxID) + service.vm.ctx.Log.Info("AVM: GetTxStatus called with %s", args.TxID) if args.TxID.IsZero() { return errNilTxID @@ -106,7 +106,7 @@ type GetTxReply struct { // GetTx returns the specified transaction func (service *Service) GetTx(r *http.Request, args *GetTxArgs, reply *GetTxReply) error { - service.vm.ctx.log.Info("AVM: GetTx called with %s", args.TxID) + service.vm.ctx.Log.Info("AVM: GetTx called with %s", args.TxID) if args.TxID.IsZero() { return errNilTxID @@ -136,7 +136,7 @@ type GetUTXOsReply struct { // GetUTXOs creates an empty account with the name passed in func (service *Service) GetUTXOs(r *http.Request, args *GetUTXOsArgs, reply *GetUTXOsReply) error { - service.vm.ctx.log.Info("AVM: GetUTXOs called with %s", args.Addresses) + service.vm.ctx.Log.Info("AVM: GetUTXOs called with %s", args.Addresses) addrSet := ids.Set{} for _, addr := range args.Addresses { @@ -178,7 +178,7 @@ type GetAssetDescriptionReply struct { // GetAssetDescription creates an empty account with the name passed in func (service *Service) GetAssetDescription(_ *http.Request, args *GetAssetDescriptionArgs, reply *GetAssetDescriptionReply) error { - service.vm.ctx.log.Info("AVM: GetAssetDescription called with %s", args.AssetID) + service.vm.ctx.Log.Info("AVM: GetAssetDescription called with %s", args.AssetID) assetID, err := service.vm.Lookup(args.AssetID) if err != nil { @@ -222,7 +222,7 @@ type GetBalanceReply struct { // GetBalance returns the amount of an asset that an address at least partially owns func (service *Service) GetBalance(r *http.Request, args *GetBalanceArgs, reply *GetBalanceReply) error { - service.vm.ctx.log.Info("AVM: GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) + service.vm.ctx.Log.Info("AVM: GetBalance called with address: %s assetID: %s", args.Address, args.AssetID) address, err := service.vm.Parse(args.Address) if err != nil { @@ -287,7 +287,7 @@ type GetAllBalancesReply struct { // Note that balances include assets that the address only _partially_ owns // (ie is one of several addresses specified in a multi-sig) func (service *Service) GetAllBalances(r *http.Request, args *GetAllBalancesArgs, reply *GetAllBalancesReply) error { - service.vm.ctx.log.Info("AVM: GetAllBalances called with address: %s", args.Address) + service.vm.ctx.Log.Info("AVM: GetAllBalances called with address: %s", args.Address) address, err := service.vm.Parse(args.Address) if err != nil { @@ -360,7 +360,7 @@ type CreateFixedCapAssetReply struct { // CreateFixedCapAsset returns ID of the 
newly created asset func (service *Service) CreateFixedCapAsset(r *http.Request, args *CreateFixedCapAssetArgs, reply *CreateFixedCapAssetReply) error { - service.vm.ctx.log.Info("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", + service.vm.ctx.Log.Info("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of holders: %d", args.Name, args.Symbol, len(args.InitialHolders), @@ -445,7 +445,7 @@ type CreateVariableCapAssetReply struct { // CreateVariableCapAsset returns ID of the newly created asset func (service *Service) CreateVariableCapAsset(r *http.Request, args *CreateVariableCapAssetArgs, reply *CreateVariableCapAssetReply) error { - service.vm.ctx.log.Info("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", + service.vm.ctx.Log.Info("AVM: CreateFixedCapAsset called with name: %s symbol: %s number of minters: %d", args.Name, args.Symbol, len(args.MinterSets), @@ -523,7 +523,7 @@ type CreateAddressReply struct { // CreateAddress creates an address for the user [args.Username] func (service *Service) CreateAddress(r *http.Request, args *CreateAddressArgs, reply *CreateAddressReply) error { - service.vm.ctx.log.Info("AVM: CreateAddress called for user '%s'", args.Username) + service.vm.ctx.Log.Info("AVM: CreateAddress called for user '%s'", args.Username) db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -603,7 +603,7 @@ type ExportKeyReply struct { // ExportKey returns a private key from the provided user func (service *Service) ExportKey(r *http.Request, args *ExportKeyArgs, reply *ExportKeyReply) error { - service.vm.ctx.log.Info("AVM: ExportKey called for user '%s'", args.Username) + service.vm.ctx.Log.Info("AVM: ExportKey called for user '%s'", args.Username) address, err := service.vm.Parse(args.Address) if err != nil { @@ -645,7 +645,7 @@ type ImportKeyReply struct { // ImportKey adds a private key to the provided user func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *ImportKeyReply) error { - service.vm.ctx.log.Info("AVM: ImportKey called for user '%s'", args.Username) + service.vm.ctx.Log.Info("AVM: ImportKey called for user '%s'", args.Username) db, err := service.vm.ctx.Keystore.GetDatabase(args.Username, args.Password) if err != nil { @@ -692,7 +692,7 @@ type SendReply struct { // Send returns the ID of the newly created transaction func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) error { - service.vm.ctx.log.Info("AVM: Send called with username: %s", args.Username) + service.vm.ctx.Log.Info("AVM: Send called with username: %s", args.Username) if args.Amount == 0 { return errInvalidAmount @@ -873,7 +873,7 @@ type CreateMintTxReply struct { // CreateMintTx returns the newly created unsigned transaction func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, reply *CreateMintTxReply) error { - service.vm.ctx.log.Info("AVM: CreateMintTx called") + service.vm.ctx.Log.Info("AVM: CreateMintTx called") if args.Amount == 0 { return errInvalidMintAmount @@ -990,7 +990,7 @@ type SignMintTxReply struct { // SignMintTx returns the newly signed transaction func (service *Service) SignMintTx(r *http.Request, args *SignMintTxArgs, reply *SignMintTxReply) error { - service.vm.ctx.log.Info("AVM: SignMintTx called") + service.vm.ctx.Log.Info("AVM: SignMintTx called") minter, err := service.vm.Parse(args.Minter) if err != nil { @@ -1116,7 +1116,7 @@ type ImportAVAReply struct { // The AVA must have 
already been exported from the P-Chain. // Returns the ID of the newly created atomic transaction func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, reply *ImportAVAReply) error { - service.vm.ctx.log.Info("AVM: ImportAVA called with username: %s", args.Username) + service.vm.ctx.Log.Info("AVM: ImportAVA called with username: %s", args.Username) toBytes, err := service.vm.Parse(args.To) if err != nil { @@ -1268,7 +1268,7 @@ type ExportAVAReply struct { // After this tx is accepted, the AVA must be imported to the P-chain with an importTx. // Returns the ID of the newly created atomic transaction func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, reply *ExportAVAReply) error { - service.vm.ctx.log.Info("AVM: ExportAVA called with username: %s", args.Username) + service.vm.ctx.Log.Info("AVM: ExportAVA called with username: %s", args.Username) if args.Amount == 0 { return errInvalidAmount diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 809ef8a..69cfb67 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -234,7 +234,7 @@ type GetCurrentValidatorsReply struct { // GetCurrentValidators returns the list of current validators func (service *Service) GetCurrentValidators(_ *http.Request, args *GetCurrentValidatorsArgs, reply *GetCurrentValidatorsReply) error { - service.vm.Ctx.log.Info("Platform: GetCurrentValidators called") + service.vm.Ctx.Log.Info("Platform: GetCurrentValidators called") if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -298,7 +298,7 @@ type GetPendingValidatorsReply struct { // GetPendingValidators returns the list of current validators func (service *Service) GetPendingValidators(_ *http.Request, args *GetPendingValidatorsArgs, reply *GetPendingValidatorsReply) error { - service.vm.Ctx.log.Info("Platform: GetPendingValidators called") + service.vm.Ctx.Log.Info("Platform: GetPendingValidators called") if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -360,7 +360,7 @@ type SampleValidatorsReply struct { // SampleValidators returns a sampling of the list of current validators func (service *Service) SampleValidators(_ *http.Request, args *SampleValidatorsArgs, reply *SampleValidatorsReply) error { - service.vm.Ctx.log.Info("Platform: SampleValidators called with {Size = %d}", args.Size) + service.vm.Ctx.Log.Info("Platform: SampleValidators called with {Size = %d}", args.Size) if args.SubnetID.IsZero() { args.SubnetID = DefaultSubnetID @@ -437,7 +437,7 @@ type ListAccountsReply struct { // ListAccounts lists all of the accounts controlled by [args.Username] func (service *Service) ListAccounts(_ *http.Request, args *ListAccountsArgs, reply *ListAccountsReply) error { - service.vm.Ctx.log.Info("Platform: ListAccounts called for user '%s'", args.Username) + service.vm.Ctx.Log.Info("Platform: ListAccounts called for user '%s'", args.Username) // db holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -499,7 +499,7 @@ type CreateAccountReply struct { // The account's ID is [privKey].PublicKey().Address(), where [privKey] is a // private key controlled by the user. 
func (service *Service) CreateAccount(_ *http.Request, args *CreateAccountArgs, reply *CreateAccountReply) error { - service.vm.Ctx.log.Info("Platform: CreateAccount called for user '%s'", args.Username) + service.vm.Ctx.Log.Info("Platform: CreateAccount called for user '%s'", args.Username) // userDB holds the user's info that pertains to the Platform Chain userDB, err := service.vm.Ctx.Keystore.GetDatabase(args.Username, args.Password) @@ -569,7 +569,7 @@ type AddDefaultSubnetValidatorArgs struct { // AddDefaultSubnetValidator returns an unsigned transaction to add a validator to the default subnet // The returned unsigned transaction should be signed using Sign() func (service *Service) AddDefaultSubnetValidator(_ *http.Request, args *AddDefaultSubnetValidatorArgs, reply *CreateTxResponse) error { - service.vm.Ctx.log.Info("Platform: AddDefaultSubnetValidator called") + service.vm.Ctx.Log.Info("Platform: AddDefaultSubnetValidator called") switch { case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID @@ -626,7 +626,7 @@ type AddDefaultSubnetDelegatorArgs struct { // to the default subnet // The returned unsigned transaction should be signed using Sign() func (service *Service) AddDefaultSubnetDelegator(_ *http.Request, args *AddDefaultSubnetDelegatorArgs, reply *CreateTxResponse) error { - service.vm.Ctx.log.Info("Platform: AddDefaultSubnetDelegator called") + service.vm.Ctx.Log.Info("Platform: AddDefaultSubnetDelegator called") switch { case args.ID.IsZero(): // If ID unspecified, use this node's ID as validator ID @@ -741,7 +741,7 @@ type CreateSubnetArgs struct { // CreateSubnet returns an unsigned transaction to create a new subnet. // The unsigned transaction must be signed with the key of [args.Payer] func (service *Service) CreateSubnet(_ *http.Request, args *CreateSubnetArgs, response *CreateTxResponse) error { - service.vm.Ctx.log.Info("Platform: CreateSubnet called") + service.vm.Ctx.Log.Info("Platform: CreateSubnet called") switch { case args.PayerNonce == 0: @@ -796,7 +796,7 @@ type ExportAVAArgs struct { // The unsigned transaction must be signed with the key of the account exporting the AVA // and paying the transaction fee func (service *Service) ExportAVA(_ *http.Request, args *ExportAVAArgs, response *CreateTxResponse) error { - service.vm.Ctx.log.Info("Platform: ExportAVA called") + service.vm.Ctx.Log.Info("Platform: ExportAVA called") switch { case args.PayerNonce == 0: @@ -858,7 +858,7 @@ type SignResponse struct { // Sign [args.bytes] func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignResponse) error { - service.vm.Ctx.log.Info("Platform: Sign called") + service.vm.Ctx.Log.Info("Platform: Sign called") if args.Signer == "" { return errNilSigner @@ -915,7 +915,7 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { - service.vm.Ctx.log.Info("Platform: signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Info("Platform: signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? 
unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) @@ -938,7 +938,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { - service.vm.Ctx.log.Info("Platform: signAddDefaultSubnetDelegatorTx called") + service.vm.Ctx.Log.Info("Platform: signAddDefaultSubnetDelegatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) @@ -961,7 +961,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele // Sign [xt] with [key] func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { - service.vm.Ctx.log.Info("Platform: signCreateSubnetTx called") + service.vm.Ctx.Log.Info("Platform: signCreateSubnetTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) @@ -984,7 +984,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva // Sign [tx] with [key] func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { - service.vm.Ctx.log.Info("Platform: signExportTx called") + service.vm.Ctx.Log.Info("Platform: signExportTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedExportTx) @@ -1012,7 +1012,7 @@ func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256 // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) { - service.vm.Ctx.log.Info("Platform: signAddNonDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Info("Platform: signAddNonDefaultSubnetValidatorTx called") // Compute the byte repr. of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) @@ -1075,7 +1075,7 @@ type ImportAVAArgs struct { // The AVA must have already been exported from the X-Chain. // The unsigned transaction must be signed with the key of the tx fee payer. func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response *SignResponse) error { - service.vm.Ctx.log.Info("Platform: ImportAVA called") + service.vm.Ctx.Log.Info("Platform: ImportAVA called") switch { case args.To == "": @@ -1203,7 +1203,7 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { - service.vm.Ctx.log.Info("Platform: signCreateChainTx called") + service.vm.Ctx.Log.Info("Platform: signCreateChainTx called") // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) @@ -1263,7 +1263,7 @@ type IssueTxResponse struct { // IssueTx issues the transaction [args.Tx] to the network func (service *Service) IssueTx(_ *http.Request, args *IssueTxArgs, response *IssueTxResponse) error { - service.vm.Ctx.log.Info("Platform: IssueTx called") + service.vm.Ctx.Log.Info("Platform: IssueTx called") genTx := genericTx{} if err := Codec.Unmarshal(args.Tx.Bytes, &genTx); err != nil { @@ -1327,7 +1327,7 @@ type CreateBlockchainArgs struct { // CreateBlockchain returns an unsigned transaction to create a new blockchain // Must be signed with the Subnet's control keys and with a key that pays the transaction fee before issuance func (service *Service) CreateBlockchain(_ *http.Request, args *CreateBlockchainArgs, response *CreateTxResponse) error { - service.vm.Ctx.log.Info("Platform: CreateBlockchain called") + service.vm.Ctx.Log.Info("Platform: CreateBlockchain called") switch { case args.PayerNonce == 0: @@ -1410,7 +1410,7 @@ type GetBlockchainStatusReply struct { // GetBlockchainStatus gets the status of a blockchain with the ID [args.BlockchainID]. func (service *Service) GetBlockchainStatus(_ *http.Request, args *GetBlockchainStatusArgs, reply *GetBlockchainStatusReply) error { - service.vm.Ctx.log.Info("Platform: GetBlockchainStatus called") + service.vm.Ctx.Log.Info("Platform: GetBlockchainStatus called") switch { case args.BlockchainID == "": @@ -1490,7 +1490,7 @@ type ValidatedByResponse struct { // ValidatedBy returns the ID of the Subnet that validates [args.BlockchainID] func (service *Service) ValidatedBy(_ *http.Request, args *ValidatedByArgs, response *ValidatedByResponse) error { - service.vm.Ctx.log.Info("Platform: ValidatedBy called") + service.vm.Ctx.Log.Info("Platform: ValidatedBy called") switch { case args.BlockchainID == "": @@ -1522,7 +1522,7 @@ type ValidatesResponse struct { // Validates returns the IDs of the blockchains validated by [args.SubnetID] func (service *Service) Validates(_ *http.Request, args *ValidatesArgs, response *ValidatesResponse) error { - service.vm.Ctx.log.Info("Platform: Validates called") + service.vm.Ctx.Log.Info("Platform: Validates called") switch { case args.SubnetID == "": @@ -1576,7 +1576,7 @@ type GetBlockchainsResponse struct { // GetBlockchains returns all of the blockchains that exist func (service *Service) GetBlockchains(_ *http.Request, args *struct{}, response *GetBlockchainsResponse) error { - service.vm.Ctx.log.Info("Platform: GetBlockchains called") + service.vm.Ctx.Log.Info("Platform: GetBlockchains called") chains, err := service.vm.getChains(service.vm.DB) if err != nil { From acbb9a7e0cbde173b59a12ebf72b4da4464669bd Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 15 Jun 2020 13:12:55 -0400 Subject: [PATCH 053/183] remove expansionBoost from packer (Go's append does similar already). change initialSliceCap 1024 --> 256. Streamline packer.Expand, as this method is called very often --- utils/wrappers/packing.go | 32 ++++++++------------------------ vms/components/codec/codec.go | 2 +- 2 files changed, 9 insertions(+), 25 deletions(-) diff --git a/utils/wrappers/packing.go b/utils/wrappers/packing.go index 22c7464..c048f9c 100644 --- a/utils/wrappers/packing.go +++ b/utils/wrappers/packing.go @@ -16,11 +16,6 @@ const ( // MaxStringLen ... MaxStringLen = math.MaxUint16 - // When the byte array is expanded, this many extra bytes - // are added to capacity of the array. 
- // Higher value --> need to expand byte array less --> less memory allocations - expansionBoost = 256 - // ByteLen is the number of bytes per byte... ByteLen = 1 // ShortLen is the number of bytes per short @@ -71,30 +66,19 @@ func (p *Packer) CheckSpace(bytes int) { // In order to understand this code, its important to understand the difference // between a slice's length and its capacity. func (p *Packer) Expand(bytes int) { - p.CheckSpace(0) - if p.Errored() { + neededSize := bytes + p.Offset // Need byte slice's length to be at least [neededSize] + switch { + case neededSize <= len(p.Bytes): // Byte slice has sufficient length already return - } - - neededSize := bytes + p.Offset // Need byte slice's length to be at least [neededSize] - if neededSize <= len(p.Bytes) { // Byte slice has sufficient length already + case neededSize > p.MaxSize: // Lengthening the byte slice would cause it to grow too large + p.Err = errBadLength return - } else if neededSize > p.MaxSize { // Lengthening the byte slice would cause it to grow too large - p.Add(errBadLength) - return - } else if neededSize <= cap(p.Bytes) { // Byte slice has sufficient capacity to lengthen it without mem alloc + case neededSize <= cap(p.Bytes): // Byte slice has sufficient capacity to lengthen it without mem alloc p.Bytes = p.Bytes[:neededSize] return + default: // Add capacity/length to byte slice + p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes))...) } - - // See if we can expand the byte slice an extra [expansionBoost] bytes in order to - // prevent need for future expansions (and therefore memory allocations) - capToAdd := neededSize - cap(p.Bytes) + expansionBoost - if capToAdd > p.MaxSize { - capToAdd = neededSize - cap(p.Bytes) - } - // increase slice's length and capacity - p.Bytes = append(p.Bytes[:cap(p.Bytes)], make([]byte, neededSize-cap(p.Bytes), capToAdd)...) } // PackByte append a byte to the byte array diff --git a/vms/components/codec/codec.go b/vms/components/codec/codec.go index 53852a9..6521993 100644 --- a/vms/components/codec/codec.go +++ b/vms/components/codec/codec.go @@ -19,7 +19,7 @@ const ( // initial capacity of byte slice that values are marshaled into. 
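// Illustrative aside, not part of the patch: the rewritten Expand above relies on
// the difference between a slice's length and its capacity. Re-slicing within the
// existing capacity is free, while growing past the capacity allocates:
package main

import "fmt"

func main() {
	b := make([]byte, 2, 8)     // len 2, cap 8
	b = b[:6]                   // lengthen in place, same backing array (cf. the cap branch above)
	fmt.Println(len(b), cap(b)) // 6 8

	b = append(b[:cap(b)], make([]byte, 4)...) // exceed cap, new backing array (cf. the default branch)
	fmt.Println(len(b), cap(b))                // 12 and a capacity of at least 12
}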
// Larger value --> need less memory allocations but possibly have allocated but unused memory // Smaller value --> need more memory allocations but more efficient use of allocated memory - initialSliceCap = 1024 + initialSliceCap = 256 ) var ( From fa4be45d8aa8c8097c830eee3169af50202063dd Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Mon, 15 Jun 2020 10:15:43 -0700 Subject: [PATCH 054/183] Update go.sum --- go.sum | 1 + 1 file changed, 1 insertion(+) diff --git a/go.sum b/go.sum index 774be35..d79e9a8 100644 --- a/go.sum +++ b/go.sum @@ -2,6 +2,7 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT github.com/AppsFlyer/go-sundheit v0.2.0 h1:FArqX+HbqZ6U32RC3giEAWRUpkggqxHj91KIvxNgwjU= github.com/AppsFlyer/go-sundheit v0.2.0/go.mod h1:rCRkVTMQo7/krF7xQ9X0XEF1an68viFR6/Gy02q+4ds= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama v1.26.1/go.mod h1:NbSGBSSndYaIhRcBtY9V0U7AyH+x71bG668AuWys/yU= From 18c0b0a65b62880ba1268d0a33b095b44517e84d Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 15 Jun 2020 13:20:30 -0400 Subject: [PATCH 055/183] move codec to utils --- api/keystore/service.go | 2 +- chains/atomic/memory.go | 2 +- database/encdb/db.go | 2 +- genesis/genesis.go | 2 +- {vms/components => utils}/codec/codec.go | 0 {vms/components => utils}/codec/codec_benchmark_test.go | 0 {vms/components => utils}/codec/codec_test.go | 0 vms/avm/base_tx.go | 2 +- vms/avm/create_asset_tx.go | 2 +- vms/avm/create_asset_tx_test.go | 2 +- vms/avm/export_tx.go | 2 +- vms/avm/export_tx_test.go | 2 +- vms/avm/import_tx.go | 2 +- vms/avm/initial_state.go | 2 +- vms/avm/initial_state_test.go | 2 +- vms/avm/operation.go | 2 +- vms/avm/operation_test.go | 2 +- vms/avm/operation_tx.go | 2 +- vms/avm/static_service.go | 2 +- vms/avm/tx.go | 2 +- vms/avm/tx_test.go | 2 +- vms/avm/vm.go | 2 +- vms/components/ava/asset_test.go | 2 +- vms/components/ava/prefixed_state.go | 2 +- vms/components/ava/prefixed_state_test.go | 2 +- vms/components/ava/state.go | 2 +- vms/components/ava/transferables.go | 2 +- vms/components/ava/transferables_test.go | 2 +- vms/components/ava/utxo_id_test.go | 2 +- vms/components/ava/utxo_test.go | 2 +- vms/nftfx/fx_test.go | 2 +- vms/platformvm/vm.go | 2 +- vms/propertyfx/fx_test.go | 2 +- vms/secp256k1fx/credential_test.go | 2 +- vms/secp256k1fx/fx_test.go | 2 +- vms/secp256k1fx/transer_input_test.go | 2 +- vms/secp256k1fx/transfer_output_test.go | 2 +- vms/secp256k1fx/vm.go | 2 +- vms/timestampvm/vm.go | 2 +- xputtest/avmwallet/wallet.go | 2 +- 40 files changed, 37 insertions(+), 37 deletions(-) rename {vms/components => utils}/codec/codec.go (100%) rename {vms/components => utils}/codec/codec_benchmark_test.go (100%) rename {vms/components => utils}/codec/codec_test.go (100%) diff --git a/api/keystore/service.go b/api/keystore/service.go index 16aca06..25e9a02 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" jsoncodec "github.com/ava-labs/gecko/utils/json" 
zxcvbn "github.com/nbutton23/zxcvbn-go" diff --git a/chains/atomic/memory.go b/chains/atomic/memory.go index 448e6c9..9774711 100644 --- a/chains/atomic/memory.go +++ b/chains/atomic/memory.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) type rcLock struct { diff --git a/database/encdb/db.go b/database/encdb/db.go index eb06549..4814805 100644 --- a/database/encdb/db.go +++ b/database/encdb/db.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/gecko/database/nodb" "github.com/ava-labs/gecko/utils" "github.com/ava-labs/gecko/utils/hashing" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) // Database encrypts all values that are provided diff --git a/genesis/genesis.go b/genesis/genesis.go index 4cad047..c4245b9 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -14,7 +14,7 @@ import ( "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/avm" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/nftfx" "github.com/ava-labs/gecko/vms/platformvm" "github.com/ava-labs/gecko/vms/propertyfx" diff --git a/vms/components/codec/codec.go b/utils/codec/codec.go similarity index 100% rename from vms/components/codec/codec.go rename to utils/codec/codec.go diff --git a/vms/components/codec/codec_benchmark_test.go b/utils/codec/codec_benchmark_test.go similarity index 100% rename from vms/components/codec/codec_benchmark_test.go rename to utils/codec/codec_benchmark_test.go diff --git a/vms/components/codec/codec_test.go b/utils/codec/codec_test.go similarity index 100% rename from vms/components/codec/codec_test.go rename to utils/codec/codec_test.go diff --git a/vms/avm/base_tx.go b/vms/avm/base_tx.go index 33cba51..0ab3fa4 100644 --- a/vms/avm/base_tx.go +++ b/vms/avm/base_tx.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/create_asset_tx.go b/vms/avm/create_asset_tx.go index 9f95a15..77aae2f 100644 --- a/vms/avm/create_asset_tx.go +++ b/vms/avm/create_asset_tx.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) const ( diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go index a26a815..324f403 100644 --- a/vms/avm/create_asset_tx_test.go +++ b/vms/avm/create_asset_tx_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/avm/export_tx.go b/vms/avm/export_tx.go index d5222f4..d788360 100644 --- a/vms/avm/export_tx.go +++ b/vms/avm/export_tx.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/database/versiondb" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/vms/components/ava" - 
"github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/export_tx_test.go b/vms/avm/export_tx_test.go index 4e9d064..fdef399 100644 --- a/vms/avm/export_tx_test.go +++ b/vms/avm/export_tx_test.go @@ -16,7 +16,7 @@ import ( "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/avm/import_tx.go b/vms/avm/import_tx.go index 09dec6e..1729221 100644 --- a/vms/avm/import_tx.go +++ b/vms/avm/import_tx.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/initial_state.go b/vms/avm/initial_state.go index c3d4b16..73ad6e4 100644 --- a/vms/avm/initial_state.go +++ b/vms/avm/initial_state.go @@ -8,7 +8,7 @@ import ( "errors" "sort" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/initial_state_test.go b/vms/avm/initial_state_test.go index 67c4b15..b61876c 100644 --- a/vms/avm/initial_state_test.go +++ b/vms/avm/initial_state_test.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/avm/operation.go b/vms/avm/operation.go index 3b5fc9a..ef9317b 100644 --- a/vms/avm/operation.go +++ b/vms/avm/operation.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/gecko/utils" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/operation_test.go b/vms/avm/operation_test.go index 8948388..8b85901 100644 --- a/vms/avm/operation_test.go +++ b/vms/avm/operation_test.go @@ -8,7 +8,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/operation_tx.go b/vms/avm/operation_tx.go index 9384f8d..ec419c7 100644 --- a/vms/avm/operation_tx.go +++ b/vms/avm/operation_tx.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/static_service.go b/vms/avm/static_service.go index 3fd58f3..48b58a9 100644 --- a/vms/avm/static_service.go +++ b/vms/avm/static_service.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/wrappers" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" 
"github.com/ava-labs/gecko/vms/secp256k1fx" cjson "github.com/ava-labs/gecko/utils/json" diff --git a/vms/avm/tx.go b/vms/avm/tx.go index c35fd80..f1d0b71 100644 --- a/vms/avm/tx.go +++ b/vms/avm/tx.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/avm/tx_test.go b/vms/avm/tx_test.go index 2f269e9..53e20de 100644 --- a/vms/avm/tx_test.go +++ b/vms/avm/tx_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/avm/vm.go b/vms/avm/vm.go index b7f7252..4c0820d 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -25,7 +25,7 @@ import ( "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" cjson "github.com/ava-labs/gecko/utils/json" ) diff --git a/vms/components/ava/asset_test.go b/vms/components/ava/asset_test.go index 40d6ea8..79ae7d5 100644 --- a/vms/components/ava/asset_test.go +++ b/vms/components/ava/asset_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) func TestAssetVerifyNil(t *testing.T) { diff --git a/vms/components/ava/prefixed_state.go b/vms/components/ava/prefixed_state.go index 92b3491..9906381 100644 --- a/vms/components/ava/prefixed_state.go +++ b/vms/components/ava/prefixed_state.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/utils/hashing" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) // Addressable is the interface a feature extension must provide to be able to diff --git a/vms/components/ava/prefixed_state_test.go b/vms/components/ava/prefixed_state_test.go index 06cb1df..d3019d5 100644 --- a/vms/components/ava/prefixed_state_test.go +++ b/vms/components/ava/prefixed_state_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/database/memdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/hashing" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/stretchr/testify/assert" ) diff --git a/vms/components/ava/state.go b/vms/components/ava/state.go index fc3b929..df724a4 100644 --- a/vms/components/ava/state.go +++ b/vms/components/ava/state.go @@ -11,7 +11,7 @@ import ( "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) var ( diff --git a/vms/components/ava/transferables.go b/vms/components/ava/transferables.go index 4aa906d..85c2414 100644 --- a/vms/components/ava/transferables.go +++ b/vms/components/ava/transferables.go @@ -10,7 +10,7 @@ import ( "github.com/ava-labs/gecko/utils" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/vms/components/codec" + 
"github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" ) diff --git a/vms/components/ava/transferables_test.go b/vms/components/ava/transferables_test.go index 80205a6..08d7b69 100644 --- a/vms/components/ava/transferables_test.go +++ b/vms/components/ava/transferables_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/components/ava/utxo_id_test.go b/vms/components/ava/utxo_id_test.go index 7944961..d1be00f 100644 --- a/vms/components/ava/utxo_id_test.go +++ b/vms/components/ava/utxo_id_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) func TestUTXOIDVerifyNil(t *testing.T) { diff --git a/vms/components/ava/utxo_test.go b/vms/components/ava/utxo_test.go index 07b067a..151e219 100644 --- a/vms/components/ava/utxo_test.go +++ b/vms/components/ava/utxo_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/formatting" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/nftfx/fx_test.go b/vms/nftfx/fx_test.go index d965902..0cfbd87 100644 --- a/vms/nftfx/fx_test.go +++ b/vms/nftfx/fx_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 9f1ce53..1a945cb 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -27,7 +27,7 @@ import ( "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/core" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/propertyfx/fx_test.go b/vms/propertyfx/fx_test.go index cfdf5c9..887cf73 100644 --- a/vms/propertyfx/fx_test.go +++ b/vms/propertyfx/fx_test.go @@ -9,7 +9,7 @@ import ( "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) diff --git a/vms/secp256k1fx/credential_test.go b/vms/secp256k1fx/credential_test.go index 5157fab..e85ce1b 100644 --- a/vms/secp256k1fx/credential_test.go +++ b/vms/secp256k1fx/credential_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/ava-labs/gecko/utils/crypto" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) func TestCredentialVerify(t *testing.T) { diff --git a/vms/secp256k1fx/fx_test.go b/vms/secp256k1fx/fx_test.go index 79e6c89..566b4cb 100644 --- a/vms/secp256k1fx/fx_test.go +++ b/vms/secp256k1fx/fx_test.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" - "github.com/ava-labs/gecko/vms/components/codec" + 
"github.com/ava-labs/gecko/utils/codec" ) var ( diff --git a/vms/secp256k1fx/transer_input_test.go b/vms/secp256k1fx/transer_input_test.go index e954af0..00e894f 100644 --- a/vms/secp256k1fx/transer_input_test.go +++ b/vms/secp256k1fx/transer_input_test.go @@ -7,7 +7,7 @@ import ( "bytes" "testing" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) func TestTransferInputAmount(t *testing.T) { diff --git a/vms/secp256k1fx/transfer_output_test.go b/vms/secp256k1fx/transfer_output_test.go index 7e87875..09bb0ce 100644 --- a/vms/secp256k1fx/transfer_output_test.go +++ b/vms/secp256k1fx/transfer_output_test.go @@ -8,7 +8,7 @@ import ( "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) func TestOutputAmount(t *testing.T) { diff --git a/vms/secp256k1fx/vm.go b/vms/secp256k1fx/vm.go index bb59166..37aa23b 100644 --- a/vms/secp256k1fx/vm.go +++ b/vms/secp256k1fx/vm.go @@ -6,7 +6,7 @@ package secp256k1fx import ( "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" ) // VM that this Fx must be run by diff --git a/vms/timestampvm/vm.go b/vms/timestampvm/vm.go index c571d9a..5376e2f 100644 --- a/vms/timestampvm/vm.go +++ b/vms/timestampvm/vm.go @@ -12,7 +12,7 @@ import ( "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/consensus/snowman" "github.com/ava-labs/gecko/snow/engine/common" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/core" ) diff --git a/xputtest/avmwallet/wallet.go b/xputtest/avmwallet/wallet.go index ef01eb0..c5d2cd9 100644 --- a/xputtest/avmwallet/wallet.go +++ b/xputtest/avmwallet/wallet.go @@ -19,7 +19,7 @@ import ( "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/avm" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/vms/components/codec" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) From 8783844aca5cfde531120dc83a7d04bccde614f9 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Mon, 15 Jun 2020 14:20:16 -0400 Subject: [PATCH 056/183] Fix nitpick --- vms/avm/service.go | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/vms/avm/service.go b/vms/avm/service.go index 37a37a3..039e07f 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -668,9 +668,9 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I addresses, _ := user.Addresses(db) newAddress := sk.PublicKey().Address() + reply.Address = service.vm.Format(newAddress.Bytes()) for _, address := range addresses { if newAddress.Equals(address) { - reply.Address = service.vm.Format(newAddress.Bytes()) return nil } } @@ -680,7 +680,6 @@ func (service *Service) ImportKey(r *http.Request, args *ImportKeyArgs, reply *I return fmt.Errorf("problem saving addresses: %w", err) } - reply.Address = service.vm.Format(newAddress.Bytes()) return nil } From c3c9cec1ea7b2f1dede07168a32732712d30b0d7 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 15 Jun 2020 15:51:12 -0400 Subject: [PATCH 057/183] updated new messages to match new logging format --- network/network.go | 3 +-- .../router/{subnet_router.go => chain_router.go} | 8 ++++---- 2 files changed, 5 insertions(+), 6 deletions(-) rename snow/networking/router/{subnet_router.go => 
chain_router.go} (97%) diff --git a/network/network.go b/network/network.go index 3fc6893..a280731 100644 --- a/network/network.go +++ b/network/network.go @@ -12,8 +12,6 @@ import ( "sync/atomic" "time" - "github.com/ava-labs/gecko/utils/formatting" - "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/api/health" @@ -23,6 +21,7 @@ import ( "github.com/ava-labs/gecko/snow/triggers" "github.com/ava-labs/gecko/snow/validators" "github.com/ava-labs/gecko/utils" + "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/random" "github.com/ava-labs/gecko/utils/timer" diff --git a/snow/networking/router/subnet_router.go b/snow/networking/router/chain_router.go similarity index 97% rename from snow/networking/router/subnet_router.go rename to snow/networking/router/chain_router.go index 006d220..4505bec 100644 --- a/snow/networking/router/subnet_router.go +++ b/snow/networking/router/chain_router.go @@ -185,7 +185,7 @@ func (sr *ChainRouter) GetAcceptedFailed(validatorID ids.ShortID, chainID ids.ID return } } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } sr.timeouts.Cancel(validatorID, chainID, requestID) } @@ -200,7 +200,7 @@ func (sr *ChainRouter) GetAncestors(validatorID ids.ShortID, chainID ids.ID, req if chain, exists := sr.chains[chainID.Key()]; exists { chain.GetAncestors(validatorID, requestID, containerID) } else { - sr.log.Error("GetAcceptedFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) + sr.log.Debug("GetAncestors(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } } @@ -217,7 +217,7 @@ func (sr *ChainRouter) MultiPut(validatorID ids.ShortID, chainID ids.ID, request sr.timeouts.Cancel(validatorID, chainID, requestID) } } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Debug("MultiPut(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID, len(containers)) } } @@ -236,7 +236,7 @@ func (sr *ChainRouter) GetAncestorsFailed(validatorID ids.ShortID, chainID ids.I return } } else { - sr.log.Debug("message referenced a chain, %s, this node doesn't validate", chainID) + sr.log.Error("GetAncestorsFailed(%s, %s, %d, %d) dropped due to unknown chain", validatorID, chainID, requestID) } sr.timeouts.Cancel(validatorID, chainID, requestID) } From 91852fe932ecc617449dc500430e87e9cf4af1b0 Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Mon, 15 Jun 2020 17:08:25 -0400 Subject: [PATCH 058/183] nit --- api/keystore/service.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index 6481419..1efaafb 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -36,8 +36,8 @@ const ( // // As per issue https://github.com/ava-labs/gecko/issues/195 it was found // the longer the length of password the slower zxcvbn.PasswordStrength() - // performs. To avoid performance issues and a DoS vector, we only check the - // first 50 characters of the password. + // performs. To avoid performance issues, and a DoS vector, we only check + // the first 50 characters of the password. 
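// Illustrative sketch, not code from the patch: the comment reworded above
// describes bounding the zxcvbn check to the first maxCheckedPassLen characters.
// Inside the keystore package that roughly corresponds to a helper like this
// (the helper name is made up for illustration):
func passwordIsStrongEnough(password string) bool {
	checked := password
	if len(checked) > maxCheckedPassLen {
		checked = checked[:maxCheckedPassLen] // keep zxcvbn fast on very long inputs
	}
	return zxcvbn.PasswordStrength(checked, nil).Score >= requiredPassScore
}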
maxCheckedPassLen = 50 // requiredPassScore defines the score a password must achieve to be From ec953d6ec38c3658a596bee1807d11e58349ddfc Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Mon, 15 Jun 2020 15:08:03 -0700 Subject: [PATCH 059/183] Fix log level. --- api/keystore/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index 997b223..14ae57e 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -146,7 +146,7 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Verbo("CreateUser called with %.*s", maxUserPassLen, args.Username) + ks.log.Debug("CreateUser called with %.*s", maxUserPassLen, args.Username) if err := ks.AddUser(args.Username, args.Password); err != nil { return err } From b950f016d884564b89df62e4f9996a79fb6a5095 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Mon, 15 Jun 2020 21:53:29 -0400 Subject: [PATCH 060/183] Fix platform bootstrapped function to initialize fx --- vms/platformvm/vm.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 8d5a71c..2c71593 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -405,10 +405,14 @@ func (vm *VM) createChain(tx *CreateChainTx) { } // Bootstrapping marks this VM as bootstrapping -func (vm *VM) Bootstrapping() error { return nil } +func (vm *VM) Bootstrapping() error { + return vm.fx.Bootstrapping() +} // Bootstrapped marks this VM as bootstrapped -func (vm *VM) Bootstrapped() error { return nil } +func (vm *VM) Bootstrapped() error { + return vm.fx.Bootstrapped() +} // Shutdown this blockchain func (vm *VM) Shutdown() error { From aa5422696e5ee5620d0fd827385cec1ad3bfd8f8 Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Mon, 15 Jun 2020 21:14:54 -0700 Subject: [PATCH 061/183] Set helpers to Debug log level. --- api/keystore/service.go | 2 +- vms/platformvm/service.go | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/api/keystore/service.go b/api/keystore/service.go index 14ae57e..ec48d48 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -146,7 +146,7 @@ func (ks *Keystore) CreateUser(_ *http.Request, args *CreateUserArgs, reply *Cre ks.lock.Lock() defer ks.lock.Unlock() - ks.log.Debug("CreateUser called with %.*s", maxUserPassLen, args.Username) + ks.log.Info("Keystore: CreateUser called with %.*s", maxUserPassLen, args.Username) if err := ks.AddUser(args.Username, args.Password); err != nil { return err } diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index 2be9e12..9913608 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -915,7 +915,7 @@ func (service *Service) Sign(_ *http.Request, args *SignArgs, reply *SignRespons // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Info("Platform: signAddDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetValidatorTx called") // TODO: Should we check if tx is already signed? 
unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetValidatorTx) @@ -938,7 +938,7 @@ func (service *Service) signAddDefaultSubnetValidatorTx(tx *addDefaultSubnetVali // Sign [unsigned] with [key] func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDelegatorTx, key *crypto.PrivateKeySECP256K1R) (*addDefaultSubnetDelegatorTx, error) { - service.vm.Ctx.Log.Info("Platform: signAddDefaultSubnetDelegatorTx called") + service.vm.Ctx.Log.Debug("signAddDefaultSubnetDelegatorTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedAddDefaultSubnetDelegatorTx) @@ -961,7 +961,7 @@ func (service *Service) signAddDefaultSubnetDelegatorTx(tx *addDefaultSubnetDele // Sign [xt] with [key] func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.PrivateKeySECP256K1R) (*CreateSubnetTx, error) { - service.vm.Ctx.Log.Info("Platform: signCreateSubnetTx called") + service.vm.Ctx.Log.Debug("signCreateSubnetTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedCreateSubnetTx) @@ -984,7 +984,7 @@ func (service *Service) signCreateSubnetTx(tx *CreateSubnetTx, key *crypto.Priva // Sign [tx] with [key] func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256K1R) (*ExportTx, error) { - service.vm.Ctx.Log.Info("Platform: signExportTx called") + service.vm.Ctx.Log.Debug("signExportTx called") // TODO: Should we check if tx is already signed? unsignedIntf := interface{}(&tx.UnsignedExportTx) @@ -1012,7 +1012,7 @@ func (service *Service) signExportTx(tx *ExportTx, key *crypto.PrivateKeySECP256 // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signAddNonDefaultSubnetValidatorTx(tx *addNonDefaultSubnetValidatorTx, key *crypto.PrivateKeySECP256K1R) (*addNonDefaultSubnetValidatorTx, error) { - service.vm.Ctx.Log.Info("Platform: signAddNonDefaultSubnetValidatorTx called") + service.vm.Ctx.Log.Debug("signAddNonDefaultSubnetValidatorTx called") // Compute the byte repr. of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedAddNonDefaultSubnetValidatorTx) @@ -1203,7 +1203,7 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, response // Sorts tx.ControlSigs before returning // Assumes each element of tx.ControlSigs is actually a signature, not just empty bytes func (service *Service) signCreateChainTx(tx *CreateChainTx, key *crypto.PrivateKeySECP256K1R) (*CreateChainTx, error) { - service.vm.Ctx.Log.Info("Platform: signCreateChainTx called") + service.vm.Ctx.Log.Debug("signCreateChainTx called") // Compute the byte repr. 
of the unsigned tx and the signature of [key] over it unsignedIntf := interface{}(&tx.UnsignedCreateChainTx) From 0fdddae9fcb290a66a9843d6cd857ba3f2646dbb Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Tue, 16 Jun 2020 11:53:57 -0400 Subject: [PATCH 062/183] Optimize DAG traversal in insertFrom --- snow/engine/avalanche/transitive.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index e48b167..82ee859 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -335,10 +335,10 @@ func (t *Transitive) reinsertFrom(vdr ids.ShortID, vtxID ids.ID) (bool, error) { func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, error) { issued := true - vts := []avalanche.Vertex{vtx} - for len(vts) > 0 { - vtx := vts[0] - vts = vts[1:] + vertexHeap := newMaxVertexHeap() + vertexHeap.Push(vtx) + for vertexHeap.Len() > 0 { + vtx := vertexHeap.Pop() if t.Consensus.VertexIssued(vtx) { continue @@ -353,7 +353,7 @@ func (t *Transitive) insertFrom(vdr ids.ShortID, vtx avalanche.Vertex) (bool, er t.sendRequest(vdr, parent.ID()) issued = false } else { - vts = append(vts, parent) + vertexHeap.Push(parent) } } From aaa00b34880aba88421230d9f923fd2e9d62503a Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Sun, 7 Jun 2020 20:02:29 -0400 Subject: [PATCH 063/183] upnp --- go.mod | 2 +- go.sum | 2 + main/main.go | 8 +- main/params.go | 4 +- nat/mapper.go | 143 ------------------------------- nat/nat.go | 94 ++++++++++++++++++++ nat/no_router.go | 28 ------ nat/pmp.go | 71 --------------- nat/router.go | 65 -------------- nat/upnp.go | 219 ++++++++++++++++++----------------------------- node/config.go | 2 +- 11 files changed, 187 insertions(+), 451 deletions(-) delete mode 100644 nat/mapper.go create mode 100644 nat/nat.go delete mode 100644 nat/no_router.go delete mode 100644 nat/pmp.go delete mode 100644 nat/router.go diff --git a/go.mod b/go.mod index 4636c8c..c6f4e79 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( github.com/karalabe/usb v0.0.0-20191104083709-911d15fe12a9 // indirect github.com/mattn/go-colorable v0.1.6 // indirect github.com/mitchellh/go-homedir v1.1.0 - github.com/mr-tron/base58 v1.1.3 + github.com/mr-tron/base58 v1.2.0 github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d github.com/olekukonko/tablewriter v0.0.4 // indirect github.com/pborman/uuid v1.2.0 // indirect diff --git a/go.sum b/go.sum index d79e9a8..2f989e1 100644 --- a/go.sum +++ b/go.sum @@ -165,6 +165,8 @@ github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lN github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mr-tron/base58 v1.1.3 h1:v+sk57XuaCKGXpWtVBX8YJzO7hMGx4Aajh4TQbdEFdc= github.com/mr-tron/base58 v1.1.3/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= +github.com/mr-tron/base58 v1.2.0 h1:T/HDJBh4ZCPbU39/+c3rRvE0uKBQlU27+QI8LJ4t64o= +github.com/mr-tron/base58 v1.2.0/go.mod h1:BinMc/sQntlIE1frQmRFPUoPA1Zkr8VRgBdjWI2mNwc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E= github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU= diff --git a/main/main.go b/main/main.go index 98cb581..b49d320 100644 --- 
a/main/main.go +++ b/main/main.go @@ -68,11 +68,11 @@ func main() { log.Debug("assertions are enabled. This may slow down execution") } - mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko") - defer mapper.UnmapAllPorts() + router := nat.NewRouter(log, Config.Nat) + defer router.UnmapAllPorts() - mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port) - mapper.MapPort(Config.HTTPPort, Config.HTTPPort) + router.Map("TCP", Config.StakingIP.Port, Config.StakingIP.Port, "gecko") + router.Map("TCP", Config.HTTPPort, Config.HTTPPort, "gecko http") node := node.Node{} diff --git a/main/params.go b/main/params.go index 6dcad06..86eca34 100644 --- a/main/params.go +++ b/main/params.go @@ -281,12 +281,12 @@ func init() { Config.DB = memdb.New() } - Config.Nat = nat.NewRouter() + Config.Nat = nat.GetNATRouter() var ip net.IP // If public IP is not specified, get it using shell command dig if *consensusIP == "" { - ip, err = Config.Nat.IP() + ip, err = Config.Nat.ExternalIP() if err != nil { ip = net.IPv4zero // Couldn't get my IP...set to 0.0.0.0 } diff --git a/nat/mapper.go b/nat/mapper.go deleted file mode 100644 index 3beaedd..0000000 --- a/nat/mapper.go +++ /dev/null @@ -1,143 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package nat - -import ( - "sync" - "time" - - "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/utils/wrappers" -) - -const ( - defaultMappingTimeout = 30 * time.Minute - defaultMappingUpdateInterval = 3 * defaultMappingTimeout / 4 -) - -// Mapper maps port -type Mapper interface { - MapPort(newInternalPort, newExternalPort uint16) error - UnmapAllPorts() error -} - -type mapper struct { - log logging.Logger - router Router - networkProtocol NetworkProtocol - mappingNames string - mappingTimeout time.Duration - mappingUpdateInterval time.Duration - - closer chan struct{} - wg sync.WaitGroup - errLock sync.Mutex - errs wrappers.Errs -} - -// NewMapper returns a new mapper that can map ports on a router -func NewMapper( - log logging.Logger, - router Router, - networkProtocol NetworkProtocol, - mappingNames string, - mappingTimeout time.Duration, - mappingUpdateInterval time.Duration, -) Mapper { - return &mapper{ - log: log, - router: router, - networkProtocol: networkProtocol, - mappingNames: mappingNames, - mappingTimeout: mappingTimeout, - mappingUpdateInterval: mappingUpdateInterval, - closer: make(chan struct{}), - } -} - -// NewDefaultMapper returns a new mapper that can map ports on a router with -// default settings -func NewDefaultMapper( - log logging.Logger, - router Router, - networkProtocol NetworkProtocol, - mappingNames string, -) Mapper { - return NewMapper( - log, - router, - networkProtocol, - mappingNames, - defaultMappingTimeout, // uses the default value - defaultMappingUpdateInterval, // uses the default value - ) -} - -// MapPort maps a local port to a port on the router until UnmapAllPorts is -// called. 
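// Referring back to the "Optimize DAG traversal in insertFrom" patch above:
// newMaxVertexHeap is used there but not defined in that diff. A minimal assumed
// sketch of such a heap, ordered by vertex height and de-duplicated by ID
// (details are illustrative and assume the Vertex interface exposes Height()):
package avalanche

import (
	"container/heap"

	"github.com/ava-labs/gecko/ids"
	"github.com/ava-labs/gecko/snow/consensus/avalanche"
)

// vertexList implements heap.Interface, ordering vertices by descending height.
type vertexList []avalanche.Vertex

func (l vertexList) Len() int            { return len(l) }
func (l vertexList) Less(i, j int) bool  { return l[i].Height() > l[j].Height() }
func (l vertexList) Swap(i, j int)       { l[i], l[j] = l[j], l[i] }
func (l *vertexList) Push(x interface{}) { *l = append(*l, x.(avalanche.Vertex)) }
func (l *vertexList) Pop() interface{} {
	old := *l
	n := len(old)
	vtx := old[n-1]
	*l = old[:n-1]
	return vtx
}

// maxVertexHeap pops the highest vertex first and ignores duplicate pushes, so
// insertFrom visits each ancestor at most once.
type maxVertexHeap struct {
	list   vertexList
	pushed ids.Set
}

func newMaxVertexHeap() *maxVertexHeap { return &maxVertexHeap{} }

func (h *maxVertexHeap) Len() int { return len(h.list) }

func (h *maxVertexHeap) Push(vtx avalanche.Vertex) {
	if vtxID := vtx.ID(); !h.pushed.Contains(vtxID) {
		h.pushed.Add(vtxID)
		heap.Push(&h.list, vtx)
	}
}

func (h *maxVertexHeap) Pop() avalanche.Vertex {
	return heap.Pop(&h.list).(avalanche.Vertex)
}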
-func (m *mapper) MapPort(newInternalPort, newExternalPort uint16) error { - m.wg.Add(1) - go m.mapPort(newInternalPort, newExternalPort) - return nil -} - -func (m *mapper) mapPort(newInternalPort, newExternalPort uint16) { - // duration is set to 0 here so that the select case will execute - // immediately - updateTimer := time.NewTimer(0) - defer func() { - updateTimer.Stop() - - m.errLock.Lock() - m.errs.Add(m.router.UnmapPort( - m.networkProtocol, - newInternalPort, - newExternalPort)) - m.errLock.Unlock() - - m.log.Debug("Unmapped external port %d to internal port %d", - newExternalPort, - newInternalPort) - - m.wg.Done() - }() - - for { - select { - case <-updateTimer.C: - err := m.router.MapPort( - m.networkProtocol, - newInternalPort, - newExternalPort, - m.mappingNames, - m.mappingTimeout) - - if err != nil { - m.errLock.Lock() - m.errs.Add(err) - m.errLock.Unlock() - - m.log.Debug("Failed to add mapping from external port %d to internal port %d due to %s", - newExternalPort, - newInternalPort, - err) - } else { - m.log.Debug("Mapped external port %d to internal port %d", - newExternalPort, - newInternalPort) - } - - // remap the port in m.mappingUpdateInterval - updateTimer.Reset(m.mappingUpdateInterval) - case _, _ = <-m.closer: - return // only return when all ports are unmapped - } - } -} - -func (m *mapper) UnmapAllPorts() error { - close(m.closer) - m.wg.Wait() - return m.errs.Err -} diff --git a/nat/nat.go b/nat/nat.go new file mode 100644 index 0000000..1c3c2fa --- /dev/null +++ b/nat/nat.go @@ -0,0 +1,94 @@ +package nat + +import ( + "net" + "sync" + "time" + + "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/wrappers" +) + +const ( + mapTimeout = 30 * time.Minute + mapUpdate = mapTimeout / 2 +) + +type NATRouter interface { + MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error + UnmapPort(protocol string, extport uint16) error + ExternalIP() (net.IP, error) +} + +func GetNATRouter() NATRouter { + //TODO other protocol + return getUPnPRouter() +} + +type Router struct { + log logging.Logger + r NATRouter + closer chan struct{} + wg sync.WaitGroup + errLock sync.Mutex + errs wrappers.Errs +} + +func NewRouter(log logging.Logger, r NATRouter) Router { + return Router{ + log: log, + r: r, + closer: make(chan struct{}), + } +} + +func (dev *Router) Map(protocol string, intport, extport uint16, desc string) { + dev.wg.Add(1) + go dev.mapPort(protocol, intport, extport, desc) +} + +func (dev *Router) mapPort(protocol string, intport, extport uint16, desc string) { + updater := time.NewTimer(mapUpdate) + defer func() { + updater.Stop() + + dev.log.Info("Unmap protocol %s external port %d", protocol, extport) + dev.errLock.Lock() + dev.errs.Add(dev.r.UnmapPort(protocol, extport)) + dev.errLock.Unlock() + + dev.wg.Done() + }() + + if err := dev.r.MapPort(protocol, intport, extport, desc, mapTimeout); err != nil { + dev.log.Warn("Map port failed. Protocol %s Internal %d External %d. %s", + protocol, intport, extport, err) + dev.errLock.Lock() + dev.errs.Add(err) + dev.errLock.Unlock() + } else { + dev.log.Info("Mapped Protocol %s Internal %d External %d. %s", protocol, + intport, extport) + } + + for { + select { + case <-updater.C: + if err := dev.r.MapPort(protocol, intport, extport, desc, mapTimeout); err != nil { + dev.log.Warn("Renew port mapping failed. Protocol %s Internal %d External %d. 
%s", + protocol, intport, extport, err) + } else { + dev.log.Info("Renew port mapping Protocol %s Internal %d External %d. %s", protocol, + intport, extport) + } + + updater.Reset(mapUpdate) + } + } +} + +func (dev *Router) UnmapAllPorts() error { + close(dev.closer) + dev.wg.Wait() + return dev.errs.Err +} diff --git a/nat/no_router.go b/nat/no_router.go deleted file mode 100644 index edb86b6..0000000 --- a/nat/no_router.go +++ /dev/null @@ -1,28 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package nat - -import ( - "errors" - "net" - "time" -) - -var ( - errNoRouter = errors.New("no nat enabled router was discovered") -) - -type noRouter struct{} - -func (noRouter) MapPort(_ NetworkProtocol, _, _ uint16, _ string, _ time.Duration) error { - return errNoRouter -} - -func (noRouter) UnmapPort(_ NetworkProtocol, _, _ uint16) error { - return errNoRouter -} - -func (noRouter) IP() (net.IP, error) { - return nil, errNoRouter -} diff --git a/nat/pmp.go b/nat/pmp.go deleted file mode 100644 index 311375d..0000000 --- a/nat/pmp.go +++ /dev/null @@ -1,71 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package nat - -import ( - "net" - "time" - - "github.com/jackpal/gateway" - "github.com/jackpal/go-nat-pmp" -) - -var ( - pmpClientTimeout = 500 * time.Millisecond -) - -// natPMPClient adapts the NAT-PMP protocol implementation so it conforms to -// the common interface. -type pmpClient struct { - client *natpmp.Client -} - -func (pmp *pmpClient) MapPort( - networkProtocol NetworkProtocol, - newInternalPort uint16, - newExternalPort uint16, - mappingName string, - mappingDuration time.Duration) error { - protocol := string(networkProtocol) - internalPort := int(newInternalPort) - externalPort := int(newExternalPort) - // go-nat-pmp uses seconds to denote their lifetime - lifetime := int(mappingDuration / time.Second) - - _, err := pmp.client.AddPortMapping(protocol, internalPort, externalPort, lifetime) - return err -} - -func (pmp *pmpClient) UnmapPort( - networkProtocol NetworkProtocol, - internalPort uint16, - _ uint16) error { - protocol := string(networkProtocol) - internalPortInt := int(internalPort) - - _, err := pmp.client.AddPortMapping(protocol, internalPortInt, 0, 0) - return err -} - -func (pmp *pmpClient) IP() (net.IP, error) { - response, err := pmp.client.GetExternalAddress() - if err != nil { - return nil, err - } - return response.ExternalIPAddress[:], nil -} - -func getPMPRouter() Router { - gatewayIP, err := gateway.DiscoverGateway() - if err != nil { - return nil - } - - pmp := &pmpClient{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)} - if _, err := pmp.IP(); err != nil { - return nil - } - - return pmp -} diff --git a/nat/router.go b/nat/router.go deleted file mode 100644 index 11b58f9..0000000 --- a/nat/router.go +++ /dev/null @@ -1,65 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -// Package nat performs network address translation and provides helpers for -// routing ports. -package nat - -import ( - "net" - "time" -) - -// NetworkProtocol is a protocol that will be used through a port -type NetworkProtocol string - -// Available protocol -const ( - TCP NetworkProtocol = "TCP" - UDP NetworkProtocol = "UDP" -) - -// Router provides a standard NAT router functions. Specifically, allowing the -// fetching of public IPs and port forwarding to this computer. 
-type Router interface { - // mapPort creates a mapping between a port on the local computer to an - // external port on the router. - // - // The mappingName is something displayed on the router, so it is included - // for completeness. - MapPort( - networkProtocol NetworkProtocol, - newInternalPort uint16, - newExternalPort uint16, - mappingName string, - mappingDuration time.Duration) error - - // UnmapPort clears a mapping that was previous made by a call to MapPort - UnmapPort( - networkProtocol NetworkProtocol, - internalPort uint16, - externalPort uint16) error - - // Returns the routers IP address on the network the router considers - // external - IP() (net.IP, error) -} - -// NewRouter returns a new router discovered on the local network -func NewRouter() Router { - routers := make(chan Router) - // Because getting a router can take a noticeable amount of time to error, - // we run these requests in parallel - go func() { - routers <- getUPnPRouter() - }() - go func() { - routers <- getPMPRouter() - }() - for i := 0; i < 2; i++ { - if router := <-routers; router != nil { - return router - } - } - return noRouter{} -} diff --git a/nat/upnp.go b/nat/upnp.go index e60cd6e..7f62760 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -1,10 +1,6 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - package nat import ( - "errors" "fmt" "net" "time" @@ -14,13 +10,7 @@ import ( "github.com/huin/goupnp/dcps/internetgateway2" ) -const ( - soapTimeout = time.Second -) - -var ( - errNoGateway = errors.New("Failed to connect to any avaliable gateways") -) +const soapRequestTimeout = 3 * time.Second // upnpClient is the interface used by goupnp for their client implementations type upnpClient interface { @@ -50,66 +40,13 @@ type upnpClient interface { } type upnpRouter struct { - root *goupnp.RootDevice + dev *goupnp.RootDevice client upnpClient } -func (n *upnpRouter) MapPort( - networkProtocol NetworkProtocol, - newInternalPort uint16, - newExternalPort uint16, - mappingName string, - mappingDuration time.Duration, -) error { - ip, err := n.localAddress() - if err != nil { - return err - } - - protocol := string(networkProtocol) - // goupnp uses seconds to denote their lifetime - lifetime := uint32(mappingDuration / time.Second) - - // UnmapPort's error is intentionally dropped, because the mapping may not - // exist. 
- n.UnmapPort(networkProtocol, newInternalPort, newExternalPort) - - return n.client.AddPortMapping( - "", // newRemoteHost isn't used to limit the mapping to a host - newExternalPort, - protocol, - newInternalPort, - ip.String(), // newInternalClient is the client traffic should be sent to - true, // newEnabled enables port mappings - mappingName, - lifetime, - ) -} - -func (n *upnpRouter) UnmapPort(networkProtocol NetworkProtocol, _, externalPort uint16) error { - protocol := string(networkProtocol) - return n.client.DeletePortMapping( - "", // newRemoteHost isn't used to limit the mapping to a host - externalPort, - protocol) -} - -func (n *upnpRouter) IP() (net.IP, error) { - ipStr, err := n.client.GetExternalIPAddress() - if err != nil { - return nil, err - } - - ip := net.ParseIP(ipStr) - if ip == nil { - return nil, fmt.Errorf("invalid IP %s", ipStr) - } - return ip, nil -} - -func (n *upnpRouter) localAddress() (net.IP, error) { +func (r *upnpRouter) localIP() (net.IP, error) { // attempt to get an address on the router - deviceAddr, err := net.ResolveUDPAddr("udp4", n.root.URLBase.Host) + deviceAddr, err := net.ResolveUDPAddr("udp4", r.dev.URLBase.Host) if err != nil { return nil, err } @@ -142,27 +79,34 @@ func (n *upnpRouter) localAddress() (net.IP, error) { } } return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP) + } -// getUPnPRouter searches for all Gateway Devices that have avaliable -// connections in the goupnp library and returns the first connection it can -// find. -func getUPnPRouter() Router { - routers := make(chan *upnpRouter) - // Because DiscoverDevices takes a noticeable amount of time to error, we - // run these requests in parallel - go func() { - routers <- connectToGateway(internetgateway1.URN_WANConnectionDevice_1, gateway1) - }() - go func() { - routers <- connectToGateway(internetgateway2.URN_WANConnectionDevice_2, gateway2) - }() - for i := 0; i < 2; i++ { - if router := <-routers; router != nil { - return router - } +func (r *upnpRouter) ExternalIP() (net.IP, error) { + str, err := r.client.GetExternalIPAddress() + if err != nil { + return nil, err } - return nil + + ip := net.ParseIP(str) + if ip == nil { + return nil, fmt.Errorf("invalid IP %s", str) + } + return ip, nil +} + +func (r *upnpRouter) MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error { + ip, err := r.localIP() + if err != nil { + return nil + } + lifetime := uint32(duration / time.Second) + r.UnmapPort(protocol, extport) + return r.client.AddPortMapping("", extport, protocol, intport, ip.String(), true, desc, lifetime) +} + +func (r *upnpRouter) UnmapPort(protocol string, extport uint16) error { + return r.client.DeletePortMapping("", extport, protocol) } func gateway1(client goupnp.ServiceClient) upnpClient { @@ -175,7 +119,6 @@ func gateway1(client goupnp.ServiceClient) upnpClient { return nil } } - func gateway2(client goupnp.ServiceClient) upnpClient { switch client.Service.ServiceType { case internetgateway2.URN_WANIPConnection_1: @@ -189,65 +132,69 @@ func gateway2(client goupnp.ServiceClient) upnpClient { } } -func connectToGateway(deviceType string, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter { - devs, err := goupnp.DiscoverDevices(deviceType) +func getUpnpClient(client goupnp.ServiceClient) upnpClient { + c := gateway1(client) + if c != nil { + return c + } + return gateway2(client) +} + +func getRootDevice(dev *goupnp.MaybeRootDevice) *upnpRouter { + var router *upnpRouter + 
dev.Root.Device.VisitServices(func(service *goupnp.Service) { + c := goupnp.ServiceClient{ + SOAPClient: service.NewSOAPClient(), + RootDevice: dev.Root, + Location: dev.Location, + Service: service, + } + c.SOAPClient.HTTPClient.Timeout = soapRequestTimeout + client := getUpnpClient(c) + if client == nil { + return + } + router = &upnpRouter{dev.Root, client} + if router == nil { + return + } + if _, nat, err := router.client.GetNATRSIPStatus(); err != nil || !nat { + router = nil + return + } + }) + return router +} + +func discover(target string) *upnpRouter { + devs, err := goupnp.DiscoverDevices(target) if err != nil { return nil } - // we are iterating over all the network devices, acting a possible roots - for i := range devs { - dev := &devs[i] - if dev.Root == nil { + for i := 0; i < len(devs); i++ { + if devs[i].Root == nil { continue } - - // the root device may be a router, so attempt to connect to that - rootDevice := &dev.Root.Device - if upnp := getRouter(dev, rootDevice, toClient); upnp != nil { - return upnp - } - - // attempt to connect to any sub devices - devices := rootDevice.Devices - for i := range devices { - if upnp := getRouter(dev, &devices[i], toClient); upnp != nil { - return upnp - } + u := getRootDevice(&devs[i]) + if u != nil { + return u } } return nil } -func getRouter(rootDevice *goupnp.MaybeRootDevice, device *goupnp.Device, toClient func(goupnp.ServiceClient) upnpClient) *upnpRouter { - for i := range device.Services { - service := &device.Services[i] - - soapClient := service.NewSOAPClient() - // make sure the client times out if needed - soapClient.HTTPClient.Timeout = soapTimeout - - // attempt to create a client connection - serviceClient := goupnp.ServiceClient{ - SOAPClient: soapClient, - RootDevice: rootDevice.Root, - Location: rootDevice.Location, - Service: service, - } - client := toClient(serviceClient) - if client == nil { - continue - } - - // check whether port mapping is enabled - if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat { - continue - } - - // we found a router! - return &upnpRouter{ - root: rootDevice.Root, - client: client, - } +func getUPnPRouter() *upnpRouter { + r := discover(internetgateway1.URN_WANConnectionDevice_1) + if r != nil { + return r } - return nil + return discover(internetgateway2.URN_WANConnectionDevice_2) +} + +func GetUPnP() *upnpRouter { + r := discover(internetgateway1.URN_WANConnectionDevice_1) + if r != nil { + return r + } + return discover(internetgateway2.URN_WANConnectionDevice_2) } diff --git a/node/config.go b/node/config.go index 2504276..aaadb7a 100644 --- a/node/config.go +++ b/node/config.go @@ -15,7 +15,7 @@ import ( // Config contains all of the configurations of an Ava node. 
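// Illustrative compile-time check, not part of the patch: within package nat, the
// upnpRouter above satisfies the NATRouter interface that node.Config.Nat now holds.
var _ NATRouter = (*upnpRouter)(nil)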
type Config struct { // protocol to use for opening the network interface - Nat nat.Router + Nat nat.NATRouter // ID of the network this node should connect to NetworkID uint32 From 96cfcc0b5b5e1b2de8baf95e9c31fced2bbfda21 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 12 Jun 2020 10:07:17 -0400 Subject: [PATCH 064/183] NAT test --- nat/nat.go | 16 ++++++---- nat/nat_test.go | 77 +++++++++++++++++++++++++++++++++++++++++++++++++ nat/upnp.go | 51 ++++++++++++++++++++++---------- 3 files changed, 124 insertions(+), 20 deletions(-) create mode 100644 nat/nat_test.go diff --git a/nat/nat.go b/nat/nat.go index 1c3c2fa..60d3a83 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -22,7 +22,10 @@ type NATRouter interface { func GetNATRouter() NATRouter { //TODO other protocol - return getUPnPRouter() + if r := getUPnPRouter(); r != nil { + return r + } + return nil } type Router struct { @@ -61,13 +64,13 @@ func (dev *Router) mapPort(protocol string, intport, extport uint16, desc string }() if err := dev.r.MapPort(protocol, intport, extport, desc, mapTimeout); err != nil { - dev.log.Warn("Map port failed. Protocol %s Internal %d External %d. %s", + dev.log.Error("Map port failed. Protocol %s Internal %d External %d. %s", protocol, intport, extport, err) dev.errLock.Lock() dev.errs.Add(err) dev.errLock.Unlock() } else { - dev.log.Info("Mapped Protocol %s Internal %d External %d. %s", protocol, + dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol, intport, extport) } @@ -75,14 +78,16 @@ func (dev *Router) mapPort(protocol string, intport, extport uint16, desc string select { case <-updater.C: if err := dev.r.MapPort(protocol, intport, extport, desc, mapTimeout); err != nil { - dev.log.Warn("Renew port mapping failed. Protocol %s Internal %d External %d. %s", + dev.log.Error("Renew port mapping failed. Protocol %s Internal %d External %d. %s", protocol, intport, extport, err) } else { - dev.log.Info("Renew port mapping Protocol %s Internal %d External %d. 
%s", protocol, + dev.log.Info("Renew port mapping Protocol %s Internal %d External %d.", protocol, intport, extport) } updater.Reset(mapUpdate) + case _, _ = <-dev.closer: + return } } } @@ -90,5 +95,6 @@ func (dev *Router) mapPort(protocol string, intport, extport uint16, desc string func (dev *Router) UnmapAllPorts() error { close(dev.closer) dev.wg.Wait() + dev.log.Info("Unmapped all ports") return dev.errs.Err } diff --git a/nat/nat_test.go b/nat/nat_test.go new file mode 100644 index 0000000..acb06cd --- /dev/null +++ b/nat/nat_test.go @@ -0,0 +1,77 @@ +package nat + +// go test -run 'HTTP' + +import ( + "context" + "fmt" + "net/http" + "os" + "os/signal" + "strconv" + "testing" + "time" + + "github.com/ava-labs/gecko/utils/logging" +) + +const ( + externalPort = 9876 + localPort = 8080 +) + +func TestHTTP(t *testing.T) { + config, err := logging.DefaultConfig() + if err != nil { + return + } + factory := logging.NewFactory(config) + defer factory.Close() + + log, err := factory.Make() + if err != nil { + return + } + defer log.Stop() + defer log.StopOnPanic() + + log.Info("Logger Initialized") + + n := GetNATRouter() + if n == nil { + log.Error("Unable to get UPnP Device") + return + } + + ip, err := n.ExternalIP() + if err != nil { + log.Error("Unable to get external IP: %v", err) + return + } + log.Info("External Address %s:%d", ip.String(), externalPort) + + r := NewRouter(log, n) + defer r.UnmapAllPorts() + + r.Map("TCP", localPort, externalPort, "AVA UPnP Test") + + log.Info("Starting HTTP Service") + server := &http.Server{Addr: ":" + strconv.Itoa(localPort)} + http.HandleFunc("/", hello) + go func() { + server.ListenAndServe() + }() + + stop := make(chan os.Signal, 1) + signal.Notify(stop, os.Interrupt) + + <-stop + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + server.Shutdown(ctx) +} + +func hello(w http.ResponseWriter, req *http.Request) { + fmt.Fprintf(w, "AVA UPnP Test\n") +} diff --git a/nat/upnp.go b/nat/upnp.go index 7f62760..f83b845 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -10,7 +10,10 @@ import ( "github.com/huin/goupnp/dcps/internetgateway2" ) -const soapRequestTimeout = 3 * time.Second +const ( + soapRequestTimeout = 3 * time.Second + mapRetry = 20 +) // upnpClient is the interface used by goupnp for their client implementations type upnpClient interface { @@ -79,7 +82,6 @@ func (r *upnpRouter) localIP() (net.IP, error) { } } return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP) - } func (r *upnpRouter) ExternalIP() (net.IP, error) { @@ -95,14 +97,26 @@ func (r *upnpRouter) ExternalIP() (net.IP, error) { return ip, nil } -func (r *upnpRouter) MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error { +func (r *upnpRouter) MapPort(protocol string, intport, extport uint16, + desc string, duration time.Duration) error { ip, err := r.localIP() if err != nil { return nil } lifetime := uint32(duration / time.Second) - r.UnmapPort(protocol, extport) - return r.client.AddPortMapping("", extport, protocol, intport, ip.String(), true, desc, lifetime) + + for i := 0; i < mapRetry; i++ { + externalPort := extport + uint16(i) + err = r.client.AddPortMapping("", externalPort, protocol, intport, + ip.String(), true, desc, lifetime) + if err == nil { + fmt.Printf("Mapped external port %d to local %s:%d\n", externalPort, + ip.String(), intport) + return nil + } + fmt.Printf("Unable to map port, retry with port %d\n", externalPort+1) + } + return err } func 
(r *upnpRouter) UnmapPort(protocol string, extport uint16) error { @@ -184,17 +198,24 @@ func discover(target string) *upnpRouter { } func getUPnPRouter() *upnpRouter { - r := discover(internetgateway1.URN_WANConnectionDevice_1) - if r != nil { - return r + targets := []string{ + internetgateway1.URN_WANConnectionDevice_1, + internetgateway2.URN_WANConnectionDevice_2, } - return discover(internetgateway2.URN_WANConnectionDevice_2) -} -func GetUPnP() *upnpRouter { - r := discover(internetgateway1.URN_WANConnectionDevice_1) - if r != nil { - return r + routers := make(chan *upnpRouter, len(targets)) + + for _, urn := range targets { + go func(urn string) { + routers <- discover(urn) + }(urn) } - return discover(internetgateway2.URN_WANConnectionDevice_2) + + for i := 0; i < len(targets); i++ { + if r := <-routers; r != nil { + return r + } + } + + return nil } From 54e1c4031e0eea9b0c8471d6d76638f4988551bb Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Sun, 7 Jun 2020 20:02:29 -0400 Subject: [PATCH 065/183] upnp --- nat/upnp.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/nat/upnp.go b/nat/upnp.go index f83b845..bfc6c5e 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -82,6 +82,7 @@ func (r *upnpRouter) localIP() (net.IP, error) { } } return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP) + } func (r *upnpRouter) ExternalIP() (net.IP, error) { @@ -194,7 +195,7 @@ func discover(target string) *upnpRouter { return u } } - return nil + return gateway2(client) } func getUPnPRouter() *upnpRouter { @@ -219,3 +220,19 @@ func getUPnPRouter() *upnpRouter { return nil } + +func getUPnPRouter() *upnpRouter { + r := discover(internetgateway1.URN_WANConnectionDevice_1) + if r != nil { + return r + } + return discover(internetgateway2.URN_WANConnectionDevice_2) +} + +func GetUPnP() *upnpRouter { + r := discover(internetgateway1.URN_WANConnectionDevice_1) + if r != nil { + return r + } + return discover(internetgateway2.URN_WANConnectionDevice_2) +} From 3281da4ff2ff0f6dda6f8c6f3ba340388365615e Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 12 Jun 2020 10:07:17 -0400 Subject: [PATCH 066/183] NAT test --- nat/upnp.go | 28 +++++++++++++++++----------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/nat/upnp.go b/nat/upnp.go index bfc6c5e..eed99fe 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -82,7 +82,6 @@ func (r *upnpRouter) localIP() (net.IP, error) { } } return nil, fmt.Errorf("couldn't find the local address in the same network as %s", deviceIP) - } func (r *upnpRouter) ExternalIP() (net.IP, error) { @@ -222,17 +221,24 @@ func getUPnPRouter() *upnpRouter { } func getUPnPRouter() *upnpRouter { - r := discover(internetgateway1.URN_WANConnectionDevice_1) - if r != nil { - return r + targets := []string{ + internetgateway1.URN_WANConnectionDevice_1, + internetgateway2.URN_WANConnectionDevice_2, } - return discover(internetgateway2.URN_WANConnectionDevice_2) -} -func GetUPnP() *upnpRouter { - r := discover(internetgateway1.URN_WANConnectionDevice_1) - if r != nil { - return r + routers := make(chan *upnpRouter, len(targets)) + + for _, urn := range targets { + go func(urn string) { + routers <- discover(urn) + }(urn) } - return discover(internetgateway2.URN_WANConnectionDevice_2) + + for i := 0; i < len(targets); i++ { + if r := <-routers; r != nil { + return r + } + } + + return nil } From 661ee3a5428ba4e0b4781942ce91e0074dabda77 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Fri, 12 Jun 2020 17:40:41 
-0400 Subject: [PATCH 067/183] support machine with public IP --- nat/nat.go | 23 +++++++++++++++++++---- nat/nat_test.go | 15 +++++++++++++++ nat/public_ip.go | 48 ++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 82 insertions(+), 4 deletions(-) create mode 100644 nat/public_ip.go diff --git a/nat/nat.go b/nat/nat.go index 60d3a83..aa83be0 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -1,6 +1,7 @@ package nat import ( + "fmt" "net" "sync" "time" @@ -21,11 +22,25 @@ type NATRouter interface { } func GetNATRouter() NATRouter { - //TODO other protocol - if r := getUPnPRouter(); r != nil { - return r + router := make(chan NATRouter) + + go func() { + r := getUPnPRouter() + if r != nil { + fmt.Println("Found UPnP Router") + router <- r + } else { + router <- nil + } + }() + + for i := 0; i < 1; i++ { + if r := <-router; r != nil { + return r + } } - return nil + + return NewPublicIP() } type Router struct { diff --git a/nat/nat_test.go b/nat/nat_test.go index acb06cd..efa2233 100644 --- a/nat/nat_test.go +++ b/nat/nat_test.go @@ -20,6 +20,21 @@ const ( localPort = 8080 ) +func TestRouter(t *testing.T) { + n := GetNATRouter() + if n == nil { + fmt.Println("NAT Router is nil") + return + } + + ip, err := n.ExternalIP() + if err != nil { + fmt.Printf("Unable to get external IP: %v\n", err) + return + } + fmt.Printf("External Address %s:%d\n", ip.String(), externalPort) +} + func TestHTTP(t *testing.T) { config, err := logging.DefaultConfig() if err != nil { diff --git a/nat/public_ip.go b/nat/public_ip.go new file mode 100644 index 0000000..3900942 --- /dev/null +++ b/nat/public_ip.go @@ -0,0 +1,48 @@ +package nat + +import ( + "fmt" + "net" + "time" +) + +const googleDNSServer = "8.8.8.8:80" + +type publicIP struct { + ip net.IP +} + +func (publicIP) MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error { + if intport != extport { + return fmt.Errorf("cannot map port %d to %d", intport, extport) + } + return nil +} + +func (publicIP) UnmapPort(protocol string, extport uint16) error { + return nil +} + +func (r publicIP) ExternalIP() (net.IP, error) { + return r.ip, nil +} + +func getOutboundIP() (net.IP, error) { + conn, err := net.Dial("udp", googleDNSServer) + if err != nil { + return nil, err + } + defer conn.Close() + + return conn.LocalAddr().(*net.UDPAddr).IP, nil +} + +func NewPublicIP() *publicIP { + ip, err := getOutboundIP() + if err != nil { + return nil + } + return &publicIP{ + ip: ip, + } +} From f8301f11c2a966434d0dc76a6e40f9daca47d7d7 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Sat, 13 Jun 2020 15:29:28 -0400 Subject: [PATCH 068/183] fix upnp port detection and retry on mapping clean up rm test --- nat/nat.go | 105 +++++++++++++++++------------ nat/nat_test.go | 92 ------------------------- nat/{public_ip.go => no_router.go} | 16 +++-- nat/upnp.go | 44 +++++++----- 4 files changed, 97 insertions(+), 160 deletions(-) delete mode 100644 nat/nat_test.go rename nat/{public_ip.go => no_router.go} (64%) diff --git a/nat/nat.go b/nat/nat.go index aa83be0..026113e 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -1,7 +1,6 @@ package nat import ( - "fmt" "net" "sync" "time" @@ -11,36 +10,25 @@ import ( ) const ( - mapTimeout = 30 * time.Minute - mapUpdate = mapTimeout / 2 + mapTimeout = 30 * time.Minute + mapUpdateTimeout = mapTimeout / 2 + maxRetries = 20 ) type NATRouter interface { MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error UnmapPort(protocol string, extport uint16) error 
ExternalIP() (net.IP, error) + IsMapped(extport uint16, protocol string) bool } func GetNATRouter() NATRouter { - router := make(chan NATRouter) - - go func() { - r := getUPnPRouter() - if r != nil { - fmt.Println("Found UPnP Router") - router <- r - } else { - router <- nil - } - }() - - for i := 0; i < 1; i++ { - if r := <-router; r != nil { - return r - } + //TODO add PMP support + if r := getUPnPRouter(); r != nil { + return r } - return NewPublicIP() + return NewNoRouter() } type Router struct { @@ -60,47 +48,76 @@ func NewRouter(log logging.Logger, r NATRouter) Router { } } -func (dev *Router) Map(protocol string, intport, extport uint16, desc string) { +// Map sets up port mapping using given protocol, internal and external ports +// and returns the final port mapped. It returns 0 if mapping failed after the +// maximun number of retries +func (dev *Router) Map(protocol string, intport, extport uint16, desc string) uint16 { + mappedPort := make(chan uint16) + dev.wg.Add(1) - go dev.mapPort(protocol, intport, extport, desc) + go dev.keepPortMapping(mappedPort, protocol, intport, extport, desc) + + return <-mappedPort } -func (dev *Router) mapPort(protocol string, intport, extport uint16, desc string) { - updater := time.NewTimer(mapUpdate) - defer func() { - updater.Stop() +// keepPortMapping runs in the background to keep a port mapped. It renews the +// the port mapping in mapUpdateTimeout. +func (dev *Router) keepPortMapping(mappedPort chan<- uint16, protocol string, + intport, extport uint16, desc string) { + updateTimer := time.NewTimer(mapUpdateTimeout) + var port uint16 = 0 - dev.log.Info("Unmap protocol %s external port %d", protocol, extport) - dev.errLock.Lock() - dev.errs.Add(dev.r.UnmapPort(protocol, extport)) - dev.errLock.Unlock() + defer func() { + updateTimer.Stop() + + dev.log.Info("Unmap protocol %s external port %d", protocol, port) + if port > 0 { + dev.errLock.Lock() + dev.errs.Add(dev.r.UnmapPort(protocol, port)) + dev.errLock.Unlock() + } dev.wg.Done() }() - if err := dev.r.MapPort(protocol, intport, extport, desc, mapTimeout); err != nil { - dev.log.Error("Map port failed. Protocol %s Internal %d External %d. %s", - protocol, intport, extport, err) - dev.errLock.Lock() - dev.errs.Add(err) - dev.errLock.Unlock() - } else { - dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol, - intport, extport) + for i := 0; i < maxRetries; i++ { + port = extport + uint16(i) + if dev.r.IsMapped(port, protocol) { + dev.log.Info("Port %d is occupied, retry with the next port", port) + continue + } + if err := dev.r.MapPort(protocol, intport, port, desc, mapTimeout); err != nil { + dev.log.Error("Map port failed. Protocol %s Internal %d External %d. %s", + protocol, intport, port, err) + dev.errLock.Lock() + dev.errs.Add(err) + dev.errLock.Unlock() + } else { + dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol, + intport, port) + mappedPort <- port + break + } + } + + if port == 0 { + dev.log.Error("Unable to map port %d", extport) + mappedPort <- port + return } for { select { - case <-updater.C: - if err := dev.r.MapPort(protocol, intport, extport, desc, mapTimeout); err != nil { + case <-updateTimer.C: + if err := dev.r.MapPort(protocol, intport, port, desc, mapTimeout); err != nil { dev.log.Error("Renew port mapping failed. Protocol %s Internal %d External %d. 
%s", - protocol, intport, extport, err) + protocol, intport, port, err) } else { dev.log.Info("Renew port mapping Protocol %s Internal %d External %d.", protocol, - intport, extport) + intport, port) } - updater.Reset(mapUpdate) + updateTimer.Reset(mapUpdateTimeout) case _, _ = <-dev.closer: return } diff --git a/nat/nat_test.go b/nat/nat_test.go deleted file mode 100644 index efa2233..0000000 --- a/nat/nat_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package nat - -// go test -run 'HTTP' - -import ( - "context" - "fmt" - "net/http" - "os" - "os/signal" - "strconv" - "testing" - "time" - - "github.com/ava-labs/gecko/utils/logging" -) - -const ( - externalPort = 9876 - localPort = 8080 -) - -func TestRouter(t *testing.T) { - n := GetNATRouter() - if n == nil { - fmt.Println("NAT Router is nil") - return - } - - ip, err := n.ExternalIP() - if err != nil { - fmt.Printf("Unable to get external IP: %v\n", err) - return - } - fmt.Printf("External Address %s:%d\n", ip.String(), externalPort) -} - -func TestHTTP(t *testing.T) { - config, err := logging.DefaultConfig() - if err != nil { - return - } - factory := logging.NewFactory(config) - defer factory.Close() - - log, err := factory.Make() - if err != nil { - return - } - defer log.Stop() - defer log.StopOnPanic() - - log.Info("Logger Initialized") - - n := GetNATRouter() - if n == nil { - log.Error("Unable to get UPnP Device") - return - } - - ip, err := n.ExternalIP() - if err != nil { - log.Error("Unable to get external IP: %v", err) - return - } - log.Info("External Address %s:%d", ip.String(), externalPort) - - r := NewRouter(log, n) - defer r.UnmapAllPorts() - - r.Map("TCP", localPort, externalPort, "AVA UPnP Test") - - log.Info("Starting HTTP Service") - server := &http.Server{Addr: ":" + strconv.Itoa(localPort)} - http.HandleFunc("/", hello) - go func() { - server.ListenAndServe() - }() - - stop := make(chan os.Signal, 1) - signal.Notify(stop, os.Interrupt) - - <-stop - - ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) - defer cancel() - server.Shutdown(ctx) -} - -func hello(w http.ResponseWriter, req *http.Request) { - fmt.Fprintf(w, "AVA UPnP Test\n") -} diff --git a/nat/public_ip.go b/nat/no_router.go similarity index 64% rename from nat/public_ip.go rename to nat/no_router.go index 3900942..d09731b 100644 --- a/nat/public_ip.go +++ b/nat/no_router.go @@ -8,25 +8,29 @@ import ( const googleDNSServer = "8.8.8.8:80" -type publicIP struct { +type noRouter struct { ip net.IP } -func (publicIP) MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error { +func (noRouter) MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error { if intport != extport { return fmt.Errorf("cannot map port %d to %d", intport, extport) } return nil } -func (publicIP) UnmapPort(protocol string, extport uint16) error { +func (noRouter) UnmapPort(protocol string, extport uint16) error { return nil } -func (r publicIP) ExternalIP() (net.IP, error) { +func (r noRouter) ExternalIP() (net.IP, error) { return r.ip, nil } +func (noRouter) IsMapped(uint16, string) bool { + return false +} + func getOutboundIP() (net.IP, error) { conn, err := net.Dial("udp", googleDNSServer) if err != nil { @@ -37,12 +41,12 @@ func getOutboundIP() (net.IP, error) { return conn.LocalAddr().(*net.UDPAddr).IP, nil } -func NewPublicIP() *publicIP { +func NewNoRouter() *noRouter { ip, err := getOutboundIP() if err != nil { return nil } - return &publicIP{ + return &noRouter{ ip: ip, } } diff --git 
a/nat/upnp.go b/nat/upnp.go index eed99fe..943d2e4 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -12,7 +12,6 @@ import ( const ( soapRequestTimeout = 3 * time.Second - mapRetry = 20 ) // upnpClient is the interface used by goupnp for their client implementations @@ -40,6 +39,20 @@ type upnpClient interface { // returns if there is rsip available, nat enabled, or an unexpected error. GetNATRSIPStatus() (newRSIPAvailable bool, natEnabled bool, err error) + + // attempts to get port mapping information give a external port and protocol + GetSpecificPortMappingEntry( + NewRemoteHost string, + NewExternalPort uint16, + NewProtocol string, + ) ( + NewInternalPort uint16, + NewInternalClient string, + NewEnabled bool, + NewPortMappingDescription string, + NewLeaseDuration uint32, + err error, + ) } type upnpRouter struct { @@ -68,9 +81,6 @@ func (r *upnpRouter) localIP() (net.IP, error) { } for _, addr := range addrs { - // this is pretty janky, but it seems to be the best way to get the - // ip mask and properly check if the ip references the device we are - // connected to ipNet, ok := addr.(*net.IPNet) if !ok { continue @@ -105,24 +115,19 @@ func (r *upnpRouter) MapPort(protocol string, intport, extport uint16, } lifetime := uint32(duration / time.Second) - for i := 0; i < mapRetry; i++ { - externalPort := extport + uint16(i) - err = r.client.AddPortMapping("", externalPort, protocol, intport, - ip.String(), true, desc, lifetime) - if err == nil { - fmt.Printf("Mapped external port %d to local %s:%d\n", externalPort, - ip.String(), intport) - return nil - } - fmt.Printf("Unable to map port, retry with port %d\n", externalPort+1) - } - return err + return r.client.AddPortMapping("", extport, protocol, intport, + ip.String(), true, desc, lifetime) } func (r *upnpRouter) UnmapPort(protocol string, extport uint16) error { return r.client.DeletePortMapping("", extport, protocol) } +func (r *upnpRouter) IsMapped(extport uint16, protocol string) bool { + _, _, enabled, _, _, _ := r.client.GetSpecificPortMappingEntry("", extport, protocol) + return enabled +} + func gateway1(client goupnp.ServiceClient) upnpClient { switch client.Service.ServiceType { case internetgateway1.URN_WANIPConnection_1: @@ -133,6 +138,7 @@ func gateway1(client goupnp.ServiceClient) upnpClient { return nil } } + func gateway2(client goupnp.ServiceClient) upnpClient { switch client.Service.ServiceType { case internetgateway2.URN_WANIPConnection_1: @@ -146,7 +152,7 @@ func gateway2(client goupnp.ServiceClient) upnpClient { } } -func getUpnpClient(client goupnp.ServiceClient) upnpClient { +func getUPnPClient(client goupnp.ServiceClient) upnpClient { c := gateway1(client) if c != nil { return c @@ -164,7 +170,7 @@ func getRootDevice(dev *goupnp.MaybeRootDevice) *upnpRouter { Service: service, } c.SOAPClient.HTTPClient.Timeout = soapRequestTimeout - client := getUpnpClient(c) + client := getUPnPClient(c) if client == nil { return } @@ -220,6 +226,8 @@ func getUPnPRouter() *upnpRouter { return nil } +// getUPnPRouter searches for internet gateway using both Device Control Protocol +// and returns the first one it can find. 
It returns nil if no UPnP gateway is found func getUPnPRouter() *upnpRouter { targets := []string{ internetgateway1.URN_WANConnectionDevice_1, From 3cfba77c70e8d431d24de67e4a4a9a436c2ba95e Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Sun, 14 Jun 2020 03:51:31 -0400 Subject: [PATCH 069/183] staking internal port and external port could be different get mapped port entry; change interface to mapper --- main/main.go | 8 ++-- main/params.go | 1 + nat/nat.go | 24 +++++++---- nat/no_router.go | 4 +- nat/upnp.go | 108 +++++++++++++++-------------------------------- node/config.go | 11 ++--- node/node.go | 2 +- 7 files changed, 63 insertions(+), 95 deletions(-) diff --git a/main/main.go b/main/main.go index b49d320..1a5e8d4 100644 --- a/main/main.go +++ b/main/main.go @@ -68,11 +68,11 @@ func main() { log.Debug("assertions are enabled. This may slow down execution") } - router := nat.NewRouter(log, Config.Nat) - defer router.UnmapAllPorts() + mapper := nat.NewPortMapper(log, Config.Nat) + defer mapper.UnmapAllPorts() - router.Map("TCP", Config.StakingIP.Port, Config.StakingIP.Port, "gecko") - router.Map("TCP", Config.HTTPPort, Config.HTTPPort, "gecko http") + Config.StakingIP.Port = mapper.Map("TCP", Config.StakingLocalPort, Config.StakingIP.Port, "gecko") + Config.HTTPPort = mapper.Map("TCP", Config.HTTPPort, Config.HTTPPort, "gecko http") node := node.Node{} diff --git a/main/params.go b/main/params.go index 86eca34..b339585 100644 --- a/main/params.go +++ b/main/params.go @@ -303,6 +303,7 @@ func init() { IP: ip, Port: uint16(*consensusPort), } + Config.StakingLocalPort = uint16(*consensusPort) defaultBootstrapIPs, defaultBootstrapIDs := GetDefaultBootstraps(networkID, 5) diff --git a/nat/nat.go b/nat/nat.go index 026113e..6c30808 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -19,7 +19,12 @@ type NATRouter interface { MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error UnmapPort(protocol string, extport uint16) error ExternalIP() (net.IP, error) - IsMapped(extport uint16, protocol string) bool + GetPortMappingEntry(extport uint16, protocol string) ( + InternalIP string, + InternalPort uint16, + Description string, + err error, + ) } func GetNATRouter() NATRouter { @@ -31,7 +36,7 @@ func GetNATRouter() NATRouter { return NewNoRouter() } -type Router struct { +type Mapper struct { log logging.Logger r NATRouter closer chan struct{} @@ -40,8 +45,8 @@ type Router struct { errs wrappers.Errs } -func NewRouter(log logging.Logger, r NATRouter) Router { - return Router{ +func NewPortMapper(log logging.Logger, r NATRouter) Mapper { + return Mapper{ log: log, r: r, closer: make(chan struct{}), @@ -51,7 +56,7 @@ func NewRouter(log logging.Logger, r NATRouter) Router { // Map sets up port mapping using given protocol, internal and external ports // and returns the final port mapped. It returns 0 if mapping failed after the // maximun number of retries -func (dev *Router) Map(protocol string, intport, extport uint16, desc string) uint16 { +func (dev *Mapper) Map(protocol string, intport, extport uint16, desc string) uint16 { mappedPort := make(chan uint16) dev.wg.Add(1) @@ -62,7 +67,7 @@ func (dev *Router) Map(protocol string, intport, extport uint16, desc string) ui // keepPortMapping runs in the background to keep a port mapped. It renews the // the port mapping in mapUpdateTimeout. 
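The Map/keepPortMapping split above reduces to a renew-on-timer loop: establish the mapping once, refresh it on a timer shorter than mapTimeout, and tear it down when the closer channel is closed. A minimal, self-contained sketch of that loop, with mapPort/unmapPort as hypothetical stand-ins for the Router.MapPort/UnmapPort calls:

package example

import "time"

// keepMapped refreshes a port mapping every updateInterval until closer is
// closed, then removes it. mapPort and unmapPort are assumed helpers that
// wrap the router calls for one fixed protocol/port pair.
func keepMapped(mapPort, unmapPort func() error, updateInterval time.Duration, closer <-chan struct{}) {
	if err := mapPort(); err != nil {
		return // initial mapping failed; nothing to renew
	}
	defer unmapPort() // best-effort cleanup once the loop exits

	ticker := time.NewTicker(updateInterval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
			_ = mapPort() // renew before the lease expires
		case <-closer:
			return
		}
	}
}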
-func (dev *Router) keepPortMapping(mappedPort chan<- uint16, protocol string, +func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, intport, extport uint16, desc string) { updateTimer := time.NewTimer(mapUpdateTimeout) var port uint16 = 0 @@ -82,8 +87,9 @@ func (dev *Router) keepPortMapping(mappedPort chan<- uint16, protocol string, for i := 0; i < maxRetries; i++ { port = extport + uint16(i) - if dev.r.IsMapped(port, protocol) { - dev.log.Info("Port %d is occupied, retry with the next port", port) + if intaddr, intport, desc, err := dev.r.GetPortMappingEntry(port, protocol); err == nil { + dev.log.Info("Port %d is mapped to %s:%d: %s, retry with the next port", + port, intaddr, intport, desc) continue } if err := dev.r.MapPort(protocol, intport, port, desc, mapTimeout); err != nil { @@ -124,7 +130,7 @@ func (dev *Router) keepPortMapping(mappedPort chan<- uint16, protocol string, } } -func (dev *Router) UnmapAllPorts() error { +func (dev *Mapper) UnmapAllPorts() error { close(dev.closer) dev.wg.Wait() dev.log.Info("Unmapped all ports") diff --git a/nat/no_router.go b/nat/no_router.go index d09731b..07ac025 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -27,8 +27,8 @@ func (r noRouter) ExternalIP() (net.IP, error) { return r.ip, nil } -func (noRouter) IsMapped(uint16, string) bool { - return false +func (noRouter) GetPortMappingEntry(uint16, string) (string, uint16, string, error) { + return "", 0, "", nil } func getOutboundIP() (net.IP, error) { diff --git a/nat/upnp.go b/nat/upnp.go index 943d2e4..4e122f4 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -62,7 +62,7 @@ type upnpRouter struct { func (r *upnpRouter) localIP() (net.IP, error) { // attempt to get an address on the router - deviceAddr, err := net.ResolveUDPAddr("udp4", r.dev.URLBase.Host) + deviceAddr, err := net.ResolveUDPAddr("udp", r.dev.URLBase.Host) if err != nil { return nil, err } @@ -73,7 +73,7 @@ func (r *upnpRouter) localIP() (net.IP, error) { return nil, err } - // attempt to find one of my ips that the router would know about + // attempt to find one of my IPs that matches router's record for _, netInterface := range netInterfaces { addrs, err := netInterface.Addrs() if err != nil { @@ -123,102 +123,62 @@ func (r *upnpRouter) UnmapPort(protocol string, extport uint16) error { return r.client.DeletePortMapping("", extport, protocol) } -func (r *upnpRouter) IsMapped(extport uint16, protocol string) bool { - _, _, enabled, _, _, _ := r.client.GetSpecificPortMappingEntry("", extport, protocol) - return enabled +func (r *upnpRouter) GetPortMappingEntry(extport uint16, protocol string) (string, uint16, string, error) { + intport, intaddr, _, desc, _, err := r.client.GetSpecificPortMappingEntry("", extport, protocol) + return intaddr, intport, desc, err } -func gateway1(client goupnp.ServiceClient) upnpClient { +// create UPnP SOAP service client with URN +func getUPnPClient(client goupnp.ServiceClient) upnpClient { switch client.Service.ServiceType { case internetgateway1.URN_WANIPConnection_1: return &internetgateway1.WANIPConnection1{ServiceClient: client} case internetgateway1.URN_WANPPPConnection_1: return &internetgateway1.WANPPPConnection1{ServiceClient: client} - default: - return nil - } -} - -func gateway2(client goupnp.ServiceClient) upnpClient { - switch client.Service.ServiceType { - case internetgateway2.URN_WANIPConnection_1: - return &internetgateway2.WANIPConnection1{ServiceClient: client} case internetgateway2.URN_WANIPConnection_2: return 
&internetgateway2.WANIPConnection2{ServiceClient: client} - case internetgateway2.URN_WANPPPConnection_1: - return &internetgateway2.WANPPPConnection1{ServiceClient: client} default: return nil } } -func getUPnPClient(client goupnp.ServiceClient) upnpClient { - c := gateway1(client) - if c != nil { - return c - } - return gateway2(client) -} - -func getRootDevice(dev *goupnp.MaybeRootDevice) *upnpRouter { - var router *upnpRouter - dev.Root.Device.VisitServices(func(service *goupnp.Service) { - c := goupnp.ServiceClient{ - SOAPClient: service.NewSOAPClient(), - RootDevice: dev.Root, - Location: dev.Location, - Service: service, - } - c.SOAPClient.HTTPClient.Timeout = soapRequestTimeout - client := getUPnPClient(c) - if client == nil { - return - } - router = &upnpRouter{dev.Root, client} - if router == nil { - return - } - if _, nat, err := router.client.GetNATRSIPStatus(); err != nil || !nat { - router = nil - return - } - }) - return router -} - +// discover() tries to find gateway device func discover(target string) *upnpRouter { devs, err := goupnp.DiscoverDevices(target) if err != nil { return nil } + + router := make(chan *upnpRouter) for i := 0; i < len(devs); i++ { if devs[i].Root == nil { continue } - u := getRootDevice(&devs[i]) - if u != nil { - return u - } - } - return gateway2(client) -} - -func getUPnPRouter() *upnpRouter { - targets := []string{ - internetgateway1.URN_WANConnectionDevice_1, - internetgateway2.URN_WANConnectionDevice_2, + go func(dev *goupnp.MaybeRootDevice) { + var r *upnpRouter = nil + dev.Root.Device.VisitServices(func(service *goupnp.Service) { + c := goupnp.ServiceClient{ + SOAPClient: service.NewSOAPClient(), + RootDevice: dev.Root, + Location: dev.Location, + Service: service, + } + c.SOAPClient.HTTPClient.Timeout = soapRequestTimeout + client := getUPnPClient(c) + if client == nil { + return + } + if _, nat, err := client.GetNATRSIPStatus(); err != nil || !nat { + return + } + r = &upnpRouter{dev.Root, client} + }) + router <- r + }(&devs[i]) } - routers := make(chan *upnpRouter, len(targets)) - - for _, urn := range targets { - go func(urn string) { - routers <- discover(urn) - }(urn) - } - - for i := 0; i < len(targets); i++ { - if r := <-routers; r != nil { + for i := 0; i < len(devs); i++ { + if r := <-router; r != nil { return r } } @@ -234,7 +194,7 @@ func getUPnPRouter() *upnpRouter { internetgateway2.URN_WANConnectionDevice_2, } - routers := make(chan *upnpRouter, len(targets)) + routers := make(chan *upnpRouter) for _, urn := range targets { go func(urn string) { diff --git a/node/config.go b/node/config.go index aaadb7a..e8db1fb 100644 --- a/node/config.go +++ b/node/config.go @@ -33,11 +33,12 @@ type Config struct { DB database.Database // Staking configuration - StakingIP utils.IPDesc - EnableP2PTLS bool - EnableStaking bool - StakingKeyFile string - StakingCertFile string + StakingIP utils.IPDesc + StakingLocalPort uint16 + EnableP2PTLS bool + EnableStaking bool + StakingKeyFile string + StakingCertFile string // Bootstrapping configuration BootstrapPeers []*Peer diff --git a/node/node.go b/node/node.go index 5e817fa..31577ad 100644 --- a/node/node.go +++ b/node/node.go @@ -112,7 +112,7 @@ type Node struct { */ func (n *Node) initNetworking() error { - listener, err := net.Listen(TCP, n.Config.StakingIP.PortString()) + listener, err := net.Listen(TCP, fmt.Sprintf(":%d", n.Config.StakingLocalPort)) if err != nil { return err } From fb51e6a443fefb1192410c2a1fa5dad37da04598 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Sun, 14 Jun 2020 
16:08:09 -0400 Subject: [PATCH 070/183] check failed port mapping handle failed mapping set retry to 20 --- main/main.go | 9 +++-- nat/nat.go | 98 +++++++++++++++++++++++++--------------------------- nat/upnp.go | 14 ++++---- 3 files changed, 59 insertions(+), 62 deletions(-) diff --git a/main/main.go b/main/main.go index 1a5e8d4..97829c1 100644 --- a/main/main.go +++ b/main/main.go @@ -40,10 +40,6 @@ func main() { defer log.StopOnPanic() defer Config.DB.Close() - if Config.StakingIP.IsZero() { - log.Warn("NAT traversal has failed. It will be able to connect to less nodes.") - } - // Track if sybil control is enforced if !Config.EnableStaking && Config.EnableP2PTLS { log.Warn("Staking is disabled. Sybil control is not enforced.") @@ -72,7 +68,10 @@ func main() { defer mapper.UnmapAllPorts() Config.StakingIP.Port = mapper.Map("TCP", Config.StakingLocalPort, Config.StakingIP.Port, "gecko") - Config.HTTPPort = mapper.Map("TCP", Config.HTTPPort, Config.HTTPPort, "gecko http") + + if Config.StakingIP.IsZero() { + log.Warn("NAT traversal has failed. It will be able to connect to less nodes.") + } node := node.Node{} diff --git a/nat/nat.go b/nat/nat.go index 6c30808..76beb2b 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -16,10 +16,10 @@ const ( ) type NATRouter interface { - MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error - UnmapPort(protocol string, extport uint16) error + MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error + UnmapPort(protocol string, extPort uint16) error ExternalIP() (net.IP, error) - GetPortMappingEntry(extport uint16, protocol string) ( + GetPortMappingEntry(extPort uint16, protocol string) ( InternalIP string, InternalPort uint16, Description string, @@ -56,11 +56,10 @@ func NewPortMapper(log logging.Logger, r NATRouter) Mapper { // Map sets up port mapping using given protocol, internal and external ports // and returns the final port mapped. It returns 0 if mapping failed after the // maximun number of retries -func (dev *Mapper) Map(protocol string, intport, extport uint16, desc string) uint16 { +func (dev *Mapper) Map(protocol string, intPort, extPort uint16, desc string) uint16 { mappedPort := make(chan uint16) - dev.wg.Add(1) - go dev.keepPortMapping(mappedPort, protocol, intport, extport, desc) + go dev.keepPortMapping(mappedPort, protocol, intPort, extPort, desc) return <-mappedPort } @@ -68,66 +67,65 @@ func (dev *Mapper) Map(protocol string, intport, extport uint16, desc string) ui // keepPortMapping runs in the background to keep a port mapped. It renews the // the port mapping in mapUpdateTimeout. 
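On the caller's side, a zero return from Map means no mapping could be established, which is how main.go decides whether to warn about NAT traversal. A trimmed sketch of that usage (the port numbers and the logging.Logger value are illustrative; GetNATRouter and NewPortMapper are the constructors defined in this package at this point in the series, and the import paths follow the repository layout):

package example

import (
	"github.com/ava-labs/gecko/nat"
	"github.com/ava-labs/gecko/utils/logging"
)

// mapStakingPort tries to expose a local staking port through the NAT device
// and logs a warning when Map reports failure by returning 0.
func mapStakingPort(log logging.Logger) {
	mapper := nat.NewPortMapper(log, nat.GetNATRouter())
	defer mapper.UnmapAllPorts()

	if extPort := mapper.Map("TCP", 9651, 9651, "gecko"); extPort == 0 {
		log.Warn("NAT traversal has failed; the node will be reachable by fewer peers")
	}
}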
func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, - intport, extport uint16, desc string) { + intPort, extPort uint16, desc string) { updateTimer := time.NewTimer(mapUpdateTimeout) - var port uint16 = 0 - defer func() { - updateTimer.Stop() - - dev.log.Info("Unmap protocol %s external port %d", protocol, port) - if port > 0 { - dev.errLock.Lock() - dev.errs.Add(dev.r.UnmapPort(protocol, port)) - dev.errLock.Unlock() - } - - dev.wg.Done() - }() - - for i := 0; i < maxRetries; i++ { - port = extport + uint16(i) - if intaddr, intport, desc, err := dev.r.GetPortMappingEntry(port, protocol); err == nil { + for i := 0; i <= maxRetries; i++ { + port := extPort + uint16(i) + if intaddr, intPort, desc, err := dev.r.GetPortMappingEntry(port, protocol); err == nil { dev.log.Info("Port %d is mapped to %s:%d: %s, retry with the next port", - port, intaddr, intport, desc) + port, intaddr, intPort, desc) + port = 0 continue } - if err := dev.r.MapPort(protocol, intport, port, desc, mapTimeout); err != nil { + if err := dev.r.MapPort(protocol, intPort, port, desc, mapTimeout); err != nil { dev.log.Error("Map port failed. Protocol %s Internal %d External %d. %s", - protocol, intport, port, err) + protocol, intPort, port, err) dev.errLock.Lock() dev.errs.Add(err) dev.errLock.Unlock() } else { dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol, - intport, port) + intPort, port) mappedPort <- port + + dev.wg.Add(1) + + defer func(port uint16) { + updateTimer.Stop() + + dev.log.Info("Unmap protocol %s external port %d", protocol, port) + if port > 0 { + dev.errLock.Lock() + dev.errs.Add(dev.r.UnmapPort(protocol, port)) + dev.errLock.Unlock() + } + + dev.wg.Done() + }(port) + + for { + select { + case <-updateTimer.C: + if err := dev.r.MapPort(protocol, intPort, port, desc, mapTimeout); err != nil { + dev.log.Error("Renew port mapping failed. Protocol %s Internal %d External %d. %s", + protocol, intPort, port, err) + } else { + dev.log.Info("Renew port mapping Protocol %s Internal %d External %d.", protocol, + intPort, port) + } + + updateTimer.Reset(mapUpdateTimeout) + case _, _ = <-dev.closer: + return + } + } break } } - if port == 0 { - dev.log.Error("Unable to map port %d", extport) - mappedPort <- port - return - } - - for { - select { - case <-updateTimer.C: - if err := dev.r.MapPort(protocol, intport, port, desc, mapTimeout); err != nil { - dev.log.Error("Renew port mapping failed. Protocol %s Internal %d External %d. 
%s", - protocol, intport, port, err) - } else { - dev.log.Info("Renew port mapping Protocol %s Internal %d External %d.", protocol, - intport, port) - } - - updateTimer.Reset(mapUpdateTimeout) - case _, _ = <-dev.closer: - return - } - } + dev.log.Warn("Unable to map port %d~%d", extPort, extPort+maxRetries) + mappedPort <- 0 } func (dev *Mapper) UnmapAllPorts() error { diff --git a/nat/upnp.go b/nat/upnp.go index 4e122f4..863544e 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -107,7 +107,7 @@ func (r *upnpRouter) ExternalIP() (net.IP, error) { return ip, nil } -func (r *upnpRouter) MapPort(protocol string, intport, extport uint16, +func (r *upnpRouter) MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error { ip, err := r.localIP() if err != nil { @@ -115,17 +115,17 @@ func (r *upnpRouter) MapPort(protocol string, intport, extport uint16, } lifetime := uint32(duration / time.Second) - return r.client.AddPortMapping("", extport, protocol, intport, + return r.client.AddPortMapping("", extPort, protocol, intPort, ip.String(), true, desc, lifetime) } -func (r *upnpRouter) UnmapPort(protocol string, extport uint16) error { - return r.client.DeletePortMapping("", extport, protocol) +func (r *upnpRouter) UnmapPort(protocol string, extPort uint16) error { + return r.client.DeletePortMapping("", extPort, protocol) } -func (r *upnpRouter) GetPortMappingEntry(extport uint16, protocol string) (string, uint16, string, error) { - intport, intaddr, _, desc, _, err := r.client.GetSpecificPortMappingEntry("", extport, protocol) - return intaddr, intport, desc, err +func (r *upnpRouter) GetPortMappingEntry(extPort uint16, protocol string) (string, uint16, string, error) { + intPort, intAddr, _, desc, _, err := r.client.GetSpecificPortMappingEntry("", extPort, protocol) + return intAddr, intPort, desc, err } // create UPnP SOAP service client with URN From 571b6e597be88eced0ff0b373117c01d4475d397 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Mon, 15 Jun 2020 15:16:24 -0400 Subject: [PATCH 071/183] bring pmp back --- nat/nat.go | 20 +++++++------ nat/no_router.go | 11 ++++--- nat/pmp.go | 78 ++++++++++++++++++++++++++++++++++++++++++++++++ nat/upnp.go | 5 +++- 4 files changed, 100 insertions(+), 14 deletions(-) create mode 100644 nat/pmp.go diff --git a/nat/nat.go b/nat/nat.go index 76beb2b..9fe97f6 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -1,3 +1,6 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ package nat import ( @@ -10,14 +13,14 @@ import ( ) const ( - mapTimeout = 30 * time.Minute + mapTimeout = 30 * time.Second mapUpdateTimeout = mapTimeout / 2 maxRetries = 20 ) type NATRouter interface { MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error - UnmapPort(protocol string, extPort uint16) error + UnmapPort(protocol string, intPort, extPort uint16) error ExternalIP() (net.IP, error) GetPortMappingEntry(extPort uint16, protocol string) ( InternalIP string, @@ -28,10 +31,12 @@ type NATRouter interface { } func GetNATRouter() NATRouter { - //TODO add PMP support if r := getUPnPRouter(); r != nil { return r } + if r := getPMPRouter(); r != nil { + return r + } return NewNoRouter() } @@ -95,11 +100,9 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, updateTimer.Stop() dev.log.Info("Unmap protocol %s external port %d", protocol, port) - if port > 0 { - dev.errLock.Lock() - dev.errs.Add(dev.r.UnmapPort(protocol, port)) - dev.errLock.Unlock() - } + dev.errLock.Lock() + dev.errs.Add(dev.r.UnmapPort(protocol, intPort, port)) + dev.errLock.Unlock() dev.wg.Done() }(port) @@ -120,7 +123,6 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, return } } - break } } diff --git a/nat/no_router.go b/nat/no_router.go index 07ac025..7a15601 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -1,3 +1,6 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package nat import ( @@ -12,14 +15,14 @@ type noRouter struct { ip net.IP } -func (noRouter) MapPort(protocol string, intport, extport uint16, desc string, duration time.Duration) error { - if intport != extport { - return fmt.Errorf("cannot map port %d to %d", intport, extport) +func (noRouter) MapPort(_ string, intPort, extPort uint16, _ string, _ time.Duration) error { + if intPort != extPort { + return fmt.Errorf("cannot map port %d to %d", intPort, extPort) } return nil } -func (noRouter) UnmapPort(protocol string, extport uint16) error { +func (noRouter) UnmapPort(string, uint16, uint16) error { return nil } diff --git a/nat/pmp.go b/nat/pmp.go new file mode 100644 index 0000000..7c5a800 --- /dev/null +++ b/nat/pmp.go @@ -0,0 +1,78 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package nat + +import ( + "fmt" + "net" + "time" + + "github.com/jackpal/gateway" + "github.com/jackpal/go-nat-pmp" +) + +var ( + pmpClientTimeout = 500 * time.Millisecond +) + +// natPMPClient adapts the NAT-PMP protocol implementation so it conforms to +// the common interface. 
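pmpRouter below is a thin wrapper over go-nat-pmp plus gateway discovery; the underlying calls can be exercised directly. A minimal sketch (port 9651 and the 30-second lifetime are arbitrary example values):

package example

import (
	"fmt"
	"time"

	"github.com/jackpal/gateway"
	natpmp "github.com/jackpal/go-nat-pmp"
)

// probePMP discovers the default gateway, asks it for the external address,
// and requests a short-lived TCP mapping, mirroring what pmpRouter does.
func probePMP() error {
	gatewayIP, err := gateway.DiscoverGateway()
	if err != nil {
		return err
	}
	client := natpmp.NewClientWithTimeout(gatewayIP, 500*time.Millisecond)

	addr, err := client.GetExternalAddress()
	if err != nil {
		return err // the gateway does not speak NAT-PMP
	}
	fmt.Printf("external IP: %v\n", addr.ExternalIPAddress)

	// go-nat-pmp takes the lifetime in seconds, which is why pmpRouter.MapPort
	// divides the duration by time.Second before calling it.
	_, err = client.AddPortMapping("tcp", 9651, 9651, 30)
	return err
}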
+type pmpRouter struct { + client *natpmp.Client +} + +func (pmp *pmpRouter) MapPort( + networkProtocol string, + newInternalPort uint16, + newExternalPort uint16, + mappingName string, + mappingDuration time.Duration) error { + protocol := string(networkProtocol) + internalPort := int(newInternalPort) + externalPort := int(newExternalPort) + // go-nat-pmp uses seconds to denote their lifetime + lifetime := int(mappingDuration / time.Second) + + _, err := pmp.client.AddPortMapping(protocol, internalPort, externalPort, lifetime) + return err +} + +func (pmp *pmpRouter) UnmapPort( + networkProtocol string, + internalPort uint16, + _ uint16) error { + protocol := string(networkProtocol) + internalPortInt := int(internalPort) + + _, err := pmp.client.AddPortMapping(protocol, internalPortInt, 0, 0) + return err +} + +func (pmp *pmpRouter) ExternalIP() (net.IP, error) { + response, err := pmp.client.GetExternalAddress() + if err != nil { + return nil, err + } + return response.ExternalIPAddress[:], nil +} + +// go-nat-pmp does not support port mapping entry query +func (pmp *pmpRouter) GetPortMappingEntry(externalPort uint16, protocol string) ( + string, uint16, string, error) { + return "", 0, "", fmt.Errorf("port mapping entry not found") +} + +func getPMPRouter() *pmpRouter { + gatewayIP, err := gateway.DiscoverGateway() + if err != nil { + return nil + } + + pmp := &pmpRouter{natpmp.NewClientWithTimeout(gatewayIP, pmpClientTimeout)} + if _, err := pmp.ExternalIP(); err != nil { + return nil + } + + return pmp +} diff --git a/nat/upnp.go b/nat/upnp.go index 863544e..3191bc8 100644 --- a/nat/upnp.go +++ b/nat/upnp.go @@ -1,3 +1,6 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + package nat import ( @@ -119,7 +122,7 @@ func (r *upnpRouter) MapPort(protocol string, intPort, extPort uint16, ip.String(), true, desc, lifetime) } -func (r *upnpRouter) UnmapPort(protocol string, extPort uint16) error { +func (r *upnpRouter) UnmapPort(protocol string, _, extPort uint16) error { return r.client.DeletePortMapping("", extPort, protocol) } From 210ad164f3af42f1f41746833a9d5cd7334ef329 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Tue, 16 Jun 2020 11:29:50 -0400 Subject: [PATCH 072/183] resolve comments for PR 71; change log leves; type check type check ... --- main/main.go | 2 +- main/params.go | 2 +- nat/nat.go | 54 +++++++++++++++++++++++------------------------- nat/no_router.go | 6 +++++- node/config.go | 2 +- 5 files changed, 34 insertions(+), 32 deletions(-) diff --git a/main/main.go b/main/main.go index 97829c1..d95b8c7 100644 --- a/main/main.go +++ b/main/main.go @@ -67,7 +67,7 @@ func main() { mapper := nat.NewPortMapper(log, Config.Nat) defer mapper.UnmapAllPorts() - Config.StakingIP.Port = mapper.Map("TCP", Config.StakingLocalPort, Config.StakingIP.Port, "gecko") + Config.StakingIP.Port = mapper.Map("TCP", Config.StakingLocalPort, "gecko") if Config.StakingIP.IsZero() { log.Warn("NAT traversal has failed. 
It will be able to connect to less nodes.") diff --git a/main/params.go b/main/params.go index b339585..e53ed25 100644 --- a/main/params.go +++ b/main/params.go @@ -281,7 +281,7 @@ func init() { Config.DB = memdb.New() } - Config.Nat = nat.GetNATRouter() + Config.Nat = nat.GetRouter() var ip net.IP // If public IP is not specified, get it using shell command dig diff --git a/nat/nat.go b/nat/nat.go index 9fe97f6..f8f5ca7 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -18,7 +18,7 @@ const ( maxRetries = 20 ) -type NATRouter interface { +type Router interface { MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error UnmapPort(protocol string, intPort, extPort uint16) error ExternalIP() (net.IP, error) @@ -30,7 +30,7 @@ type NATRouter interface { ) } -func GetNATRouter() NATRouter { +func GetRouter() Router { if r := getUPnPRouter(); r != nil { return r } @@ -43,14 +43,14 @@ func GetNATRouter() NATRouter { type Mapper struct { log logging.Logger - r NATRouter + r Router closer chan struct{} wg sync.WaitGroup errLock sync.Mutex errs wrappers.Errs } -func NewPortMapper(log logging.Logger, r NATRouter) Mapper { +func NewPortMapper(log logging.Logger, r Router) Mapper { return Mapper{ log: log, r: r, @@ -61,10 +61,10 @@ func NewPortMapper(log logging.Logger, r NATRouter) Mapper { // Map sets up port mapping using given protocol, internal and external ports // and returns the final port mapped. It returns 0 if mapping failed after the // maximun number of retries -func (dev *Mapper) Map(protocol string, intPort, extPort uint16, desc string) uint16 { +func (dev *Mapper) Map(protocol string, intPort uint16, desc string) uint16 { mappedPort := make(chan uint16) - go dev.keepPortMapping(mappedPort, protocol, intPort, extPort, desc) + go dev.keepPortMapping(mappedPort, protocol, intPort, desc) return <-mappedPort } @@ -72,50 +72,48 @@ func (dev *Mapper) Map(protocol string, intPort, extPort uint16, desc string) ui // keepPortMapping runs in the background to keep a port mapped. It renews the // the port mapping in mapUpdateTimeout. func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, - intPort, extPort uint16, desc string) { + intPort uint16, desc string) { updateTimer := time.NewTimer(mapUpdateTimeout) for i := 0; i <= maxRetries; i++ { - port := extPort + uint16(i) - if intaddr, intPort, desc, err := dev.r.GetPortMappingEntry(port, protocol); err == nil { - dev.log.Info("Port %d is mapped to %s:%d: %s, retry with the next port", - port, intaddr, intPort, desc) - port = 0 + extPort := intPort + uint16(i) + if intaddr, intPort, desc, err := dev.r.GetPortMappingEntry(extPort, protocol); err == nil { + dev.log.Debug("Port %d is taken by %s:%d: %s, retry with the next port", + extPort, intaddr, intPort, desc) continue } - if err := dev.r.MapPort(protocol, intPort, port, desc, mapTimeout); err != nil { + if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil { dev.log.Error("Map port failed. Protocol %s Internal %d External %d. 
%s", - protocol, intPort, port, err) + protocol, intPort, extPort, err) dev.errLock.Lock() dev.errs.Add(err) dev.errLock.Unlock() } else { dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol, - intPort, port) - mappedPort <- port + intPort, extPort) dev.wg.Add(1) - defer func(port uint16) { + mappedPort <- extPort + + defer func(extPort uint16) { updateTimer.Stop() - dev.log.Info("Unmap protocol %s external port %d", protocol, port) - dev.errLock.Lock() - dev.errs.Add(dev.r.UnmapPort(protocol, intPort, port)) - dev.errLock.Unlock() + dev.log.Debug("Unmap protocol %s external port %d", protocol, extPort) + dev.r.UnmapPort(protocol, intPort, extPort) dev.wg.Done() - }(port) + }(extPort) for { select { case <-updateTimer.C: - if err := dev.r.MapPort(protocol, intPort, port, desc, mapTimeout); err != nil { - dev.log.Error("Renew port mapping failed. Protocol %s Internal %d External %d. %s", - protocol, intPort, port, err) + if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil { + dev.log.Error("Renewing port mapping from external port %d to internal port %d failed with %s", + intPort, extPort, err) } else { - dev.log.Info("Renew port mapping Protocol %s Internal %d External %d.", protocol, - intPort, port) + dev.log.Info("Renewed port mapping from external port %d to internal port %d.", + intPort, extPort) } updateTimer.Reset(mapUpdateTimeout) @@ -126,7 +124,7 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, } } - dev.log.Warn("Unable to map port %d~%d", extPort, extPort+maxRetries) + dev.log.Debug("Unable to map port %d~%d", intPort, intPort+maxRetries) mappedPort <- 0 } diff --git a/nat/no_router.go b/nat/no_router.go index 7a15601..7b7c8d7 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -41,7 +41,11 @@ func getOutboundIP() (net.IP, error) { } defer conn.Close() - return conn.LocalAddr().(*net.UDPAddr).IP, nil + if udpAddr, ok := conn.LocalAddr().(*net.UDPAddr); ok { + return udpAddr.IP, conn.Close() + } + + return nil, fmt.Errorf("getting outbound IP failed") } func NewNoRouter() *noRouter { diff --git a/node/config.go b/node/config.go index e8db1fb..69412a6 100644 --- a/node/config.go +++ b/node/config.go @@ -15,7 +15,7 @@ import ( // Config contains all of the configurations of an Ava node. type Config struct { // protocol to use for opening the network interface - Nat nat.NATRouter + Nat nat.Router // ID of the network this node should connect to NetworkID uint32 From 8fdeef5eb64f4459ec5f3baf65ccc0e4c90a4aa8 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 15:17:13 -0400 Subject: [PATCH 073/183] pre-allocate slices for List in set, bag, shortSet --- ids/bag.go | 17 ++++++++++--- ids/bag_benchmark_test.go | 53 +++++++++++++++++++++++++++++++++++++++ ids/bag_test.go | 4 +-- ids/set.go | 14 +++++++++-- ids/set_benchmark_test.go | 53 +++++++++++++++++++++++++++++++++++++++ ids/short_set.go | 13 ++++++++-- 6 files changed, 144 insertions(+), 10 deletions(-) create mode 100644 ids/bag_benchmark_test.go create mode 100644 ids/set_benchmark_test.go diff --git a/ids/bag.go b/ids/bag.go index de0af46..1d16c64 100644 --- a/ids/bag.go +++ b/ids/bag.go @@ -8,6 +8,10 @@ import ( "strings" ) +const ( + minBagSize = 16 +) + // Bag is a multiset of IDs. 
// // A bag has the ability to split and filter on it's bits for ease of use for @@ -25,7 +29,7 @@ type Bag struct { func (b *Bag) init() { if b.counts == nil { - b.counts = make(map[[32]byte]int) + b.counts = make(map[[32]byte]int, minBagSize) } } @@ -72,16 +76,21 @@ func (b *Bag) AddCount(id ID, count int) { } // Count returns the number of times the id has been added. -func (b *Bag) Count(id ID) int { return b.counts[*id.ID] } +func (b *Bag) Count(id ID) int { + b.init() + return b.counts[*id.ID] +} // Len returns the number of times an id has been added. func (b *Bag) Len() int { return b.size } // List returns a list of all ids that have been added. func (b *Bag) List() []ID { - idList := []ID(nil) + idList := make([]ID, len(b.counts), len(b.counts)) + i := 0 for id := range b.counts { - idList = append(idList, NewID(id)) + idList[i] = NewID(id) + i++ } return idList } diff --git a/ids/bag_benchmark_test.go b/ids/bag_benchmark_test.go new file mode 100644 index 0000000..e856505 --- /dev/null +++ b/ids/bag_benchmark_test.go @@ -0,0 +1,53 @@ +package ids + +import ( + "crypto/rand" + "testing" +) + +// +func BenchmarkBagListSmall(b *testing.B) { + smallLen := 5 + bag := Bag{} + for i := 0; i < smallLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + NewID(idBytes) + bag.Add(NewID(idBytes)) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + bag.List() + } +} + +func BenchmarkBagListMedium(b *testing.B) { + mediumLen := 25 + bag := Bag{} + for i := 0; i < mediumLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + NewID(idBytes) + bag.Add(NewID(idBytes)) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + bag.List() + } +} + +func BenchmarkBagListLarsge(b *testing.B) { + largeLen := 100000 + bag := Bag{} + for i := 0; i < largeLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + NewID(idBytes) + bag.Add(NewID(idBytes)) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + bag.List() + } +} diff --git a/ids/bag_test.go b/ids/bag_test.go index ed35233..af0965b 100644 --- a/ids/bag_test.go +++ b/ids/bag_test.go @@ -18,8 +18,8 @@ func TestBagAdd(t *testing.T) { } else if count := bag.Count(id1); count != 0 { t.Fatalf("Bag.Count returned %d expected %d", count, 0) } else if size := bag.Len(); size != 0 { - t.Fatalf("Bag.Len returned %d expected %d", count, 0) - } else if list := bag.List(); list != nil { + t.Fatalf("Bag.Len returned %d elements expected %d", count, 0) + } else if list := bag.List(); len(list) != 0 { t.Fatalf("Bag.List returned %v expected %v", list, nil) } else if mode, freq := bag.Mode(); !mode.IsZero() { t.Fatalf("Bag.Mode[0] returned %s expected %s", mode, ID{}) diff --git a/ids/set.go b/ids/set.go index 9d0b1ec..c3aa024 100644 --- a/ids/set.go +++ b/ids/set.go @@ -7,11 +7,19 @@ import ( "strings" ) +const ( + // The minimum capacity of a set + minSetSize = 16 +) + // Set is a set of IDs type Set map[[32]byte]bool func (ids *Set) init(size int) { if *ids == nil { + if minSetSize > size { + size = minSetSize + } *ids = make(map[[32]byte]bool, size) } } @@ -70,9 +78,11 @@ func (ids *Set) Clear() { *ids = nil } // List converts this set into a list func (ids Set) List() []ID { - idList := []ID(nil) + idList := make([]ID, ids.Len(), ids.Len()) + i := 0 for id := range ids { - idList = append(idList, NewID(id)) + idList[i] = NewID(id) + i++ } return idList } diff --git a/ids/set_benchmark_test.go b/ids/set_benchmark_test.go new file mode 100644 index 0000000..c88b4f9 --- /dev/null +++ b/ids/set_benchmark_test.go @@ -0,0 +1,53 @@ +package ids + +import ( + 
"crypto/rand" + "testing" +) + +// +func BenchmarkSetListSmall(b *testing.B) { + smallLen := 5 + set := Set{} + for i := 0; i < smallLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + NewID(idBytes) + set.Add(NewID(idBytes)) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + set.List() + } +} + +func BenchmarkSetListMedium(b *testing.B) { + mediumLen := 25 + set := Set{} + for i := 0; i < mediumLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + NewID(idBytes) + set.Add(NewID(idBytes)) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + set.List() + } +} + +func BenchmarkSetListLarsge(b *testing.B) { + largeLen := 100000 + set := Set{} + for i := 0; i < largeLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + NewID(idBytes) + set.Add(NewID(idBytes)) + } + b.ResetTimer() + for n := 0; n < b.N; n++ { + set.List() + } +} diff --git a/ids/short_set.go b/ids/short_set.go index 690cc3a..6977863 100644 --- a/ids/short_set.go +++ b/ids/short_set.go @@ -5,11 +5,18 @@ package ids import "strings" +const ( + minShortSetSize = 16 +) + // ShortSet is a set of ShortIDs type ShortSet map[[20]byte]bool func (ids *ShortSet) init(size int) { if *ids == nil { + if minShortSetSize > size { + size = minShortSetSize + } *ids = make(map[[20]byte]bool, size) } } @@ -65,9 +72,11 @@ func (ids ShortSet) CappedList(size int) []ShortID { // List converts this set into a list func (ids ShortSet) List() []ShortID { - idList := make([]ShortID, len(ids))[:0] + idList := make([]ShortID, len(ids), len(ids)) + i := 0 for id := range ids { - idList = append(idList, NewShortID(id)) + idList[i] = NewShortID(id) + i++ } return idList } From 191cd485935bc1b90d1bd445c248fea03af3becf Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 15:34:34 -0400 Subject: [PATCH 074/183] add minimum map size to Blocker --- snow/events/blocker.go | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/snow/events/blocker.go b/snow/events/blocker.go index 6bfdd7b..3491b24 100644 --- a/snow/events/blocker.go +++ b/snow/events/blocker.go @@ -10,12 +10,16 @@ import ( "github.com/ava-labs/gecko/ids" ) +const ( + minBlockerSize = 16 +) + // Blocker tracks objects that are blocked type Blocker map[[32]byte][]Blockable func (b *Blocker) init() { if *b == nil { - *b = make(map[[32]byte][]Blockable) + *b = make(map[[32]byte][]Blockable, minBlockerSize) } } From 4ecd92efba77399ce0c5637ccbe56b3a845fa5d2 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 15:43:30 -0400 Subject: [PATCH 075/183] add minimum size to uniqueBag and Requests --- ids/unique_bag.go | 6 +++++- snow/engine/common/requests.go | 8 ++++++-- 2 files changed, 11 insertions(+), 3 deletions(-) diff --git a/ids/unique_bag.go b/ids/unique_bag.go index d5d3e36..461a6bc 100644 --- a/ids/unique_bag.go +++ b/ids/unique_bag.go @@ -8,12 +8,16 @@ import ( "strings" ) +const ( + minUniqueBagSize = 16 +) + // UniqueBag ... type UniqueBag map[[32]byte]BitSet func (b *UniqueBag) init() { if *b == nil { - *b = make(map[[32]byte]BitSet) + *b = make(map[[32]byte]BitSet, minUniqueBagSize) } } diff --git a/snow/engine/common/requests.go b/snow/engine/common/requests.go index 22d5759..51f6cb3 100644 --- a/snow/engine/common/requests.go +++ b/snow/engine/common/requests.go @@ -7,6 +7,10 @@ import ( "github.com/ava-labs/gecko/ids" ) +const ( + minRequestsSize = 32 +) + type req struct { vdr ids.ShortID id uint32 @@ -22,7 +26,7 @@ type Requests struct { // are only in one request at a time. 
func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) { if r.reqsToID == nil { - r.reqsToID = make(map[[20]byte]map[uint32]ids.ID) + r.reqsToID = make(map[[20]byte]map[uint32]ids.ID, minRequestsSize) } vdrKey := vdr.Key() vdrReqs, ok := r.reqsToID[vdrKey] @@ -33,7 +37,7 @@ func (r *Requests) Add(vdr ids.ShortID, requestID uint32, containerID ids.ID) { vdrReqs[requestID] = containerID if r.idToReq == nil { - r.idToReq = make(map[[32]byte]req) + r.idToReq = make(map[[32]byte]req, minRequestsSize) } r.idToReq[containerID.Key()] = req{ vdr: vdr, From 77d24022fefab4a7d8966cd69004e4b2dd1f6c85 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 16:11:21 -0400 Subject: [PATCH 076/183] add minimumCacheSize --- cache/lru_cache.go | 8 +++-- cache/lru_cache_benchmark_test.go | 53 +++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 2 deletions(-) create mode 100644 cache/lru_cache_benchmark_test.go diff --git a/cache/lru_cache.go b/cache/lru_cache.go index 629d6bd..ae04138 100644 --- a/cache/lru_cache.go +++ b/cache/lru_cache.go @@ -10,6 +10,10 @@ import ( "github.com/ava-labs/gecko/ids" ) +const ( + minCacheSize = 32 +) + type entry struct { Key ids.ID Value interface{} @@ -59,7 +63,7 @@ func (c *LRU) Flush() { func (c *LRU) init() { if c.entryMap == nil { - c.entryMap = make(map[[32]byte]*list.Element) + c.entryMap = make(map[[32]byte]*list.Element, minCacheSize) } if c.entryList == nil { c.entryList = list.New() @@ -134,6 +138,6 @@ func (c *LRU) evict(key ids.ID) { func (c *LRU) flush() { c.init() - c.entryMap = make(map[[32]byte]*list.Element) + c.entryMap = make(map[[32]byte]*list.Element, minCacheSize) c.entryList = list.New() } diff --git a/cache/lru_cache_benchmark_test.go b/cache/lru_cache_benchmark_test.go new file mode 100644 index 0000000..6bdbaf8 --- /dev/null +++ b/cache/lru_cache_benchmark_test.go @@ -0,0 +1,53 @@ +package cache + +import ( + "crypto/rand" + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func BenchmarkLRUCachePutSmall(b *testing.B) { + smallLen := 5 + cache := &LRU{Size: smallLen} + for n := 0; n < b.N; n++ { + for i := 0; i < smallLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + cache.Put(ids.NewID(idBytes), n) + } + b.StopTimer() + cache.Flush() + b.StartTimer() + } +} + +func BenchmarkLRUCachePutMedium(b *testing.B) { + mediumLen := 250 + cache := &LRU{Size: mediumLen} + for n := 0; n < b.N; n++ { + for i := 0; i < mediumLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + cache.Put(ids.NewID(idBytes), n) + } + b.StopTimer() + cache.Flush() + b.StartTimer() + } +} + +func BenchmarkLRUCachePutLarge(b *testing.B) { + largeLen := 10000 + cache := &LRU{Size: largeLen} + for n := 0; n < b.N; n++ { + for i := 0; i < largeLen; i++ { + var idBytes [32]byte + rand.Read(idBytes[:]) + cache.Put(ids.NewID(idBytes), n) + } + b.StopTimer() + cache.Flush() + b.StartTimer() + } +} From 8edcb1689b6e28343638485f73a4bcc814b72dbe Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 16 Jun 2020 16:52:46 -0400 Subject: [PATCH 077/183] bump version for everest --- genesis/config.go | 118 ++++++++++++++++++++++++++++++++++++++++++ genesis/network_id.go | 6 ++- main/params.go | 2 +- node/node.go | 2 +- 4 files changed, 125 insertions(+), 3 deletions(-) diff --git a/genesis/config.go b/genesis/config.go index 7442ca0..832d694 100644 --- a/genesis/config.go +++ b/genesis/config.go @@ -50,6 +50,122 @@ func (c *Config) init() error { // Hard coded genesis constants var ( + EverestConfig = Config{ + MintAddresses: 
[]string{ + "95YUFjhDG892VePMzpwKF9JzewGKvGRi3", + }, + FundedAddresses: []string{ + "9uKvvA7E35QCwLvAaohXTCfFejbf3Rv17", + "JLrYNMYXANGj43BfWXBxMMAEenUBp1Sbn", + "7TUTzwrU6nbZtWHjTHEpdneUvjKBxb3EM", + "77mPUXBdQKwQpPoX6rckCZGLGGdkuG1G6", + "4gGWdFZ4Gax1B466YKXyKRRpWLb42Afdt", + "CKTkzAPsRxCreyiDTnjGxLmjMarxF28fi", + "4ABm9gFHVtsNdcKSd1xsacFkGneSgzpaa", + "DpL8PTsrjtLzv5J8LL3D2A6YcnCTqrNH9", + "ZdhZv6oZrmXLyFDy6ovXAu6VxmbTsT2h", + "6cesTteH62Y5mLoDBUASaBvCXuL2AthL", + }, + StakerIDs: []string{ + "LQwRLm4cbJ7T2kxcxp4uXCU5XD8DFrE1C", + "hArafGhY2HFTbwaaVh1CSCUCUCiJ2Vfb", + "2m38qc95mhHXtrhjyGbe7r2NhniqHHJRB", + "4QBwET5o8kUhvt9xArhir4d3R25CtmZho", + "NpagUxt6KQiwPch9Sd4osv8kD1TZnkjdk", + }, + EVMBytes: []byte{ + 0x7b, 0x22, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x22, 0x3a, 0x7b, 0x22, 0x63, 0x68, 0x61, 0x69, + 0x6e, 0x49, 0x64, 0x22, 0x3a, 0x34, 0x33, 0x31, + 0x31, 0x30, 0x2c, 0x22, 0x68, 0x6f, 0x6d, 0x65, + 0x73, 0x74, 0x65, 0x61, 0x64, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x64, + 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x64, 0x61, 0x6f, 0x46, 0x6f, 0x72, 0x6b, 0x53, + 0x75, 0x70, 0x70, 0x6f, 0x72, 0x74, 0x22, 0x3a, + 0x74, 0x72, 0x75, 0x65, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x30, 0x42, 0x6c, 0x6f, 0x63, + 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, 0x65, 0x69, + 0x70, 0x31, 0x35, 0x30, 0x48, 0x61, 0x73, 0x68, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x32, 0x30, 0x38, + 0x36, 0x37, 0x39, 0x39, 0x61, 0x65, 0x65, 0x62, + 0x65, 0x61, 0x65, 0x31, 0x33, 0x35, 0x63, 0x32, + 0x34, 0x36, 0x63, 0x36, 0x35, 0x30, 0x32, 0x31, + 0x63, 0x38, 0x32, 0x62, 0x34, 0x65, 0x31, 0x35, + 0x61, 0x32, 0x63, 0x34, 0x35, 0x31, 0x33, 0x34, + 0x30, 0x39, 0x39, 0x33, 0x61, 0x61, 0x63, 0x66, + 0x64, 0x32, 0x37, 0x35, 0x31, 0x38, 0x38, 0x36, + 0x35, 0x31, 0x34, 0x66, 0x30, 0x22, 0x2c, 0x22, + 0x65, 0x69, 0x70, 0x31, 0x35, 0x35, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x65, 0x69, 0x70, 0x31, 0x35, 0x38, 0x42, 0x6c, + 0x6f, 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x2c, 0x22, + 0x62, 0x79, 0x7a, 0x61, 0x6e, 0x74, 0x69, 0x75, + 0x6d, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, + 0x30, 0x2c, 0x22, 0x63, 0x6f, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x74, 0x69, 0x6e, 0x6f, 0x70, 0x6c, + 0x65, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x22, 0x3a, + 0x30, 0x2c, 0x22, 0x70, 0x65, 0x74, 0x65, 0x72, + 0x73, 0x62, 0x75, 0x72, 0x67, 0x42, 0x6c, 0x6f, + 0x63, 0x6b, 0x22, 0x3a, 0x30, 0x7d, 0x2c, 0x22, + 0x6e, 0x6f, 0x6e, 0x63, 0x65, 0x22, 0x3a, 0x22, + 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, 0x74, 0x69, + 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, + 0x61, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, 0x30, + 0x22, 0x2c, 0x22, 0x67, 0x61, 0x73, 0x4c, 0x69, + 0x6d, 0x69, 0x74, 0x22, 0x3a, 0x22, 0x30, 0x78, + 0x35, 0x66, 0x35, 0x65, 0x31, 0x30, 0x30, 0x22, + 0x2c, 0x22, 0x64, 0x69, 0x66, 0x66, 0x69, 0x63, + 0x75, 0x6c, 0x74, 0x79, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x22, 0x2c, 0x22, 0x6d, 0x69, 0x78, + 0x48, 0x61, 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x22, 0x2c, 0x22, 0x63, 
0x6f, 0x69, 0x6e, + 0x62, 0x61, 0x73, 0x65, 0x22, 0x3a, 0x22, 0x30, + 0x78, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x22, 0x2c, 0x22, 0x61, 0x6c, 0x6c, 0x6f, + 0x63, 0x22, 0x3a, 0x7b, 0x22, 0x35, 0x37, 0x32, + 0x66, 0x34, 0x64, 0x38, 0x30, 0x66, 0x31, 0x30, + 0x66, 0x36, 0x36, 0x33, 0x62, 0x35, 0x30, 0x34, + 0x39, 0x66, 0x37, 0x38, 0x39, 0x35, 0x34, 0x36, + 0x66, 0x32, 0x35, 0x66, 0x37, 0x30, 0x62, 0x62, + 0x36, 0x32, 0x61, 0x37, 0x66, 0x22, 0x3a, 0x7b, + 0x22, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x22, 0x3a, 0x22, 0x30, 0x78, 0x33, 0x33, 0x62, + 0x32, 0x65, 0x33, 0x63, 0x39, 0x66, 0x64, 0x30, + 0x38, 0x30, 0x34, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x22, 0x7d, 0x7d, 0x2c, + 0x22, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x22, + 0x3a, 0x22, 0x30, 0x78, 0x30, 0x22, 0x2c, 0x22, + 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, + 0x73, 0x68, 0x22, 0x3a, 0x22, 0x30, 0x78, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, + 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x22, + 0x7d, + }, + } DenaliConfig = Config{ MintAddresses: []string{ "95YUFjhDG892VePMzpwKF9JzewGKvGRi3", @@ -393,6 +509,8 @@ var ( // GetConfig ... func GetConfig(networkID uint32) *Config { switch networkID { + case EverestID: + return &EverestConfig case DenaliID: return &DenaliConfig case CascadeID: diff --git a/genesis/network_id.go b/genesis/network_id.go index 7be7968..f318a36 100644 --- a/genesis/network_id.go +++ b/genesis/network_id.go @@ -16,13 +16,15 @@ var ( MainnetID uint32 = 1 CascadeID uint32 = 2 DenaliID uint32 = 3 + EverestID uint32 = 4 - TestnetID uint32 = 3 + TestnetID uint32 = 4 LocalID uint32 = 12345 MainnetName = "mainnet" CascadeName = "cascade" DenaliName = "denali" + EverestName = "everest" TestnetName = "testnet" LocalName = "local" @@ -31,6 +33,7 @@ var ( MainnetID: MainnetName, CascadeID: CascadeName, DenaliID: DenaliName, + EverestID: EverestName, LocalID: LocalName, } @@ -38,6 +41,7 @@ var ( MainnetName: MainnetID, CascadeName: CascadeID, DenaliName: DenaliID, + EverestName: EverestID, TestnetName: TestnetID, LocalName: LocalID, diff --git a/main/params.go b/main/params.go index 6dcad06..94db609 100644 --- a/main/params.go +++ b/main/params.go @@ -30,7 +30,7 @@ import ( ) const ( - dbVersion = "v0.5.0" + dbVersion = "v0.6.0" ) // Results of parsing the CLI diff --git a/node/node.go b/node/node.go index 5e817fa..942a746 100644 --- a/node/node.go +++ b/node/node.go @@ -56,7 +56,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this code - Version = version.NewDefaultVersion("avalanche", 0, 5, 5) + Version = version.NewDefaultVersion("avalanche", 0, 6, 0) versionParser = version.NewDefaultParser() ) From aca163714d1dd076fce3fee0f19f350b7487cbed Mon Sep 17 00:00:00 2001 From: Stephen Buttolph Date: Tue, 16 Jun 2020 17:21:23 -0400 Subject: [PATCH 078/183] fixed typo --- ids/bag_benchmark_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff 
--git a/ids/bag_benchmark_test.go b/ids/bag_benchmark_test.go index e856505..7007ddb 100644 --- a/ids/bag_benchmark_test.go +++ b/ids/bag_benchmark_test.go @@ -37,7 +37,7 @@ func BenchmarkBagListMedium(b *testing.B) { } } -func BenchmarkBagListLarsge(b *testing.B) { +func BenchmarkBagListLarge(b *testing.B) { largeLen := 100000 bag := Bag{} for i := 0; i < largeLen; i++ { From e0d00e25c720119b46fc9f3001819a19bdef4c47 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 17:23:48 -0400 Subject: [PATCH 079/183] fix typo --- ids/set_benchmark_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/ids/set_benchmark_test.go b/ids/set_benchmark_test.go index c88b4f9..17c1c7a 100644 --- a/ids/set_benchmark_test.go +++ b/ids/set_benchmark_test.go @@ -37,7 +37,7 @@ func BenchmarkSetListMedium(b *testing.B) { } } -func BenchmarkSetListLarsge(b *testing.B) { +func BenchmarkSetListLarge(b *testing.B) { largeLen := 100000 set := Set{} for i := 0; i < largeLen; i++ { From 4223e1f9d53bf769aada5a9ee0e2703d7029b23d Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 17:51:49 -0400 Subject: [PATCH 080/183] remove unnecessary call to Has --- vms/components/state/state.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/vms/components/state/state.go b/vms/components/state/state.go index d4d0da8..24b50e3 100644 --- a/vms/components/state/state.go +++ b/vms/components/state/state.go @@ -128,19 +128,10 @@ func (s *state) Get(db database.Database, typeID uint64, key ids.ID) (interface{ // The unique ID of this key/typeID pair uID := s.uniqueID(key, typeID) - // See if exists in database - exists, err := db.Has(uID.Bytes()) - if err != nil { - return nil, err - } - if !exists { - return nil, database.ErrNotFound - } - // Get the value from the database valueBytes, err := db.Get(uID.Bytes()) if err != nil { - return nil, fmt.Errorf("problem getting value from database: %w", err) + return nil, err } // Unmarshal the value from bytes and return it From ddcc2d73a2022c260f0d0b24906435e46ae29765 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 16 Jun 2020 18:14:16 -0400 Subject: [PATCH 081/183] lazily fetch block status --- vms/components/core/block.go | 4 +-- vms/components/core/block_test.go | 48 +++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+), 3 deletions(-) create mode 100644 vms/components/core/block_test.go diff --git a/vms/components/core/block.go b/vms/components/core/block.go index 3609477..6d1d37b 100644 --- a/vms/components/core/block.go +++ b/vms/components/core/block.go @@ -34,8 +34,7 @@ type Block struct { func (b *Block) Initialize(bytes []byte, vm *SnowmanVM) { b.VM = vm b.Metadata.Initialize(bytes) - status := b.VM.State.GetStatus(vm.DB, b.ID()) - b.SetStatus(status) + b.SetStatus(choices.Unknown) // don't set status until it is queried } // ParentID returns [b]'s parent's ID @@ -55,7 +54,6 @@ func (b *Block) Parent() snowman.Block { // Recall that b.vm.DB.Commit() must be called to persist to the DB func (b *Block) Accept() error { b.SetStatus(choices.Accepted) // Change state of this block - blkID := b.ID() // Persist data diff --git a/vms/components/core/block_test.go b/vms/components/core/block_test.go new file mode 100644 index 0000000..d9d30bc --- /dev/null +++ b/vms/components/core/block_test.go @@ -0,0 +1,48 @@ +package core + +import ( + "testing" + + "github.com/ava-labs/gecko/snow/choices" + "github.com/ava-labs/gecko/snow/consensus/snowman" + + "github.com/ava-labs/gecko/ids" + + 
"github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/versiondb" +) + +func TestBlock(t *testing.T) { + parentID := ids.NewID([32]byte{1, 2, 3, 4, 5}) + db := versiondb.New(memdb.New()) + state, err := NewSnowmanState(func([]byte) (snowman.Block, error) { return nil, nil }) + if err != nil { + t.Fatal(err) + } + b := NewBlock(parentID) + + b.Initialize([]byte{1, 2, 3}, &SnowmanVM{ + DB: db, + State: state, + }) + + // should be unknown until someone queries for it + if status := b.Metadata.status; status != choices.Unknown { + t.Fatalf("status should be unknown but is %s", status) + } + + // querying should change status to processing + if status := b.Status(); status != choices.Processing { + t.Fatalf("status should be processing but is %s", status) + } + + b.Accept() + if status := b.Status(); status != choices.Accepted { + t.Fatalf("status should be accepted but is %s", status) + } + + b.Reject() + if status := b.Status(); status != choices.Rejected { + t.Fatalf("status should be rejected but is %s", status) + } +} From aab8f5f3d46b17b0377de00dfbd42095b9c1d9ad Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Tue, 16 Jun 2020 16:28:58 -0400 Subject: [PATCH 082/183] Implement early termination case for avalanche polling --- snow/engine/avalanche/polls.go | 41 +++++++++++- snow/engine/avalanche/polls_test.go | 99 +++++++++++++++++++++++++++++ snow/engine/avalanche/transitive.go | 4 +- 3 files changed, 140 insertions(+), 4 deletions(-) create mode 100644 snow/engine/avalanche/polls_test.go diff --git a/snow/engine/avalanche/polls.go b/snow/engine/avalanche/polls.go index fa1e7df..3bf0498 100644 --- a/snow/engine/avalanche/polls.go +++ b/snow/engine/avalanche/polls.go @@ -32,9 +32,19 @@ import ( type polls struct { log logging.Logger numPolls prometheus.Gauge + alpha int m map[uint32]poll } +func newPolls(alpha int, log logging.Logger, numPolls prometheus.Gauge) polls { + return polls{ + log: log, + numPolls: numPolls, + alpha: alpha, + m: make(map[uint32]poll), + } +} + // Add to the current set of polls // Returns true if the poll was registered correctly and the network sample // should be made. @@ -42,6 +52,7 @@ func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool { poll, exists := p.m[requestID] if !exists { poll.polled = vdrs + poll.alpha = p.alpha p.m[requestID] = poll p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics @@ -85,6 +96,7 @@ func (p *polls) String() string { type poll struct { votes ids.UniqueBag polled ids.ShortSet + alpha int } // Vote registers a vote for this poll @@ -97,5 +109,32 @@ func (p *poll) Vote(votes []ids.ID, vdr ids.ShortID) { // Finished returns true if the poll has completed, with no more required // responses -func (p poll) Finished() bool { return p.polled.Len() == 0 } +func (p poll) Finished() bool { + // If I have no outstanding polls, I'm finished + numPending := p.polled.Len() + if numPending == 0 { + return true + } + // If there are still enough pending responses to include another vertex, + // then I can't stop polling + if numPending > p.alpha { + return false + } + + // I ignore any vertex that has already received alpha votes. + // To safely skip DAG traversal, assume that all votes for + // vertices with less than alpha votes will be applied to a + // single shared ancestor. + // In this case, I can terminate early, iff there are not enough + // pending votes for this ancestor to receive alpha votes. 
+ partialVotes := ids.BitSet(0) + for _, vote := range p.votes.List() { + voters := p.votes.GetSet(vote) + if voters.Len() >= p.alpha { + continue + } + partialVotes.Union(voters) + } + return partialVotes.Len()+numPending < p.alpha +} func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.polled.Len()) } diff --git a/snow/engine/avalanche/polls_test.go b/snow/engine/avalanche/polls_test.go new file mode 100644 index 0000000..cbb1ea4 --- /dev/null +++ b/snow/engine/avalanche/polls_test.go @@ -0,0 +1,99 @@ +package avalanche + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestPollTerminatesEarlyVirtuousCase(t *testing.T) { + alpha := 3 + + vtxID := GenerateID() + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) + vdr4 := ids.NewShortID([20]byte{4}) + vdr5 := ids.NewShortID([20]byte{5}) // k = 5 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + vdrs.Add(vdr2) + vdrs.Add(vdr3) + vdrs.Add(vdr4) + vdrs.Add(vdr5) + + poll := poll{ + votes: make(ids.UniqueBag), + polled: vdrs, + alpha: alpha, + } + + poll.Vote(votes, vdr1) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(votes, vdr2) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(votes, vdr3) + if !poll.Finished() { + t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") + } +} + +func TestPollAccountsForSharedAncestor(t *testing.T) { + alpha := 4 + + vtxA := GenerateID() + vtxB := GenerateID() + vtxC := GenerateID() + vtxD := GenerateID() + + // If validators 1-3 vote for frontier vertices + // B, C, and D respectively, which all share the common ancestor + // A, then we cannot terminate early with alpha = k = 4 + // If the final vote is cast for any of A, B, C, or D, then + // vertex A will have transitively received alpha = 4 votes + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) + vdr4 := ids.NewShortID([20]byte{4}) + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + vdrs.Add(vdr2) + vdrs.Add(vdr3) + vdrs.Add(vdr4) + + poll := poll{ + votes: make(ids.UniqueBag), + polled: vdrs, + alpha: alpha, + } + + votes1 := []ids.ID{vtxB} + poll.Vote(votes1, vdr1) + if poll.Finished() { + t.Fatalf("Poll finished early after receiving one vote") + } + votes2 := []ids.ID{vtxC} + poll.Vote(votes2, vdr2) + if poll.Finished() { + t.Fatalf("Poll finished early after receiving two votes") + } + votes3 := []ids.ID{vtxD} + poll.Vote(votes3, vdr3) + if poll.Finished() { + t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") + } + + votes4 := []ids.ID{vtxA} + poll.Vote(votes4, vdr4) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving all outstanding votes") + } +} diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 71fbbe0..966e2e5 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -57,9 +57,7 @@ func (t *Transitive) Initialize(config Config) error { t.onFinished = t.finishBootstrapping - t.polls.log = config.Context.Log - t.polls.numPolls = t.numPolls - t.polls.m = make(map[uint32]poll) + t.polls = newPolls(int(config.Alpha), config.Context.Log, t.numPolls) return t.bootstrapper.Initialize(config.BootstrapConfig) } From 3d6fff70e072252e101bba491835bfd346821944 Mon Sep 17 00:00:00 2001 
From: StephenButtolph Date: Tue, 16 Jun 2020 23:53:19 -0400 Subject: [PATCH 083/183] nits to clean up the PR --- snow/engine/avalanche/polls.go | 21 +++++++++------------ 1 file changed, 9 insertions(+), 12 deletions(-) diff --git a/snow/engine/avalanche/polls.go b/snow/engine/avalanche/polls.go index 3bf0498..ac3fe5c 100644 --- a/snow/engine/avalanche/polls.go +++ b/snow/engine/avalanche/polls.go @@ -110,30 +110,27 @@ func (p *poll) Vote(votes []ids.ID, vdr ids.ShortID) { // Finished returns true if the poll has completed, with no more required // responses func (p poll) Finished() bool { - // If I have no outstanding polls, I'm finished + // If there are no outstanding queries, the poll is finished numPending := p.polled.Len() if numPending == 0 { return true } // If there are still enough pending responses to include another vertex, - // then I can't stop polling + // then the poll must wait for more responses if numPending > p.alpha { return false } - // I ignore any vertex that has already received alpha votes. - // To safely skip DAG traversal, assume that all votes for - // vertices with less than alpha votes will be applied to a - // single shared ancestor. - // In this case, I can terminate early, iff there are not enough - // pending votes for this ancestor to receive alpha votes. + // Ignore any vertex that has already received alpha votes. To safely skip + // DAG traversal, assume that all votes for vertices with less than alpha + // votes will be applied to a single shared ancestor. In this case, the poll + // can terminate early, iff there are not enough pending votes for this + // ancestor to receive alpha votes. partialVotes := ids.BitSet(0) for _, vote := range p.votes.List() { - voters := p.votes.GetSet(vote) - if voters.Len() >= p.alpha { - continue + if voters := p.votes.GetSet(vote); voters.Len() < p.alpha { + partialVotes.Union(voters) } - partialVotes.Union(voters) } return partialVotes.Len()+numPending < p.alpha } From 82b91e52445149d642d25906e92a656082ac407d Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Wed, 17 Jun 2020 10:00:41 -0700 Subject: [PATCH 084/183] Add tests for platform.addDefaultSubnetDelegator for confirming tx fails when attempting to delegate too much as well as confirming balance is correct after delegating. 
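The balance check added in Case 9 exercises the invariant that the payer is
debited for both the locked stake and the transaction fee. A minimal sketch of
that assertion, using the helper names from this test file (the concrete
values are defined elsewhere in the package):

    account, err := tx.vm.getAccount(onCommitDB, defaultKey.PublicKey().Address())
    if err != nil {
        t.Fatal(err)
    }
    expectedBalance := defaultBalance - (defaultStakeAmount + txFee)
    if account.Balance != expectedBalance {
        t.Fatalf("got balance %d, want %d", account.Balance, expectedBalance)
    }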
--- .../add_default_subnet_delegator_tx_test.go | 48 +++++++++++++++++++ 1 file changed, 48 insertions(+) diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 65a0a71..0dcdd7b 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -386,4 +386,52 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { t.Fatal("should have failed verification because payer account has no $AVA to pay fee") } txFee = txFeeSaved // Reset tx fee + + // Case 8: fail verification for spending more funds than it has + tx, err = vm.newAddDefaultSubnetDelegatorTx( + 1, // nonce (new account has nonce 0 so use nonce 1) + defaultBalance*2, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + defaultKey.PublicKey().Address(), // destination + testNetworkID, // network ID + newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + _, _, _, _, err = tx.SemanticVerify(vm.DB) + if err == nil { + t.Fatal("should have failed verification because payer account spent twice the default balance") + } + + // Case 9: Confirm balance is correct + tx, err = vm.newAddDefaultSubnetDelegatorTx( + 1, // nonce (new account has nonce 0 so use nonce 1) + defaultStakeAmount, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + defaultKey.PublicKey().Address(), // destination + testNetworkID, // network ID + newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer + ) + if err != nil { + t.Fatal(err) + } + + onCommitDB, _, _, _, err := tx.SemanticVerify(vm.DB) + if err != nil { + t.Fatal(err) + } + account, err := tx.vm.getAccount(onCommitDB, defaultKey.PublicKey().Address()) + if err != nil { + t.Fatal(err) + } + balance := account.Balance + + if balance == defaultBalance-(defaultStakeAmount+txFee) { + t.Fatal("") + } } From 077afc20e73367b0b91570a38de44f89bc788b7e Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Wed, 17 Jun 2020 13:16:37 -0400 Subject: [PATCH 085/183] Adjust delegator test cases --- .../add_default_subnet_delegator_tx_test.go | 44 +++++++++---------- 1 file changed, 22 insertions(+), 22 deletions(-) diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 0dcdd7b..99bdc11 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -335,9 +335,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, // nonce - defaultStakeAmount, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultStakeAmount, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID defaultKey.PublicKey().Address(), // destination @@ -389,33 +389,33 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { // Case 8: fail verification for spending more funds than it has tx, err = vm.newAddDefaultSubnetDelegatorTx( - 1, // nonce (new account has nonce 0 so use nonce 1) - defaultBalance*2, // weight - 
uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - defaultKey.PublicKey().Address(), // node ID - defaultKey.PublicKey().Address(), // destination - testNetworkID, // network ID - newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer + defaultNonce+1, + defaultBalance*2, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + defaultKey.PublicKey().Address(), // destination + testNetworkID, // network ID + defaultKey, // tx fee payer ) if err != nil { t.Fatal(err) } _, _, _, _, err = tx.SemanticVerify(vm.DB) if err == nil { - t.Fatal("should have failed verification because payer account spent twice the default balance") + t.Fatal("should have failed verification because payer account spent twice the account's balance") } // Case 9: Confirm balance is correct tx, err = vm.newAddDefaultSubnetDelegatorTx( - 1, // nonce (new account has nonce 0 so use nonce 1) - defaultStakeAmount, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - defaultKey.PublicKey().Address(), // node ID - defaultKey.PublicKey().Address(), // destination - testNetworkID, // network ID - newAcctKey.(*crypto.PrivateKeySECP256K1R), // tx fee payer + defaultNonce+1, + defaultStakeAmount, // weight + uint64(defaultValidateStartTime.Unix()), // start time + uint64(defaultValidateEndTime.Unix()), // end time + defaultKey.PublicKey().Address(), // node ID + defaultKey.PublicKey().Address(), // destination + testNetworkID, // network ID + defaultKey, // tx fee payer ) if err != nil { t.Fatal(err) @@ -431,7 +431,7 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } balance := account.Balance - if balance == defaultBalance-(defaultStakeAmount+txFee) { - t.Fatal("") + if balance != defaultBalance-(defaultStakeAmount+txFee) { + t.Fatalf("balance was not updated correctly after subnet delegator tx") } } From f40fa7d7e6a5aec02bcddd8dc749257dcbc2fa04 Mon Sep 17 00:00:00 2001 From: Gabriel Cardona Date: Wed, 17 Jun 2020 10:21:11 -0700 Subject: [PATCH 086/183] Formatting. 
--- vms/platformvm/add_default_subnet_delegator_tx_test.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 99bdc11..9380001 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -335,9 +335,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, // nonce - defaultStakeAmount, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultStakeAmount, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID defaultKey.PublicKey().Address(), // destination From 20637a0f2328c671e6118f35c14bfe67a4d27ae1 Mon Sep 17 00:00:00 2001 From: Hongbo Zhang Date: Wed, 17 Jun 2020 17:13:35 -0400 Subject: [PATCH 087/183] dropped error msg from unmapallport --- nat/nat.go | 22 +++++++--------------- nat/no_router.go | 2 +- 2 files changed, 8 insertions(+), 16 deletions(-) diff --git a/nat/nat.go b/nat/nat.go index f8f5ca7..8aa9d9e 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -9,7 +9,6 @@ import ( "time" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/utils/wrappers" ) const ( @@ -42,12 +41,10 @@ func GetRouter() Router { } type Mapper struct { - log logging.Logger - r Router - closer chan struct{} - wg sync.WaitGroup - errLock sync.Mutex - errs wrappers.Errs + log logging.Logger + r Router + closer chan struct{} + wg sync.WaitGroup } func NewPortMapper(log logging.Logger, r Router) Mapper { @@ -81,13 +78,9 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, dev.log.Debug("Port %d is taken by %s:%d: %s, retry with the next port", extPort, intaddr, intPort, desc) continue - } - if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil { - dev.log.Error("Map port failed. Protocol %s Internal %d External %d. %s", + } else if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil { + dev.log.Debug("Map port failed. Protocol %s Internal %d External %d. 
%s", protocol, intPort, extPort, err) - dev.errLock.Lock() - dev.errs.Add(err) - dev.errLock.Unlock() } else { dev.log.Info("Mapped Protocol %s Internal %d External %d.", protocol, intPort, extPort) @@ -128,9 +121,8 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, mappedPort <- 0 } -func (dev *Mapper) UnmapAllPorts() error { +func (dev *Mapper) UnmapAllPorts() { close(dev.closer) dev.wg.Wait() dev.log.Info("Unmapped all ports") - return dev.errs.Err } diff --git a/nat/no_router.go b/nat/no_router.go index 7b7c8d7..364c338 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -39,12 +39,12 @@ func getOutboundIP() (net.IP, error) { if err != nil { return nil, err } - defer conn.Close() if udpAddr, ok := conn.LocalAddr().(*net.UDPAddr); ok { return udpAddr.IP, conn.Close() } + conn.Close() return nil, fmt.Errorf("getting outbound IP failed") } From 4da1ce58fb96629cac424d9f63bc811487167ace Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 17 Jun 2020 19:42:11 -0400 Subject: [PATCH 088/183] patch for bug that caused bootstrapping to never finish --- snow/engine/avalanche/bootstrapper.go | 3 +++ snow/engine/snowman/bootstrapper.go | 3 +++ 2 files changed, 6 insertions(+) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 705d841..b2198fd 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -107,6 +107,9 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error { // Make sure we don't already have this vertex if _, err := b.State.GetVertex(vtxID); err == nil { + if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier { + return b.finish() + } return nil } diff --git a/snow/engine/snowman/bootstrapper.go b/snow/engine/snowman/bootstrapper.go index b7df6b8..e811b81 100644 --- a/snow/engine/snowman/bootstrapper.go +++ b/snow/engine/snowman/bootstrapper.go @@ -115,6 +115,9 @@ func (b *bootstrapper) fetch(blkID ids.ID) error { // Make sure we don't already have this block if _, err := b.VM.GetBlock(blkID); err == nil { + if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier { + return b.finish() + } return nil } From f4a789b433334ca8c14cdbea5fa93b4fca54f706 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 17 Jun 2020 20:15:42 -0400 Subject: [PATCH 089/183] use heap in bootstrapping to reduce amount of work done. Move cache check to reduce memory allocations. 
--- snow/engine/avalanche/bootstrapper.go | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 705d841..43d2b47 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -124,11 +124,10 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error { // Process vertices func (b *bootstrapper) process(vtx avalanche.Vertex) error { - toProcess := []avalanche.Vertex{vtx} - for len(toProcess) > 0 { - newLen := len(toProcess) - 1 - vtx := toProcess[newLen] - toProcess = toProcess[:newLen] + toProcess := newMaxVertexHeap() + toProcess.Push(vtx) + for toProcess.Len() > 0 { + vtx := toProcess.Pop() if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this continue } @@ -168,7 +167,10 @@ func (b *bootstrapper) process(vtx avalanche.Vertex) error { } } for _, parent := range vtx.Parents() { - toProcess = append(toProcess, parent) + if _, ok := b.processedCache.Get(parent.ID()); ok { // already processed this + continue + } + toProcess.Push(parent) } b.processedCache.Put(vtx.ID(), nil) } From 9a43e1222b7af7f1f3bc4e7790d29ab5f7a782b5 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Wed, 17 Jun 2020 20:31:13 -0400 Subject: [PATCH 090/183] added tests --- snow/engine/avalanche/bootstrapper_test.go | 110 ++++++++++++++++++ snow/engine/snowman/bootstrapper_test.go | 125 +++++++++++++++++++++ 2 files changed, 235 insertions(+) diff --git a/snow/engine/avalanche/bootstrapper_test.go b/snow/engine/avalanche/bootstrapper_test.go index 85e3a6e..c488d11 100644 --- a/snow/engine/avalanche/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrapper_test.go @@ -805,3 +805,113 @@ func TestBootstrapperIncompleteMultiPut(t *testing.T) { t.Fatal("should be accepted") } } + +func TestBootstrapperFinalized(t *testing.T) { + config, peerID, sender, state, vm := newConfig(t) + + vtxID0 := ids.Empty.Prefix(0) + vtxID1 := ids.Empty.Prefix(1) + + vtxBytes0 := []byte{0} + vtxBytes1 := []byte{1} + + vtx0 := &Vtx{ + id: vtxID0, + height: 0, + status: choices.Unknown, + bytes: vtxBytes0, + } + vtx1 := &Vtx{ + id: vtxID1, + height: 1, + parents: []avalanche.Vertex{vtx0}, + status: choices.Unknown, + bytes: vtxBytes1, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + finished := new(bool) + bs.onFinished = func() error { *finished = true; return nil } + + acceptedIDs := ids.Set{} + acceptedIDs.Add(vtxID0) + acceptedIDs.Add(vtxID1) + + parsedVtx0 := false + parsedVtx1 := false + state.getVertex = func(vtxID ids.ID) (avalanche.Vertex, error) { + switch { + case vtxID.Equals(vtxID0): + if parsedVtx0 { + return vtx0, nil + } + return nil, errUnknownVertex + case vtxID.Equals(vtxID1): + if parsedVtx1 { + return vtx1, nil + } + return nil, errUnknownVertex + default: + t.Fatal(errUnknownVertex) + panic(errUnknownVertex) + } + } + state.parseVertex = func(vtxBytes []byte) (avalanche.Vertex, error) { + switch { + case bytes.Equal(vtxBytes, vtxBytes0): + vtx0.status = choices.Processing + parsedVtx0 = true + return vtx0, nil + case bytes.Equal(vtxBytes, vtxBytes1): + vtx1.status = choices.Processing + parsedVtx1 = true + return vtx1, nil + } + t.Fatal(errUnknownVertex) + return nil, errUnknownVertex + } + + requestIDs := map[[32]byte]uint32{} + sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + 
t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) + } + requestIDs[vtxID.Key()] = reqID + } + + vm.CantBootstrapping = false + + if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request vtx0 and vtx1 + t.Fatal(err) + } + + reqID, ok := requestIDs[vtxID1.Key()] + if !ok { + t.Fatalf("should have requested vtx1") + } + + vm.CantBootstrapped = false + + if err := bs.MultiPut(peerID, reqID, [][]byte{vtxBytes1, vtxBytes0}); err != nil { + t.Fatal(err) + } + + reqID, ok = requestIDs[vtxID0.Key()] + if !ok { + t.Fatalf("should have requested vtx0") + } + + if err := bs.GetAncestorsFailed(peerID, reqID); err != nil { + t.Fatal(err) + } + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } else if vtx0.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } else if vtx1.Status() != choices.Accepted { + t.Fatalf("Vertex should be accepted") + } +} diff --git a/snow/engine/snowman/bootstrapper_test.go b/snow/engine/snowman/bootstrapper_test.go index 286d9d7..016e05c 100644 --- a/snow/engine/snowman/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrapper_test.go @@ -622,3 +622,128 @@ func TestBootstrapperFilterAccepted(t *testing.T) { t.Fatalf("Blk shouldn't be accepted") } } + +func TestBootstrapperFinalized(t *testing.T) { + config, peerID, sender, vm := newConfig(t) + + blkID0 := ids.Empty.Prefix(0) + blkID1 := ids.Empty.Prefix(1) + blkID2 := ids.Empty.Prefix(2) + + blkBytes0 := []byte{0} + blkBytes1 := []byte{1} + blkBytes2 := []byte{2} + + blk0 := &Blk{ + id: blkID0, + height: 0, + status: choices.Accepted, + bytes: blkBytes0, + } + blk1 := &Blk{ + parent: blk0, + id: blkID1, + height: 1, + status: choices.Unknown, + bytes: blkBytes1, + } + blk2 := &Blk{ + parent: blk1, + id: blkID2, + height: 2, + status: choices.Unknown, + bytes: blkBytes2, + } + + bs := bootstrapper{} + bs.metrics.Initialize(config.Context.Log, fmt.Sprintf("gecko_%s", config.Context.ChainID), prometheus.NewRegistry()) + bs.Initialize(config) + finished := new(bool) + bs.onFinished = func() error { *finished = true; return nil } + + acceptedIDs := ids.Set{} + acceptedIDs.Add(blkID1) + acceptedIDs.Add(blkID2) + + parsedBlk1 := false + parsedBlk2 := false + vm.GetBlockF = func(blkID ids.ID) (snowman.Block, error) { + switch { + case blkID.Equals(blkID0): + return blk0, nil + case blkID.Equals(blkID1): + if parsedBlk1 { + return blk1, nil + } + return nil, errUnknownBlock + case blkID.Equals(blkID2): + if parsedBlk2 { + return blk2, nil + } + return nil, errUnknownBlock + default: + t.Fatal(errUnknownBlock) + panic(errUnknownBlock) + } + } + vm.ParseBlockF = func(blkBytes []byte) (snowman.Block, error) { + switch { + case bytes.Equal(blkBytes, blkBytes0): + return blk0, nil + case bytes.Equal(blkBytes, blkBytes1): + blk1.status = choices.Processing + parsedBlk1 = true + return blk1, nil + case bytes.Equal(blkBytes, blkBytes2): + blk2.status = choices.Processing + parsedBlk2 = true + return blk2, nil + } + t.Fatal(errUnknownBlock) + return nil, errUnknownBlock + } + + requestIDs := map[[32]byte]uint32{} + sender.GetAncestorsF = func(vdr ids.ShortID, reqID uint32, vtxID ids.ID) { + if !vdr.Equals(peerID) { + t.Fatalf("Should have requested block from %s, requested from %s", peerID, vdr) + } + requestIDs[vtxID.Key()] = reqID + } + + vm.CantBootstrapping = false + + if err := bs.ForceAccepted(acceptedIDs); err != nil { // should request blk0 and blk1 + t.Fatal(err) + } + + reqID, ok := requestIDs[blkID2.Key()] + if !ok { + t.Fatalf("should have 
requested blk2") + } + + vm.CantBootstrapped = false + + if err := bs.MultiPut(peerID, reqID, [][]byte{blkBytes2, blkBytes1}); err != nil { + t.Fatal(err) + } + + reqID, ok = requestIDs[blkID1.Key()] + if !ok { + t.Fatalf("should have requested blk1") + } + + if err := bs.GetAncestorsFailed(peerID, reqID); err != nil { + t.Fatal(err) + } + + if !*finished { + t.Fatalf("Bootstrapping should have finished") + } else if blk0.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } else if blk1.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } else if blk2.Status() != choices.Accepted { + t.Fatalf("Block should be accepted") + } +} From f0cd642c2d5ab2933c9c7297b7b192c55246a81c Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 17 Jun 2020 20:35:28 -0400 Subject: [PATCH 091/183] move cache check out of loop --- snow/engine/avalanche/bootstrapper.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 43d2b47..3958fe7 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -125,13 +125,11 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error { // Process vertices func (b *bootstrapper) process(vtx avalanche.Vertex) error { toProcess := newMaxVertexHeap() - toProcess.Push(vtx) + if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process if we haven't already + toProcess.Push(vtx) + } for toProcess.Len() > 0 { vtx := toProcess.Pop() - if _, ok := b.processedCache.Get(vtx.ID()); ok { // already processed this - continue - } - switch vtx.Status() { case choices.Unknown: if err := b.fetch(vtx.ID()); err != nil { From 15898c4ac2ea7a7c63d8e962229d2026f4bf8698 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 17 Jun 2020 20:41:02 -0400 Subject: [PATCH 092/183] style fix --- snow/engine/avalanche/bootstrapper.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 3958fe7..0f8c1d2 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -165,10 +165,9 @@ func (b *bootstrapper) process(vtx avalanche.Vertex) error { } } for _, parent := range vtx.Parents() { - if _, ok := b.processedCache.Get(parent.ID()); ok { // already processed this - continue + if _, ok := b.processedCache.Get(parent.ID()); !ok { // already processed this + toProcess.Push(parent) } - toProcess.Push(parent) } b.processedCache.Put(vtx.ID(), nil) } From a1b1ad2da449435098deec8e3dfebea777773793 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Wed, 17 Jun 2020 22:33:41 -0400 Subject: [PATCH 093/183] address golint errors --- nat/nat.go | 8 +++++++- nat/no_router.go | 3 ++- nat/pmp.go | 3 ++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/nat/nat.go b/nat/nat.go index 8aa9d9e..8c351c7 100644 --- a/nat/nat.go +++ b/nat/nat.go @@ -17,6 +17,8 @@ const ( maxRetries = 20 ) +// Router describes the functionality that a network device must support to be +// able to open ports to an external IP. type Router interface { MapPort(protocol string, intPort, extPort uint16, desc string, duration time.Duration) error UnmapPort(protocol string, intPort, extPort uint16) error @@ -29,6 +31,7 @@ type Router interface { ) } +// GetRouter returns a router on the current network. 
func GetRouter() Router { if r := getUPnPRouter(); r != nil { return r @@ -40,6 +43,7 @@ func GetRouter() Router { return NewNoRouter() } +// Mapper attempts to open a set of ports on a router type Mapper struct { log logging.Logger r Router @@ -47,6 +51,7 @@ type Mapper struct { wg sync.WaitGroup } +// NewPortMapper returns an initialized mapper func NewPortMapper(log logging.Logger, r Router) Mapper { return Mapper{ log: log, @@ -77,7 +82,6 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, if intaddr, intPort, desc, err := dev.r.GetPortMappingEntry(extPort, protocol); err == nil { dev.log.Debug("Port %d is taken by %s:%d: %s, retry with the next port", extPort, intaddr, intPort, desc) - continue } else if err := dev.r.MapPort(protocol, intPort, extPort, desc, mapTimeout); err != nil { dev.log.Debug("Map port failed. Protocol %s Internal %d External %d. %s", protocol, intPort, extPort, err) @@ -121,6 +125,8 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string, mappedPort <- 0 } +// UnmapAllPorts stops mapping all ports from this mapper and attempts to unmap +// them. func (dev *Mapper) UnmapAllPorts() { close(dev.closer) dev.wg.Wait() diff --git a/nat/no_router.go b/nat/no_router.go index 364c338..0971b46 100644 --- a/nat/no_router.go +++ b/nat/no_router.go @@ -48,7 +48,8 @@ func getOutboundIP() (net.IP, error) { return nil, fmt.Errorf("getting outbound IP failed") } -func NewNoRouter() *noRouter { +// NewNoRouter returns a router that assumes the network is public +func NewNoRouter() Router { ip, err := getOutboundIP() if err != nil { return nil diff --git a/nat/pmp.go b/nat/pmp.go index 7c5a800..ce40362 100644 --- a/nat/pmp.go +++ b/nat/pmp.go @@ -9,7 +9,8 @@ import ( "time" "github.com/jackpal/gateway" - "github.com/jackpal/go-nat-pmp" + + natpmp "github.com/jackpal/go-nat-pmp" ) var ( From 3eb9788976653cec362dc709b51ee84c095ec43e Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Wed, 17 Jun 2020 23:29:30 -0400 Subject: [PATCH 094/183] formatting changes --- api/health/service.go | 6 ++++-- api/keystore/service.go | 5 +++-- api/metrics/service.go | 3 ++- chains/atomic/memory.go | 2 +- database/encdb/db.go | 2 +- database/leveldb/db.go | 5 +++-- database/nodb/db.go | 2 +- database/rpcdb/db_client.go | 27 +++++++++++++++------------ database/rpcdb/db_server.go | 32 ++++++++++++++++++-------------- genesis/aliases.go | 34 +++++++++++++++++----------------- genesis/genesis.go | 12 ++++++------ go.sum | 7 +++++++ 12 files changed, 78 insertions(+), 59 deletions(-) diff --git a/api/health/service.go b/api/health/service.go index fdd405b..1989ab3 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -7,11 +7,13 @@ import ( "net/http" "time" - "github.com/AppsFlyer/go-sundheit" + health "github.com/AppsFlyer/go-sundheit" + + "github.com/gorilla/rpc/v2" + "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/json" "github.com/ava-labs/gecko/utils/logging" - "github.com/gorilla/rpc/v2" ) // defaultCheckOpts is a Check whose properties represent a default Check diff --git a/api/keystore/service.go b/api/keystore/service.go index ac9e4e6..e6979fa 100644 --- a/api/keystore/service.go +++ b/api/keystore/service.go @@ -12,6 +12,8 @@ import ( "github.com/gorilla/rpc/v2" + zxcvbn "github.com/nbutton23/zxcvbn-go" + "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/encdb" @@ -19,12 +21,11 @@ import ( 
"github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/utils/codec" jsoncodec "github.com/ava-labs/gecko/utils/json" - zxcvbn "github.com/nbutton23/zxcvbn-go" ) const ( diff --git a/api/metrics/service.go b/api/metrics/service.go index 5fa9206..463456a 100644 --- a/api/metrics/service.go +++ b/api/metrics/service.go @@ -4,9 +4,10 @@ package metrics import ( - "github.com/ava-labs/gecko/snow/engine/common" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + + "github.com/ava-labs/gecko/snow/engine/common" ) // NewService returns a new prometheus service diff --git a/chains/atomic/memory.go b/chains/atomic/memory.go index 9774711..778b9e5 100644 --- a/chains/atomic/memory.go +++ b/chains/atomic/memory.go @@ -10,9 +10,9 @@ import ( "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" - "github.com/ava-labs/gecko/utils/codec" ) type rcLock struct { diff --git a/database/encdb/db.go b/database/encdb/db.go index 4814805..fe33fa7 100644 --- a/database/encdb/db.go +++ b/database/encdb/db.go @@ -13,8 +13,8 @@ import ( "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/database/nodb" "github.com/ava-labs/gecko/utils" - "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/utils/hashing" ) // Database encrypts all values that are provided diff --git a/database/leveldb/db.go b/database/leveldb/db.go index edcb4be..7055d61 100644 --- a/database/leveldb/db.go +++ b/database/leveldb/db.go @@ -6,14 +6,15 @@ package leveldb import ( "bytes" - "github.com/ava-labs/gecko/database" - "github.com/ava-labs/gecko/utils" "github.com/syndtr/goleveldb/leveldb" "github.com/syndtr/goleveldb/leveldb/errors" "github.com/syndtr/goleveldb/leveldb/filter" "github.com/syndtr/goleveldb/leveldb/iterator" "github.com/syndtr/goleveldb/leveldb/opt" "github.com/syndtr/goleveldb/leveldb/util" + + "github.com/ava-labs/gecko/database" + "github.com/ava-labs/gecko/utils" ) const ( diff --git a/database/nodb/db.go b/database/nodb/db.go index 3f1bceb..9a7525f 100644 --- a/database/nodb/db.go +++ b/database/nodb/db.go @@ -17,7 +17,7 @@ func (*Database) Has([]byte) (bool, error) { return false, database.ErrClosed } func (*Database) Get([]byte) ([]byte, error) { return nil, database.ErrClosed } // Put returns nil -func (*Database) Put(_ []byte, _ []byte) error { return database.ErrClosed } +func (*Database) Put(_, _ []byte) error { return database.ErrClosed } // Delete returns nil func (*Database) Delete([]byte) error { return database.ErrClosed } diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go index 67af7ef..dc3f60b 100644 --- a/database/rpcdb/db_client.go +++ b/database/rpcdb/db_client.go @@ -27,7 +27,7 @@ func NewClient(client rpcdbproto.DatabaseClient) *DatabaseClient { return &DatabaseClient{client: client} } -// Has returns false, nil +// Has attempts to return if the database has a key with the provided value. 
func (db *DatabaseClient) Has(key []byte) (bool, error) { resp, err := db.client.Has(context.Background(), &rpcdbproto.HasRequest{ Key: key, @@ -38,7 +38,7 @@ func (db *DatabaseClient) Has(key []byte) (bool, error) { return resp.Has, nil } -// Get returns nil, error +// Get attempts to return the value that was mapped to the key that was provided func (db *DatabaseClient) Get(key []byte) ([]byte, error) { resp, err := db.client.Get(context.Background(), &rpcdbproto.GetRequest{ Key: key, @@ -49,7 +49,7 @@ func (db *DatabaseClient) Get(key []byte) ([]byte, error) { return resp.Value, nil } -// Put returns nil +// Put attempts to set the value this key maps to func (db *DatabaseClient) Put(key, value []byte) error { _, err := db.client.Put(context.Background(), &rpcdbproto.PutRequest{ Key: key, @@ -58,7 +58,7 @@ func (db *DatabaseClient) Put(key, value []byte) error { return updateError(err) } -// Delete returns nil +// Delete attempts to remove any mapping from the key func (db *DatabaseClient) Delete(key []byte) error { _, err := db.client.Delete(context.Background(), &rpcdbproto.DeleteRequest{ Key: key, @@ -99,7 +99,7 @@ func (db *DatabaseClient) NewIteratorWithStartAndPrefix(start, prefix []byte) da } } -// Stat returns an error +// Stat attempts to return the statistic of this database func (db *DatabaseClient) Stat(property string) (string, error) { resp, err := db.client.Stat(context.Background(), &rpcdbproto.StatRequest{ Property: property, @@ -110,7 +110,7 @@ func (db *DatabaseClient) Stat(property string) (string, error) { return resp.Stat, nil } -// Compact returns nil +// Compact attempts to optimize the space utilization in the provided range func (db *DatabaseClient) Compact(start, limit []byte) error { _, err := db.client.Compact(context.Background(), &rpcdbproto.CompactRequest{ Start: start, @@ -119,7 +119,7 @@ func (db *DatabaseClient) Compact(start, limit []byte) error { return updateError(err) } -// Close returns nil +// Close attempts to close the database func (db *DatabaseClient) Close() error { _, err := db.client.Close(context.Background(), &rpcdbproto.CloseRequest{}) return updateError(err) @@ -207,7 +207,8 @@ type iterator struct { err error } -// Next returns false +// Next attempts to move the iterator to the next element and returns if this +// succeeded func (it *iterator) Next() bool { resp, err := it.db.client.IteratorNext(context.Background(), &rpcdbproto.IteratorNextRequest{ Id: it.id, @@ -221,7 +222,7 @@ func (it *iterator) Next() bool { return resp.FoundNext } -// Error returns any errors +// Error returns any that occurred while iterating func (it *iterator) Error() error { if it.err != nil { return it.err @@ -234,19 +235,21 @@ func (it *iterator) Error() error { return it.err } -// Key returns nil +// Key returns the key of the current element func (it *iterator) Key() []byte { return it.key } -// Value returns nil +// Value returns the value of the current element func (it *iterator) Value() []byte { return it.value } -// Release does nothing +// Release frees any resources held by the iterator func (it *iterator) Release() { it.db.client.IteratorRelease(context.Background(), &rpcdbproto.IteratorReleaseRequest{ Id: it.id, }) } +// updateError sets the error value to the errors required by the Database +// interface func updateError(err error) error { if err == nil { return nil diff --git a/database/rpcdb/db_server.go b/database/rpcdb/db_server.go index fe4b47a..cc32db8 100644 --- a/database/rpcdb/db_server.go +++ b/database/rpcdb/db_server.go @@ -34,16 
+34,16 @@ func NewServer(db database.Database) *DatabaseServer { } } -// Has ... +// Has delegates the Has call to the managed database and returns the result func (db *DatabaseServer) Has(_ context.Context, req *rpcdbproto.HasRequest) (*rpcdbproto.HasResponse, error) { has, err := db.db.Has(req.Key) if err != nil { return nil, err } - return &rpcdbproto.HasResponse{Has: has}, nil + return &rpcdbproto.HasResponse{Has: has}, err } -// Get ... +// Get delegates the Get call to the managed database and returns the result func (db *DatabaseServer) Get(_ context.Context, req *rpcdbproto.GetRequest) (*rpcdbproto.GetResponse, error) { value, err := db.db.Get(req.Key) if err != nil { @@ -52,17 +52,18 @@ func (db *DatabaseServer) Get(_ context.Context, req *rpcdbproto.GetRequest) (*r return &rpcdbproto.GetResponse{Value: value}, nil } -// Put ... +// Put delegates the Put call to the managed database and returns the result func (db *DatabaseServer) Put(_ context.Context, req *rpcdbproto.PutRequest) (*rpcdbproto.PutResponse, error) { return &rpcdbproto.PutResponse{}, db.db.Put(req.Key, req.Value) } -// Delete ... +// Delete delegates the Delete call to the managed database and returns the +// result func (db *DatabaseServer) Delete(_ context.Context, req *rpcdbproto.DeleteRequest) (*rpcdbproto.DeleteResponse, error) { return &rpcdbproto.DeleteResponse{}, db.db.Delete(req.Key) } -// Stat ... +// Stat delegates the Stat call to the managed database and returns the result func (db *DatabaseServer) Stat(_ context.Context, req *rpcdbproto.StatRequest) (*rpcdbproto.StatResponse, error) { stat, err := db.db.Stat(req.Property) if err != nil { @@ -71,17 +72,19 @@ func (db *DatabaseServer) Stat(_ context.Context, req *rpcdbproto.StatRequest) ( return &rpcdbproto.StatResponse{Stat: stat}, nil } -// Compact ... +// Compact delegates the Compact call to the managed database and returns the +// result func (db *DatabaseServer) Compact(_ context.Context, req *rpcdbproto.CompactRequest) (*rpcdbproto.CompactResponse, error) { return &rpcdbproto.CompactResponse{}, db.db.Compact(req.Start, req.Limit) } -// Close ... -func (db *DatabaseServer) Close(_ context.Context, _ *rpcdbproto.CloseRequest) (*rpcdbproto.CloseResponse, error) { +// Close delegates the Close call to the managed database and returns the result +func (db *DatabaseServer) Close(context.Context, *rpcdbproto.CloseRequest) (*rpcdbproto.CloseResponse, error) { return &rpcdbproto.CloseResponse{}, db.db.Close() } -// WriteBatch ... +// WriteBatch takes in a set of key-value pairs and atomically writes them to +// the internal database func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbproto.WriteBatchRequest) (*rpcdbproto.WriteBatchResponse, error) { db.batch.Reset() @@ -100,7 +103,8 @@ func (db *DatabaseServer) WriteBatch(_ context.Context, req *rpcdbproto.WriteBat return &rpcdbproto.WriteBatchResponse{}, db.batch.Write() } -// NewIteratorWithStartAndPrefix ... +// NewIteratorWithStartAndPrefix allocates an iterator and returns the iterator +// ID func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req *rpcdbproto.NewIteratorWithStartAndPrefixRequest) (*rpcdbproto.NewIteratorWithStartAndPrefixResponse, error) { id := db.nextIteratorID it := db.db.NewIteratorWithStartAndPrefix(req.Start, req.Prefix) @@ -110,7 +114,7 @@ func (db *DatabaseServer) NewIteratorWithStartAndPrefix(_ context.Context, req * return &rpcdbproto.NewIteratorWithStartAndPrefixResponse{Id: id}, nil } -// IteratorNext ... 
+// IteratorNext attempts to call next on the requested iterator func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbproto.IteratorNextRequest) (*rpcdbproto.IteratorNextResponse, error) { it, exists := db.iterators[req.Id] if !exists { @@ -123,7 +127,7 @@ func (db *DatabaseServer) IteratorNext(_ context.Context, req *rpcdbproto.Iterat }, nil } -// IteratorError ... +// IteratorError attempts to report any errors that occurred during iteration func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbproto.IteratorErrorRequest) (*rpcdbproto.IteratorErrorResponse, error) { it, exists := db.iterators[req.Id] if !exists { @@ -132,7 +136,7 @@ func (db *DatabaseServer) IteratorError(_ context.Context, req *rpcdbproto.Itera return &rpcdbproto.IteratorErrorResponse{}, it.Error() } -// IteratorRelease ... +// IteratorRelease attempts to release the resources allocated to an iterator func (db *DatabaseServer) IteratorRelease(_ context.Context, req *rpcdbproto.IteratorReleaseRequest) (*rpcdbproto.IteratorReleaseResponse, error) { it, exists := db.iterators[req.Id] if exists { diff --git a/genesis/aliases.go b/genesis/aliases.go index 80e5dcd..b1eae46 100644 --- a/genesis/aliases.go +++ b/genesis/aliases.go @@ -18,27 +18,27 @@ import ( // Aliases returns the default aliases based on the network ID func Aliases(networkID uint32) (map[string][]string, map[[32]byte][]string, map[[32]byte][]string, error) { generalAliases := map[string][]string{ - "vm/" + platformvm.ID.String(): []string{"vm/platform"}, - "vm/" + avm.ID.String(): []string{"vm/avm"}, - "vm/" + EVMID.String(): []string{"vm/evm"}, - "vm/" + spdagvm.ID.String(): []string{"vm/spdag"}, - "vm/" + spchainvm.ID.String(): []string{"vm/spchain"}, - "vm/" + timestampvm.ID.String(): []string{"vm/timestamp"}, - "bc/" + ids.Empty.String(): []string{"P", "platform", "bc/P", "bc/platform"}, + "vm/" + platformvm.ID.String(): {"vm/platform"}, + "vm/" + avm.ID.String(): {"vm/avm"}, + "vm/" + EVMID.String(): {"vm/evm"}, + "vm/" + spdagvm.ID.String(): {"vm/spdag"}, + "vm/" + spchainvm.ID.String(): {"vm/spchain"}, + "vm/" + timestampvm.ID.String(): {"vm/timestamp"}, + "bc/" + ids.Empty.String(): {"P", "platform", "bc/P", "bc/platform"}, } chainAliases := map[[32]byte][]string{ - ids.Empty.Key(): []string{"P", "platform"}, + ids.Empty.Key(): {"P", "platform"}, } vmAliases := map[[32]byte][]string{ - platformvm.ID.Key(): []string{"platform"}, - avm.ID.Key(): []string{"avm"}, - EVMID.Key(): []string{"evm"}, - spdagvm.ID.Key(): []string{"spdag"}, - spchainvm.ID.Key(): []string{"spchain"}, - timestampvm.ID.Key(): []string{"timestamp"}, - secp256k1fx.ID.Key(): []string{"secp256k1fx"}, - nftfx.ID.Key(): []string{"nftfx"}, - propertyfx.ID.Key(): []string{"propertyfx"}, + platformvm.ID.Key(): {"platform"}, + avm.ID.Key(): {"avm"}, + EVMID.Key(): {"evm"}, + spdagvm.ID.Key(): {"spdag"}, + spchainvm.ID.Key(): {"spchain"}, + timestampvm.ID.Key(): {"timestamp"}, + secp256k1fx.ID.Key(): {"secp256k1fx"}, + nftfx.ID.Key(): {"nftfx"}, + propertyfx.ID.Key(): {"propertyfx"}, } genesisBytes, err := Genesis(networkID) diff --git a/genesis/genesis.go b/genesis/genesis.go index c4245b9..f37a584 100644 --- a/genesis/genesis.go +++ b/genesis/genesis.go @@ -9,12 +9,12 @@ import ( "time" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/json" "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/avm" - 
"github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/nftfx" "github.com/ava-labs/gecko/vms/platformvm" "github.com/ava-labs/gecko/vms/propertyfx" @@ -156,7 +156,7 @@ func FromConfig(networkID uint32, config *Config) ([]byte, error) { // Specify the chains that exist upon this network's creation platformvmArgs.Chains = []platformvm.APIChain{ - platformvm.APIChain{ + { GenesisData: avmReply.Bytes, SubnetID: platformvm.DefaultSubnetID, VMID: avm.ID, @@ -167,25 +167,25 @@ func FromConfig(networkID uint32, config *Config) ([]byte, error) { }, Name: "X-Chain", }, - platformvm.APIChain{ + { GenesisData: formatting.CB58{Bytes: config.EVMBytes}, SubnetID: platformvm.DefaultSubnetID, VMID: EVMID, Name: "C-Chain", }, - platformvm.APIChain{ + { GenesisData: spdagvmReply.Bytes, SubnetID: platformvm.DefaultSubnetID, VMID: spdagvm.ID, Name: "Simple DAG Payments", }, - platformvm.APIChain{ + { GenesisData: spchainvmReply.Bytes, SubnetID: platformvm.DefaultSubnetID, VMID: spchainvm.ID, Name: "Simple Chain Payments", }, - platformvm.APIChain{ + { GenesisData: formatting.CB58{Bytes: []byte{}}, // There is no genesis data SubnetID: platformvm.DefaultSubnetID, VMID: timestampvm.ID, diff --git a/go.sum b/go.sum index d46c985..3f809ed 100644 --- a/go.sum +++ b/go.sum @@ -64,6 +64,7 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= @@ -122,6 +123,7 @@ github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+l github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb h1:b5rjCoWHc7eqmAS4/qyk21ZsHyb6Mxv/jykxvNTkU4M= github.com/hashicorp/yamux v0.0.0-20180604194846-3520598351bb/go.mod h1:+NfK9FKeTrX5uv1uIXGdwYDTeHna2qgaIlx54MXqjAM= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huin/goupnp v1.0.0 h1:wg75sLpL6DZqwHQN6E1Cfk6mtfzS45z8OV+ic+DtHRo= github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7Bnc= @@ -177,8 +179,10 @@ github.com/olekukonko/tablewriter v0.0.4 h1:vHD/YYe1Wolo78koG299f7V/VAS08c6IpCLn github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1 h1:q/mM8GF/n0shIN8SaAZ0V+jnLPzen6WIVZdiwrRlMlo= github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod 
h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= @@ -230,6 +234,7 @@ github.com/steakknife/bloomfilter v0.0.0-20180922174646-6819c0d2a570/go.mod h1:8 github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3 h1:njlZPzLwU639dk2kqnCPPv+wNjq7Xb6EfUxe/oX0/NM= github.com/steakknife/hamming v0.0.0-20180906055917-c99c65617cd3/go.mod h1:hpGUWaI9xL8pRQCTXQgocU38Qw1g0Us7n5PxxTwTCYU= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1 h1:2vfRuCMp5sSVIDSqO8oNnWJq7mPa6KVP3iPIwFBuy8A= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= @@ -345,6 +350,7 @@ gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLks gopkg.in/bsm/ratelimit.v1 v1.0.0-20160220154919-db14e161995a/go.mod h1:KF9sEfUPAXdG8Oev9e99iLGnl2uJMjc5B+4y3O7x610= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= gopkg.in/jcmturner/dnsutils.v1 v1.0.1/go.mod h1:m3v+5svpVOhtFAP/wSz+yzh4Mc0Fg7eRhxkJMWSIz9Q= @@ -354,6 +360,7 @@ gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLv gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9 h1:ITeyKbRetrVzqR3U1eY+ywgp7IBspGd1U/bkwd1gWu4= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200316214253-d7b0ff38cac9/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0= gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0= From 84c4e30937e67fc7dad8924062fa06f560f12995 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 18 Jun 2020 00:30:15 -0400 Subject: [PATCH 095/183] formatting update --- snow/consensus/snowball/parameters_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snow/consensus/snowball/parameters_test.go b/snow/consensus/snowball/parameters_test.go index 7c3668c..056e229 100644 --- a/snow/consensus/snowball/parameters_test.go +++ b/snow/consensus/snowball/parameters_test.go @@ -125,14 +125,14 @@ func TestParametersAnotherInvalidBetaRogue(t *testing.T) { func TestParametersInvalidConcurrentRepolls(t *testing.T) { tests := []Parameters{ - Parameters{ + { K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, ConcurrentRepolls: 2, }, - Parameters{ + { K: 1, Alpha: 1, BetaVirtuous: 1, From be6be7ae1c1e06fc29d352770836f7f0065cc63d Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 18 Jun 2020 
01:45:58 -0400 Subject: [PATCH 096/183] reverted breaking changes --- genesis/genesis_test.go | 4 +- genesis/network_id.go | 2 +- main/params.go | 2 +- network/commands.go | 10 +- network/peer.go | 20 ++-- node/node.go | 2 +- .../add_default_subnet_delegator_tx.go | 2 +- .../add_default_subnet_delegator_tx_test.go | 94 +++++++++---------- 8 files changed, 69 insertions(+), 67 deletions(-) diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index c80767d..292fdee 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -26,8 +26,8 @@ func TestNetworkName(t *testing.T) { if name := NetworkName(EverestID); name != EverestName { t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, EverestName) } - if name := NetworkName(TestnetID); name != EverestName { - t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, EverestName) + if name := NetworkName(DenaliID); name != DenaliName { + t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, DenaliName) } if name := NetworkName(4294967295); name != "network-4294967295" { t.Fatalf("NetworkID was incorrectly named. Result: %s ; Expected: %s", name, "network-4294967295") diff --git a/genesis/network_id.go b/genesis/network_id.go index f318a36..880583e 100644 --- a/genesis/network_id.go +++ b/genesis/network_id.go @@ -18,7 +18,7 @@ var ( DenaliID uint32 = 3 EverestID uint32 = 4 - TestnetID uint32 = 4 + TestnetID uint32 = 3 LocalID uint32 = 12345 MainnetName = "mainnet" diff --git a/main/params.go b/main/params.go index 468a599..1e526b3 100644 --- a/main/params.go +++ b/main/params.go @@ -30,7 +30,7 @@ import ( ) const ( - dbVersion = "v0.6.0" + dbVersion = "v0.5.0" ) // Results of parsing the CLI diff --git a/network/commands.go b/network/commands.go index a5a9006..06fc31b 100644 --- a/network/commands.go +++ b/network/commands.go @@ -170,21 +170,23 @@ const ( Version GetPeerList PeerList - Ping - Pong // Bootstrapping: GetAcceptedFrontier AcceptedFrontier GetAccepted Accepted - GetAncestors - MultiPut // Consensus: Get Put PushQuery PullQuery Chits + + // TODO: Reorder these messages when we transition to everest + GetAncestors + MultiPut + Ping + Pong ) // Defines the messages that can be sent/received with this network diff --git a/network/peer.go b/network/peer.go index 9fd801f..409d7f6 100644 --- a/network/peer.go +++ b/network/peer.go @@ -64,7 +64,7 @@ func (p *peer) Start() { // Initially send the version to the peer go p.Version() go p.requestVersion() - go p.sendPings() + // go p.sendPings() } func (p *peer) sendPings() { @@ -107,10 +107,10 @@ func (p *peer) requestVersion() { func (p *peer) ReadMessages() { defer p.Close() - if err := p.conn.SetReadDeadline(p.net.clock.Time().Add(p.net.pingPongTimeout)); err != nil { - p.net.log.Verbo("error on setting the connection read timeout %s", err) - return - } + // if err := p.conn.SetReadDeadline(p.net.clock.Time().Add(p.net.pingPongTimeout)); err != nil { + // p.net.log.Verbo("error on setting the connection read timeout %s", err) + // return + // } pendingBuffer := wrappers.Packer{} readBuffer := make([]byte, 1<<10) @@ -246,11 +246,11 @@ func (p *peer) handle(msg Msg) { currentTime := p.net.clock.Time() atomic.StoreInt64(&p.lastReceived, currentTime.Unix()) - if err := p.conn.SetReadDeadline(currentTime.Add(p.net.pingPongTimeout)); err != nil { - p.net.log.Verbo("error on setting the connection read timeout %s, closing the connection", err) - p.Close() - return - } + // if err := 
p.conn.SetReadDeadline(currentTime.Add(p.net.pingPongTimeout)); err != nil { + // p.net.log.Verbo("error on setting the connection read timeout %s, closing the connection", err) + // p.Close() + // return + // } op := msg.Op() msgMetrics := p.net.message(op) diff --git a/node/node.go b/node/node.go index eae8cfd..752c78d 100644 --- a/node/node.go +++ b/node/node.go @@ -57,7 +57,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this code - Version = version.NewDefaultVersion("avalanche", 0, 6, 0) + Version = version.NewDefaultVersion("avalanche", 0, 5, 5) versionParser = version.NewDefaultParser() ) diff --git a/vms/platformvm/add_default_subnet_delegator_tx.go b/vms/platformvm/add_default_subnet_delegator_tx.go index 9881652..3012d84 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx.go +++ b/vms/platformvm/add_default_subnet_delegator_tx.go @@ -128,7 +128,7 @@ func (tx *addDefaultSubnetDelegatorTx) SemanticVerify(db database.Database) (*ve // The account if this block's proposal is committed and the validator is // added to the pending validator set. (Increase the account's nonce; // decrease its balance.) - newAccount, err := account.Remove(tx.Wght, tx.Nonce) // Remove also removes the fee + newAccount, err := account.Remove(0, tx.Nonce) // Remove also removes the fee if err != nil { return nil, nil, nil, nil, permError{err} } diff --git a/vms/platformvm/add_default_subnet_delegator_tx_test.go b/vms/platformvm/add_default_subnet_delegator_tx_test.go index 9380001..4b6fe80 100644 --- a/vms/platformvm/add_default_subnet_delegator_tx_test.go +++ b/vms/platformvm/add_default_subnet_delegator_tx_test.go @@ -335,9 +335,9 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, // nonce - defaultStakeAmount, // weight - uint64(newTimestamp.Unix()), // start time + defaultNonce+1, // nonce + defaultStakeAmount, // weight + uint64(newTimestamp.Unix()), // start time uint64(newTimestamp.Add(MinimumStakingDuration).Unix()), // end time defaultKey.PublicKey().Address(), // node ID defaultKey.PublicKey().Address(), // destination @@ -387,51 +387,51 @@ func TestAddDefaultSubnetDelegatorTxSemanticVerify(t *testing.T) { } txFee = txFeeSaved // Reset tx fee - // Case 8: fail verification for spending more funds than it has - tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, - defaultBalance*2, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - defaultKey.PublicKey().Address(), // node ID - defaultKey.PublicKey().Address(), // destination - testNetworkID, // network ID - defaultKey, // tx fee payer - ) - if err != nil { - t.Fatal(err) - } - _, _, _, _, err = tx.SemanticVerify(vm.DB) - if err == nil { - t.Fatal("should have failed verification because payer account spent twice the account's balance") - } + // // Case 8: fail verification for spending more funds than it has + // tx, err = vm.newAddDefaultSubnetDelegatorTx( + // defaultNonce+1, + // defaultBalance*2, // weight + // uint64(defaultValidateStartTime.Unix()), // start time + // uint64(defaultValidateEndTime.Unix()), // end time + // defaultKey.PublicKey().Address(), // node ID + // defaultKey.PublicKey().Address(), // destination + // testNetworkID, // network ID + // defaultKey, // tx fee payer + // ) + // if err != nil { + // t.Fatal(err) + // } + // _, _, _, _, err = tx.SemanticVerify(vm.DB) + // if err == nil { + // t.Fatal("should have failed 
verification because payer account spent twice the account's balance") + // } - // Case 9: Confirm balance is correct - tx, err = vm.newAddDefaultSubnetDelegatorTx( - defaultNonce+1, - defaultStakeAmount, // weight - uint64(defaultValidateStartTime.Unix()), // start time - uint64(defaultValidateEndTime.Unix()), // end time - defaultKey.PublicKey().Address(), // node ID - defaultKey.PublicKey().Address(), // destination - testNetworkID, // network ID - defaultKey, // tx fee payer - ) - if err != nil { - t.Fatal(err) - } + // // Case 9: Confirm balance is correct + // tx, err = vm.newAddDefaultSubnetDelegatorTx( + // defaultNonce+1, + // defaultStakeAmount, // weight + // uint64(defaultValidateStartTime.Unix()), // start time + // uint64(defaultValidateEndTime.Unix()), // end time + // defaultKey.PublicKey().Address(), // node ID + // defaultKey.PublicKey().Address(), // destination + // testNetworkID, // network ID + // defaultKey, // tx fee payer + // ) + // if err != nil { + // t.Fatal(err) + // } - onCommitDB, _, _, _, err := tx.SemanticVerify(vm.DB) - if err != nil { - t.Fatal(err) - } - account, err := tx.vm.getAccount(onCommitDB, defaultKey.PublicKey().Address()) - if err != nil { - t.Fatal(err) - } - balance := account.Balance + // onCommitDB, _, _, _, err := tx.SemanticVerify(vm.DB) + // if err != nil { + // t.Fatal(err) + // } + // account, err := tx.vm.getAccount(onCommitDB, defaultKey.PublicKey().Address()) + // if err != nil { + // t.Fatal(err) + // } + // balance := account.Balance - if balance != defaultBalance-(defaultStakeAmount+txFee) { - t.Fatalf("balance was not updated correctly after subnet delegator tx") - } + // if balance != defaultBalance-(defaultStakeAmount+txFee) { + // t.Fatalf("balance was not updated correctly after subnet delegator tx") + // } } From f78d7b3caf6a438c286d64f038449a9c02638da3 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Thu, 18 Jun 2020 13:34:04 -0400 Subject: [PATCH 097/183] use transitive dependencies when possible with transaction dependencies --- vms/avm/base_tx.go | 11 +++- vms/avm/base_tx_test.go | 112 ++++++++++++++++---------------- vms/avm/create_asset_tx_test.go | 8 +-- vms/avm/export_tx_test.go | 16 ++--- vms/avm/import_tx.go | 11 +++- vms/avm/import_tx_test.go | 10 +-- vms/avm/operation_test.go | 18 ++--- vms/avm/operation_tx.go | 13 +++- vms/avm/prefixed_state_test.go | 6 +- vms/avm/service.go | 8 +-- vms/avm/service_test.go | 8 +-- vms/avm/state_test.go | 2 +- vms/avm/static_service_test.go | 12 ++-- vms/avm/tx.go | 4 +- vms/avm/tx_test.go | 12 ++-- vms/avm/unique_tx.go | 27 ++++---- vms/avm/vm.go | 8 +-- vms/avm/vm_test.go | 60 +++++++++-------- 18 files changed, 186 insertions(+), 160 deletions(-) diff --git a/vms/avm/base_tx.go b/vms/avm/base_tx.go index 0ab3fa4..cf4371f 100644 --- a/vms/avm/base_tx.go +++ b/vms/avm/base_tx.go @@ -9,8 +9,8 @@ import ( "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" ) @@ -46,8 +46,8 @@ func (t *BaseTx) InputUTXOs() []*ava.UTXOID { return utxos } -// AssetIDs returns the IDs of the assets this transaction depends on -func (t *BaseTx) AssetIDs() ids.Set { +// ConsumedAssetIDs returns the IDs of the assets this transaction consumes +func (t *BaseTx) ConsumedAssetIDs() ids.Set { assets := ids.Set{} for _, in := range t.Ins { assets.Add(in.AssetID()) @@ 
-55,6 +55,11 @@ func (t *BaseTx) AssetIDs() ids.Set { return assets } +// AssetIDs returns the IDs of the assets this transaction depends on +func (t *BaseTx) AssetIDs() ids.Set { + return t.ConsumedAssetIDs() +} + // NumCredentials returns the number of expected credentials func (t *BaseTx) NumCredentials() int { return len(t.Ins) } diff --git a/vms/avm/base_tx_test.go b/vms/avm/base_tx_test.go index 163ef5c..9f7bbdd 100644 --- a/vms/avm/base_tx_test.go +++ b/vms/avm/base_tx_test.go @@ -77,7 +77,7 @@ func TestBaseTxSerialization(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -87,7 +87,7 @@ func TestBaseTxSerialization(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -125,7 +125,7 @@ func TestBaseTxGetters(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -135,7 +135,7 @@ func TestBaseTxGetters(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -162,6 +162,10 @@ func TestBaseTxGetters(t *testing.T) { t.Fatalf("Wrong number of assets returned") } else if !assets.Contains(asset) { t.Fatalf("Wrong asset returned") + } else if assets := tx.ConsumedAssetIDs(); assets.Len() != 1 { + t.Fatalf("Wrong number of consumed assets returned") + } else if !assets.Contains(asset) { + t.Fatalf("Wrong consumed asset returned") } else if utxos := tx.UTXOs(); len(utxos) != 1 { t.Fatalf("Wrong number of utxos returned") } else if utxo := utxos[0]; !utxo.TxID.Equals(txID) { @@ -179,7 +183,7 @@ func TestBaseTxSyntacticVerify(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -189,7 +193,7 @@ func TestBaseTxSyntacticVerify(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -230,7 +234,7 @@ func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { tx := &BaseTx{ NetID: 0, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -240,7 +244,7 @@ func TestBaseTxSyntacticVerifyWrongNetworkID(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -272,7 +276,7 @@ func TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: ids.Empty, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -282,7 +286,7 @@ func 
TestBaseTxSyntacticVerifyWrongChainID(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -315,7 +319,7 @@ func TestBaseTxSyntacticVerifyInvalidOutput(t *testing.T) { NetID: networkID, BCID: chainID, Outs: []*ava.TransferableOutput{nil}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -348,7 +352,7 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { NetID: networkID, BCID: chainID, Outs: []*ava.TransferableOutput{ - &ava.TransferableOutput{ + { Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 2, @@ -358,7 +362,7 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }, }, - &ava.TransferableOutput{ + { Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 1, @@ -370,7 +374,7 @@ func TestBaseTxSyntacticVerifyUnsortedOutputs(t *testing.T) { }, }, Ins: []*ava.TransferableInput{ - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -403,7 +407,7 @@ func TestBaseTxSyntacticVerifyInvalidInput(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -428,7 +432,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -439,7 +443,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }, }}, Ins: []*ava.TransferableInput{ - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -457,7 +461,7 @@ func TestBaseTxSyntacticVerifyInputOverflow(t *testing.T) { }, }, }, - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -491,7 +495,7 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { NetID: networkID, BCID: chainID, Outs: []*ava.TransferableOutput{ - &ava.TransferableOutput{ + { Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 2, @@ -501,7 +505,7 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }, }, }, - &ava.TransferableOutput{ + { Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: math.MaxUint64, @@ -512,7 +516,7 @@ func TestBaseTxSyntacticVerifyOutputOverflow(t *testing.T) { }, }, }, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -544,7 +548,7 @@ func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: math.MaxUint64, @@ -554,7 +558,7 @@ func TestBaseTxSyntacticVerifyInsufficientFunds(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: 
ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -586,7 +590,7 @@ func TestBaseTxSyntacticVerifyUninitialized(t *testing.T) { tx := &BaseTx{ NetID: networkID, BCID: chainID, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: asset}, Out: &secp256k1fx.TransferOutput{ Amt: 12345, @@ -596,7 +600,7 @@ func TestBaseTxSyntacticVerifyUninitialized(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, @@ -633,7 +637,7 @@ func TestBaseTxSemanticVerify(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -702,7 +706,7 @@ func TestBaseTxSemanticVerifyUnknownFx(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -754,7 +758,7 @@ func TestBaseTxSemanticVerifyWrongAssetID(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -825,11 +829,11 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ + { ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }, - &common.Fx{ + { ID: ids.NewID([32]byte{1}), Fx: &testFx{}, }, @@ -863,7 +867,7 @@ func TestBaseTxSemanticVerifyUnauthorizedFx(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -923,7 +927,7 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -941,9 +945,7 @@ func TestBaseTxSemanticVerifyInvalidSignature(t *testing.T) { }} tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - [crypto.SECP256K1RSigLen]byte{}, - }, + Sigs: [][crypto.SECP256K1RSigLen]byte{{}}, }) b, err := vm.codec.Marshal(tx) @@ -977,7 +979,7 @@ func TestBaseTxSemanticVerifyMissingUTXO(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 1, @@ -1044,7 +1046,7 @@ func TestBaseTxSemanticVerifyInvalidUTXO(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: math.MaxUint32, @@ -1107,7 +1109,7 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -1122,7 +1124,7 @@ 
func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { }, }, }}, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: genesisTx.ID()}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, @@ -1179,7 +1181,7 @@ func TestBaseTxSemanticVerifyPendingInvalidUTXO(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: 2, @@ -1241,7 +1243,7 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -1256,7 +1258,7 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { }, }, }}, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: genesisTx.ID()}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, @@ -1313,7 +1315,7 @@ func TestBaseTxSemanticVerifyPendingWrongAssetID(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: 0, @@ -1381,11 +1383,11 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ + { ID: ids.NewID([32]byte{1}), Fx: &secp256k1fx.Fx{}, }, - &common.Fx{ + { ID: ids.Empty, Fx: &testFx{}, }, @@ -1419,7 +1421,7 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -1434,7 +1436,7 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { }, }, }}, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: genesisTx.ID()}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, @@ -1491,7 +1493,7 @@ func TestBaseTxSemanticVerifyPendingUnauthorizedFx(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: 0, @@ -1543,11 +1545,11 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ + { ID: ids.NewID([32]byte{1}), Fx: &secp256k1fx.Fx{}, }, - &common.Fx{ + { ID: ids.Empty, Fx: &testFx{}, }, @@ -1581,7 +1583,7 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { pendingTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -1596,7 +1598,7 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { }, }, }}, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: genesisTx.ID()}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, @@ -1653,7 +1655,7 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: 
[]*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: txID, OutputIndex: 0, @@ -1671,9 +1673,7 @@ func TestBaseTxSemanticVerifyPendingInvalidSignature(t *testing.T) { }} tx.Creds = append(tx.Creds, &secp256k1fx.Credential{ - Sigs: [][crypto.SECP256K1RSigLen]byte{ - [crypto.SECP256K1RSigLen]byte{}, - }, + Sigs: [][crypto.SECP256K1RSigLen]byte{{}}, }) b, err = vm.codec.Marshal(tx) diff --git a/vms/avm/create_asset_tx_test.go b/vms/avm/create_asset_tx_test.go index 324f403..6f1f686 100644 --- a/vms/avm/create_asset_tx_test.go +++ b/vms/avm/create_asset_tx_test.go @@ -8,8 +8,8 @@ import ( "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -93,7 +93,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, }), - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ ID: ids.NewID([32]byte{ 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, @@ -122,7 +122,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { }, }, }}, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.NewID([32]byte{ 0xf1, 0xe1, 0xd1, 0xc1, 0xb1, 0xa1, 0x91, 0x81, @@ -152,7 +152,7 @@ func TestCreateAssetTxSerialization(t *testing.T) { Symbol: "VIX", Denomination: 2, States: []*InitialState{ - &InitialState{ + { FxID: 0, Outs: []verify.Verifiable{ &secp256k1fx.TransferOutput{ diff --git a/vms/avm/export_tx_test.go b/vms/avm/export_tx_test.go index fdef399..75b359f 100644 --- a/vms/avm/export_tx_test.go +++ b/vms/avm/export_tx_test.go @@ -12,11 +12,11 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/hashing" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -68,7 +68,7 @@ func TestExportTxSerialization(t *testing.T) { 0xbb, 0xbb, 0xbb, 0xbb, 0xaa, 0xaa, 0xaa, 0xaa, 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, }), - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{TxID: ids.NewID([32]byte{ 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, @@ -141,7 +141,7 @@ func TestIssueExportTx(t *testing.T) { memdb.New(), genesisBytes, issuer, - []*common.Fx{&common.Fx{ + []*common.Fx{{ ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }}, @@ -167,7 +167,7 @@ func TestIssueExportTx(t *testing.T) { BaseTx: BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: avaID, OutputIndex: 1, @@ -179,7 +179,7 @@ func TestIssueExportTx(t *testing.T) { }, }}, }, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: avaID}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, @@ -297,7 +297,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { memdb.New(), genesisBytes, issuer, - 
[]*common.Fx{&common.Fx{ + []*common.Fx{{ ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }}, @@ -323,7 +323,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { BaseTx: BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: avaID, OutputIndex: 1, @@ -335,7 +335,7 @@ func TestClearForceAcceptedExportTx(t *testing.T) { }, }}, }, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: avaID}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, diff --git a/vms/avm/import_tx.go b/vms/avm/import_tx.go index 1729221..caf0f93 100644 --- a/vms/avm/import_tx.go +++ b/vms/avm/import_tx.go @@ -11,8 +11,8 @@ import ( "github.com/ava-labs/gecko/database/versiondb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" ) @@ -33,6 +33,15 @@ func (t *ImportTx) InputUTXOs() []*ava.UTXOID { return utxos } +// ConsumedAssetIDs returns the IDs of the assets this transaction consumes +func (t *ImportTx) ConsumedAssetIDs() ids.Set { + assets := t.BaseTx.AssetIDs() + for _, in := range t.Ins { + assets.Add(in.AssetID()) + } + return assets +} + // AssetIDs returns the IDs of the assets this transaction depends on func (t *ImportTx) AssetIDs() ids.Set { assets := t.BaseTx.AssetIDs() diff --git a/vms/avm/import_tx_test.go b/vms/avm/import_tx_test.go index 696e841..e510aff 100644 --- a/vms/avm/import_tx_test.go +++ b/vms/avm/import_tx_test.go @@ -68,7 +68,7 @@ func TestImportTxSerialization(t *testing.T) { 0x99, 0x99, 0x99, 0x99, 0x88, 0x88, 0x88, 0x88, }), }, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{TxID: ids.NewID([32]byte{ 0x0f, 0x2f, 0x4f, 0x6f, 0x8e, 0xae, 0xce, 0xee, 0x0d, 0x2d, 0x4d, 0x6d, 0x8c, 0xac, 0xcc, 0xec, @@ -130,7 +130,7 @@ func TestIssueImportTx(t *testing.T) { memdb.New(), genesisBytes, issuer, - []*common.Fx{&common.Fx{ + []*common.Fx{{ ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }}, @@ -166,7 +166,7 @@ func TestIssueImportTx(t *testing.T) { NetID: networkID, BCID: chainID, }, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: utxoID, Asset: ava.Asset{ID: avaID}, In: &secp256k1fx.TransferInput{ @@ -288,7 +288,7 @@ func TestForceAcceptImportTx(t *testing.T) { memdb.New(), genesisBytes, issuer, - []*common.Fx{&common.Fx{ + []*common.Fx{{ ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }}, @@ -326,7 +326,7 @@ func TestForceAcceptImportTx(t *testing.T) { NetID: networkID, BCID: chainID, }, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: utxoID, Asset: ava.Asset{ID: genesisTx.ID()}, In: &secp256k1fx.TransferInput{ diff --git a/vms/avm/operation_test.go b/vms/avm/operation_test.go index 8b85901..b3aed54 100644 --- a/vms/avm/operation_test.go +++ b/vms/avm/operation_test.go @@ -7,8 +7,8 @@ import ( "testing" "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" ) @@ -43,11 +43,11 @@ func TestOperationVerifyUTXOIDsNotSorted(t *testing.T) { op := &Operation{ Asset: ava.Asset{ID: ids.Empty}, UTXOIDs: []*ava.UTXOID{ - &ava.UTXOID{ + { TxID: ids.Empty, 
OutputIndex: 1, }, - &ava.UTXOID{ + { TxID: ids.Empty, OutputIndex: 0, }, @@ -64,7 +64,7 @@ func TestOperationVerify(t *testing.T) { op := &Operation{ Asset: ava.Asset{ID: ids.Empty}, UTXOIDs: []*ava.UTXOID{ - &ava.UTXOID{ + { TxID: ids.Empty, OutputIndex: 1, }, @@ -81,20 +81,20 @@ func TestOperationSorting(t *testing.T) { c.RegisterType(&testOperable{}) ops := []*Operation{ - &Operation{ + { Asset: ava.Asset{ID: ids.Empty}, UTXOIDs: []*ava.UTXOID{ - &ava.UTXOID{ + { TxID: ids.Empty, OutputIndex: 1, }, }, Op: &testOperable{}, }, - &Operation{ + { Asset: ava.Asset{ID: ids.Empty}, UTXOIDs: []*ava.UTXOID{ - &ava.UTXOID{ + { TxID: ids.Empty, OutputIndex: 0, }, @@ -112,7 +112,7 @@ func TestOperationSorting(t *testing.T) { ops = append(ops, &Operation{ Asset: ava.Asset{ID: ids.Empty}, UTXOIDs: []*ava.UTXOID{ - &ava.UTXOID{ + { TxID: ids.Empty, OutputIndex: 1, }, diff --git a/vms/avm/operation_tx.go b/vms/avm/operation_tx.go index ec419c7..c45f0be 100644 --- a/vms/avm/operation_tx.go +++ b/vms/avm/operation_tx.go @@ -8,8 +8,8 @@ import ( "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" ) @@ -39,6 +39,17 @@ func (t *OperationTx) InputUTXOs() []*ava.UTXOID { return utxos } +// ConsumedAssetIDs returns the IDs of the assets this transaction consumes +func (t *OperationTx) ConsumedAssetIDs() ids.Set { + assets := t.BaseTx.AssetIDs() + for _, op := range t.Ops { + if len(op.UTXOIDs) > 0 { + assets.Add(op.AssetID()) + } + } + return assets +} + // AssetIDs returns the IDs of the assets this transaction depends on func (t *OperationTx) AssetIDs() ids.Set { assets := t.BaseTx.AssetIDs() diff --git a/vms/avm/prefixed_state_test.go b/vms/avm/prefixed_state_test.go index 74c4b53..e325142 100644 --- a/vms/avm/prefixed_state_test.go +++ b/vms/avm/prefixed_state_test.go @@ -38,7 +38,7 @@ func TestPrefixedSetsAndGets(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, @@ -160,9 +160,7 @@ func TestPrefixedFundingAddresses(t *testing.T) { }, Asset: ava.Asset{ID: ids.Empty}, Out: &ava.TestAddressable{ - Addrs: [][]byte{ - []byte{0}, - }, + Addrs: [][]byte{{0}}, }, } diff --git a/vms/avm/service.go b/vms/avm/service.go index 9b21414..3d33118 100644 --- a/vms/avm/service.go +++ b/vms/avm/service.go @@ -792,7 +792,7 @@ func (service *Service) Send(r *http.Request, args *SendArgs, reply *SendReply) ava.SortTransferableInputsWithSigners(ins, keys) - outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + outs := []*ava.TransferableOutput{{ Asset: ava.Asset{ID: assetID}, Out: &secp256k1fx.TransferOutput{ Amt: uint64(args.Amount), @@ -946,7 +946,7 @@ func (service *Service) CreateMintTx(r *http.Request, args *CreateMintTxArgs, re BCID: service.vm.ctx.ChainID, }, Ops: []*Operation{ - &Operation{ + { Asset: ava.Asset{ID: assetID}, UTXOIDs: []*ava.UTXOID{ &utxo.UTXOID, @@ -1197,7 +1197,7 @@ func (service *Service) ImportAVA(_ *http.Request, args *ImportAVAArgs, reply *I ava.SortTransferableInputsWithSigners(ins, keys) - outs := []*ava.TransferableOutput{&ava.TransferableOutput{ + outs := []*ava.TransferableOutput{{ Asset: ava.Asset{ID: service.vm.ava}, Out: &secp256k1fx.TransferOutput{ Amt: amount, @@ -1352,7 +1352,7 @@ func (service *Service) 
ExportAVA(_ *http.Request, args *ExportAVAArgs, reply *E ava.SortTransferableInputsWithSigners(ins, keys) - exportOuts := []*ava.TransferableOutput{&ava.TransferableOutput{ + exportOuts := []*ava.TransferableOutput{{ Asset: ava.Asset{ID: service.vm.ava}, Out: &secp256k1fx.TransferOutput{ Amt: uint64(args.Amount), diff --git a/vms/avm/service_test.go b/vms/avm/service_test.go index 6e1d387..b8a7d56 100644 --- a/vms/avm/service_test.go +++ b/vms/avm/service_test.go @@ -300,7 +300,7 @@ func TestCreateFixedCapAsset(t *testing.T) { Name: "test asset", Symbol: "test", Denomination: 1, - InitialHolders: []*Holder{&Holder{ + InitialHolders: []*Holder{{ Amount: 123456789, Address: vm.Format(keys[0].PublicKey().Address().Bytes()), }}, @@ -326,7 +326,7 @@ func TestCreateVariableCapAsset(t *testing.T) { Name: "test asset", Symbol: "test", MinterSets: []Owners{ - Owners{ + { Threshold: 1, Minters: []string{ vm.Format(keys[0].PublicKey().Address().Bytes()), @@ -367,7 +367,7 @@ func TestImportAvmKey(t *testing.T) { factory := crypto.FactorySECP256K1R{} skIntf, err := factory.NewPrivateKey() if err != nil { - t.Fatalf("problem generating private key: %w", err) + t.Fatalf("problem generating private key: %s", err) } sk := skIntf.(*crypto.PrivateKeySECP256K1R) @@ -406,7 +406,7 @@ func TestImportAvmKeyNoDuplicates(t *testing.T) { factory := crypto.FactorySECP256K1R{} skIntf, err := factory.NewPrivateKey() if err != nil { - t.Fatalf("problem generating private key: %w", err) + t.Fatalf("problem generating private key: %s", err) } sk := skIntf.(*crypto.PrivateKeySECP256K1R) diff --git a/vms/avm/state_test.go b/vms/avm/state_test.go index 1feabab..3617ddc 100644 --- a/vms/avm/state_test.go +++ b/vms/avm/state_test.go @@ -288,7 +288,7 @@ func TestStateTXs(t *testing.T) { tx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, diff --git a/vms/avm/static_service_test.go b/vms/avm/static_service_test.go index fd9acc0..34e8803 100644 --- a/vms/avm/static_service_test.go +++ b/vms/avm/static_service_test.go @@ -11,12 +11,12 @@ func TestBuildGenesis(t *testing.T) { ss := StaticService{} args := BuildGenesisArgs{GenesisData: map[string]AssetDefinition{ - "asset1": AssetDefinition{ + "asset1": { Name: "myFixedCapAsset", Symbol: "MFCA", Denomination: 8, InitialState: map[string][]interface{}{ - "fixedCap": []interface{}{ + "fixedCap": { Holder{ Amount: 100000, Address: "A9bTQjfYGBFK3JPRJqF2eh3JYL7cHocvy", @@ -36,11 +36,11 @@ func TestBuildGenesis(t *testing.T) { }, }, }, - "asset2": AssetDefinition{ + "asset2": { Name: "myVarCapAsset", Symbol: "MVCA", InitialState: map[string][]interface{}{ - "variableCap": []interface{}{ + "variableCap": { Owners{ Threshold: 1, Minters: []string{ @@ -58,10 +58,10 @@ func TestBuildGenesis(t *testing.T) { }, }, }, - "asset3": AssetDefinition{ + "asset3": { Name: "myOtherVarCapAsset", InitialState: map[string][]interface{}{ - "variableCap": []interface{}{ + "variableCap": { Owners{ Threshold: 1, Minters: []string{ diff --git a/vms/avm/tx.go b/vms/avm/tx.go index f1d0b71..6c32153 100644 --- a/vms/avm/tx.go +++ b/vms/avm/tx.go @@ -9,8 +9,8 @@ import ( "github.com/ava-labs/gecko/database" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/utils/codec" + "github.com/ava-labs/gecko/vms/components/ava" "github.com/ava-labs/gecko/vms/components/verify" ) @@ 
-24,7 +24,9 @@ type UnsignedTx interface { ID() ids.ID Bytes() []byte + ConsumedAssetIDs() ids.Set AssetIDs() ids.Set + NumCredentials() int InputUTXOs() []*ava.UTXOID UTXOs() []*ava.UTXO diff --git a/vms/avm/tx_test.go b/vms/avm/tx_test.go index 53e20de..f088d9e 100644 --- a/vms/avm/tx_test.go +++ b/vms/avm/tx_test.go @@ -7,9 +7,9 @@ import ( "testing" "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/verify" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -56,7 +56,7 @@ func TestTxInvalidCredential(t *testing.T) { UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, @@ -95,7 +95,7 @@ func TestTxInvalidUnsignedTx(t *testing.T) { NetID: networkID, BCID: chainID, Ins: []*ava.TransferableInput{ - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, @@ -110,7 +110,7 @@ func TestTxInvalidUnsignedTx(t *testing.T) { }, }, }, - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{ TxID: ids.Empty, OutputIndex: 0, @@ -153,7 +153,7 @@ func TestTxInvalidNumberOfCredentials(t *testing.T) { NetID: networkID, BCID: chainID, Ins: []*ava.TransferableInput{ - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{TxID: ids.Empty, OutputIndex: 0}, Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ @@ -165,7 +165,7 @@ func TestTxInvalidNumberOfCredentials(t *testing.T) { }, }, }, - &ava.TransferableInput{ + { UTXOID: ava.UTXOID{TxID: ids.Empty, OutputIndex: 1}, Asset: ava.Asset{ID: asset}, In: &secp256k1fx.TransferInput{ diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go index 8d3feb2..892b513 100644 --- a/vms/avm/unique_tx.go +++ b/vms/avm/unique_tx.go @@ -206,22 +206,25 @@ func (tx *UniqueTx) Dependencies() []snowstorm.Tx { continue } txID, _ := in.InputSource() - if !txIDs.Contains(txID) { - txIDs.Add(txID) - tx.deps = append(tx.deps, &UniqueTx{ - vm: tx.vm, - txID: txID, - }) + if txIDs.Contains(txID) { + continue } + txIDs.Add(txID) + tx.deps = append(tx.deps, &UniqueTx{ + vm: tx.vm, + txID: txID, + }) } + consumedIDs := tx.Tx.ConsumedAssetIDs() for _, assetID := range tx.Tx.AssetIDs().List() { - if !txIDs.Contains(assetID) { - txIDs.Add(assetID) - tx.deps = append(tx.deps, &UniqueTx{ - vm: tx.vm, - txID: assetID, - }) + if consumedIDs.Contains(assetID) || txIDs.Contains(assetID) { + continue } + txIDs.Add(assetID) + tx.deps = append(tx.deps, &UniqueTx{ + vm: tx.vm, + txID: assetID, + }) } return tx.deps } diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 4c0820d..d89d625 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -20,12 +20,12 @@ import ( "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/snowstorm" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" "github.com/ava-labs/gecko/utils/timer" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/utils/codec" cjson "github.com/ava-labs/gecko/utils/json" ) @@ -248,8 +248,8 @@ func (vm *VM) CreateHandlers() map[string]*common.HTTPHandler { rpcServer.RegisterService(&Service{vm: vm}, "avm") // name this service "avm" return map[string]*common.HTTPHandler{ - "": 
&common.HTTPHandler{Handler: rpcServer}, - "/pubsub": &common.HTTPHandler{LockOptions: common.NoLock, Handler: vm.pubsub}, + "": {Handler: rpcServer}, + "/pubsub": {LockOptions: common.NoLock, Handler: vm.pubsub}, } } @@ -261,7 +261,7 @@ func (vm *VM) CreateStaticHandlers() map[string]*common.HTTPHandler { newServer.RegisterCodec(codec, "application/json;charset=UTF-8") newServer.RegisterService(&StaticService{}, "avm") // name this service "avm" return map[string]*common.HTTPHandler{ - "": &common.HTTPHandler{LockOptions: common.WriteLock, Handler: newServer}, + "": {LockOptions: common.WriteLock, Handler: newServer}, } } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index d3a2d73..81a08f8 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -84,11 +84,11 @@ func BuildGenesisTest(t *testing.T) []byte { addr2 := keys[2].PublicKey().Address() args := BuildGenesisArgs{GenesisData: map[string]AssetDefinition{ - "asset1": AssetDefinition{ + "asset1": { Name: "myFixedCapAsset", Symbol: "MFCA", InitialState: map[string][]interface{}{ - "fixedCap": []interface{}{ + "fixedCap": { Holder{ Amount: 100000, Address: addr0.String(), @@ -108,11 +108,11 @@ func BuildGenesisTest(t *testing.T) []byte { }, }, }, - "asset2": AssetDefinition{ + "asset2": { Name: "myVarCapAsset", Symbol: "MVCA", InitialState: map[string][]interface{}{ - "variableCap": []interface{}{ + "variableCap": { Owners{ Threshold: 1, Minters: []string{ @@ -131,10 +131,10 @@ func BuildGenesisTest(t *testing.T) []byte { }, }, }, - "asset3": AssetDefinition{ + "asset3": { Name: "myOtherVarCapAsset", InitialState: map[string][]interface{}{ - "variableCap": []interface{}{ + "variableCap": { Owners{ Threshold: 1, Minters: []string{ @@ -168,7 +168,7 @@ func GenesisVM(t *testing.T) ([]byte, chan common.Message, *VM) { memdb.New(), genesisBytes, issuer, - []*common.Fx{&common.Fx{ + []*common.Fx{{ ID: ids.Empty, Fx: &secp256k1fx.Fx{}, }}, @@ -195,7 +195,7 @@ func NewTx(t *testing.T, genesisBytes []byte, vm *VM) *Tx { newTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -357,7 +357,7 @@ func TestTxSerialization(t *testing.T) { Symbol: "symb", Denomination: 0, States: []*InitialState{ - &InitialState{ + { FxID: 0, Outs: []verify.Verifiable{ &secp256k1fx.MintOutput{ @@ -456,7 +456,7 @@ func TestFxInitializationFailure(t *testing.T) { /*db=*/ memdb.New(), /*genesisState=*/ genesisBytes, /*engineMessenger=*/ make(chan common.Message, 1), - /*fxs=*/ []*common.Fx{&common.Fx{ + /*fxs=*/ []*common.Fx{{ ID: ids.Empty, Fx: &testFx{initialize: errUnknownFx}, }}, @@ -537,7 +537,7 @@ func TestIssueDependentTx(t *testing.T) { firstTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: genesisTx.ID(), OutputIndex: 1, @@ -552,7 +552,7 @@ func TestIssueDependentTx(t *testing.T) { }, }, }}, - Outs: []*ava.TransferableOutput{&ava.TransferableOutput{ + Outs: []*ava.TransferableOutput{{ Asset: ava.Asset{ID: genesisTx.ID()}, Out: &secp256k1fx.TransferOutput{ Amt: 50000, @@ -596,7 +596,7 @@ func TestIssueDependentTx(t *testing.T) { secondTx := &Tx{UnsignedTx: &BaseTx{ NetID: networkID, BCID: chainID, - Ins: []*ava.TransferableInput{&ava.TransferableInput{ + Ins: []*ava.TransferableInput{{ UTXOID: ava.UTXOID{ TxID: firstTx.ID(), OutputIndex: 0, @@ -671,11 +671,11 @@ func 
TestIssueNFT(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ + { ID: ids.Empty.Prefix(0), Fx: &secp256k1fx.Fx{}, }, - &common.Fx{ + { ID: ids.Empty.Prefix(1), Fx: &nftfx.Fx{}, }, @@ -704,7 +704,7 @@ func TestIssueNFT(t *testing.T) { Name: "Team Rocket", Symbol: "TR", Denomination: 0, - States: []*InitialState{&InitialState{ + States: []*InitialState{{ FxID: 1, Outs: []verify.Verifiable{ &nftfx.MintOutput{ @@ -740,9 +740,9 @@ func TestIssueNFT(t *testing.T) { NetID: networkID, BCID: chainID, }, - Ops: []*Operation{&Operation{ + Ops: []*Operation{{ Asset: ava.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + UTXOIDs: []*ava.UTXOID{{ TxID: createAssetTx.ID(), OutputIndex: 0, }}, @@ -752,9 +752,7 @@ func TestIssueNFT(t *testing.T) { }, GroupID: 1, Payload: []byte{'h', 'e', 'l', 'l', 'o'}, - Outputs: []*secp256k1fx.OutputOwners{ - &secp256k1fx.OutputOwners{}, - }, + Outputs: []*secp256k1fx.OutputOwners{{}}, }, }}, }} @@ -793,9 +791,9 @@ func TestIssueNFT(t *testing.T) { NetID: networkID, BCID: chainID, }, - Ops: []*Operation{&Operation{ + Ops: []*Operation{{ Asset: ava.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + UTXOIDs: []*ava.UTXOID{{ TxID: mintNFTTx.ID(), OutputIndex: 0, }}, @@ -840,15 +838,15 @@ func TestIssueProperty(t *testing.T) { genesisBytes, issuer, []*common.Fx{ - &common.Fx{ + { ID: ids.Empty.Prefix(0), Fx: &secp256k1fx.Fx{}, }, - &common.Fx{ + { ID: ids.Empty.Prefix(1), Fx: &nftfx.Fx{}, }, - &common.Fx{ + { ID: ids.Empty.Prefix(2), Fx: &propertyfx.Fx{}, }, @@ -877,7 +875,7 @@ func TestIssueProperty(t *testing.T) { Name: "Team Rocket", Symbol: "TR", Denomination: 0, - States: []*InitialState{&InitialState{ + States: []*InitialState{{ FxID: 2, Outs: []verify.Verifiable{ &propertyfx.MintOutput{ @@ -905,9 +903,9 @@ func TestIssueProperty(t *testing.T) { NetID: networkID, BCID: chainID, }, - Ops: []*Operation{&Operation{ + Ops: []*Operation{{ Asset: ava.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + UTXOIDs: []*ava.UTXOID{{ TxID: createAssetTx.ID(), OutputIndex: 0, }}, @@ -960,9 +958,9 @@ func TestIssueProperty(t *testing.T) { NetID: networkID, BCID: chainID, }, - Ops: []*Operation{&Operation{ + Ops: []*Operation{{ Asset: ava.Asset{ID: createAssetTx.ID()}, - UTXOIDs: []*ava.UTXOID{&ava.UTXOID{ + UTXOIDs: []*ava.UTXOID{{ TxID: mintPropertyTx.ID(), OutputIndex: 1, }}, From a3d3ef4787042e4a631a856d0c6086830d3294bf Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Thu, 18 Jun 2020 12:29:17 -0400 Subject: [PATCH 098/183] Change ForceAccepted to process list of already stored vtxs --- snow/engine/avalanche/bootstrapper.go | 26 ++++++++++++++++++-------- 1 file changed, 18 insertions(+), 8 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index d5e5e7f..5646195 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -126,11 +126,14 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error { } // Process vertices -func (b *bootstrapper) process(vtx avalanche.Vertex) error { +func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { toProcess := newMaxVertexHeap() - if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process if we haven't already - toProcess.Push(vtx) + for _, vtx := range vtxs { + if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process if we haven't already + toProcess.Push(vtx) + } } + for toProcess.Len() > 0 { vtx := toProcess.Pop() switch vtx.Status() { @@ -217,14 +220,19 
@@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte return b.fetch(neededVtxID) } + processVertices := make([]avalanche.Vertex, 1, len(vtxs)) + processVertices[0] = neededVtx + for _, vtxBytes := range vtxs { // Parse/persist all the vertices - if _, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx + if vtx, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx b.BootstrapConfig.Context.Log.Debug("Failed to parse vertex: %w", err) b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes}) + } else { + processVertices = append(processVertices, vtx) } } - return b.process(neededVtx) + return b.process(processVertices...) } // GetAncestorsFailed is called when a GetAncestors message we sent fails @@ -245,15 +253,17 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error { err) } + storedVtxs := make([]avalanche.Vertex, 0, acceptedContainerIDs.Len()) for _, vtxID := range acceptedContainerIDs.List() { if vtx, err := b.State.GetVertex(vtxID); err == nil { - if err := b.process(vtx); err != nil { - return err - } + storedVtxs = append(storedVtxs, vtx) } else if err := b.fetch(vtxID); err != nil { return err } } + if err := b.process(storedVtxs...); err != nil { + return err + } b.processedStartingAcceptedFrontier = true if numPending := b.outstandingRequests.Len(); numPending == 0 { From 1be5daf5cf352b5cdb13ca467b7328050ccd09a0 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 07:44:28 -0300 Subject: [PATCH 099/183] updating CI --- .ci/run_e2e_tests.sh | 50 ++++++++++++++++++++++---------------------- 1 file changed, 25 insertions(+), 25 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 56d778d..0e43ba0 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,39 +1,39 @@ LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" -docker pull ${LATEST_CONTROLLER_TAG} -docker pull ${LATEST_KURTOSIS_TAG} +#docker pull ${LATEST_CONTROLLER_TAG} SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) SRC_PATH=$(dirname "${SCRIPTS_PATH}") # build docker image we need -bash ${SRC_PATH}/scripts/build_image.sh +bash "${SRC_PATH}"/scripts/build_image.sh # get docker image label GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) -(docker run -v /var/run/docker.sock:/var/run/docker.sock \ ---env DEFAULT_GECKO_IMAGE="${GECKO_IMAGE}" \ ---env TEST_CONTROLLER_IMAGE="${LATEST_CONTROLLER_TAG}" \ -${LATEST_KURTOSIS_TAG}) & +go get -d -t -v github.com/kurtosis-tech/ava-e2e-tests/... -kurtosis_pid=$! +cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests -sleep 90 -kill ${kurtosis_pid} +./scripts/full_rebuild_and_run.sh -ACTUAL_EXIT_STATUS=$(docker ps -a --latest --filter ancestor=${LATEST_CONTROLLER_TAG} --format="{{.Status}}") -EXPECTED_EXIT_STATUS="Exited \(0\).*" - -# Clear containers. -echo "Clearing kurtosis testnet containers." -docker rm $(docker stop $(docker ps -a -q --filter ancestor="${GECKO_IMAGE}" --format="{{.ID}}")) >/dev/null - -if [[ ${ACTUAL_EXIT_STATUS} =~ ${EXPECTED_EXIT_STATUS} ]] -then - echo "Kurtosis test succeeded." - exit 0 -else - echo "Kurtosis test failed." - exit 1 -fi +#kurtosis_pid=$! +# +#sleep 90 +#kill ${kurtosis_pid} +# +#ACTUAL_EXIT_STATUS=$(docker ps -a --latest --filter ancestor=${LATEST_CONTROLLER_TAG} --format="{{.Status}}") +#EXPECTED_EXIT_STATUS="Exited \(0\).*" +# +## Clear containers. 
+#echo "Clearing kurtosis testnet containers." +#docker rm $(docker stop $(docker ps -a -q --filter ancestor="${GECKO_IMAGE}" --format="{{.ID}}")) >/dev/null +# +#if [[ ${ACTUAL_EXIT_STATUS} =~ ${EXPECTED_EXIT_STATUS} ]] +#then +# echo "Kurtosis test succeeded." +# exit 0 +#else +# echo "Kurtosis test failed." +# exit 1 +#fi From 4b22442fd26936e965092881a031397e737fbffb Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 08:01:06 -0300 Subject: [PATCH 100/183] changing path to access ava-e2e-tests --- .ci/run_e2e_tests.sh | 2 +- .travis.yml | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 0e43ba0..9b7dac6 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -13,7 +13,7 @@ GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) go get -d -t -v github.com/kurtosis-tech/ava-e2e-tests/... -cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests +cd "${E2E_TEST_HOME}" || exit ./scripts/full_rebuild_and_run.sh diff --git a/.travis.yml b/.travis.yml index bb57fc9..aac0189 100644 --- a/.travis.yml +++ b/.travis.yml @@ -14,6 +14,7 @@ env: global: - CODECOV_TOKEN="8c18c993-fc6e-4706-998b-01ddc7987804" - GECKO_HOME=/go/src/github.com/ava-labs/gecko/ + - E2E_TEST_HOME=/go/src/github.com/kurtosis-tech/ava-e2e-tests/ - COMMIT=${TRAVIS_COMMIT::8} - DOCKERHUB_REPO=avaplatform/gecko - secure: "L/A9+re0NEKP6EV6H9RcTGiDhX3WMvsiWrkRKDYKqnviqbjY30RK6EM4vvjrM4Lrw2QwsO3YKgnku3+zioE/TxEZFkpkbjNUXru0nYBrWAg1TKVsDXnYaIZkHUejfryST3E8N7F4Hx6zCtGEO0sEdUeKuT+MNUIuHezHooTgGzDjMogm70EWMFjQHc7VucTJu7dWU1RBPjovWQ0q9qflrtCpbrvXFIiihQQ1PQha1Q2C4wLakKuLbhhSafue90Mnyss0blaPHy/tyewcASJu4vsGTKRBn0DzttlkNTwuD6+nKrbmJY0ohunnkVFzYjrZAw1gyN+DCDb/lPbz4ZDItKPwrIUPEtL5xuUOrxUZPUh+0io3Q2d6rjaqkdGjd1KQXzbnW1mn0BxX3d3b2UpIqhBn9umYYjHBKnMuoRiTK33b7U9+LF3K84+tEvVDCPeHs/mw6Inp5jGRSravnM6yPQ6feGzogs4+3EMzZXxnkngKFKCsnd67Oe9xfV9amOU2aQAx4jaAwlPjEpBEkUa8YKx3lPznvmUk1QsNCUbLjdSl5JBaXojLJoiuPbj29hp4S5AXXgn+3Hvwk3ndcFCxi6/l1W9mjYSOtFqg3EAUdF4EgnA/ykQg9ZokkoKY0+qgOzG2bKOAYuCDWeGr7P1apToh00ccsQXL81nVPiq7uDw=" From 5cc4aa471875c13cb81a078c943818b74381f551 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 08:16:43 -0300 Subject: [PATCH 101/183] adding debugging steps --- .ci/run_e2e_tests.sh | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 9b7dac6..eec6946 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,8 +1,8 @@ -LATEST_KURTOSIS_TAG="kurtosistech/kurtosis:latest" -LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" - +#LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" #docker pull ${LATEST_CONTROLLER_TAG} +set -x + SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) SRC_PATH=$(dirname "${SCRIPTS_PATH}") @@ -13,6 +13,13 @@ GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) go get -d -t -v github.com/kurtosis-tech/ava-e2e-tests/... 
+ls -ltrh "${GOPATH}" +ls -ltrh "${GOPATH}"/src/ +ls -ltrh "${GOPATH}"/src/github.com +ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech +ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ +cd "${GOPATH}"/src/ || exit + cd "${E2E_TEST_HOME}" || exit ./scripts/full_rebuild_and_run.sh From ea7b77ca9d9e274e8c1f9483d7f0f5a2ea590931 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 08:17:35 -0300 Subject: [PATCH 102/183] adding correct cd --- .ci/run_e2e_tests.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index eec6946..6a96958 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -18,9 +18,7 @@ ls -ltrh "${GOPATH}"/src/ ls -ltrh "${GOPATH}"/src/github.com ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ -cd "${GOPATH}"/src/ || exit - -cd "${E2E_TEST_HOME}" || exit +cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ || exit ./scripts/full_rebuild_and_run.sh From 9ac4472a27f4ac76acd0e983b6f74eeb400ff192 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 08:28:39 -0300 Subject: [PATCH 103/183] removing -d --- .ci/run_e2e_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 6a96958..b457c76 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -11,7 +11,7 @@ bash "${SRC_PATH}"/scripts/build_image.sh # get docker image label GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) -go get -d -t -v github.com/kurtosis-tech/ava-e2e-tests/... +go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... ls -ltrh "${GOPATH}" ls -ltrh "${GOPATH}"/src/ From 878056d24a341af689c43cabe5a9ee08dd9acab3 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 08:49:09 -0300 Subject: [PATCH 104/183] pulling in gopath --- .ci/run_e2e_tests.sh | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index b457c76..628e83b 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -11,10 +11,12 @@ bash "${SRC_PATH}"/scripts/build_image.sh # get docker image label GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) +export GOPATH="${GOPATH}" go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... ls -ltrh "${GOPATH}" ls -ltrh "${GOPATH}"/src/ +ls -ltrh "${GOPATH}"/bin/ ls -ltrh "${GOPATH}"/src/github.com ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ From 006ff7514996de983a8737bd2aa29f7599262742 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Fri, 19 Jun 2020 11:02:38 -0400 Subject: [PATCH 105/183] improve leveldb batch usage and pointer releasing in caches --- database/versiondb/db.go | 27 ++++++++++++-------- snow/engine/avalanche/state/unique_vertex.go | 2 ++ vms/avm/unique_tx.go | 6 ++++- vms/avm/vm.go | 2 +- 4 files changed, 24 insertions(+), 13 deletions(-) diff --git a/database/versiondb/db.go b/database/versiondb/db.go index fb692bf..7223c55 100644 --- a/database/versiondb/db.go +++ b/database/versiondb/db.go @@ -18,9 +18,10 @@ import ( // database, writing changes to the underlying database only when commit is // called. 
type Database struct { - lock sync.RWMutex - mem map[string]valueDelete - db database.Database + lock sync.RWMutex + mem map[string]valueDelete + db database.Database + batch database.Batch } type valueDelete struct { @@ -31,8 +32,9 @@ type valueDelete struct { // New returns a new prefixed database func New(db database.Database) *Database { return &Database{ - mem: make(map[string]valueDelete, memdb.DefaultSize), - db: db, + mem: make(map[string]valueDelete, memdb.DefaultSize), + db: db, + batch: db.NewBatch(), } } @@ -169,6 +171,7 @@ func (db *Database) SetDatabase(newDB database.Database) error { } db.db = newDB + db.batch = newDB.NewBatch() return nil } @@ -206,7 +209,9 @@ func (db *Database) Abort() { func (db *Database) abort() { db.mem = make(map[string]valueDelete, memdb.DefaultSize) } -// CommitBatch returns a batch that will commit all pending writes to the underlying database +// CommitBatch returns a batch that will commit all pending writes to the +// underlying database. The returned batch should be written before future calls +// to this DB unless the batch will never be written. func (db *Database) CommitBatch() (database.Batch, error) { db.lock.Lock() defer db.lock.Unlock() @@ -219,21 +224,21 @@ func (db *Database) commitBatch() (database.Batch, error) { return nil, database.ErrClosed } - batch := db.db.NewBatch() + db.batch.Reset() for key, value := range db.mem { if value.delete { - if err := batch.Delete([]byte(key)); err != nil { + if err := db.batch.Delete([]byte(key)); err != nil { return nil, err } - } else if err := batch.Put([]byte(key), value.value); err != nil { + } else if err := db.batch.Put([]byte(key), value.value); err != nil { return nil, err } } - if err := batch.Write(); err != nil { + if err := db.batch.Write(); err != nil { return nil, err } - return batch, nil + return db.batch, nil } // Close implements the database.Database interface diff --git a/snow/engine/avalanche/state/unique_vertex.go b/snow/engine/avalanche/state/unique_vertex.go index f7c6927..75c5893 100644 --- a/snow/engine/avalanche/state/unique_vertex.go +++ b/snow/engine/avalanche/state/unique_vertex.go @@ -54,6 +54,8 @@ func (vtx *uniqueVertex) refresh() { func (vtx *uniqueVertex) Evict() { if vtx.v != nil { vtx.v.unique = false + // make sure the parents are able to be garbage collected + vtx.v.parents = nil } } diff --git a/vms/avm/unique_tx.go b/vms/avm/unique_tx.go index 892b513..576702e 100644 --- a/vms/avm/unique_tx.go +++ b/vms/avm/unique_tx.go @@ -85,7 +85,11 @@ func (tx *UniqueTx) refresh() { // Evict is called when this UniqueTx will no longer be returned from a cache // lookup -func (tx *UniqueTx) Evict() { tx.unique = false } // Lock is already held here +func (tx *UniqueTx) Evict() { + // Lock is already held here + tx.unique = false + tx.deps = nil +} func (tx *UniqueTx) setStatus(status choices.Status) error { tx.refresh() diff --git a/vms/avm/vm.go b/vms/avm/vm.go index d89d625..715ce95 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -35,7 +35,7 @@ const ( batchSize = 30 stateCacheSize = 10000 idCacheSize = 10000 - txCacheSize = 100000 + txCacheSize = 10000 addressSep = "-" ) From 8b75abdee03ef1b1ab1b2b92d2420201dbacf418 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 12:18:20 -0400 Subject: [PATCH 106/183] cache only contains vertices at height 5000, 10000, etc. 
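A minimal sketch of the caching rule this patch adds to bootstrapper.process, kept outside the engine for readability. The standalone shouldCache helper and the main function are illustrative assumptions and not part of the diff below; the rule itself is that only vertices whose height is an exact multiple of stripeDistance (2500 in this revision) get an entry in the processed-vertices cache:

    package main

    import "fmt"

    // stripeDistance mirrors the constant introduced in this patch; heights that
    // are exact multiples of it form the cached "stripes" of the DAG.
    const stripeDistance = 2500

    // shouldCache reports whether a processed vertex at the given height would be
    // recorded in the processedCache under the new rule.
    func shouldCache(height uint64) bool {
        return height%stripeDistance == 0
    }

    func main() {
        for _, h := range []uint64{1, 2499, 2500, 5000, 5001} {
            fmt.Printf("height %5d -> cached: %v\n", h, shouldCache(h))
        }
    }
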
--- snow/engine/avalanche/bootstrapper.go | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 5646195..cdfb77b 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -17,7 +17,14 @@ import ( ) const ( - cacheSize = 100000 + // We cache processed vertices where height = c * stripeDistance for c = {1,2,3...} + // This forms a "stripe" of cached DAG vertices at height stripeDistance, 2*stripeDistance, etc. + // This helps to limit the number of repeated DAG traversals performed + // + // With stripeDistance == 2500, average DAG width == 25 and processedCache size == 100,000 + // the graph can have depth up to 10,000,000 and hold every stripe in cache (100,000 / 25 == x / 2,500) + stripeDistance = 2500 + cacheSize = 100000 ) // BootstrapConfig ... @@ -175,6 +182,9 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { toProcess.Push(parent) } } + if vtx.Height()%stripeDistance == 0 { + b.processedCache.Put(vtx.ID(), nil) + } b.processedCache.Put(vtx.ID(), nil) } } From 28a313becf19ca1648ab9ba0a23743b6ff0e870d Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 19 Jun 2020 13:02:28 -0400 Subject: [PATCH 107/183] Fix param attribute access --- snow/engine/avalanche/transitive.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 74e2ed2..565267b 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -57,7 +57,7 @@ func (t *Transitive) Initialize(config Config) error { t.onFinished = t.finishBootstrapping - t.polls = newPolls(int(config.Alpha), config.Context.Log, t.numPolls) + t.polls = newPolls(int(config.Params.Alpha), config.Context.Log, t.numPolls) return t.bootstrapper.Initialize(config.BootstrapConfig) } From 25478846d3f97d23170d56ab412673802a56f176 Mon Sep 17 00:00:00 2001 From: Aaron Buchwald Date: Fri, 19 Jun 2020 13:34:47 -0400 Subject: [PATCH 108/183] Remove extra string formatter in chain router QueryFailed message --- snow/networking/router/chain_router.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/networking/router/chain_router.go b/snow/networking/router/chain_router.go index 8cf708a..bbb345d 100644 --- a/snow/networking/router/chain_router.go +++ b/snow/networking/router/chain_router.go @@ -320,7 +320,7 @@ func (sr *ChainRouter) QueryFailed(validatorID ids.ShortID, chainID ids.ID, requ if chain, exists := sr.chains[chainID.Key()]; exists { chain.QueryFailed(validatorID, requestID) } else { - sr.log.Error("QueryFailed(%s, %s, %d, %s) dropped due to unknown chain", validatorID, chainID, requestID) + sr.log.Error("QueryFailed(%s, %s, %d) dropped due to unknown chain", validatorID, chainID, requestID) } } From 27bdba477608bf43279ff66d3511411d28fdf5dd Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 13:44:57 -0400 Subject: [PATCH 109/183] GET to /ext/health returns a 200 and no body, per Fabio's request --- api/health/service.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/api/health/service.go b/api/health/service.go index 1989ab3..51a7c82 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -38,7 +38,14 @@ func (h *Health) Handler() *common.HTTPHandler { newServer.RegisterCodec(codec, "application/json") newServer.RegisterCodec(codec, "application/json;charset=UTF-8") newServer.RegisterService(h, 
"health") - return &common.HTTPHandler{LockOptions: common.NoLock, Handler: newServer} + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Method == http.MethodGet { // GET request --> reply with 200 + w.WriteHeader(http.StatusOK) + } else { + newServer.ServeHTTP(w, r) // Other request --> use JSON RPC + } + }) + return &common.HTTPHandler{LockOptions: common.NoLock, Handler: handler} } // RegisterHeartbeat adds a check with default options and a CheckFn that checks From 979f4e27597824a8d089bd83abfcb17babb28863 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 14:05:11 -0400 Subject: [PATCH 110/183] GET to ext/health returns 200 if heathy, else 500 --- api/health/service.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/api/health/service.go b/api/health/service.go index 51a7c82..27a15f7 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -39,8 +39,12 @@ func (h *Health) Handler() *common.HTTPHandler { newServer.RegisterCodec(codec, "application/json;charset=UTF-8") newServer.RegisterService(h, "health") handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.Method == http.MethodGet { // GET request --> reply with 200 - w.WriteHeader(http.StatusOK) + if r.Method == http.MethodGet { // GET request --> return 200 if getLiveness returns true, else 500 + if _, healthy := h.health.Results(); healthy { + w.WriteHeader(http.StatusOK) + } else { + w.WriteHeader(http.StatusInternalServerError) + } } else { newServer.ServeHTTP(w, r) // Other request --> use JSON RPC } From 0627c7c28e01885bdeb907acf0bbdee629249872 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Fri, 19 Jun 2020 15:50:09 -0300 Subject: [PATCH 111/183] turning GO111MODULE off to pull --- .ci/run_e2e_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 628e83b..67bd2a8 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -12,7 +12,7 @@ bash "${SRC_PATH}"/scripts/build_image.sh GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) export GOPATH="${GOPATH}" -go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... +GO111MODULE=off go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... 
ls -ltrh "${GOPATH}" ls -ltrh "${GOPATH}"/src/ From 975198bb5e8c16aeb42c7e99047df13bd3f4439b Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Fri, 19 Jun 2020 15:13:34 -0400 Subject: [PATCH 112/183] report error returned by the snowstorm RecordPoll --- snow/consensus/avalanche/topological.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index f99cff1..d786a0f 100644 --- a/snow/consensus/avalanche/topological.go +++ b/snow/consensus/avalanche/topological.go @@ -141,7 +141,9 @@ func (ta *Topological) RecordPoll(responses ids.UniqueBag) error { votes := ta.pushVotes(kahns, leaves) // Update the conflict graph: O(|Transactions|) ta.ctx.Log.Verbo("Updating consumer confidences based on:\n%s", &votes) - ta.cg.RecordPoll(votes) + if err := ta.cg.RecordPoll(votes); err != nil { + return err + } // Update the dag: O(|Live Set|) return ta.updateFrontiers() } From 245f13ea6542384de34af7813914ac6baab81ff7 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 16:29:27 -0400 Subject: [PATCH 113/183] add stripeDistance and stripeWidth --- snow/engine/avalanche/bootstrapper.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index cdfb77b..08ca7df 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -20,10 +20,8 @@ const ( // We cache processed vertices where height = c * stripeDistance for c = {1,2,3...} // This forms a "stripe" of cached DAG vertices at height stripeDistance, 2*stripeDistance, etc. // This helps to limit the number of repeated DAG traversals performed - // - // With stripeDistance == 2500, average DAG width == 25 and processedCache size == 100,000 - // the graph can have depth up to 10,000,000 and hold every stripe in cache (100,000 / 25 == x / 2,500) - stripeDistance = 2500 + stripeDistance = 2000 + stripeWidth = 5 cacheSize = 100000 ) @@ -182,10 +180,9 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { toProcess.Push(parent) } } - if vtx.Height()%stripeDistance == 0 { + if vtx.Height()%stripeDistance < stripeWidth { b.processedCache.Put(vtx.ID(), nil) } - b.processedCache.Put(vtx.ID(), nil) } } From 7c05353d3d048d3a01eea52597c91611664bd095 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 16:33:33 -0400 Subject: [PATCH 114/183] skip unneccessary parse --- snow/engine/avalanche/bootstrapper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 08ca7df..3ed58c7 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -230,7 +230,7 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte processVertices := make([]avalanche.Vertex, 1, len(vtxs)) processVertices[0] = neededVtx - for _, vtxBytes := range vtxs { // Parse/persist all the vertices + for _, vtxBytes := range vtxs[1:] { // Parse/persist all the vertices if vtx, err := b.State.ParseVertex(vtxBytes); err != nil { // Persists the vtx b.BootstrapConfig.Context.Log.Debug("Failed to parse vertex: %w", err) b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes}) From 6c34fd79eb0cfb080be256178582a56d2480275d Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Fri, 19 Jun 2020 17:56:35 -0400 Subject: [PATCH 115/183] version bump --- node/node.go | 
2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index 752c78d..d003767 100644 --- a/node/node.go +++ b/node/node.go @@ -57,7 +57,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this code - Version = version.NewDefaultVersion("avalanche", 0, 5, 5) + Version = version.NewDefaultVersion("avalanche", 0, 5, 6) versionParser = version.NewDefaultParser() ) From 4d4a073d604d9d37948ff144dc01c9fb0e2d69dc Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 17:57:47 -0400 Subject: [PATCH 116/183] only have 15 outstanding GetAncestors at a time during bootstrapping to not flood the network --- snow/engine/avalanche/bootstrapper.go | 33 +++++++++++++++++++++++++-- snow/engine/common/bootstrapper.go | 3 +++ 2 files changed, 34 insertions(+), 2 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 3ed58c7..749694e 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -51,6 +51,11 @@ type bootstrapper struct { // tracks which validators were asked for which containers in which requests outstandingRequests common.Requests + // IDs of vertices that we will send a GetAncestors request for once we are not at the + // max number of outstanding requests + // Invariant: The intersection of needToFetch and outstandingRequests is empty + needToFetch ids.Set + // Contains IDs of vertices that have recently been processed processedCache *cache.LRU @@ -103,11 +108,21 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { return acceptedVtxIDs } -// Get vertex [vtxID] and its ancestors +// Calls fetch for a pending vertex if there are any +func (b *bootstrapper) fetchANeededVtx() error { + if b.needToFetch.Len() > 0 { + return b.fetch(b.needToFetch.List()[0]) + } + return nil +} + +// Get vertex [vtxID] and its ancestors. +// If [vtxID] has already been requested or is already fetched, and there are +// unrequested vertices, requests one such vertex instead of [vtxID] func (b *bootstrapper) fetch(vtxID ids.ID) error { // Make sure we haven't already requested this block if b.outstandingRequests.Contains(vtxID) { - return nil + return b.fetchANeededVtx() } // Make sure we don't already have this vertex @@ -115,6 +130,13 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error { if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier { return b.finish() } + b.needToFetch.Remove(vtxID) // we have this vertex. no need to request it. 
+ return b.fetchANeededVtx() + } + + // If we're already at maximum number of outstanding requests, queue for later + if b.outstandingRequests.Len() >= common.MaxOutstandingRequests { + b.needToFetch.Add(vtxID) return nil } @@ -126,6 +148,7 @@ func (b *bootstrapper) fetch(vtxID ids.ID) error { b.RequestID++ b.outstandingRequests.Add(validatorID, b.RequestID, vtxID) + b.needToFetch.Remove(vtxID) // maintains invariant that intersection with outstandingRequests is empty b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors return nil } @@ -236,9 +259,15 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxBytes}) } else { processVertices = append(processVertices, vtx) + b.needToFetch.Remove(vtx.ID()) // No need to fetch this vertex since we have it now } } + // Now there is one less outstanding request; send another if needed + if err := b.fetchANeededVtx(); err != nil { + return err + } + return b.process(processVertices...) } diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index 49f4051..963a4fb 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -17,6 +17,9 @@ const ( // StatusUpdateFrequency ... bootstrapper logs "processed X blocks/vertices" every [statusUpdateFrequency] blocks/vertices StatusUpdateFrequency = 2500 + + // MaxOutstandingRequests is the maximum number of GetAncestors sent but not responsded to/failed + MaxOutstandingRequests = 15 ) var ( From 67d92815010bc617c01094ccedd4d307ba57234a Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 19 Jun 2020 18:06:04 -0400 Subject: [PATCH 117/183] change maximum # outstanding to 8 to reduce load on nodes --- snow/engine/common/bootstrapper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index 963a4fb..8c9c745 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -19,7 +19,7 @@ const ( StatusUpdateFrequency = 2500 // MaxOutstandingRequests is the maximum number of GetAncestors sent but not responsded to/failed - MaxOutstandingRequests = 15 + MaxOutstandingRequests = 8 ) var ( From 32812e5375d763d33d67ea0657d22f9dfbdb1680 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Fri, 19 Jun 2020 18:36:45 -0400 Subject: [PATCH 118/183] re-added the admin API calls to be backwards compatible --- api/admin/service.go | 96 +++++++++++++++++++++++++++++++++++++++++++- node/node.go | 2 +- 2 files changed, 96 insertions(+), 2 deletions(-) diff --git a/api/admin/service.go b/api/admin/service.go index 3d61730..0718dfd 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -10,35 +10,129 @@ import ( "github.com/ava-labs/gecko/api" "github.com/ava-labs/gecko/chains" + "github.com/ava-labs/gecko/genesis" + "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/network" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/version" cjson "github.com/ava-labs/gecko/utils/json" ) // Admin is the API service for node admin management type Admin struct { + version version.Version + nodeID ids.ShortID + networkID uint32 log logging.Logger + networking network.Network performance Performance chainManager chains.Manager httpServer *api.Server } // NewService returns a new admin API service -func NewService(log 
logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler { +func NewService(version version.Version, nodeID ids.ShortID, networkID uint32, log logging.Logger, chainManager chains.Manager, peers network.Network, httpServer *api.Server) *common.HTTPHandler { newServer := rpc.NewServer() codec := cjson.NewCodec() newServer.RegisterCodec(codec, "application/json") newServer.RegisterCodec(codec, "application/json;charset=UTF-8") newServer.RegisterService(&Admin{ + version: version, + nodeID: nodeID, + networkID: networkID, log: log, chainManager: chainManager, + networking: peers, httpServer: httpServer, }, "admin") return &common.HTTPHandler{Handler: newServer} } +// GetNodeVersionReply are the results from calling GetNodeVersion +type GetNodeVersionReply struct { + Version string `json:"version"` +} + +// GetNodeVersion returns the version this node is running +func (service *Admin) GetNodeVersion(_ *http.Request, _ *struct{}, reply *GetNodeVersionReply) error { + service.log.Info("Admin: GetNodeVersion called") + + reply.Version = service.version.String() + return nil +} + +// GetNodeIDReply are the results from calling GetNodeID +type GetNodeIDReply struct { + NodeID ids.ShortID `json:"nodeID"` +} + +// GetNodeID returns the node ID of this node +func (service *Admin) GetNodeID(_ *http.Request, _ *struct{}, reply *GetNodeIDReply) error { + service.log.Info("Admin: GetNodeID called") + + reply.NodeID = service.nodeID + return nil +} + +// GetNetworkIDReply are the results from calling GetNetworkID +type GetNetworkIDReply struct { + NetworkID cjson.Uint32 `json:"networkID"` +} + +// GetNetworkID returns the network ID this node is running on +func (service *Admin) GetNetworkID(_ *http.Request, _ *struct{}, reply *GetNetworkIDReply) error { + service.log.Info("Admin: GetNetworkID called") + + reply.NetworkID = cjson.Uint32(service.networkID) + return nil +} + +// GetNetworkNameReply is the result from calling GetNetworkName +type GetNetworkNameReply struct { + NetworkName string `json:"networkName"` +} + +// GetNetworkName returns the network name this node is running on +func (service *Admin) GetNetworkName(_ *http.Request, _ *struct{}, reply *GetNetworkNameReply) error { + service.log.Info("Admin: GetNetworkName called") + + reply.NetworkName = genesis.NetworkName(service.networkID) + return nil +} + +// GetBlockchainIDArgs are the arguments for calling GetBlockchainID +type GetBlockchainIDArgs struct { + Alias string `json:"alias"` +} + +// GetBlockchainIDReply are the results from calling GetBlockchainID +type GetBlockchainIDReply struct { + BlockchainID string `json:"blockchainID"` +} + +// GetBlockchainID returns the blockchain ID that resolves the alias that was supplied +func (service *Admin) GetBlockchainID(_ *http.Request, args *GetBlockchainIDArgs, reply *GetBlockchainIDReply) error { + service.log.Info("Admin: GetBlockchainID called") + + bID, err := service.chainManager.Lookup(args.Alias) + reply.BlockchainID = bID.String() + return err +} + +// PeersReply are the results from calling Peers +type PeersReply struct { + Peers []network.PeerID `json:"peers"` +} + +// Peers returns the list of current validators +func (service *Admin) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) error { + service.log.Info("Admin: Peers called") + reply.Peers = service.networking.Peers() + return nil +} + // StartCPUProfilerArgs are the arguments for calling StartCPUProfiler type StartCPUProfilerArgs struct { Filename string 
`json:"filename"` diff --git a/node/node.go b/node/node.go index d003767..dbc58a8 100644 --- a/node/node.go +++ b/node/node.go @@ -462,7 +462,7 @@ func (n *Node) initMetricsAPI() { func (n *Node) initAdminAPI() { if n.Config.AdminAPIEnabled { n.Log.Info("initializing Admin API") - service := admin.NewService(n.Log, n.chainManager, n.Net, &n.APIServer) + service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer) n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog) } } From 2e16f2087a58f39b74a64e6f0046feaca70d06b5 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 10:22:55 -0300 Subject: [PATCH 119/183] cleaning up CI script --- .ci/run_e2e_tests.sh | 38 +------------------------------------- 1 file changed, 1 insertion(+), 37 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 67bd2a8..6aef7a3 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,46 +1,10 @@ -#LATEST_CONTROLLER_TAG="kurtosistech/ava-test-controller:latest" -#docker pull ${LATEST_CONTROLLER_TAG} - -set -x - SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) SRC_PATH=$(dirname "${SCRIPTS_PATH}") - -# build docker image we need +# Build the runnable Gecko docker image bash "${SRC_PATH}"/scripts/build_image.sh -# get docker image label GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) -export GOPATH="${GOPATH}" GO111MODULE=off go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... -ls -ltrh "${GOPATH}" -ls -ltrh "${GOPATH}"/src/ -ls -ltrh "${GOPATH}"/bin/ -ls -ltrh "${GOPATH}"/src/github.com -ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech -ls -ltrh "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ || exit - ./scripts/full_rebuild_and_run.sh - -#kurtosis_pid=$! -# -#sleep 90 -#kill ${kurtosis_pid} -# -#ACTUAL_EXIT_STATUS=$(docker ps -a --latest --filter ancestor=${LATEST_CONTROLLER_TAG} --format="{{.Status}}") -#EXPECTED_EXIT_STATUS="Exited \(0\).*" -# -## Clear containers. -#echo "Clearing kurtosis testnet containers." -#docker rm $(docker stop $(docker ps -a -q --filter ancestor="${GECKO_IMAGE}" --format="{{.ID}}")) >/dev/null -# -#if [[ ${ACTUAL_EXIT_STATUS} =~ ${EXPECTED_EXIT_STATUS} ]] -#then -# echo "Kurtosis test succeeded." -# exit 0 -#else -# echo "Kurtosis test failed." -# exit 1 -#fi From a523fb184aba68b5e2c6991a6db4e8bf17e0bda6 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 10:24:02 -0300 Subject: [PATCH 120/183] cleaning CI run script --- .ci/run_e2e_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 6aef7a3..119fcf2 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -4,7 +4,7 @@ SRC_PATH=$(dirname "${SCRIPTS_PATH}") bash "${SRC_PATH}"/scripts/build_image.sh GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) +# Turn off GO111MODULE to pull e2e test source code in order to get run script. GO111MODULE=off go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... 
- cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ || exit ./scripts/full_rebuild_and_run.sh From 4cacb56cf53aaa6ed435d8a9cd3f3182261da33c Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 12:12:53 -0300 Subject: [PATCH 121/183] commenting controller image label from script --- .ci/run_e2e_tests.sh | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 119fcf2..fa5bb8d 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -7,4 +7,10 @@ GECKO_IMAGE=$(docker image ls --format="{{.Repository}}" | head -n 1) # Turn off GO111MODULE to pull e2e test source code in order to get run script. GO111MODULE=off go get -t -v github.com/kurtosis-tech/ava-e2e-tests/... cd "${GOPATH}"/src/github.com/kurtosis-tech/ava-e2e-tests/ || exit -./scripts/full_rebuild_and_run.sh + +bash "./scripts/rebuild_initializer_binary.sh" +bash "./scripts/rebuild_controller_image.sh" +# TODO: Make the controller image label a parameter to rebuild_controller_image script +# Standard controller image label used by above scripts. +CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest" +bash "./build/ava-e2e-tests --gecko-image-name=${GECKO_IMAGE} --test-controller-image-name=${CONTROLLER_IMAGE}" From 551e16fe368a4406a59bf63fdab23edaa1e583ab Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 12:26:36 -0300 Subject: [PATCH 122/183] checking build directory --- .ci/run_e2e_tests.sh | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index fa5bb8d..03c2582 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,3 +1,5 @@ +set -x + SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) SRC_PATH=$(dirname "${SCRIPTS_PATH}") # Build the runnable Gecko docker image @@ -13,4 +15,6 @@ bash "./scripts/rebuild_controller_image.sh" # TODO: Make the controller image label a parameter to rebuild_controller_image script # Standard controller image label used by above scripts. 
CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest" +ls -ltrh ./ +ls -ltrh ./build/ bash "./build/ava-e2e-tests --gecko-image-name=${GECKO_IMAGE} --test-controller-image-name=${CONTROLLER_IMAGE}" From 0f0439ff159f3210e97ce0ec2ca4f07891e23bd0 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 12:42:23 -0300 Subject: [PATCH 123/183] calling executable raw --- .ci/run_e2e_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index 03c2582..df6b1c2 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -17,4 +17,4 @@ bash "./scripts/rebuild_controller_image.sh" CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest" ls -ltrh ./ ls -ltrh ./build/ -bash "./build/ava-e2e-tests --gecko-image-name=${GECKO_IMAGE} --test-controller-image-name=${CONTROLLER_IMAGE}" +./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" From 50fba7520babce27ba1fa865b34c175e58c429fa Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 12:54:02 -0300 Subject: [PATCH 124/183] defining just two tests --- .ci/run_e2e_tests.sh | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index df6b1c2..a4f0b81 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -15,6 +15,4 @@ bash "./scripts/rebuild_controller_image.sh" # TODO: Make the controller image label a parameter to rebuild_controller_image script # Standard controller image label used by above scripts. CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest" -ls -ltrh ./ -ls -ltrh ./build/ -./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" +./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" --test="fiveStakingNodeGetValidatorsTest,fiveStakingNodeFullyConnectedTest" From f4a428351d6dd5253a99a679428f141c9b17fcfa Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sat, 20 Jun 2020 13:02:04 -0300 Subject: [PATCH 125/183] corrected test-names arg --- .ci/run_e2e_tests.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index a4f0b81..fee3861 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -15,4 +15,4 @@ bash "./scripts/rebuild_controller_image.sh" # TODO: Make the controller image label a parameter to rebuild_controller_image script # Standard controller image label used by above scripts. 
CONTROLLER_IMAGE="kurtosistech/ava-e2e-tests_controller:latest" -./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" --test="fiveStakingNodeGetValidatorsTest,fiveStakingNodeFullyConnectedTest" +./build/ava-e2e-tests --gecko-image-name="${GECKO_IMAGE}" --test-controller-image-name="${CONTROLLER_IMAGE}" --test-names="fiveStakingNodeGetValidatorsTest,fiveStakingNodeFullyConnectedTest" From 6a37d268bcce86eef0c24c77b98c77b77f840911 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 21 Jun 2020 16:12:53 -0300 Subject: [PATCH 126/183] replacing whitespace --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index aac0189..559bed8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -23,7 +23,7 @@ before_install: - if [ "$TRAVIS_OS_NAME" = "linux" ]; then .ci/before_install_linux.sh; fi install: - - if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT .; fi + - if [ "$TRAVIS_OS_NAME" = "linux" ]; then DOCKER_BUILDKIT=1 docker build --progress plain -t $DOCKERHUB_REPO:$COMMIT . ; fi script: - if [ "$TRAVIS_OS_NAME" = "osx" ]; then .ci/runscript_osx.sh; fi From e2aea232147da693608bf67844eb8a9fe332a87f Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 21 Jun 2020 16:13:59 -0300 Subject: [PATCH 127/183] re-enabling osx --- .travis.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.travis.yml b/.travis.yml index 559bed8..b06f3c2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -6,8 +6,8 @@ jobs: include: - os: linux dist: bionic - #- os: osx - # osx_image: xcode11.4 + - os: osx + osx_image: xcode11.4 services: - docker env: From 62340e4f29c2ac4978dea280d6461e4d159c6421 Mon Sep 17 00:00:00 2001 From: galenmarchetti Date: Sun, 21 Jun 2020 17:09:12 -0300 Subject: [PATCH 128/183] removing set x --- .ci/run_e2e_tests.sh | 2 -- 1 file changed, 2 deletions(-) diff --git a/.ci/run_e2e_tests.sh b/.ci/run_e2e_tests.sh index fee3861..82126d1 100755 --- a/.ci/run_e2e_tests.sh +++ b/.ci/run_e2e_tests.sh @@ -1,5 +1,3 @@ -set -x - SCRIPTS_PATH=$(cd $(dirname "${BASH_SOURCE[0]}"); pwd) SRC_PATH=$(dirname "${SCRIPTS_PATH}") # Build the runnable Gecko docker image From b0ad887a42701724d3d059b01513766f40a54fb9 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 2020 19:38:24 -0400 Subject: [PATCH 129/183] refactored polls into their own sub package --- .../avalanche/poll/early_term_no_traversal.go | 85 +++++++ .../poll/early_term_no_traversal_test.go | 207 ++++++++++++++++++ snow/engine/avalanche/poll/interfaces.go | 33 +++ snow/engine/avalanche/poll/no_early_term.go | 52 +++++ .../avalanche/poll/no_early_term_test.go | 91 ++++++++ snow/engine/avalanche/poll/set.go | 107 +++++++++ snow/engine/avalanche/poll/set_test.go | 94 ++++++++ snow/engine/avalanche/polls.go | 137 ------------ snow/engine/avalanche/polls_test.go | 99 --------- snow/engine/avalanche/transitive.go | 14 +- .../snowman/poll/early_term_no_traversal.go | 73 ++++++ .../poll/early_term_no_traversal_test.go | 205 +++++++++++++++++ snow/engine/snowman/poll/interfaces.go | 35 +++ snow/engine/snowman/poll/no_early_term.go | 55 +++++ .../engine/snowman/poll/no_early_term_test.go | 92 ++++++++ snow/engine/snowman/poll/set.go | 134 ++++++++++++ snow/engine/snowman/poll/set_test.go | 132 +++++++++++ snow/engine/snowman/polls.go | 115 ---------- snow/engine/snowman/transitive.go | 15 +- snow/engine/snowman/transitive_test.go | 4 +- 
snow/engine/snowman/voter.go | 2 +- 21 files changed, 1417 insertions(+), 364 deletions(-) create mode 100644 snow/engine/avalanche/poll/early_term_no_traversal.go create mode 100644 snow/engine/avalanche/poll/early_term_no_traversal_test.go create mode 100644 snow/engine/avalanche/poll/interfaces.go create mode 100644 snow/engine/avalanche/poll/no_early_term.go create mode 100644 snow/engine/avalanche/poll/no_early_term_test.go create mode 100644 snow/engine/avalanche/poll/set.go create mode 100644 snow/engine/avalanche/poll/set_test.go delete mode 100644 snow/engine/avalanche/polls.go delete mode 100644 snow/engine/avalanche/polls_test.go create mode 100644 snow/engine/snowman/poll/early_term_no_traversal.go create mode 100644 snow/engine/snowman/poll/early_term_no_traversal_test.go create mode 100644 snow/engine/snowman/poll/interfaces.go create mode 100644 snow/engine/snowman/poll/no_early_term.go create mode 100644 snow/engine/snowman/poll/no_early_term_test.go create mode 100644 snow/engine/snowman/poll/set.go create mode 100644 snow/engine/snowman/poll/set_test.go delete mode 100644 snow/engine/snowman/polls.go diff --git a/snow/engine/avalanche/poll/early_term_no_traversal.go b/snow/engine/avalanche/poll/early_term_no_traversal.go new file mode 100644 index 0000000..52fdae3 --- /dev/null +++ b/snow/engine/avalanche/poll/early_term_no_traversal.go @@ -0,0 +1,85 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +type earlyTermNoTraversalFactory struct { + alpha int +} + +// NewEarlyTermNoTraversalFactory returns a factory that returns polls with +// early termination, without doing DAG traversals +func NewEarlyTermNoTraversalFactory(alpha int) Factory { + return &earlyTermNoTraversalFactory{alpha: alpha} +} + +func (f *earlyTermNoTraversalFactory) New(vdrs ids.ShortSet) Poll { + return &earlyTermNoTraversalPoll{ + polled: vdrs, + alpha: f.alpha, + } +} + +// earlyTermNoTraversalPoll finishes when any remaining validators can't change +// the result of the poll. However, does not terminate tightly with this bound. +// It terminates as quickly as it can without performing any DAG traversals. +type earlyTermNoTraversalPoll struct { + votes ids.UniqueBag + polled ids.ShortSet + alpha int +} + +// Vote registers a response for this poll +func (p *earlyTermNoTraversalPoll) Vote(vdr ids.ShortID, votes []ids.ID) { + if !p.polled.Contains(vdr) { + // if the validator wasn't polled or already responded to this poll, we + // should just drop the vote + return + } + + // make sure that a validator can't respond multiple times + p.polled.Remove(vdr) + + // track the votes the validator responded with + p.votes.Add(uint(p.polled.Len()), votes...) +} + +// Finished returns true when all validators have voted +func (p *earlyTermNoTraversalPoll) Finished() bool { + // If there are no outstanding queries, the poll is finished + numPending := p.polled.Len() + if numPending == 0 { + return true + } + // If there are still enough pending responses to include another vertex, + // then the poll must wait for more responses + if numPending > p.alpha { + return false + } + + // Ignore any vertex that has already received alpha votes. To safely skip + // DAG traversal, assume that all votes for vertices with less than alpha + // votes will be applied to a single shared ancestor. 
In this case, the poll + // can terminate early, iff there are not enough pending votes for this + // ancestor to receive alpha votes. + partialVotes := ids.BitSet(0) + for _, vote := range p.votes.List() { + if voters := p.votes.GetSet(vote); voters.Len() < p.alpha { + partialVotes.Union(voters) + } + } + return partialVotes.Len()+numPending < p.alpha +} + +// Result returns the result of this poll +func (p *earlyTermNoTraversalPoll) Result() ids.UniqueBag { return p.votes } + +func (p *earlyTermNoTraversalPoll) String() string { + return fmt.Sprintf("waiting on %s", p.polled) +} diff --git a/snow/engine/avalanche/poll/early_term_no_traversal_test.go b/snow/engine/avalanche/poll/early_term_no_traversal_test.go new file mode 100644 index 0000000..ba2d81a --- /dev/null +++ b/snow/engine/avalanche/poll/early_term_no_traversal_test.go @@ -0,0 +1,207 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestEarlyTermNoTraversalResults(t *testing.T) { + alpha := 1 + + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) // k = 1 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } + + result := poll.Result() + if list := result.List(); len(list) != 1 { + t.Fatalf("Wrong number of vertices returned") + } else if retVtxID := list[0]; !retVtxID.Equals(vtxID) { + t.Fatalf("Wrong vertex returned") + } else if set := result.GetSet(vtxID); set.Len() != 1 { + t.Fatalf("Wrong number of votes returned") + } +} + +func TestEarlyTermNoTraversalString(t *testing.T) { + alpha := 2 + + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + + expected := "waiting on {BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp}" + if result := poll.String(); expected != result { + t.Fatalf("Poll should have returned %s but returned %s", expected, result) + } +} + +func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { + alpha := 2 + + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr1, votes) + if poll.Finished() { + t.Fatalf("Poll finished after getting a duplicated vote") + } + poll.Vote(vdr2, votes) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } +} + +func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { + alpha := 3 + + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) + vdr4 := ids.NewShortID([20]byte{4}) + vdr5 := ids.NewShortID([20]byte{5}) // k = 5 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + vdr3, + vdr4, + vdr5, + ) + + factory := 
NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr2, votes) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr3, votes) + if !poll.Finished() { + t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") + } +} + +func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { + alpha := 4 + + vtxA := ids.NewID([32]byte{1}) + vtxB := ids.NewID([32]byte{2}) + vtxC := ids.NewID([32]byte{3}) + vtxD := ids.NewID([32]byte{4}) + + // If validators 1-3 vote for frontier vertices + // B, C, and D respectively, which all share the common ancestor + // A, then we cannot terminate early with alpha = k = 4 + // If the final vote is cast for any of A, B, C, or D, then + // vertex A will have transitively received alpha = 4 votes + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) + vdr4 := ids.NewShortID([20]byte{4}) + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + vdrs.Add(vdr2) + vdrs.Add(vdr3) + vdrs.Add(vdr4) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, []ids.ID{vtxB}) + if poll.Finished() { + t.Fatalf("Poll finished early after receiving one vote") + } + poll.Vote(vdr2, []ids.ID{vtxC}) + if poll.Finished() { + t.Fatalf("Poll finished early after receiving two votes") + } + poll.Vote(vdr3, []ids.ID{vtxD}) + if poll.Finished() { + t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") + } + poll.Vote(vdr4, []ids.ID{vtxA}) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving all outstanding votes") + } +} + +func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { + alpha := 2 + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) // k = 3 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + vdr3, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, nil) + if poll.Finished() { + t.Fatalf("Poll finished early after dropping one vote") + } + poll.Vote(vdr2, nil) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after dropping two votes") + } +} diff --git a/snow/engine/avalanche/poll/interfaces.go b/snow/engine/avalanche/poll/interfaces.go new file mode 100644 index 0000000..05234a3 --- /dev/null +++ b/snow/engine/avalanche/poll/interfaces.go @@ -0,0 +1,33 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// Set is a collection of polls +type Set interface { + fmt.Stringer + + Add(requestID uint32, vdrs ids.ShortSet) bool + Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.UniqueBag, bool) + Len() int +} + +// Poll is an outstanding poll +type Poll interface { + fmt.Stringer + + Vote(vdr ids.ShortID, votes []ids.ID) + Finished() bool + Result() ids.UniqueBag +} + +// Factory creates a new Poll +type Factory interface { + New(vdrs ids.ShortSet) Poll +} diff --git a/snow/engine/avalanche/poll/no_early_term.go b/snow/engine/avalanche/poll/no_early_term.go new file mode 100644 index 0000000..9a06649 --- /dev/null +++ b/snow/engine/avalanche/poll/no_early_term.go @@ -0,0 +1,52 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. 
+// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +type noEarlyTermFactory struct{} + +// NewNoEarlyTermFactory returns a factory that returns polls with no early +// termination +func NewNoEarlyTermFactory() Factory { return noEarlyTermFactory{} } + +func (noEarlyTermFactory) New(vdrs ids.ShortSet) Poll { + return &noEarlyTermPoll{polled: vdrs} +} + +// noEarlyTermPoll finishes when all polled validators either respond to the +// query or a timeout occurs +type noEarlyTermPoll struct { + votes ids.UniqueBag + polled ids.ShortSet +} + +// Vote registers a response for this poll +func (p *noEarlyTermPoll) Vote(vdr ids.ShortID, votes []ids.ID) { + if !p.polled.Contains(vdr) { + // if the validator wasn't polled or already responded to this poll, we + // should just drop the vote + return + } + + // make sure that a validator can't respond multiple times + p.polled.Remove(vdr) + + // track the votes the validator responded with + p.votes.Add(uint(p.polled.Len()), votes...) +} + +// Finished returns true when all validators have voted +func (p *noEarlyTermPoll) Finished() bool { return p.polled.Len() == 0 } + +// Result returns the result of this poll +func (p *noEarlyTermPoll) Result() ids.UniqueBag { return p.votes } + +func (p *noEarlyTermPoll) String() string { + return fmt.Sprintf("waiting on %s", p.polled) +} diff --git a/snow/engine/avalanche/poll/no_early_term_test.go b/snow/engine/avalanche/poll/no_early_term_test.go new file mode 100644 index 0000000..f877416 --- /dev/null +++ b/snow/engine/avalanche/poll/no_early_term_test.go @@ -0,0 +1,91 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestNoEarlyTermResults(t *testing.T) { + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) // k = 1 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + + factory := NewNoEarlyTermFactory() + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } + + result := poll.Result() + if list := result.List(); len(list) != 1 { + t.Fatalf("Wrong number of vertices returned") + } else if retVtxID := list[0]; !retVtxID.Equals(vtxID) { + t.Fatalf("Wrong vertex returned") + } else if set := result.GetSet(vtxID); set.Len() != 1 { + t.Fatalf("Wrong number of votes returned") + } +} + +func TestNoEarlyTermString(t *testing.T) { + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewNoEarlyTermFactory() + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + + expected := "waiting on {BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp}" + if result := poll.String(); expected != result { + t.Fatalf("Poll should have returned %s but returned %s", expected, result) + } +} + +func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewNoEarlyTermFactory() + poll := factory.New(vdrs) + + poll.Vote(vdr1, votes) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr1, votes) 
+ if poll.Finished() { + t.Fatalf("Poll finished after getting a duplicated vote") + } + poll.Vote(vdr2, votes) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } +} diff --git a/snow/engine/avalanche/poll/set.go b/snow/engine/avalanche/poll/set.go new file mode 100644 index 0000000..34a8a1a --- /dev/null +++ b/snow/engine/avalanche/poll/set.go @@ -0,0 +1,107 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + "strings" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +type set struct { + log logging.Logger + numPolls prometheus.Gauge + factory Factory + polls map[uint32]Poll +} + +// NewSet returns a new empty set of polls +func NewSet( + factory Factory, + log logging.Logger, + namespace string, + registerer prometheus.Registerer, +) Set { + numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "polls", + Help: "Number of pending network polls", + }) + if err := registerer.Register(numPolls); err != nil { + log.Error("failed to register av_polls statistics due to %s", err) + } + + return &set{ + log: log, + numPolls: numPolls, + factory: factory, + polls: make(map[uint32]Poll), + } +} + +// Add to the current set of polls +// Returns true if the poll was registered correctly and the network sample +// should be made. +func (s *set) Add(requestID uint32, vdrs ids.ShortSet) bool { + if _, exists := s.polls[requestID]; exists { + s.log.Debug("dropping poll due to duplicated requestID: %d", requestID) + return false + } + + s.log.Verbo("creating poll with requestID %d and validators %s", + requestID, + vdrs) + + s.polls[requestID] = s.factory.New(vdrs) // create the new poll + s.numPolls.Inc() // increase the metrics + return true +} + +// Vote registers the connections response to a query for [id]. If there was no +// query, or the response has already be registered, nothing is performed. +func (s *set) Vote( + requestID uint32, + vdr ids.ShortID, + votes []ids.ID, +) (ids.UniqueBag, bool) { + poll, exists := s.polls[requestID] + if !exists { + s.log.Verbo("dropping vote from %s to an unknown poll with requestID: %d", + vdr, + requestID) + return nil, false + } + + s.log.Verbo("processing vote from %s in the poll with requestID: %d with the votes %v", + vdr, + requestID, + votes) + + poll.Vote(vdr, votes) + if !poll.Finished() { + return nil, false + } + + s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) + + delete(s.polls, requestID) // remove the poll from the current set + s.numPolls.Dec() // decrease the metrics + return poll.Result(), true +} + +// Len returns the number of outstanding polls +func (s *set) Len() int { return len(s.polls) } + +func (s *set) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("current polls: (Size = %d)", len(s.polls))) + for requestID, poll := range s.polls { + sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll)) + } + return sb.String() +} diff --git a/snow/engine/avalanche/poll/set_test.go b/snow/engine/avalanche/poll/set_test.go new file mode 100644 index 0000000..496f993 --- /dev/null +++ b/snow/engine/avalanche/poll/set_test.go @@ -0,0 +1,94 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
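+//
+// In rough terms, the tests below exercise the avalanche poll set: a poll is
+// registered per requestID, duplicate Add calls for the same requestID are
+// rejected, votes for unknown requestIDs and repeated votes from the same
+// validator are ignored, and (since these tests use the no-early-termination
+// factory) the aggregated ids.UniqueBag result is only returned once every
+// polled validator has voted.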
+ +package poll + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" + "github.com/prometheus/client_golang/prometheus" +) + +func TestNewSetErrorOnMetrics(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + + registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "polls", + })) + + _ = NewSet(factory, log, namespace, registerer) +} + +func TestCreateAndFinishPoll(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + s := NewSet(factory, log, namespace, registerer) + + vtxID := ids.NewID([32]byte{1}) + votes := []ids.ID{vtxID} + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + if s.Len() != 0 { + t.Fatalf("Shouldn't have any active polls yet") + } else if !s.Add(0, vdrs) { + t.Fatalf("Should have been able to add a new poll") + } else if s.Len() != 1 { + t.Fatalf("Should only have one active poll") + } else if s.Add(0, vdrs) { + t.Fatalf("Shouldn't have been able to add a duplicated poll") + } else if s.Len() != 1 { + t.Fatalf("Should only have one active poll") + } else if _, finished := s.Vote(1, vdr1, votes); finished { + t.Fatalf("Shouldn't have been able to finish a non-existant poll") + } else if _, finished := s.Vote(0, vdr1, votes); finished { + t.Fatalf("Shouldn't have been able to finish an ongoing poll") + } else if _, finished := s.Vote(0, vdr1, votes); finished { + t.Fatalf("Should have dropped a duplicated poll") + } else if result, finished := s.Vote(0, vdr2, votes); !finished { + t.Fatalf("Should have finished the") + } else if list := result.List(); len(list) != 1 { + t.Fatalf("Wrong number of vertices returned") + } else if retVtxID := list[0]; !retVtxID.Equals(vtxID) { + t.Fatalf("Wrong vertex returned") + } else if set := result.GetSet(vtxID); set.Len() != 2 { + t.Fatalf("Wrong number of votes returned") + } +} + +func TestSetString(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + s := NewSet(factory, log, namespace, registerer) + + vdr1 := ids.NewShortID([20]byte{1}) // k = 1 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + + expected := "current polls: (Size = 1)\n" + + " 0: waiting on {6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt}" + if !s.Add(0, vdrs) { + t.Fatalf("Should have been able to add a new poll") + } else if str := s.String(); expected != str { + t.Fatalf("Set return wrong string, Expected:\n%s\nReturned:\n%s", + expected, + str) + } +} diff --git a/snow/engine/avalanche/polls.go b/snow/engine/avalanche/polls.go deleted file mode 100644 index ac3fe5c..0000000 --- a/snow/engine/avalanche/polls.go +++ /dev/null @@ -1,137 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package avalanche - -import ( - "fmt" - "strings" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/logging" -) - -// TODO: There is a conservative early termination case that doesn't require dag -// traversals we may want to implement. The algorithm would go as follows: -// Keep track of the number of response that reference an ID. If an ID gets >= -// alpha responses, then remove it from all responses and place it into a chit -// list. 
Remove all empty responses. If the number of responses + the number of -// pending responses is less than alpha, terminate the poll. -// In the synchronous + virtuous case, when everyone returns the same hash, the -// poll now terminates after receiving alpha responses. -// In the rogue case, it is possible that the poll doesn't terminate as quickly -// as possible, because IDs may have the alpha threshold but only when counting -// transitive votes. In this case, we may wait even if it is no longer possible -// for another ID to earn alpha votes. -// Because alpha is typically set close to k, this may not be performance -// critical. However, early termination may be performance critical with crashed -// nodes. - -type polls struct { - log logging.Logger - numPolls prometheus.Gauge - alpha int - m map[uint32]poll -} - -func newPolls(alpha int, log logging.Logger, numPolls prometheus.Gauge) polls { - return polls{ - log: log, - numPolls: numPolls, - alpha: alpha, - m: make(map[uint32]poll), - } -} - -// Add to the current set of polls -// Returns true if the poll was registered correctly and the network sample -// should be made. -func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool { - poll, exists := p.m[requestID] - if !exists { - poll.polled = vdrs - poll.alpha = p.alpha - p.m[requestID] = poll - - p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics - } - return !exists -} - -// Vote registers the connections response to a query for [id]. If there was no -// query, or the response has already be registered, nothing is performed. -func (p *polls) Vote(requestID uint32, vdr ids.ShortID, votes []ids.ID) (ids.UniqueBag, bool) { - p.log.Verbo("Vote. requestID: %d. validatorID: %s.", requestID, vdr) - poll, exists := p.m[requestID] - p.log.Verbo("Poll: %+v", poll) - if !exists { - return nil, false - } - - poll.Vote(votes, vdr) - if poll.Finished() { - p.log.Verbo("Poll is finished") - delete(p.m, requestID) - p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics - return poll.votes, true - } - p.m[requestID] = poll - return nil, false -} - -func (p *polls) String() string { - sb := strings.Builder{} - - sb.WriteString(fmt.Sprintf("Current polls: (Size = %d)", len(p.m))) - for requestID, poll := range p.m { - sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll)) - } - - return sb.String() -} - -// poll represents the current state of a network poll for a vertex -type poll struct { - votes ids.UniqueBag - polled ids.ShortSet - alpha int -} - -// Vote registers a vote for this poll -func (p *poll) Vote(votes []ids.ID, vdr ids.ShortID) { - if p.polled.Contains(vdr) { - p.polled.Remove(vdr) - p.votes.Add(uint(p.polled.Len()), votes...) - } -} - -// Finished returns true if the poll has completed, with no more required -// responses -func (p poll) Finished() bool { - // If there are no outstanding queries, the poll is finished - numPending := p.polled.Len() - if numPending == 0 { - return true - } - // If there are still enough pending responses to include another vertex, - // then the poll must wait for more responses - if numPending > p.alpha { - return false - } - - // Ignore any vertex that has already received alpha votes. To safely skip - // DAG traversal, assume that all votes for vertices with less than alpha - // votes will be applied to a single shared ancestor. In this case, the poll - // can terminate early, iff there are not enough pending votes for this - // ancestor to receive alpha votes. 
- partialVotes := ids.BitSet(0) - for _, vote := range p.votes.List() { - if voters := p.votes.GetSet(vote); voters.Len() < p.alpha { - partialVotes.Union(voters) - } - } - return partialVotes.Len()+numPending < p.alpha -} -func (p poll) String() string { return fmt.Sprintf("Waiting on %d chits", p.polled.Len()) } diff --git a/snow/engine/avalanche/polls_test.go b/snow/engine/avalanche/polls_test.go deleted file mode 100644 index cbb1ea4..0000000 --- a/snow/engine/avalanche/polls_test.go +++ /dev/null @@ -1,99 +0,0 @@ -package avalanche - -import ( - "testing" - - "github.com/ava-labs/gecko/ids" -) - -func TestPollTerminatesEarlyVirtuousCase(t *testing.T) { - alpha := 3 - - vtxID := GenerateID() - votes := []ids.ID{vtxID} - - vdr1 := ids.NewShortID([20]byte{1}) - vdr2 := ids.NewShortID([20]byte{2}) - vdr3 := ids.NewShortID([20]byte{3}) - vdr4 := ids.NewShortID([20]byte{4}) - vdr5 := ids.NewShortID([20]byte{5}) // k = 5 - - vdrs := ids.ShortSet{} - vdrs.Add(vdr1) - vdrs.Add(vdr2) - vdrs.Add(vdr3) - vdrs.Add(vdr4) - vdrs.Add(vdr5) - - poll := poll{ - votes: make(ids.UniqueBag), - polled: vdrs, - alpha: alpha, - } - - poll.Vote(votes, vdr1) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(votes, vdr2) - if poll.Finished() { - t.Fatalf("Poll finished after less than alpha votes") - } - poll.Vote(votes, vdr3) - if !poll.Finished() { - t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") - } -} - -func TestPollAccountsForSharedAncestor(t *testing.T) { - alpha := 4 - - vtxA := GenerateID() - vtxB := GenerateID() - vtxC := GenerateID() - vtxD := GenerateID() - - // If validators 1-3 vote for frontier vertices - // B, C, and D respectively, which all share the common ancestor - // A, then we cannot terminate early with alpha = k = 4 - // If the final vote is cast for any of A, B, C, or D, then - // vertex A will have transitively received alpha = 4 votes - vdr1 := ids.NewShortID([20]byte{1}) - vdr2 := ids.NewShortID([20]byte{2}) - vdr3 := ids.NewShortID([20]byte{3}) - vdr4 := ids.NewShortID([20]byte{4}) - - vdrs := ids.ShortSet{} - vdrs.Add(vdr1) - vdrs.Add(vdr2) - vdrs.Add(vdr3) - vdrs.Add(vdr4) - - poll := poll{ - votes: make(ids.UniqueBag), - polled: vdrs, - alpha: alpha, - } - - votes1 := []ids.ID{vtxB} - poll.Vote(votes1, vdr1) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving one vote") - } - votes2 := []ids.ID{vtxC} - poll.Vote(votes2, vdr2) - if poll.Finished() { - t.Fatalf("Poll finished early after receiving two votes") - } - votes3 := []ids.ID{vtxD} - poll.Vote(votes3, vdr3) - if poll.Finished() { - t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") - } - - votes4 := []ids.ID{vtxA} - poll.Vote(votes4, vdr4) - if !poll.Finished() { - t.Fatalf("Poll did not terminate after receiving all outstanding votes") - } -} diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 565267b..7412276 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/avalanche" "github.com/ava-labs/gecko/snow/consensus/snowstorm" + "github.com/ava-labs/gecko/snow/engine/avalanche/poll" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/events" "github.com/ava-labs/gecko/utils/formatting" @@ -31,7 +32,7 @@ type Transitive struct { Config bootstrapper 
- polls polls // track people I have asked for their preference + polls poll.Set // track people I have asked for their preference // vtxReqs prevents asking validators for the same vertex vtxReqs common.Requests @@ -57,7 +58,12 @@ func (t *Transitive) Initialize(config Config) error { t.onFinished = t.finishBootstrapping - t.polls = newPolls(int(config.Params.Alpha), config.Context.Log, t.numPolls) + factory := poll.NewEarlyTermNoTraversalFactory(int(config.Params.Alpha)) + t.polls = poll.NewSet(factory, + config.Context.Log, + config.Params.Namespace, + config.Params.Metrics, + ) return t.bootstrapper.Initialize(config.BootstrapConfig) } @@ -309,7 +315,7 @@ func (t *Transitive) Notify(msg common.Message) error { } func (t *Transitive) repoll() error { - if len(t.polls.m) >= t.Params.ConcurrentRepolls || t.errs.Errored() { + if t.polls.Len() >= t.Params.ConcurrentRepolls || t.errs.Errored() { return nil } @@ -318,7 +324,7 @@ func (t *Transitive) repoll() error { return err } - for i := len(t.polls.m); i < t.Params.ConcurrentRepolls; i++ { + for i := t.polls.Len(); i < t.Params.ConcurrentRepolls; i++ { if err := t.batch(nil, false /*=force*/, true /*=empty*/); err != nil { return err } diff --git a/snow/engine/snowman/poll/early_term_no_traversal.go b/snow/engine/snowman/poll/early_term_no_traversal.go new file mode 100644 index 0000000..8042b27 --- /dev/null +++ b/snow/engine/snowman/poll/early_term_no_traversal.go @@ -0,0 +1,73 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +type earlyTermNoTraversalFactory struct { + alpha int +} + +// NewEarlyTermNoTraversalFactory returns a factory that returns polls with +// early termination, without doing DAG traversals +func NewEarlyTermNoTraversalFactory(alpha int) Factory { + return &earlyTermNoTraversalFactory{alpha: alpha} +} + +func (f *earlyTermNoTraversalFactory) New(vdrs ids.ShortSet) Poll { + return &earlyTermNoTraversalPoll{ + polled: vdrs, + alpha: f.alpha, + } +} + +// earlyTermNoTraversalPoll finishes when any remaining validators can't change +// the result of the poll. However, does not terminate tightly with this bound. +// It terminates as quickly as it can without performing any DAG traversals. 
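+//
+// The three ways the poll can finish are, informally:
+//  1. remaining == 0: every polled validator has either voted or been dropped;
+//  2. freq >= alpha: some choice already has an alpha majority; or
+//  3. received+remaining < alpha: even if every outstanding validator voted
+//     the same way, no choice could still reach alpha.
+// As an illustration (parameter values chosen for exposition only): with k = 5
+// and alpha = 3, three matching votes finish the poll by condition 2 while two
+// validators are still outstanding, and dropping three validators before any
+// votes arrive finishes it by condition 3, since at most two votes can ever be
+// recorded.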
+type earlyTermNoTraversalPoll struct { + votes ids.Bag + polled ids.ShortSet + alpha int +} + +// Vote registers a response for this poll +func (p *earlyTermNoTraversalPoll) Vote(vdr ids.ShortID, vote ids.ID) { + if !p.polled.Contains(vdr) { + // if the validator wasn't polled or already responded to this poll, we + // should just drop the vote + return + } + + // make sure that a validator can't respond multiple times + p.polled.Remove(vdr) + + // track the votes the validator responded with + p.votes.Add(vote) +} + +// Drop any future response for this poll +func (p *earlyTermNoTraversalPoll) Drop(vdr ids.ShortID) { + p.polled.Remove(vdr) +} + +// Finished returns true when all validators have voted +func (p *earlyTermNoTraversalPoll) Finished() bool { + remaining := p.polled.Len() + received := p.votes.Len() + _, freq := p.votes.Mode() + return remaining == 0 || // All k nodes responded + freq >= p.alpha || // An alpha majority has returned + received+remaining < p.alpha // An alpha majority can never return +} + +// Result returns the result of this poll +func (p *earlyTermNoTraversalPoll) Result() ids.Bag { return p.votes } + +func (p *earlyTermNoTraversalPoll) String() string { + return fmt.Sprintf("waiting on %s", p.polled) +} diff --git a/snow/engine/snowman/poll/early_term_no_traversal_test.go b/snow/engine/snowman/poll/early_term_no_traversal_test.go new file mode 100644 index 0000000..dd444e9 --- /dev/null +++ b/snow/engine/snowman/poll/early_term_no_traversal_test.go @@ -0,0 +1,205 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestEarlyTermNoTraversalResults(t *testing.T) { + alpha := 1 + + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) // k = 1 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } + + result := poll.Result() + if list := result.List(); len(list) != 1 { + t.Fatalf("Wrong number of vertices returned") + } else if retVtxID := list[0]; !retVtxID.Equals(vtxID) { + t.Fatalf("Wrong vertex returned") + } else if result.Count(vtxID) != 1 { + t.Fatalf("Wrong number of votes returned") + } +} + +func TestEarlyTermNoTraversalString(t *testing.T) { + alpha := 2 + + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + + expected := "waiting on {BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp}" + if result := poll.String(); expected != result { + t.Fatalf("Poll should have returned %s but returned %s", expected, result) + } +} + +func TestEarlyTermNoTraversalDropsDuplicatedVotes(t *testing.T) { + alpha := 2 + + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr1, vtxID) + if poll.Finished() { + t.Fatalf("Poll finished after getting a duplicated vote") + } + poll.Vote(vdr2, vtxID) 
+ if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } +} + +func TestEarlyTermNoTraversalTerminatesEarly(t *testing.T) { + alpha := 3 + + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) + vdr4 := ids.NewShortID([20]byte{4}) + vdr5 := ids.NewShortID([20]byte{5}) // k = 5 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + vdr3, + vdr4, + vdr5, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr2, vtxID) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr3, vtxID) + if !poll.Finished() { + t.Fatalf("Poll did not terminate early after receiving alpha votes for one vertex and none for other vertices") + } +} + +func TestEarlyTermNoTraversalForSharedAncestor(t *testing.T) { + alpha := 4 + + vtxA := ids.NewID([32]byte{1}) + vtxB := ids.NewID([32]byte{2}) + vtxC := ids.NewID([32]byte{3}) + vtxD := ids.NewID([32]byte{4}) + + // If validators 1-3 vote for frontier vertices + // B, C, and D respectively, which all share the common ancestor + // A, then we cannot terminate early with alpha = k = 4 + // If the final vote is cast for any of A, B, C, or D, then + // vertex A will have transitively received alpha = 4 votes + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) + vdr4 := ids.NewShortID([20]byte{4}) + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + vdr3, + vdr4, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxB) + if poll.Finished() { + t.Fatalf("Poll finished early after receiving one vote") + } + poll.Vote(vdr2, vtxC) + if poll.Finished() { + t.Fatalf("Poll finished early after receiving two votes") + } + poll.Vote(vdr3, vtxD) + if poll.Finished() { + t.Fatalf("Poll terminated early, when a shared ancestor could have received alpha votes") + } + poll.Vote(vdr4, vtxA) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving all outstanding votes") + } +} + +func TestEarlyTermNoTraversalWithFastDrops(t *testing.T) { + alpha := 2 + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) + vdr3 := ids.NewShortID([20]byte{3}) // k = 3 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + vdr3, + ) + + factory := NewEarlyTermNoTraversalFactory(alpha) + poll := factory.New(vdrs) + + poll.Drop(vdr1) + if poll.Finished() { + t.Fatalf("Poll finished early after dropping one vote") + } + poll.Drop(vdr2) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after dropping two votes") + } +} diff --git a/snow/engine/snowman/poll/interfaces.go b/snow/engine/snowman/poll/interfaces.go new file mode 100644 index 0000000..33731ad --- /dev/null +++ b/snow/engine/snowman/poll/interfaces.go @@ -0,0 +1,35 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
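+//
+// Informally, this package mirrors the avalanche poll package: a Factory
+// builds individual Poll instances and a Set tracks the outstanding polls by
+// requestID. The snowman variant records a single block ID per response and
+// returns an ids.Bag, and it adds Drop for registering failed queries; the
+// avalanche variant takes a list of vertex IDs per response and returns an
+// ids.UniqueBag.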
+ +package poll + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +// Set is a collection of polls +type Set interface { + fmt.Stringer + + Add(requestID uint32, vdrs ids.ShortSet) bool + Vote(requestID uint32, vdr ids.ShortID, vote ids.ID) (ids.Bag, bool) + Drop(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) + Len() int +} + +// Poll is an outstanding poll +type Poll interface { + fmt.Stringer + + Vote(vdr ids.ShortID, vote ids.ID) + Drop(vdr ids.ShortID) + Finished() bool + Result() ids.Bag +} + +// Factory creates a new Poll +type Factory interface { + New(vdrs ids.ShortSet) Poll +} diff --git a/snow/engine/snowman/poll/no_early_term.go b/snow/engine/snowman/poll/no_early_term.go new file mode 100644 index 0000000..3bcaf38 --- /dev/null +++ b/snow/engine/snowman/poll/no_early_term.go @@ -0,0 +1,55 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + + "github.com/ava-labs/gecko/ids" +) + +type noEarlyTermFactory struct{} + +// NewNoEarlyTermFactory returns a factory that returns polls with no early +// termination +func NewNoEarlyTermFactory() Factory { return noEarlyTermFactory{} } + +func (noEarlyTermFactory) New(vdrs ids.ShortSet) Poll { + return &noEarlyTermPoll{polled: vdrs} +} + +// noEarlyTermPoll finishes when all polled validators either respond to the +// query or a timeout occurs +type noEarlyTermPoll struct { + votes ids.Bag + polled ids.ShortSet +} + +// Vote registers a response for this poll +func (p *noEarlyTermPoll) Vote(vdr ids.ShortID, vote ids.ID) { + if !p.polled.Contains(vdr) { + // if the validator wasn't polled or already responded to this poll, we + // should just drop the vote + return + } + + // make sure that a validator can't respond multiple times + p.polled.Remove(vdr) + + // track the votes the validator responded with + p.votes.Add(vote) +} + +// Drop any future response for this poll +func (p *noEarlyTermPoll) Drop(vdr ids.ShortID) { p.polled.Remove(vdr) } + +// Finished returns true when all validators have voted +func (p *noEarlyTermPoll) Finished() bool { return p.polled.Len() == 0 } + +// Result returns the result of this poll +func (p *noEarlyTermPoll) Result() ids.Bag { return p.votes } + +func (p *noEarlyTermPoll) String() string { + return fmt.Sprintf("waiting on %s", p.polled) +} diff --git a/snow/engine/snowman/poll/no_early_term_test.go b/snow/engine/snowman/poll/no_early_term_test.go new file mode 100644 index 0000000..a366b5e --- /dev/null +++ b/snow/engine/snowman/poll/no_early_term_test.go @@ -0,0 +1,92 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package poll + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" +) + +func TestNoEarlyTermResults(t *testing.T) { + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) // k = 1 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + + factory := NewNoEarlyTermFactory() + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } + + result := poll.Result() + if list := result.List(); len(list) != 1 { + t.Fatalf("Wrong number of vertices returned") + } else if retVtxID := list[0]; !retVtxID.Equals(vtxID) { + t.Fatalf("Wrong vertex returned") + } else if result.Count(vtxID) != 1 { + t.Fatalf("Wrong number of votes returned") + } +} + +func TestNoEarlyTermString(t *testing.T) { + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewNoEarlyTermFactory() + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + + expected := "waiting on {BaMPFdqMUQ46BV8iRcwbVfsam55kMqcp}" + if result := poll.String(); expected != result { + t.Fatalf("Poll should have returned %s but returned %s", expected, result) + } +} + +func TestNoEarlyTermDropsDuplicatedVotes(t *testing.T) { + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + factory := NewNoEarlyTermFactory() + poll := factory.New(vdrs) + + poll.Vote(vdr1, vtxID) + if poll.Finished() { + t.Fatalf("Poll finished after less than alpha votes") + } + poll.Vote(vdr1, vtxID) + if poll.Finished() { + t.Fatalf("Poll finished after getting a duplicated vote") + } + poll.Drop(vdr1) + if poll.Finished() { + t.Fatalf("Poll finished after getting a duplicated vote") + } + poll.Vote(vdr2, vtxID) + if !poll.Finished() { + t.Fatalf("Poll did not terminate after receiving k votes") + } +} diff --git a/snow/engine/snowman/poll/set.go b/snow/engine/snowman/poll/set.go new file mode 100644 index 0000000..cded824 --- /dev/null +++ b/snow/engine/snowman/poll/set.go @@ -0,0 +1,134 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package poll + +import ( + "fmt" + "strings" + + "github.com/prometheus/client_golang/prometheus" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" +) + +type set struct { + log logging.Logger + numPolls prometheus.Gauge + factory Factory + polls map[uint32]Poll +} + +// NewSet returns a new empty set of polls +func NewSet( + factory Factory, + log logging.Logger, + namespace string, + registerer prometheus.Registerer, +) Set { + numPolls := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: namespace, + Name: "polls", + Help: "Number of pending network polls", + }) + if err := registerer.Register(numPolls); err != nil { + log.Error("failed to register av_polls statistics due to %s", err) + } + + return &set{ + log: log, + numPolls: numPolls, + factory: factory, + polls: make(map[uint32]Poll), + } +} + +// Add to the current set of polls +// Returns true if the poll was registered correctly and the network sample +// should be made. 
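+//
+// A rough usage sketch of the whole set (identifier names here are
+// illustrative, error handling elided):
+//
+//    s := NewSet(factory, log, namespace, registerer)
+//    if s.Add(requestID, vdrs) {
+//        // send the query to vdrs
+//    }
+//    // on a chit:
+//    if bag, finished := s.Vote(requestID, vdr, blkID); finished {
+//        // hand bag to consensus
+//    }
+//    // on a failed query:
+//    if bag, finished := s.Drop(requestID, vdr); finished {
+//        // hand bag to consensus
+//    }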
+func (s *set) Add(requestID uint32, vdrs ids.ShortSet) bool { + if _, exists := s.polls[requestID]; exists { + s.log.Debug("dropping poll due to duplicated requestID: %d", requestID) + return false + } + + s.log.Verbo("creating poll with requestID %d and validators %s", + requestID, + vdrs) + + s.polls[requestID] = s.factory.New(vdrs) // create the new poll + s.numPolls.Inc() // increase the metrics + return true +} + +// Vote registers the connections response to a query for [id]. If there was no +// query, or the response has already be registered, nothing is performed. +func (s *set) Vote( + requestID uint32, + vdr ids.ShortID, + vote ids.ID, +) (ids.Bag, bool) { + poll, exists := s.polls[requestID] + if !exists { + s.log.Verbo("dropping vote from %s to an unknown poll with requestID: %d", + vdr, + requestID) + return ids.Bag{}, false + } + + s.log.Verbo("processing vote from %s in the poll with requestID: %d with the vote %s", + vdr, + requestID, + vote) + + poll.Vote(vdr, vote) + if !poll.Finished() { + return ids.Bag{}, false + } + + s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) + + delete(s.polls, requestID) // remove the poll from the current set + s.numPolls.Dec() // decrease the metrics + return poll.Result(), true +} + +// Drop registers the connections response to a query for [id]. If there was no +// query, or the response has already be registered, nothing is performed. +func (s *set) Drop(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { + poll, exists := s.polls[requestID] + if !exists { + s.log.Verbo("dropping vote from %s to an unknown poll with requestID: %d", + vdr, + requestID) + return ids.Bag{}, false + } + + s.log.Verbo("processing dropped vote from %s in the poll with requestID: %d", + vdr, + requestID) + + poll.Drop(vdr) + if !poll.Finished() { + return ids.Bag{}, false + } + + s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) + + delete(s.polls, requestID) // remove the poll from the current set + s.numPolls.Dec() // decrease the metrics + return poll.Result(), true +} + +// Len returns the number of outstanding polls +func (s *set) Len() int { return len(s.polls) } + +func (s *set) String() string { + sb := strings.Builder{} + sb.WriteString(fmt.Sprintf("current polls: (Size = %d)", len(s.polls))) + for requestID, poll := range s.polls { + sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll)) + } + return sb.String() +} diff --git a/snow/engine/snowman/poll/set_test.go b/snow/engine/snowman/poll/set_test.go new file mode 100644 index 0000000..316aaf6 --- /dev/null +++ b/snow/engine/snowman/poll/set_test.go @@ -0,0 +1,132 @@ +// (c) 2019-2020, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
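+//
+// The tests below cover both ways a snowman poll can complete: through Vote,
+// where the returned ids.Bag carries the recorded votes, and through Drop,
+// where a poll whose remaining validators all failed to respond finishes with
+// an empty result.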
+ +package poll + +import ( + "testing" + + "github.com/ava-labs/gecko/ids" + "github.com/ava-labs/gecko/utils/logging" + "github.com/prometheus/client_golang/prometheus" +) + +func TestNewSetErrorOnMetrics(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + + registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "polls", + })) + + _ = NewSet(factory, log, namespace, registerer) +} + +func TestCreateAndFinishSuccessfulPoll(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + s := NewSet(factory, log, namespace, registerer) + + vtxID := ids.NewID([32]byte{1}) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + if s.Len() != 0 { + t.Fatalf("Shouldn't have any active polls yet") + } else if !s.Add(0, vdrs) { + t.Fatalf("Should have been able to add a new poll") + } else if s.Len() != 1 { + t.Fatalf("Should only have one active poll") + } else if s.Add(0, vdrs) { + t.Fatalf("Shouldn't have been able to add a duplicated poll") + } else if s.Len() != 1 { + t.Fatalf("Should only have one active poll") + } else if _, finished := s.Vote(1, vdr1, vtxID); finished { + t.Fatalf("Shouldn't have been able to finish a non-existant poll") + } else if _, finished := s.Vote(0, vdr1, vtxID); finished { + t.Fatalf("Shouldn't have been able to finish an ongoing poll") + } else if _, finished := s.Vote(0, vdr1, vtxID); finished { + t.Fatalf("Should have dropped a duplicated poll") + } else if result, finished := s.Vote(0, vdr2, vtxID); !finished { + t.Fatalf("Should have finished the") + } else if list := result.List(); len(list) != 1 { + t.Fatalf("Wrong number of vertices returned") + } else if retVtxID := list[0]; !retVtxID.Equals(vtxID) { + t.Fatalf("Wrong vertex returned") + } else if result.Count(vtxID) != 2 { + t.Fatalf("Wrong number of votes returned") + } +} + +func TestCreateAndFinishFailedPoll(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + s := NewSet(factory, log, namespace, registerer) + + vdr1 := ids.NewShortID([20]byte{1}) + vdr2 := ids.NewShortID([20]byte{2}) // k = 2 + + vdrs := ids.ShortSet{} + vdrs.Add( + vdr1, + vdr2, + ) + + if s.Len() != 0 { + t.Fatalf("Shouldn't have any active polls yet") + } else if !s.Add(0, vdrs) { + t.Fatalf("Should have been able to add a new poll") + } else if s.Len() != 1 { + t.Fatalf("Should only have one active poll") + } else if s.Add(0, vdrs) { + t.Fatalf("Shouldn't have been able to add a duplicated poll") + } else if s.Len() != 1 { + t.Fatalf("Should only have one active poll") + } else if _, finished := s.Drop(1, vdr1); finished { + t.Fatalf("Shouldn't have been able to finish a non-existant poll") + } else if _, finished := s.Drop(0, vdr1); finished { + t.Fatalf("Shouldn't have been able to finish an ongoing poll") + } else if _, finished := s.Drop(0, vdr1); finished { + t.Fatalf("Should have dropped a duplicated poll") + } else if result, finished := s.Drop(0, vdr2); !finished { + t.Fatalf("Should have finished the") + } else if list := result.List(); len(list) != 0 { + t.Fatalf("Wrong number of vertices returned") + } +} + +func TestSetString(t *testing.T) { + factory := NewNoEarlyTermFactory() + log := logging.NoLog{} + namespace := "" + registerer := prometheus.NewRegistry() + 
s := NewSet(factory, log, namespace, registerer) + + vdr1 := ids.NewShortID([20]byte{1}) // k = 1 + + vdrs := ids.ShortSet{} + vdrs.Add(vdr1) + + expected := "current polls: (Size = 1)\n" + + " 0: waiting on {6HgC8KRBEhXYbF4riJyJFLSHt37UNuRt}" + if !s.Add(0, vdrs) { + t.Fatalf("Should have been able to add a new poll") + } else if str := s.String(); expected != str { + t.Fatalf("Set return wrong string, Expected:\n%s\nReturned:\n%s", + expected, + str) + } +} diff --git a/snow/engine/snowman/polls.go b/snow/engine/snowman/polls.go deleted file mode 100644 index 6765ff7..0000000 --- a/snow/engine/snowman/polls.go +++ /dev/null @@ -1,115 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowman - -import ( - "fmt" - "strings" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/utils/logging" - "github.com/prometheus/client_golang/prometheus" -) - -type polls struct { - log logging.Logger - numPolls prometheus.Gauge - alpha int - m map[uint32]poll -} - -// Add to the current set of polls -// Returns true if the poll was registered correctly and the network sample -// should be made. -func (p *polls) Add(requestID uint32, vdrs ids.ShortSet) bool { - poll, exists := p.m[requestID] - if !exists { - poll.alpha = p.alpha - poll.polled = vdrs - p.m[requestID] = poll - - p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics - } - return !exists -} - -// Vote registers the connections response to a query for [id]. If there was no -// query, or the response has already be registered, nothing is performed. -func (p *polls) Vote(requestID uint32, vdr ids.ShortID, vote ids.ID) (ids.Bag, bool) { - p.log.Verbo("[polls.Vote] Vote: requestID: %d. validatorID: %s. Vote: %s", requestID, vdr, vote) - poll, exists := p.m[requestID] - if !exists { - return ids.Bag{}, false - } - poll.Vote(vote, vdr) - if poll.Finished() { - delete(p.m, requestID) - p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics - return poll.votes, true - } - p.m[requestID] = poll - return ids.Bag{}, false -} - -// CancelVote registers the connections failure to respond to a query for [id]. -func (p *polls) CancelVote(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { - p.log.Verbo("CancelVote received. requestID: %d. validatorID: %s. 
Vote: %s", requestID, vdr) - poll, exists := p.m[requestID] - if !exists { - return ids.Bag{}, false - } - - poll.CancelVote(vdr) - if poll.Finished() { - delete(p.m, requestID) - p.numPolls.Set(float64(len(p.m))) // Tracks performance statistics - return poll.votes, true - } - p.m[requestID] = poll - return ids.Bag{}, false -} - -func (p *polls) String() string { - sb := strings.Builder{} - - sb.WriteString(fmt.Sprintf("Current polls: (Size = %d)", len(p.m))) - for requestID, poll := range p.m { - sb.WriteString(fmt.Sprintf("\n %d: %s", requestID, poll)) - } - - return sb.String() -} - -// poll represents the current state of a network poll for a block -type poll struct { - alpha int - votes ids.Bag - polled ids.ShortSet -} - -// Vote registers a vote for this poll -func (p *poll) CancelVote(vdr ids.ShortID) { p.polled.Remove(vdr) } - -// Vote registers a vote for this poll -func (p *poll) Vote(vote ids.ID, vdr ids.ShortID) { - if p.polled.Contains(vdr) { - p.polled.Remove(vdr) - p.votes.Add(vote) - } -} - -// Finished returns true if the poll has completed, with no more required -// responses -func (p poll) Finished() bool { - remaining := p.polled.Len() - received := p.votes.Len() - _, freq := p.votes.Mode() - return remaining == 0 || // All k nodes responded - freq >= p.alpha || // An alpha majority has returned - received+remaining < p.alpha // An alpha majority can never return -} - -func (p poll) String() string { - return fmt.Sprintf("Waiting on %d chits from %s", p.polled.Len(), p.polled) -} diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index 0a89dc4..ab4a881 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -12,6 +12,7 @@ import ( "github.com/ava-labs/gecko/snow/choices" "github.com/ava-labs/gecko/snow/consensus/snowman" "github.com/ava-labs/gecko/snow/engine/common" + "github.com/ava-labs/gecko/snow/engine/snowman/poll" "github.com/ava-labs/gecko/snow/events" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/wrappers" @@ -30,7 +31,7 @@ type Transitive struct { bootstrapper // track outstanding preference requests - polls polls + polls poll.Set // blocks that have outstanding get requests blkReqs common.Requests @@ -64,10 +65,12 @@ func (t *Transitive) Initialize(config Config) error { t.onFinished = t.finishBootstrapping - t.polls.log = config.Context.Log - t.polls.numPolls = t.numPolls - t.polls.alpha = t.Params.Alpha - t.polls.m = make(map[uint32]poll) + factory := poll.NewEarlyTermNoTraversalFactory(int(config.Params.Alpha)) + t.polls = poll.NewSet(factory, + config.Context.Log, + config.Params.Namespace, + config.Params.Metrics, + ) return t.bootstrapper.Initialize(config.BootstrapConfig) } @@ -409,7 +412,7 @@ func (t *Transitive) repoll() { // propagate the most likely branch as quickly as possible prefID := t.Consensus.Preference() - for i := len(t.polls.m); i < t.Params.ConcurrentRepolls; i++ { + for i := t.polls.Len(); i < t.Params.ConcurrentRepolls; i++ { t.pullSample(prefID) } } diff --git a/snow/engine/snowman/transitive_test.go b/snow/engine/snowman/transitive_test.go index 8c5f9d1..e9571af 100644 --- a/snow/engine/snowman/transitive_test.go +++ b/snow/engine/snowman/transitive_test.go @@ -810,13 +810,13 @@ func TestVoteCanceling(t *testing.T) { te.insert(blk) - if len(te.polls.m) != 1 { + if te.polls.Len() != 1 { t.Fatalf("Shouldn't have finished blocking issue") } te.QueryFailed(vdr0.ID(), *queryRequestID) - if len(te.polls.m) != 1 { + if te.polls.Len() != 1 { 
t.Fatalf("Shouldn't have finished blocking issue") } diff --git a/snow/engine/snowman/voter.go b/snow/engine/snowman/voter.go index bd15831..5a3ca87 100644 --- a/snow/engine/snowman/voter.go +++ b/snow/engine/snowman/voter.go @@ -32,7 +32,7 @@ func (v *voter) Update() { results := ids.Bag{} finished := false if v.response.IsZero() { - results, finished = v.t.polls.CancelVote(v.requestID, v.vdr) + results, finished = v.t.polls.Drop(v.requestID, v.vdr) } else { results, finished = v.t.polls.Vote(v.requestID, v.vdr, v.response) } From 7a2a7f0add5921065e4fac8f8631699513efc21c Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 2020 19:44:35 -0400 Subject: [PATCH 130/183] fixed polls metrics --- snow/engine/avalanche/metrics.go | 11 +---------- snow/engine/avalanche/poll/set.go | 2 +- snow/engine/snowman/metrics.go | 11 +---------- snow/engine/snowman/poll/set.go | 2 +- 4 files changed, 4 insertions(+), 22 deletions(-) diff --git a/snow/engine/avalanche/metrics.go b/snow/engine/avalanche/metrics.go index 021fe38..2cd3671 100644 --- a/snow/engine/avalanche/metrics.go +++ b/snow/engine/avalanche/metrics.go @@ -14,7 +14,7 @@ type metrics struct { numBSVtx, numBSDroppedVtx, numBSTx, numBSDroppedTx prometheus.Counter - numPolls, numVtxRequests, numTxRequests, numPendingVtx prometheus.Gauge + numVtxRequests, numTxRequests, numPendingVtx prometheus.Gauge } // Initialize implements the Engine interface @@ -61,12 +61,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr Name: "av_bs_dropped_txs", Help: "Number of dropped txs", }) - m.numPolls = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "av_polls", - Help: "Number of pending network polls", - }) m.numVtxRequests = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: namespace, @@ -107,9 +101,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr if err := registerer.Register(m.numBSDroppedTx); err != nil { log.Error("Failed to register av_bs_dropped_txs statistics due to %s", err) } - if err := registerer.Register(m.numPolls); err != nil { - log.Error("Failed to register av_polls statistics due to %s", err) - } if err := registerer.Register(m.numVtxRequests); err != nil { log.Error("Failed to register av_vtx_requests statistics due to %s", err) } diff --git a/snow/engine/avalanche/poll/set.go b/snow/engine/avalanche/poll/set.go index 34a8a1a..4107a38 100644 --- a/snow/engine/avalanche/poll/set.go +++ b/snow/engine/avalanche/poll/set.go @@ -33,7 +33,7 @@ func NewSet( Help: "Number of pending network polls", }) if err := registerer.Register(numPolls); err != nil { - log.Error("failed to register av_polls statistics due to %s", err) + log.Error("failed to register polls statistics due to %s", err) } return &set{ diff --git a/snow/engine/snowman/metrics.go b/snow/engine/snowman/metrics.go index f17d360..d71697a 100644 --- a/snow/engine/snowman/metrics.go +++ b/snow/engine/snowman/metrics.go @@ -13,7 +13,7 @@ type metrics struct { numPendingRequests, numBlocked prometheus.Gauge numBootstrapped, numDropped prometheus.Counter - numPolls, numBlkRequests, numBlockedBlk prometheus.Gauge + numBlkRequests, numBlockedBlk prometheus.Gauge } // Initialize implements the Engine interface @@ -42,12 +42,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr Name: "sm_bs_dropped", Help: "Number of dropped bootstrap blocks", }) - m.numPolls = prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: namespace, - Name: "sm_polls", - 
Help: "Number of pending network polls", - }) m.numBlkRequests = prometheus.NewGauge( prometheus.GaugeOpts{ Namespace: namespace, @@ -73,9 +67,6 @@ func (m *metrics) Initialize(log logging.Logger, namespace string, registerer pr if err := registerer.Register(m.numDropped); err != nil { log.Error("Failed to register sm_bs_dropped statistics due to %s", err) } - if err := registerer.Register(m.numPolls); err != nil { - log.Error("Failed to register sm_polls statistics due to %s", err) - } if err := registerer.Register(m.numBlkRequests); err != nil { log.Error("Failed to register sm_blk_requests statistics due to %s", err) } diff --git a/snow/engine/snowman/poll/set.go b/snow/engine/snowman/poll/set.go index cded824..069d153 100644 --- a/snow/engine/snowman/poll/set.go +++ b/snow/engine/snowman/poll/set.go @@ -33,7 +33,7 @@ func NewSet( Help: "Number of pending network polls", }) if err := registerer.Register(numPolls); err != nil { - log.Error("failed to register av_polls statistics due to %s", err) + log.Error("failed to register polls statistics due to %s", err) } return &set{ From d2573be25fc2239b513fd108ea7a72d8501a623f Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 2020 20:00:54 -0400 Subject: [PATCH 131/183] Added duration metrics --- snow/engine/avalanche/poll/set.go | 33 +++++++++++++++++++---- snow/engine/avalanche/poll/set_test.go | 3 +++ snow/engine/snowman/poll/set.go | 36 +++++++++++++++++++++----- snow/engine/snowman/poll/set_test.go | 3 +++ 4 files changed, 64 insertions(+), 11 deletions(-) diff --git a/snow/engine/avalanche/poll/set.go b/snow/engine/avalanche/poll/set.go index 4107a38..24c93fb 100644 --- a/snow/engine/avalanche/poll/set.go +++ b/snow/engine/avalanche/poll/set.go @@ -6,18 +6,26 @@ package poll import ( "fmt" "strings" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" ) +type poll struct { + Poll + start time.Time +} + type set struct { log logging.Logger numPolls prometheus.Gauge + durPolls prometheus.Histogram factory Factory - polls map[uint32]Poll + polls map[uint32]poll } // NewSet returns a new empty set of polls @@ -36,11 +44,22 @@ func NewSet( log.Error("failed to register polls statistics due to %s", err) } + durPolls := prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "poll_duration", + Help: "Length of time the poll existed in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + if err := registerer.Register(durPolls); err != nil { + log.Error("failed to register poll_duration statistics due to %s", err) + } + return &set{ log: log, numPolls: numPolls, + durPolls: durPolls, factory: factory, - polls: make(map[uint32]Poll), + polls: make(map[uint32]poll), } } @@ -57,8 +76,11 @@ func (s *set) Add(requestID uint32, vdrs ids.ShortSet) bool { requestID, vdrs) - s.polls[requestID] = s.factory.New(vdrs) // create the new poll - s.numPolls.Inc() // increase the metrics + s.polls[requestID] = poll{ + Poll: s.factory.New(vdrs), // create the new poll + start: time.Now(), + } + s.numPolls.Inc() // increase the metrics return true } @@ -90,7 +112,8 @@ func (s *set) Vote( s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.numPolls.Dec() // decrease the metrics + s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.numPolls.Dec() // decrease the metrics return poll.Result(), 
true } diff --git a/snow/engine/avalanche/poll/set_test.go b/snow/engine/avalanche/poll/set_test.go index 496f993..0f0e38e 100644 --- a/snow/engine/avalanche/poll/set_test.go +++ b/snow/engine/avalanche/poll/set_test.go @@ -20,6 +20,9 @@ func TestNewSetErrorOnMetrics(t *testing.T) { registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "polls", })) + registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "poll_duration", + })) _ = NewSet(factory, log, namespace, registerer) } diff --git a/snow/engine/snowman/poll/set.go b/snow/engine/snowman/poll/set.go index 069d153..25e0e68 100644 --- a/snow/engine/snowman/poll/set.go +++ b/snow/engine/snowman/poll/set.go @@ -6,18 +6,26 @@ package poll import ( "fmt" "strings" + "time" "github.com/prometheus/client_golang/prometheus" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/utils/logging" + "github.com/ava-labs/gecko/utils/timer" ) +type poll struct { + Poll + start time.Time +} + type set struct { log logging.Logger numPolls prometheus.Gauge + durPolls prometheus.Histogram factory Factory - polls map[uint32]Poll + polls map[uint32]poll } // NewSet returns a new empty set of polls @@ -36,11 +44,22 @@ func NewSet( log.Error("failed to register polls statistics due to %s", err) } + durPolls := prometheus.NewHistogram(prometheus.HistogramOpts{ + Namespace: namespace, + Name: "poll_duration", + Help: "Length of time the poll existed in milliseconds", + Buckets: timer.MillisecondsBuckets, + }) + if err := registerer.Register(durPolls); err != nil { + log.Error("failed to register poll_duration statistics due to %s", err) + } + return &set{ log: log, numPolls: numPolls, + durPolls: durPolls, factory: factory, - polls: make(map[uint32]Poll), + polls: make(map[uint32]poll), } } @@ -57,8 +76,11 @@ func (s *set) Add(requestID uint32, vdrs ids.ShortSet) bool { requestID, vdrs) - s.polls[requestID] = s.factory.New(vdrs) // create the new poll - s.numPolls.Inc() // increase the metrics + s.polls[requestID] = poll{ + Poll: s.factory.New(vdrs), // create the new poll + start: time.Now(), + } + s.numPolls.Inc() // increase the metrics return true } @@ -90,7 +112,8 @@ func (s *set) Vote( s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.numPolls.Dec() // decrease the metrics + s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.numPolls.Dec() // decrease the metrics return poll.Result(), true } @@ -117,7 +140,8 @@ func (s *set) Drop(requestID uint32, vdr ids.ShortID) (ids.Bag, bool) { s.log.Verbo("poll with requestID %d finished as %s", requestID, poll) delete(s.polls, requestID) // remove the poll from the current set - s.numPolls.Dec() // decrease the metrics + s.durPolls.Observe(float64(time.Now().Sub(poll.start).Milliseconds())) + s.numPolls.Dec() // decrease the metrics return poll.Result(), true } diff --git a/snow/engine/snowman/poll/set_test.go b/snow/engine/snowman/poll/set_test.go index 316aaf6..2ccf0f3 100644 --- a/snow/engine/snowman/poll/set_test.go +++ b/snow/engine/snowman/poll/set_test.go @@ -20,6 +20,9 @@ func TestNewSetErrorOnMetrics(t *testing.T) { registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ Name: "polls", })) + registerer.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "poll_duration", + })) _ = NewSet(factory, log, namespace, registerer) } From 8865eabec73aa152c2fefb75cd5a13c7c1ff0f33 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 
2020 21:26:50 -0400 Subject: [PATCH 132/183] Added tests for error checking in snowman --- snow/consensus/snowman/block_test.go | 7 +- snow/consensus/snowman/consensus_test.go | 411 +++++++++++++++++------ 2 files changed, 313 insertions(+), 105 deletions(-) diff --git a/snow/consensus/snowman/block_test.go b/snow/consensus/snowman/block_test.go index e03e6c2..d7db116 100644 --- a/snow/consensus/snowman/block_test.go +++ b/snow/consensus/snowman/block_test.go @@ -17,6 +17,7 @@ type TestBlock struct { height int status choices.Status bytes []byte + err error } func (b *TestBlock) Parent() Block { return b.parent } @@ -27,16 +28,16 @@ func (b *TestBlock) Accept() error { return errors.New("Dis-agreement") } b.status = choices.Accepted - return nil + return b.err } func (b *TestBlock) Reject() error { if b.status.Decided() && b.status != choices.Rejected { return errors.New("Dis-agreement") } b.status = choices.Rejected - return nil + return b.err } -func (b *TestBlock) Verify() error { return nil } +func (b *TestBlock) Verify() error { return b.err } func (b *TestBlock) Bytes() []byte { return b.bytes } type sortBlocks []*TestBlock diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index f64b4d8..aa8836c 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -4,6 +4,7 @@ package snowman import ( + "errors" "math/rand" "testing" @@ -42,6 +43,10 @@ var ( MetricsProcessingErrorTest, MetricsAcceptedErrorTest, MetricsRejectedErrorTest, + ErrorOnInitialRejectionTest, + ErrorOnAcceptTest, + ErrorOnRejectSiblingTest, + ErrorOnTransitiveRejectionTest, RandomizedConsistencyTest, } ) @@ -101,7 +106,9 @@ func AddToTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } if pref := sm.Preference(); !pref.Equals(block.id) { t.Fatalf("Wrong preference. Expected %s, got %s", block.id, pref) @@ -133,7 +140,9 @@ func AddToNonTailTest(t *testing.T, factory Factory) { } // Adding to the previous preference will update the preference - sm.Add(firstBlock) + if err := sm.Add(firstBlock); err != nil { + t.Fatal(err) + } if pref := sm.Preference(); !pref.Equals(firstBlock.id) { t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref) @@ -141,7 +150,9 @@ func AddToNonTailTest(t *testing.T, factory Factory) { // Adding to something other than the previous preference won't update the // preference - sm.Add(secondBlock) + if err := sm.Add(secondBlock); err != nil { + t.Fatal(err) + } if pref := sm.Preference(); !pref.Equals(firstBlock.id) { t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref) @@ -171,7 +182,9 @@ func AddToUnknownTest(t *testing.T, factory Factory) { // Adding a block with an unknown parent means the parent must have already // been rejected. Therefore the block should be immediately rejected - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } if pref := sm.Preference(); !pref.Equals(GenesisID) { t.Fatalf("Wrong preference. 
Expected %s, got %s", GenesisID, pref) @@ -269,7 +282,9 @@ func IssuedIssuedTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } if !sm.Issued(block) { t.Fatalf("Should have marked a pending block as having been issued") @@ -296,12 +311,15 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } votes := ids.Bag{} votes.Add(block.id) - - sm.RecordPoll(votes) + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } if pref := sm.Preference(); !pref.Equals(block.id) { t.Fatalf("Preference returned the wrong block") @@ -309,11 +327,9 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { t.Fatalf("Snowman instance finalized too soon") } else if status := block.Status(); status != choices.Processing { t.Fatalf("Block's status changed unexpectedly") - } - - sm.RecordPoll(votes) - - if pref := sm.Preference(); !pref.Equals(block.id) { + } else if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if pref := sm.Preference(); !pref.Equals(block.id) { t.Fatalf("Preference returned the wrong block") } else if !sm.Finalized() { t.Fatalf("Snowman instance didn't finalize") @@ -347,15 +363,19 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(firstBlock) - sm.Add(secondBlock) + if err := sm.Add(firstBlock); err != nil { + t.Fatal(err) + } + if err := sm.Add(secondBlock); err != nil { + t.Fatal(err) + } votes := ids.Bag{} votes.Add(firstBlock.id) - sm.RecordPoll(votes) - - if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if pref := sm.Preference(); !pref.Equals(firstBlock.id) { t.Fatalf("Preference returned the wrong block") } else if sm.Finalized() { t.Fatalf("Snowman instance finalized too soon") @@ -363,11 +383,9 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { t.Fatalf("Block's status changed unexpectedly") } else if status := secondBlock.Status(); status != choices.Processing { t.Fatalf("Block's status changed unexpectedly") - } - - sm.RecordPoll(votes) - - if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + } else if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if pref := sm.Preference(); !pref.Equals(firstBlock.id) { t.Fatalf("Preference returned the wrong block") } else if !sm.Finalized() { t.Fatalf("Snowman instance didn't finalize") @@ -394,9 +412,9 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { votes := ids.Bag{} votes.Add(GenesisID) - sm.RecordPoll(votes) - - if !sm.Finalized() { + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if !sm.Finalized() { t.Fatalf("Consensus should still be finalized") } else if pref := sm.Preference(); !GenesisID.Equals(pref) { t.Fatalf("Wrong preference listed") @@ -433,9 +451,15 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block0) - sm.Add(block1) - sm.Add(block2) + if err := sm.Add(block0); err != nil { + t.Fatal(err) + } + if err := sm.Add(block1); err != nil { + t.Fatal(err) + } + if err := sm.Add(block2); err != nil { + t.Fatal(err) + } // Current graph structure: // G @@ -447,7 +471,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { votes := ids.Bag{} votes.Add(block0.id) - 
sm.RecordPoll(votes) + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } // Current graph structure: // 0 @@ -457,9 +483,7 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { t.Fatalf("Finalized too late") } else if pref := sm.Preference(); !block0.id.Equals(pref) { t.Fatalf("Wrong preference listed") - } - - if status := block0.Status(); status != choices.Accepted { + } else if status := block0.Status(); status != choices.Accepted { t.Fatalf("Wrong status returned") } else if status := block1.Status(); status != choices.Rejected { t.Fatalf("Wrong status returned") @@ -503,10 +527,18 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block0) - sm.Add(block1) - sm.Add(block2) - sm.Add(block3) + if err := sm.Add(block0); err != nil { + t.Fatal(err) + } + if err := sm.Add(block1); err != nil { + t.Fatal(err) + } + if err := sm.Add(block2); err != nil { + t.Fatal(err) + } + if err := sm.Add(block3); err != nil { + t.Fatal(err) + } // Current graph structure: // G @@ -517,26 +549,24 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { votesFor2 := ids.Bag{} votesFor2.Add(block2.id) - sm.RecordPoll(votesFor2) - - if sm.Finalized() { + if err := sm.RecordPoll(votesFor2); err != nil { + t.Fatal(err) + } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); !block2.id.Equals(pref) { t.Fatalf("Wrong preference listed") } emptyVotes := ids.Bag{} - sm.RecordPoll(emptyVotes) - - if sm.Finalized() { + if err := sm.RecordPoll(emptyVotes); err != nil { + t.Fatal(err) + } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); !block2.id.Equals(pref) { t.Fatalf("Wrong preference listed") - } - - sm.RecordPoll(votesFor2) - - if sm.Finalized() { + } else if err := sm.RecordPoll(votesFor2); err != nil { + t.Fatal(err) + } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); !block2.id.Equals(pref) { t.Fatalf("Wrong preference listed") @@ -544,23 +574,19 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { votesFor3 := ids.Bag{} votesFor3.Add(block3.id) - sm.RecordPoll(votesFor3) - - if sm.Finalized() { + if err := sm.RecordPoll(votesFor3); err != nil { + t.Fatal(err) + } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); !block2.id.Equals(pref) { t.Fatalf("Wrong preference listed") - } - - sm.RecordPoll(votesFor3) - - if !sm.Finalized() { + } else if err := sm.RecordPoll(votesFor3); err != nil { + t.Fatal(err) + } else if !sm.Finalized() { t.Fatalf("Finalized too late") } else if pref := sm.Preference(); !block3.id.Equals(pref) { t.Fatalf("Wrong preference listed") - } - - if status := block0.Status(); status != choices.Rejected { + } else if status := block0.Status(); status != choices.Rejected { t.Fatalf("Wrong status returned") } else if status := block1.Status(); status != choices.Accepted { t.Fatalf("Wrong status returned") @@ -592,19 +618,23 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { } unknownBlockID := ids.Empty.Prefix(2) - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } validVotes := ids.Bag{} validVotes.Add(block.id) - sm.RecordPoll(validVotes) + if err := sm.RecordPoll(validVotes); err != nil { + t.Fatal(err) + } invalidVotes := ids.Bag{} invalidVotes.Add(unknownBlockID) - sm.RecordPoll(invalidVotes) - - sm.RecordPoll(validVotes) - - if 
sm.Finalized() { + if err := sm.RecordPoll(invalidVotes); err != nil { + t.Fatal(err) + } else if err := sm.RecordPoll(validVotes); err != nil { + t.Fatal(err) + } else if sm.Finalized() { t.Fatalf("Finalized too early") } else if pref := sm.Preference(); !block.id.Equals(pref) { t.Fatalf("Wrong preference listed") @@ -651,11 +681,21 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block0) - sm.Add(block1) - sm.Add(block2) - sm.Add(block3) - sm.Add(block4) + if err := sm.Add(block0); err != nil { + t.Fatal(err) + } + if err := sm.Add(block1); err != nil { + t.Fatal(err) + } + if err := sm.Add(block2); err != nil { + t.Fatal(err) + } + if err := sm.Add(block3); err != nil { + t.Fatal(err) + } + if err := sm.Add(block4); err != nil { + t.Fatal(err) + } // Current graph structure: // G @@ -668,10 +708,14 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { // Tail = 2 votes0_2_4 := ids.Bag{} - votes0_2_4.Add(block0.id) - votes0_2_4.Add(block2.id) - votes0_2_4.Add(block4.id) - sm.RecordPoll(votes0_2_4) + votes0_2_4.Add( + block0.id, + block2.id, + block4.id, + ) + if err := sm.RecordPoll(votes0_2_4); err != nil { + t.Fatal(err) + } // Current graph structure: // 0 @@ -699,7 +743,9 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { dep2_2_2 := ids.Bag{} dep2_2_2.AddCount(block2.id, 3) - sm.RecordPoll(dep2_2_2) + if err := sm.RecordPoll(dep2_2_2); err != nil { + t.Fatal(err) + } // Current graph structure: // 2 @@ -757,20 +803,26 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block0) - sm.Add(block1) + if err := sm.Add(block0); err != nil { + t.Fatal(err) + } + if err := sm.Add(block1); err != nil { + t.Fatal(err) + } votes0 := ids.Bag{} votes0.Add(block0.id) - sm.RecordPoll(votes0) - - sm.Add(block2) + if err := sm.RecordPoll(votes0); err != nil { + t.Fatal(err) + } else if err := sm.Add(block2); err != nil { + t.Fatal(err) + } // dep2 is already rejected. - sm.Add(block3) - - if status := block0.Status(); status == choices.Accepted { + if err := sm.Add(block3); err != nil { + t.Fatal(err) + } else if status := block0.Status(); status == choices.Accepted { t.Fatalf("Shouldn't be accepted yet") } @@ -778,9 +830,9 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { // dep0. Because dep2 is already rejected, this will accept dep0. 
votes3 := ids.Bag{} votes3.Add(block3.id) - sm.RecordPoll(votes3) - - if !sm.Finalized() { + if err := sm.RecordPoll(votes3); err != nil { + t.Fatal(err) + } else if !sm.Finalized() { t.Fatalf("Finalized too late") } else if status := block0.Status(); status != choices.Accepted { t.Fatalf("Should be accepted") @@ -818,14 +870,15 @@ func MetricsProcessingErrorTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } votes := ids.Bag{} votes.Add(block.id) - - sm.RecordPoll(votes) - - if !sm.Finalized() { + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if !sm.Finalized() { t.Fatalf("Snowman instance didn't finalize") } } @@ -861,14 +914,15 @@ func MetricsAcceptedErrorTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } votes := ids.Bag{} votes.Add(block.id) - - sm.RecordPoll(votes) - - if !sm.Finalized() { + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if !sm.Finalized() { t.Fatalf("Snowman instance didn't finalize") } } @@ -904,18 +958,171 @@ func MetricsRejectedErrorTest(t *testing.T, factory Factory) { status: choices.Processing, } - sm.Add(block) + if err := sm.Add(block); err != nil { + t.Fatal(err) + } votes := ids.Bag{} votes.Add(block.id) - - sm.RecordPoll(votes) - - if !sm.Finalized() { + if err := sm.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if !sm.Finalized() { t.Fatalf("Snowman instance didn't finalize") } } +func ErrorOnInitialRejectionTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + sm.Initialize(ctx, params, GenesisID) + + rejectedBlock := &TestBlock{ + id: ids.Empty.Prefix(1), + status: choices.Rejected, + } + + block := &TestBlock{ + parent: rejectedBlock, + id: ids.Empty.Prefix(2), + status: choices.Processing, + err: errors.New(""), + } + + if err := sm.Add(block); err == nil { + t.Fatalf("Should have errored on rejecting the rejectable block") + } +} + +func ErrorOnAcceptTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + sm.Initialize(ctx, params, GenesisID) + + block := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + err: errors.New(""), + } + + if err := sm.Add(block); err != nil { + t.Fatal(err) + } + + votes := ids.Bag{} + votes.Add(block.id) + if err := sm.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on accepted the block") + } +} + +func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + sm.Initialize(ctx, params, GenesisID) + + block0 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + block1 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(2), + status: choices.Processing, + err: errors.New(""), + } + + if err := sm.Add(block0); err != nil { + t.Fatal(err) + } else if err := sm.Add(block1); err != nil { 
+ t.Fatal(err) + } + + votes := ids.Bag{} + votes.Add(block0.id) + if err := sm.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on rejecting the block's sibling") + } +} + +func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { + sm := factory.New() + + ctx := snow.DefaultContextTest() + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + } + + sm.Initialize(ctx, params, GenesisID) + + block0 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(1), + status: choices.Processing, + } + block1 := &TestBlock{ + parent: Genesis, + id: ids.Empty.Prefix(2), + status: choices.Processing, + } + block2 := &TestBlock{ + parent: block1, + id: ids.Empty.Prefix(3), + status: choices.Processing, + err: errors.New(""), + } + + if err := sm.Add(block0); err != nil { + t.Fatal(err) + } else if err := sm.Add(block1); err != nil { + t.Fatal(err) + } else if err := sm.Add(block2); err != nil { + t.Fatal(err) + } + + votes := ids.Bag{} + votes.Add(block0.id) + if err := sm.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on transitively rejecting the block") + } +} + func RandomizedConsistencyTest(t *testing.T, factory Factory) { numColors := 50 numNodes := 100 From d923d5c0f9710e6a1f8dd621fab33f13b68f7f45 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 2020 21:30:29 -0400 Subject: [PATCH 133/183] Cleaned up tests --- snow/consensus/snowman/consensus_test.go | 63 +++++++----------------- 1 file changed, 19 insertions(+), 44 deletions(-) diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index aa8836c..3d3fe7f 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -76,11 +76,9 @@ func InitializeTest(t *testing.T, factory Factory) { if p := sm.Parameters(); p != params { t.Fatalf("Wrong returned parameters") - } - if pref := sm.Preference(); !pref.Equals(GenesisID) { + } else if pref := sm.Preference(); !pref.Equals(GenesisID) { t.Fatalf("Wrong preference returned") - } - if !sm.Finalized() { + } else if !sm.Finalized() { t.Fatalf("Wrong should have marked the instance as being finalized") } } @@ -108,9 +106,7 @@ func AddToTailTest(t *testing.T, factory Factory) { // Adding to the previous preference will update the preference if err := sm.Add(block); err != nil { t.Fatal(err) - } - - if pref := sm.Preference(); !pref.Equals(block.id) { + } else if pref := sm.Preference(); !pref.Equals(block.id) { t.Fatalf("Wrong preference. Expected %s, got %s", block.id, pref) } } @@ -142,9 +138,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { // Adding to the previous preference will update the preference if err := sm.Add(firstBlock); err != nil { t.Fatal(err) - } - - if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + } else if pref := sm.Preference(); !pref.Equals(firstBlock.id) { t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref) } @@ -152,9 +146,7 @@ func AddToNonTailTest(t *testing.T, factory Factory) { // preference if err := sm.Add(secondBlock); err != nil { t.Fatal(err) - } - - if pref := sm.Preference(); !pref.Equals(firstBlock.id) { + } else if pref := sm.Preference(); !pref.Equals(firstBlock.id) { t.Fatalf("Wrong preference. Expected %s, got %s", firstBlock.id, pref) } } @@ -184,9 +176,7 @@ func AddToUnknownTest(t *testing.T, factory Factory) { // been rejected. 
Therefore the block should be immediately rejected if err := sm.Add(block); err != nil { t.Fatal(err) - } - - if pref := sm.Preference(); !pref.Equals(GenesisID) { + } else if pref := sm.Preference(); !pref.Equals(GenesisID) { t.Fatalf("Wrong preference. Expected %s, got %s", GenesisID, pref) } else if status := block.Status(); status != choices.Rejected { t.Fatalf("Should have rejected the block") @@ -284,9 +274,7 @@ func IssuedIssuedTest(t *testing.T, factory Factory) { if err := sm.Add(block); err != nil { t.Fatal(err) - } - - if !sm.Issued(block) { + } else if !sm.Issued(block) { t.Fatalf("Should have marked a pending block as having been issued") } } @@ -319,9 +307,7 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { votes.Add(block.id) if err := sm.RecordPoll(votes); err != nil { t.Fatal(err) - } - - if pref := sm.Preference(); !pref.Equals(block.id) { + } else if pref := sm.Preference(); !pref.Equals(block.id) { t.Fatalf("Preference returned the wrong block") } else if sm.Finalized() { t.Fatalf("Snowman instance finalized too soon") @@ -365,8 +351,7 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { if err := sm.Add(firstBlock); err != nil { t.Fatal(err) - } - if err := sm.Add(secondBlock); err != nil { + } else if err := sm.Add(secondBlock); err != nil { t.Fatal(err) } @@ -453,11 +438,9 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { if err := sm.Add(block0); err != nil { t.Fatal(err) - } - if err := sm.Add(block1); err != nil { + } else if err := sm.Add(block1); err != nil { t.Fatal(err) - } - if err := sm.Add(block2); err != nil { + } else if err := sm.Add(block2); err != nil { t.Fatal(err) } @@ -529,14 +512,11 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { if err := sm.Add(block0); err != nil { t.Fatal(err) - } - if err := sm.Add(block1); err != nil { + } else if err := sm.Add(block1); err != nil { t.Fatal(err) - } - if err := sm.Add(block2); err != nil { + } else if err := sm.Add(block2); err != nil { t.Fatal(err) - } - if err := sm.Add(block3); err != nil { + } else if err := sm.Add(block3); err != nil { t.Fatal(err) } @@ -683,17 +663,13 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { if err := sm.Add(block0); err != nil { t.Fatal(err) - } - if err := sm.Add(block1); err != nil { + } else if err := sm.Add(block1); err != nil { t.Fatal(err) - } - if err := sm.Add(block2); err != nil { + } else if err := sm.Add(block2); err != nil { t.Fatal(err) - } - if err := sm.Add(block3); err != nil { + } else if err := sm.Add(block3); err != nil { t.Fatal(err) - } - if err := sm.Add(block4); err != nil { + } else if err := sm.Add(block4); err != nil { t.Fatal(err) } @@ -805,8 +781,7 @@ func RecordPollDivergedVotingTest(t *testing.T, factory Factory) { if err := sm.Add(block0); err != nil { t.Fatal(err) - } - if err := sm.Add(block1); err != nil { + } else if err := sm.Add(block1); err != nil { t.Fatal(err) } From fb7e4910001c51da11cfe15f026265bc1a7fb014 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 2020 22:38:53 -0400 Subject: [PATCH 134/183] Set up tests for error handling in snowstorm --- snow/consensus/snowstorm/consensus_test.go | 496 ++++++++++++++------- snow/consensus/snowstorm/directed_test.go | 32 +- snow/consensus/snowstorm/input_test.go | 30 +- snow/consensus/snowstorm/test_tx.go | 4 +- 4 files changed, 351 insertions(+), 211 deletions(-) diff --git a/snow/consensus/snowstorm/consensus_test.go 
b/snow/consensus/snowstorm/consensus_test.go index 270292c..5ff46ab 100644 --- a/snow/consensus/snowstorm/consensus_test.go +++ b/snow/consensus/snowstorm/consensus_test.go @@ -4,6 +4,7 @@ package snowstorm import ( + "errors" "testing" "github.com/prometheus/client_golang/prometheus" @@ -19,6 +20,28 @@ var ( Green = &TestTx{Identifier: ids.Empty.Prefix(1)} Blue = &TestTx{Identifier: ids.Empty.Prefix(2)} Alpha = &TestTx{Identifier: ids.Empty.Prefix(3)} + + Tests = []func(*testing.T, Factory){ + MetricsTest, + ParamsTest, + IssuedTest, + LeftoverInputTest, + LowerConfidenceTest, + MiddleConfidenceTest, + IndependentTest, + VirtuousTest, + IsVirtuousTest, + QuiesceTest, + AcceptingDependencyTest, + RejectingDependencyTest, + VacuouslyAcceptedTest, + ConflictsTest, + VirtuousDependsOnRogueTest, + ErrorOnVacuouslyAcceptedTest, + ErrorOnAcceptedTest, + ErrorOnRejectingLowerConfidenceConflictTest, + ErrorOnRejectingHigherConfidenceConflictTest, + } ) // R - G - B - A @@ -46,6 +69,52 @@ func Setup() { Alpha.Reset() } +// Execute all tests against a consensus implementation +func ConsensusTest(t *testing.T, factory Factory, prefix string) { + for _, test := range Tests { + test(t, factory) + } + StringTest(t, factory, prefix) +} + +func MetricsTest(t *testing.T, factory Factory) { + Setup() + + { + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "tx_processing", + })) + graph := factory.New() + graph.Initialize(snow.DefaultContextTest(), params) + } + { + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "tx_accepted", + })) + graph := factory.New() + graph.Initialize(snow.DefaultContextTest(), params) + } + { + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, + } + params.Metrics.Register(prometheus.NewCounter(prometheus.CounterOpts{ + Name: "tx_rejected", + })) + graph := factory.New() + graph.Initialize(snow.DefaultContextTest(), params) + } +} + func ParamsTest(t *testing.T, factory Factory) { Setup() @@ -81,15 +150,13 @@ func IssuedTest(t *testing.T, factory Factory) { if issued := graph.Issued(Red); issued { t.Fatalf("Haven't issued anything yet.") - } - - graph.Add(Red) - - if issued := graph.Issued(Red); !issued { + } else if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if issued := graph.Issued(Red); !issued { t.Fatalf("Have already issued.") } - Blue.Accept() + _ = Blue.Accept() if issued := graph.Issued(Blue); !issued { t.Fatalf("Have already accepted.") @@ -106,10 +173,12 @@ func LeftoverInputTest(t *testing.T, factory Factory) { K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Green) - if prefs := graph.Preferences(); prefs.Len() != 1 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. 
Expected %s got %s", Red.ID(), prefs.List()[0]) @@ -120,15 +189,13 @@ func LeftoverInputTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - graph.RecordPoll(r) - - if prefs := graph.Preferences(); prefs.Len() != 0 { + if err := graph.RecordPoll(r); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if !graph.Finalized() { t.Fatalf("Finalized too late") - } - - if Red.Status() != choices.Accepted { + } else if Red.Status() != choices.Accepted { t.Fatalf("%s should have been accepted", Red.ID()) } else if Green.Status() != choices.Rejected { t.Fatalf("%s should have been rejected", Green.ID()) @@ -145,11 +212,14 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Green) - graph.Add(Blue) - if prefs := graph.Preferences(); prefs.Len() != 1 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if err := graph.Add(Blue); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0]) @@ -160,9 +230,9 @@ func LowerConfidenceTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - graph.RecordPoll(r) - - if prefs := graph.Preferences(); prefs.Len() != 1 { + if err := graph.RecordPoll(r); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Blue.ID()) { t.Fatalf("Wrong preference. Expected %s", Blue.ID()) @@ -181,12 +251,16 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Green) - graph.Add(Alpha) - graph.Add(Blue) - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if err := graph.Add(Alpha); err != nil { + t.Fatal(err) + } else if err := graph.Add(Blue); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s", Red.ID()) @@ -199,9 +273,9 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { r := ids.Bag{} r.SetThreshold(2) r.AddCount(Red.ID(), 2) - graph.RecordPoll(r) - - if prefs := graph.Preferences(); prefs.Len() != 1 { + if err := graph.RecordPoll(r); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Alpha.ID()) { t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) @@ -209,6 +283,7 @@ func MiddleConfidenceTest(t *testing.T, factory Factory) { t.Fatalf("Finalized too early") } } + func IndependentTest(t *testing.T, factory Factory) { Setup() @@ -219,10 +294,12 @@ func IndependentTest(t *testing.T, factory Factory) { K: 2, Alpha: 2, BetaVirtuous: 2, BetaRogue: 2, } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Alpha) - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Alpha); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s", Red.ID()) @@ -236,9 +313,9 @@ func IndependentTest(t *testing.T, factory Factory) { ra.SetThreshold(2) ra.AddCount(Red.ID(), 2) ra.AddCount(Alpha.ID(), 2) - graph.RecordPoll(ra) - - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.RecordPoll(ra); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s", Red.ID()) @@ -246,11 +323,9 @@ func IndependentTest(t *testing.T, factory Factory) { t.Fatalf("Wrong preference. Expected %s", Alpha.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") - } - - graph.RecordPoll(ra) - - if prefs := graph.Preferences(); prefs.Len() != 0 { + } else if err := graph.RecordPoll(ra); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if !graph.Finalized() { t.Fatalf("Finalized too late") @@ -267,35 +342,30 @@ func VirtuousTest(t *testing.T, factory Factory) { K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 1, } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - if virtuous := graph.Virtuous(); virtuous.Len() != 1 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if virtuous := graph.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(Red.ID()) { t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) - } - - graph.Add(Alpha) - - if virtuous := graph.Virtuous(); virtuous.Len() != 2 { + } else if err := graph.Add(Alpha); err != nil { + t.Fatal(err) + } else if virtuous := graph.Virtuous(); virtuous.Len() != 2 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(Red.ID()) { t.Fatalf("Wrong virtuous. Expected %s", Red.ID()) } else if !virtuous.Contains(Alpha.ID()) { t.Fatalf("Wrong virtuous. Expected %s", Alpha.ID()) - } - - graph.Add(Green) - - if virtuous := graph.Virtuous(); virtuous.Len() != 1 { + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if virtuous := graph.Virtuous(); virtuous.Len() != 1 { t.Fatalf("Wrong number of virtuous.") } else if !virtuous.Contains(Alpha.ID()) { t.Fatalf("Wrong virtuous. 
Expected %s", Alpha.ID()) - } - - graph.Add(Blue) - - if virtuous := graph.Virtuous(); virtuous.Len() != 0 { + } else if err := graph.Add(Blue); err != nil { + t.Fatal(err) + } else if virtuous := graph.Virtuous(); virtuous.Len() != 0 { t.Fatalf("Wrong number of virtuous.") } } @@ -319,11 +389,9 @@ func IsVirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Should be virtuous") } else if !graph.IsVirtuous(Alpha) { t.Fatalf("Should be virtuous") - } - - graph.Add(Red) - - if !graph.IsVirtuous(Red) { + } else if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if !graph.IsVirtuous(Red) { t.Fatalf("Should be virtuous") } else if graph.IsVirtuous(Green) { t.Fatalf("Should not be virtuous") @@ -331,11 +399,9 @@ func IsVirtuousTest(t *testing.T, factory Factory) { t.Fatalf("Should be virtuous") } else if !graph.IsVirtuous(Alpha) { t.Fatalf("Should be virtuous") - } - - graph.Add(Green) - - if graph.IsVirtuous(Red) { + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if graph.IsVirtuous(Red) { t.Fatalf("Should not be virtuous") } else if graph.IsVirtuous(Green) { t.Fatalf("Should not be virtuous") @@ -357,17 +423,13 @@ func QuiesceTest(t *testing.T, factory Factory) { if !graph.Quiesce() { t.Fatalf("Should quiesce") - } - - graph.Add(Red) - - if graph.Quiesce() { + } else if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if graph.Quiesce() { t.Fatalf("Shouldn't quiesce") - } - - graph.Add(Green) - - if !graph.Quiesce() { + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if !graph.Quiesce() { t.Fatalf("Should quiesce") } } @@ -390,11 +452,13 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Green) - graph.Add(purple) - - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if err := graph.Add(purple); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s", Red.ID()) @@ -410,10 +474,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { g := ids.Bag{} g.Add(Green.ID()) - - graph.RecordPoll(g) - - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.RecordPoll(g); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { t.Fatalf("Wrong preference. Expected %s", Green.ID()) @@ -429,10 +492,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { rp := ids.Bag{} rp.Add(Red.ID(), purple.ID()) - - graph.RecordPoll(rp) - - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.RecordPoll(rp); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { t.Fatalf("Wrong preference. 
Expected %s", Green.ID()) @@ -448,10 +510,9 @@ func AcceptingDependencyTest(t *testing.T, factory Factory) { r := ids.Bag{} r.Add(Red.ID()) - - graph.RecordPoll(r) - - if prefs := graph.Preferences(); prefs.Len() != 0 { + if err := graph.RecordPoll(r); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Accepted { t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Accepted) @@ -480,12 +541,15 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Green) - graph.Add(Blue) - graph.Add(purple) - - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if err := graph.Add(Blue); err != nil { + t.Fatal(err) + } else if err := graph.Add(purple); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s", Red.ID()) @@ -503,10 +567,9 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { gp := ids.Bag{} gp.Add(Green.ID(), purple.ID()) - - graph.RecordPoll(gp) - - if prefs := graph.Preferences(); prefs.Len() != 2 { + if err := graph.RecordPoll(gp); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 2 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Green.ID()) { t.Fatalf("Wrong preference. Expected %s", Green.ID()) @@ -520,11 +583,9 @@ func RejectingDependencyTest(t *testing.T, factory Factory) { t.Fatalf("Wrong status. %s should be %s", Blue.ID(), choices.Processing) } else if purple.Status() != choices.Processing { t.Fatalf("Wrong status. %s should be %s", purple.ID(), choices.Processing) - } - - graph.RecordPoll(gp) - - if prefs := graph.Preferences(); prefs.Len() != 0 { + } else if err := graph.RecordPoll(gp); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if Red.Status() != choices.Rejected { t.Fatalf("Wrong status. %s should be %s", Red.ID(), choices.Rejected) @@ -553,9 +614,9 @@ func VacuouslyAcceptedTest(t *testing.T, factory Factory) { } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(purple) - - if prefs := graph.Preferences(); prefs.Len() != 0 { + if err := graph.Add(purple); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 0 { t.Fatalf("Wrong number of preferences.") } else if status := purple.Status(); status != choices.Accepted { t.Fatalf("Wrong status. 
%s should be %s", purple.ID(), choices.Accepted) @@ -593,17 +654,15 @@ func ConflictsTest(t *testing.T, factory Factory) { Ins: insPurple, } - graph.Add(purple) - - if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { + if err := graph.Add(purple); err != nil { + t.Fatal(err) + } else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { t.Fatalf("Wrong number of conflicts") } else if !orangeConflicts.Contains(purple.Identifier) { t.Fatalf("Conflicts does not contain the right transaction") - } - - graph.Add(orange) - - if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { + } else if err := graph.Add(orange); err != nil { + t.Fatal(err) + } else if orangeConflicts := graph.Conflicts(orange); orangeConflicts.Len() != 1 { t.Fatalf("Wrong number of conflicts") } else if !orangeConflicts.Contains(purple.Identifier) { t.Fatalf("Conflicts does not contain the right transaction") @@ -643,17 +702,20 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { virtuous.Ins.Add(input2) - graph.Add(rogue1) - graph.Add(rogue2) - graph.Add(virtuous) + if err := graph.Add(rogue1); err != nil { + t.Fatal(err) + } else if err := graph.Add(rogue2); err != nil { + t.Fatal(err) + } else if err := graph.Add(virtuous); err != nil { + t.Fatal(err) + } votes := ids.Bag{} votes.Add(rogue1.ID()) votes.Add(virtuous.ID()) - - graph.RecordPoll(votes) - - if status := rogue1.Status(); status != choices.Processing { + if err := graph.RecordPoll(votes); err != nil { + t.Fatal(err) + } else if status := rogue1.Status(); status != choices.Processing { t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) } else if status := rogue2.Status(); status != choices.Processing { t.Fatalf("Rogue Tx is %s expected %s", status, choices.Processing) @@ -664,6 +726,135 @@ func VirtuousDependsOnRogueTest(t *testing.T, factory Factory) { } } +func ErrorOnVacuouslyAcceptedTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + Validity: errors.New(""), + } + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if err := graph.Add(purple); err == nil { + t.Fatalf("Should have errored on acceptance") + } +} + +func ErrorOnAcceptedTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + Validity: errors.New(""), + } + purple.Ins.Add(ids.Empty.Prefix(4)) + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 2, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if err := graph.Add(purple); err != nil { + t.Fatal(err) + } + + votes := ids.Bag{} + votes.Add(purple.ID()) + if err := graph.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on accepting an invalid tx") + } +} + +func ErrorOnRejectingLowerConfidenceConflictTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + X := ids.Empty.Prefix(4) + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + } + purple.Ins.Add(X) + + pink := &TestTx{ + Identifier: ids.Empty.Prefix(8), + Stat: choices.Processing, + Validity: errors.New(""), + } + pink.Ins.Add(X) + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, 
Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if err := graph.Add(purple); err != nil { + t.Fatal(err) + } else if err := graph.Add(pink); err != nil { + t.Fatal(err) + } + + votes := ids.Bag{} + votes.Add(purple.ID()) + if err := graph.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on rejecting an invalid tx") + } +} + +func ErrorOnRejectingHigherConfidenceConflictTest(t *testing.T, factory Factory) { + Setup() + + graph := factory.New() + + X := ids.Empty.Prefix(4) + + purple := &TestTx{ + Identifier: ids.Empty.Prefix(7), + Stat: choices.Processing, + } + purple.Ins.Add(X) + + pink := &TestTx{ + Identifier: ids.Empty.Prefix(8), + Stat: choices.Processing, + Validity: errors.New(""), + } + pink.Ins.Add(X) + + params := snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, Alpha: 1, BetaVirtuous: 1, BetaRogue: 1, + } + graph.Initialize(snow.DefaultContextTest(), params) + + if err := graph.Add(pink); err != nil { + t.Fatal(err) + } else if err := graph.Add(purple); err != nil { + t.Fatal(err) + } + + votes := ids.Bag{} + votes.Add(purple.ID()) + if err := graph.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on rejecting an invalid tx") + } +} + func StringTest(t *testing.T, factory Factory, prefix string) { Setup() @@ -674,12 +865,16 @@ func StringTest(t *testing.T, factory Factory, prefix string) { K: 2, Alpha: 2, BetaVirtuous: 1, BetaRogue: 2, } graph.Initialize(snow.DefaultContextTest(), params) - graph.Add(Red) - graph.Add(Green) - graph.Add(Blue) - graph.Add(Alpha) - if prefs := graph.Preferences(); prefs.Len() != 1 { + if err := graph.Add(Red); err != nil { + t.Fatal(err) + } else if err := graph.Add(Green); err != nil { + t.Fatal(err) + } else if err := graph.Add(Blue); err != nil { + t.Fatal(err) + } else if err := graph.Add(Alpha); err != nil { + t.Fatal(err) + } else if prefs := graph.Preferences(); prefs.Len() != 1 { t.Fatalf("Wrong number of preferences.") } else if !prefs.Contains(Red.ID()) { t.Fatalf("Wrong preference. Expected %s got %s", Red.ID(), prefs.List()[0]) @@ -691,8 +886,11 @@ func StringTest(t *testing.T, factory Factory, prefix string) { rb.SetThreshold(2) rb.AddCount(Red.ID(), 2) rb.AddCount(Blue.ID(), 2) - graph.RecordPoll(rb) - graph.Add(Blue) + if err := graph.RecordPoll(rb); err != nil { + t.Fatal(err) + } else if err := graph.Add(Blue); err != nil { + t.Fatal(err) + } { expected := prefix + "(\n" + @@ -720,7 +918,9 @@ func StringTest(t *testing.T, factory Factory, prefix string) { ga.SetThreshold(2) ga.AddCount(Green.ID(), 2) ga.AddCount(Alpha.ID(), 2) - graph.RecordPoll(ga) + if err := graph.RecordPoll(ga); err != nil { + t.Fatal(err) + } { expected := prefix + "(\n" + @@ -745,7 +945,9 @@ func StringTest(t *testing.T, factory Factory, prefix string) { } empty := ids.Bag{} - graph.RecordPoll(empty) + if err := graph.RecordPoll(empty); err != nil { + t.Fatal(err) + } { expected := prefix + "(\n" + @@ -767,10 +969,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong preference. Expected %s", Blue.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") + } else if err := graph.RecordPoll(ga); err != nil { + t.Fatal(err) } - graph.RecordPoll(ga) - { expected := prefix + "(\n" + " Choice[0] = ID: LUC1cmcxnfNR9LdkACS2ccGKLEK7SYqB4gLLTycQfg1koyfSq Confidence: 0 Bias: 1\n" + @@ -791,10 +993,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong preference. 
Expected %s", Alpha.ID()) } else if graph.Finalized() { t.Fatalf("Finalized too early") + } else if err := graph.RecordPoll(ga); err != nil { + t.Fatal(err) } - graph.RecordPoll(ga) - { expected := prefix + "()" if str := graph.String(); str != expected { @@ -806,9 +1008,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong number of preferences.") } else if !graph.Finalized() { t.Fatalf("Finalized too late") - } - - if Green.Status() != choices.Accepted { + } else if Green.Status() != choices.Accepted { t.Fatalf("%s should have been accepted", Green.ID()) } else if Alpha.Status() != choices.Accepted { t.Fatalf("%s should have been accepted", Alpha.ID()) @@ -816,10 +1016,10 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("%s should have been rejected", Red.ID()) } else if Blue.Status() != choices.Rejected { t.Fatalf("%s should have been rejected", Blue.ID()) + } else if err := graph.RecordPoll(rb); err != nil { + t.Fatal(err) } - graph.RecordPoll(rb) - { expected := prefix + "()" if str := graph.String(); str != expected { @@ -831,9 +1031,7 @@ func StringTest(t *testing.T, factory Factory, prefix string) { t.Fatalf("Wrong number of preferences.") } else if !graph.Finalized() { t.Fatalf("Finalized too late") - } - - if Green.Status() != choices.Accepted { + } else if Green.Status() != choices.Accepted { t.Fatalf("%s should have been accepted", Green.ID()) } else if Alpha.Status() != choices.Accepted { t.Fatalf("%s should have been accepted", Alpha.ID()) diff --git a/snow/consensus/snowstorm/directed_test.go b/snow/consensus/snowstorm/directed_test.go index 39bc5bf..f61d53c 100644 --- a/snow/consensus/snowstorm/directed_test.go +++ b/snow/consensus/snowstorm/directed_test.go @@ -7,34 +7,4 @@ import ( "testing" ) -func TestDirectedParams(t *testing.T) { ParamsTest(t, DirectedFactory{}) } - -func TestDirectedIssued(t *testing.T) { IssuedTest(t, DirectedFactory{}) } - -func TestDirectedLeftoverInput(t *testing.T) { LeftoverInputTest(t, DirectedFactory{}) } - -func TestDirectedLowerConfidence(t *testing.T) { LowerConfidenceTest(t, DirectedFactory{}) } - -func TestDirectedMiddleConfidence(t *testing.T) { MiddleConfidenceTest(t, DirectedFactory{}) } - -func TestDirectedIndependent(t *testing.T) { IndependentTest(t, DirectedFactory{}) } - -func TestDirectedVirtuous(t *testing.T) { VirtuousTest(t, DirectedFactory{}) } - -func TestDirectedIsVirtuous(t *testing.T) { IsVirtuousTest(t, DirectedFactory{}) } - -func TestDirectedConflicts(t *testing.T) { ConflictsTest(t, DirectedFactory{}) } - -func TestDirectedQuiesce(t *testing.T) { QuiesceTest(t, DirectedFactory{}) } - -func TestDirectedAcceptingDependency(t *testing.T) { AcceptingDependencyTest(t, DirectedFactory{}) } - -func TestDirectedRejectingDependency(t *testing.T) { RejectingDependencyTest(t, DirectedFactory{}) } - -func TestDirectedVacuouslyAccepted(t *testing.T) { VacuouslyAcceptedTest(t, DirectedFactory{}) } - -func TestDirectedVirtuousDependsOnRogue(t *testing.T) { - VirtuousDependsOnRogueTest(t, DirectedFactory{}) -} - -func TestDirectedString(t *testing.T) { StringTest(t, DirectedFactory{}, "DG") } +func TestDirectedConsensus(t *testing.T) { ConsensusTest(t, DirectedFactory{}, "DG") } diff --git a/snow/consensus/snowstorm/input_test.go b/snow/consensus/snowstorm/input_test.go index 46a0033..9cae5e7 100644 --- a/snow/consensus/snowstorm/input_test.go +++ b/snow/consensus/snowstorm/input_test.go @@ -7,32 +7,4 @@ import ( "testing" ) -func TestInputParams(t *testing.T) { ParamsTest(t, 
InputFactory{}) } - -func TestInputIssued(t *testing.T) { IssuedTest(t, InputFactory{}) } - -func TestInputLeftoverInput(t *testing.T) { LeftoverInputTest(t, InputFactory{}) } - -func TestInputLowerConfidence(t *testing.T) { LowerConfidenceTest(t, InputFactory{}) } - -func TestInputMiddleConfidence(t *testing.T) { MiddleConfidenceTest(t, InputFactory{}) } - -func TestInputIndependent(t *testing.T) { IndependentTest(t, InputFactory{}) } - -func TestInputVirtuous(t *testing.T) { VirtuousTest(t, InputFactory{}) } - -func TestInputIsVirtuous(t *testing.T) { IsVirtuousTest(t, InputFactory{}) } - -func TestInputConflicts(t *testing.T) { ConflictsTest(t, InputFactory{}) } - -func TestInputQuiesce(t *testing.T) { QuiesceTest(t, InputFactory{}) } - -func TestInputAcceptingDependency(t *testing.T) { AcceptingDependencyTest(t, InputFactory{}) } - -func TestInputRejectingDependency(t *testing.T) { RejectingDependencyTest(t, InputFactory{}) } - -func TestInputVacuouslyAccepted(t *testing.T) { VacuouslyAcceptedTest(t, InputFactory{}) } - -func TestInputVirtuousDependsOnRogue(t *testing.T) { VirtuousDependsOnRogueTest(t, InputFactory{}) } - -func TestInputString(t *testing.T) { StringTest(t, InputFactory{}, "IG") } +func TestInputConsensus(t *testing.T) { ConsensusTest(t, InputFactory{}, "IG") } diff --git a/snow/consensus/snowstorm/test_tx.go b/snow/consensus/snowstorm/test_tx.go index 18f1465..e9fb8f7 100644 --- a/snow/consensus/snowstorm/test_tx.go +++ b/snow/consensus/snowstorm/test_tx.go @@ -31,10 +31,10 @@ func (tx *TestTx) InputIDs() ids.Set { return tx.Ins } func (tx *TestTx) Status() choices.Status { return tx.Stat } // Accept implements the Consumer interface -func (tx *TestTx) Accept() error { tx.Stat = choices.Accepted; return nil } +func (tx *TestTx) Accept() error { tx.Stat = choices.Accepted; return tx.Validity } // Reject implements the Consumer interface -func (tx *TestTx) Reject() error { tx.Stat = choices.Rejected; return nil } +func (tx *TestTx) Reject() error { tx.Stat = choices.Rejected; return tx.Validity } // Reset sets the status to pending func (tx *TestTx) Reset() { tx.Stat = choices.Processing } From 3211546b5a796a47703b9a48b54564cd74d5399e Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Sun, 21 Jun 2020 23:56:08 -0400 Subject: [PATCH 135/183] Set up tests for error handling in avalanche --- snow/consensus/avalanche/consensus_test.go | 1327 +++++++++++++++++- snow/consensus/avalanche/topological_test.go | 825 +---------- snow/consensus/avalanche/vertex_test.go | 8 +- 3 files changed, 1288 insertions(+), 872 deletions(-) diff --git a/snow/consensus/avalanche/consensus_test.go b/snow/consensus/avalanche/consensus_test.go index 3135ce2..e47fed0 100644 --- a/snow/consensus/avalanche/consensus_test.go +++ b/snow/consensus/avalanche/consensus_test.go @@ -4,7 +4,9 @@ package avalanche import ( + "errors" "fmt" + "math" "testing" "github.com/prometheus/client_golang/prometheus" @@ -24,8 +26,102 @@ func GenerateID() ids.ID { var ( Genesis = GenerateID() offset = uint64(0) + + Tests = []func(*testing.T, Factory){ + MetricsTest, + ParamsTest, + AddTest, + VertexIssuedTest, + TxIssuedTest, + VirtuousTest, + VotingTest, + IgnoreInvalidVotingTest, + TransitiveVotingTest, + SplitVotingTest, + TransitiveRejectionTest, + IsVirtuousTest, + QuiesceTest, + OrphansTest, + ErrorOnVacuousAcceptTest, + ErrorOnTxAcceptTest, + ErrorOnVtxAcceptTest, + ErrorOnVtxRejectTest, + ErrorOnParentVtxRejectTest, + ErrorOnTransitiveVtxRejectTest, + } ) +func ConsensusTest(t *testing.T, factory Factory) { + 
for _, test := range Tests { + test(t, factory) + } +} + +func MetricsTest(t *testing.T, factory Factory) { + ctx := snow.DefaultContextTest() + + { + avl := factory.New() + params := Parameters{ + Parameters: snowball.Parameters{ + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + params.Metrics.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "vtx_processing", + })) + avl.Initialize(ctx, params, nil) + } + { + avl := factory.New() + params := Parameters{ + Parameters: snowball.Parameters{ + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + params.Metrics.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "vtx_accepted", + })) + avl.Initialize(ctx, params, nil) + } + { + avl := factory.New() + params := Parameters{ + Parameters: snowball.Parameters{ + Namespace: fmt.Sprintf("gecko_%s", ctx.ChainID.String()), + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + }, + Parents: 2, + BatchSize: 1, + } + params.Metrics.Register(prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: params.Namespace, + Name: "vtx_rejected", + })) + avl.Initialize(ctx, params, nil) + } +} + func ParamsTest(t *testing.T, factory Factory) { avl := factory.New() @@ -43,26 +139,6 @@ func ParamsTest(t *testing.T, factory Factory) { BatchSize: 1, } - numProcessing := prometheus.NewGauge( - prometheus.GaugeOpts{ - Namespace: params.Namespace, - Name: "vtx_processing", - }) - numAccepted := prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "vtx_accepted", - }) - numRejected := prometheus.NewCounter( - prometheus.CounterOpts{ - Namespace: params.Namespace, - Name: "vtx_rejected", - }) - - params.Metrics.Register(numProcessing) - params.Metrics.Register(numAccepted) - params.Metrics.Register(numRejected) - avl.Initialize(ctx, params, nil) if p := avl.Parameters(); p.K != params.K { @@ -120,9 +196,9 @@ func AddTest(t *testing.T, factory Factory) { status: choices.Processing, } - avl.Add(vtx0) - - if avl.Finalized() { + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") @@ -139,25 +215,21 @@ func AddTest(t *testing.T, factory Factory) { status: choices.Processing, } - avl.Add(vtx1) - - if avl.Finalized() { + if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") - } - - avl.Add(vtx1) - - if avl.Finalized() { + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") } else if !ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") - } - - avl.Add(vts[0]) - - if avl.Finalized() { + } else if err := avl.Add(vts[0]); err != nil { + t.Fatal(err) + } else if avl.Finalized() { t.Fatalf("A non-empty avalanche instance is finalized") } else if 
!ids.UnsortedEquals([]ids.ID{vtx0.id}, avl.Preferences().List()) { t.Fatalf("Initial frontier failed to be set") @@ -209,11 +281,9 @@ func VertexIssuedTest(t *testing.T, factory Factory) { if avl.VertexIssued(vtx) { t.Fatalf("Vertex reported as issued") - } - - avl.Add(vtx) - - if !avl.VertexIssued(vtx) { + } else if err := avl.Add(vtx); err != nil { + t.Fatal(err) + } else if !avl.VertexIssued(vtx) { t.Fatalf("Vertex reported as not issued") } } @@ -266,9 +336,1178 @@ func TxIssuedTest(t *testing.T, factory Factory) { status: choices.Processing, } - avl.Add(vtx) - - if !avl.TxIssued(tx1) { + if err := avl.Add(vtx); err != nil { + t.Fatal(err) + } else if !avl.TxIssued(tx1) { t.Fatalf("Tx reported as not issued") } } + +func VirtuousTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + if virtuous := avl.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vtx0.id) { + t.Fatalf("Wrong virtuous") + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if virtuous := avl.Virtuous(); virtuous.Len() != 1 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vtx0.id) { + t.Fatalf("Wrong virtuous") + } else if err := avl.RecordPoll(ids.UniqueBag{}); err != nil { + t.Fatal(err) + } else if virtuous := avl.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } else if virtuous := avl.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } else if err := avl.RecordPoll(ids.UniqueBag{}); err != nil { + t.Fatal(err) + } else if virtuous := avl.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of 
virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } +} + +func VotingTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } + + sm := ids.UniqueBag{} + sm.Add(0, vtx1.id) + sm.Add(1, vtx1.id) + if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if avl.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if !avl.Finalized() { + t.Fatalf("An avalanche instance finalized too late") + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Rejected { + t.Fatalf("Tx should have been rejected") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } +} + +func IgnoreInvalidVotingTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 3, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 1, + }, + Parents: 2, + BatchSize: 1, + } + + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } + + sm := ids.UniqueBag{} + sm.Add(0, vtx0.id) + sm.Add(1, vtx1.id) + + // Add Illegal Vote cast by Response 2 + sm.Add(2, vtx0.id) + sm.Add(2, vtx1.id) + + if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if avl.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } +} 
+ +func TransitiveVotingTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[1]) + + vtx1 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 2, + status: choices.Processing, + } + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx1}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 3, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } + + sm1 := ids.UniqueBag{} + sm1.Add(0, vtx0.id) + sm1.Add(1, vtx2.id) + if err := avl.RecordPoll(sm1); err != nil { + t.Fatal(err) + } else if avl.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } + + sm2 := ids.UniqueBag{} + sm2.Add(0, vtx2.id) + sm2.Add(1, vtx2.id) + if err := avl.RecordPoll(sm2); err != nil { + t.Fatal(err) + } else if !avl.Finalized() { + t.Fatalf("An avalanche instance finalized too late") + } else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } +} + +func SplitVotingTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } + + sm1 := ids.UniqueBag{} + sm1.Add(0, vtx0.id) // peer 0 votes for the tx though vtx0 + sm1.Add(1, vtx1.id) // peer 1 votes for the tx though vtx1 + if err := 
avl.RecordPoll(sm1); err != nil { + t.Fatal(err) + } else if !avl.Finalized() { + t.Fatalf("An avalanche instance finalized too late") + } else if !ids.UnsortedEquals([]ids.ID{vtx0.id, vtx1.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } +} + +func TransitiveRejectionTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } + + sm := ids.UniqueBag{} + sm.Add(0, vtx1.id) + sm.Add(1, vtx1.id) + if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if avl.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if avl.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Rejected { + t.Fatalf("Tx should have been rejected") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } else if tx2.Status() != choices.Processing { + t.Fatalf("Tx should not have been decided") + } else if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if avl.Finalized() { + t.Fatalf("An avalanche instance finalized too early") + } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, avl.Preferences().List()) { + t.Fatalf("Initial frontier failed to be set") + } else if tx0.Status() != choices.Rejected { + t.Fatalf("Tx should have been rejected") + } else if tx1.Status() != choices.Accepted { + t.Fatalf("Tx should have been accepted") + } else if tx2.Status() != choices.Processing { + t.Fatalf("Tx should not have been decided") + } +} + +func IsVirtuousTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 2, + Alpha: 2, + BetaVirtuous: 1, + BetaRogue: 2, + 
ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + if virtuous := avl.Virtuous(); virtuous.Len() != 2 { + t.Fatalf("Wrong number of virtuous.") + } else if !virtuous.Contains(vts[0].ID()) { + t.Fatalf("Wrong virtuous") + } else if !virtuous.Contains(vts[1].ID()) { + t.Fatalf("Wrong virtuous") + } + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + if !avl.IsVirtuous(tx0) { + t.Fatalf("Should be virtuous.") + } else if !avl.IsVirtuous(tx1) { + t.Fatalf("Should be virtuous.") + } else if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if !avl.IsVirtuous(tx0) { + t.Fatalf("Should be virtuous.") + } else if avl.IsVirtuous(tx1) { + t.Fatalf("Should not be virtuous.") + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if avl.IsVirtuous(tx0) { + t.Fatalf("Should not be virtuous.") + } else if avl.IsVirtuous(tx1) { + t.Fatalf("Should not be virtuous.") + } +} + +func QuiesceTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if avl.Quiesce() { + t.Fatalf("Shouldn't quiesce") + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if !avl.Quiesce() { + t.Fatalf("Should quiesce") + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } else if avl.Quiesce() { + t.Fatalf("Shouldn't quiesce") + } + + sm := ids.UniqueBag{} + sm.Add(0, vtx2.id) + if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if !avl.Quiesce() { + t.Fatalf("Should quiesce") + } +} + +func OrphansTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + 
Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: math.MaxInt32, + BetaRogue: math.MaxInt32, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }, &Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID(), GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + tx2 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx2.Ins.Add(utxos[1]) + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx0}, + id: GenerateID(), + txs: []snowstorm.Tx{tx2}, + height: 2, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if orphans := avl.Orphans(); orphans.Len() != 0 { + t.Fatalf("Wrong number of orphans") + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if orphans := avl.Orphans(); orphans.Len() != 0 { + t.Fatalf("Wrong number of orphans") + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } else if orphans := avl.Orphans(); orphans.Len() != 0 { + t.Fatalf("Wrong number of orphans") + } + + sm := ids.UniqueBag{} + sm.Add(0, vtx1.id) + if err := avl.RecordPoll(sm); err != nil { + t.Fatal(err) + } else if orphans := avl.Orphans(); orphans.Len() != 1 { + t.Fatalf("Wrong number of orphans") + } else if !orphans.Contains(tx2.ID()) { + t.Fatalf("Wrong orphan") + } +} + +func ErrorOnVacuousAcceptTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: math.MaxInt32, + BetaRogue: math.MaxInt32, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + Validity: errors.New(""), + } + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err == nil { + t.Fatalf("Should have errored on vertex issuance") + } +} + +func ErrorOnTxAcceptTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + Validity: errors.New(""), + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + 
} + + votes := ids.UniqueBag{} + votes.Add(0, vtx0.id) + if err := avl.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on vertex acceptance") + } +} + +func ErrorOnVtxAcceptTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + Validity: errors.New(""), + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } + + votes := ids.UniqueBag{} + votes.Add(0, vtx0.id) + if err := avl.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on vertex acceptance") + } +} + +func ErrorOnVtxRejectTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + Validity: errors.New(""), + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } + + votes := ids.UniqueBag{} + votes.Add(0, vtx0.id) + if err := avl.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on vertex rejection") + } +} + +func ErrorOnParentVtxRejectTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + Validity: errors.New(""), + } + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx1}, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: 
choices.Processing, + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } + + votes := ids.UniqueBag{} + votes.Add(0, vtx0.id) + if err := avl.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on vertex rejection") + } +} + +func ErrorOnTransitiveVtxRejectTest(t *testing.T, factory Factory) { + avl := factory.New() + + params := Parameters{ + Parameters: snowball.Parameters{ + Metrics: prometheus.NewRegistry(), + K: 1, + Alpha: 1, + BetaVirtuous: 1, + BetaRogue: 1, + ConcurrentRepolls: 1, + }, + Parents: 2, + BatchSize: 1, + } + vts := []Vertex{&Vtx{ + id: GenerateID(), + status: choices.Accepted, + }} + utxos := []ids.ID{GenerateID()} + + avl.Initialize(snow.DefaultContextTest(), params, vts) + + tx0 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx0.Ins.Add(utxos[0]) + + vtx0 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx0}, + height: 1, + status: choices.Processing, + } + + tx1 := &snowstorm.TestTx{ + Identifier: GenerateID(), + Stat: choices.Processing, + } + tx1.Ins.Add(utxos[0]) + + vtx1 := &Vtx{ + dependencies: vts, + id: GenerateID(), + txs: []snowstorm.Tx{tx1}, + height: 1, + status: choices.Processing, + } + + vtx2 := &Vtx{ + dependencies: []Vertex{vtx1}, + id: GenerateID(), + height: 1, + status: choices.Processing, + Validity: errors.New(""), + } + + if err := avl.Add(vtx0); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx1); err != nil { + t.Fatal(err) + } else if err := avl.Add(vtx2); err != nil { + t.Fatal(err) + } + + votes := ids.UniqueBag{} + votes.Add(0, vtx0.id) + if err := avl.RecordPoll(votes); err == nil { + t.Fatalf("Should have errored on vertex rejection") + } +} diff --git a/snow/consensus/avalanche/topological_test.go b/snow/consensus/avalanche/topological_test.go index 3ed648a..4b2c617 100644 --- a/snow/consensus/avalanche/topological_test.go +++ b/snow/consensus/avalanche/topological_test.go @@ -4,830 +4,7 @@ package avalanche import ( - "math" "testing" - - "github.com/prometheus/client_golang/prometheus" - - "github.com/ava-labs/gecko/ids" - "github.com/ava-labs/gecko/snow" - "github.com/ava-labs/gecko/snow/choices" - "github.com/ava-labs/gecko/snow/consensus/snowball" - "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) -func TestTopologicalParams(t *testing.T) { ParamsTest(t, TopologicalFactory{}) } - -func TestTopologicalAdd(t *testing.T) { AddTest(t, TopologicalFactory{}) } - -func TestTopologicalVertexIssued(t *testing.T) { VertexIssuedTest(t, TopologicalFactory{}) } - -func TestTopologicalTxIssued(t *testing.T) { TxIssuedTest(t, TopologicalFactory{}) } - -func TestAvalancheVoting(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - 
Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - ta.Add(vtx0) - ta.Add(vtx1) - - sm := make(ids.UniqueBag) - sm.Add(0, vtx1.id) - sm.Add(1, vtx1.id) - ta.RecordPoll(sm) - - if ta.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } - - ta.RecordPoll(sm) - - if !ta.Finalized() { - t.Fatalf("An avalanche instance finalized too late") - } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } else if tx0.Status() != choices.Rejected { - t.Fatalf("Tx should have been rejected") - } else if tx1.Status() != choices.Accepted { - t.Fatalf("Tx should have been accepted") - } -} - -func TestAvalancheIgnoreInvalidVoting(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 3, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 1, - }, - Parents: 2, - BatchSize: 1, - } - - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - ta.Add(vtx0) - ta.Add(vtx1) - - sm := make(ids.UniqueBag) - - sm.Add(0, vtx0.id) - sm.Add(1, vtx1.id) - - // Add Illegal Vote cast by Response 2 - sm.Add(2, vtx0.id) - sm.Add(2, vtx1.id) - - ta.RecordPoll(sm) - - if ta.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } -} - -func TestAvalancheTransitiveVoting(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID(), GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[1]) - - vtx1 := &Vtx{ - dependencies: []Vertex{vtx0}, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 2, - status: choices.Processing, - } - - vtx2 := &Vtx{ - dependencies: []Vertex{vtx1}, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 3, - status: choices.Processing, - } - - ta.Add(vtx0) - ta.Add(vtx1) - ta.Add(vtx2) - - sm1 := make(ids.UniqueBag) - sm1.Add(0, vtx0.id) - 
sm1.Add(1, vtx2.id) - ta.RecordPoll(sm1) - - if ta.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } else if tx0.Status() != choices.Accepted { - t.Fatalf("Tx should have been accepted") - } - - sm2 := make(ids.UniqueBag) - sm2.Add(0, vtx2.id) - sm2.Add(1, vtx2.id) - ta.RecordPoll(sm2) - - if !ta.Finalized() { - t.Fatalf("An avalanche instance finalized too late") - } else if !ids.UnsortedEquals([]ids.ID{vtx2.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } else if tx0.Status() != choices.Accepted { - t.Fatalf("Tx should have been accepted") - } else if tx1.Status() != choices.Accepted { - t.Fatalf("Tx should have been accepted") - } -} - -func TestAvalancheSplitVoting(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - ta.Add(vtx0) - ta.Add(vtx1) - - sm1 := make(ids.UniqueBag) - sm1.Add(0, vtx0.id) - sm1.Add(1, vtx1.id) - ta.RecordPoll(sm1) - - if !ta.Finalized() { - t.Fatalf("An avalanche instance finalized too late") - } else if !ids.UnsortedEquals([]ids.ID{vtx0.id, vtx1.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } else if tx0.Status() != choices.Accepted { - t.Fatalf("Tx should have been accepted") - } -} - -func TestAvalancheTransitiveRejection(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID(), GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - tx2 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx2.Ins.Add(utxos[1]) - - vtx2 := &Vtx{ - dependencies: []Vertex{vtx0}, - id: GenerateID(), - txs: []snowstorm.Tx{tx2}, - height: 2, - status: choices.Processing, - } - - ta.Add(vtx0) - ta.Add(vtx1) - ta.Add(vtx2) - - sm := make(ids.UniqueBag) - sm.Add(0, vtx1.id) - 
sm.Add(1, vtx1.id) - ta.RecordPoll(sm) - - if ta.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } - - ta.RecordPoll(sm) - - if ta.Finalized() { - t.Fatalf("An avalanche instance finalized too early") - } else if !ids.UnsortedEquals([]ids.ID{vtx1.id}, ta.Preferences().List()) { - t.Fatalf("Initial frontier failed to be set") - } else if tx0.Status() != choices.Rejected { - t.Fatalf("Tx should have been rejected") - } else if tx1.Status() != choices.Accepted { - t.Fatalf("Tx should have been accepted") - } else if tx2.Status() != choices.Processing { - t.Fatalf("Tx should not have been decided") - } - - ta.preferenceCache = make(map[[32]byte]bool) - ta.virtuousCache = make(map[[32]byte]bool) - - ta.update(vtx2) -} - -func TestAvalancheVirtuous(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID(), GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - if virtuous := ta.Virtuous(); virtuous.Len() != 2 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vts[0].ID()) { - t.Fatalf("Wrong virtuous") - } else if !virtuous.Contains(vts[1].ID()) { - t.Fatalf("Wrong virtuous") - } - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - tx2 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx2.Ins.Add(utxos[1]) - - vtx2 := &Vtx{ - dependencies: []Vertex{vtx0}, - id: GenerateID(), - txs: []snowstorm.Tx{tx2}, - height: 2, - status: choices.Processing, - } - - ta.Add(vtx0) - - if virtuous := ta.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.id) { - t.Fatalf("Wrong virtuous") - } - - ta.Add(vtx1) - - if virtuous := ta.Virtuous(); virtuous.Len() != 1 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vtx0.id) { - t.Fatalf("Wrong virtuous") - } - - ta.updateFrontiers() - - if virtuous := ta.Virtuous(); virtuous.Len() != 2 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vts[0].ID()) { - t.Fatalf("Wrong virtuous") - } else if !virtuous.Contains(vts[1].ID()) { - t.Fatalf("Wrong virtuous") - } - - ta.Add(vtx2) - - if virtuous := ta.Virtuous(); virtuous.Len() != 2 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vts[0].ID()) { - t.Fatalf("Wrong virtuous") - } else if !virtuous.Contains(vts[1].ID()) { - t.Fatalf("Wrong virtuous") - } - - ta.updateFrontiers() - - if virtuous := ta.Virtuous(); virtuous.Len() != 2 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vts[0].ID()) { - t.Fatalf("Wrong virtuous") - } else if 
!virtuous.Contains(vts[1].ID()) { - t.Fatalf("Wrong virtuous") - } -} - -func TestAvalancheIsVirtuous(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 2, - Alpha: 2, - BetaVirtuous: 1, - BetaRogue: 2, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID(), GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - if virtuous := ta.Virtuous(); virtuous.Len() != 2 { - t.Fatalf("Wrong number of virtuous.") - } else if !virtuous.Contains(vts[0].ID()) { - t.Fatalf("Wrong virtuous") - } else if !virtuous.Contains(vts[1].ID()) { - t.Fatalf("Wrong virtuous") - } - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - if !ta.IsVirtuous(tx0) { - t.Fatalf("Should be virtuous.") - } else if !ta.IsVirtuous(tx1) { - t.Fatalf("Should be virtuous.") - } - - ta.Add(vtx0) - - if !ta.IsVirtuous(tx0) { - t.Fatalf("Should be virtuous.") - } else if ta.IsVirtuous(tx1) { - t.Fatalf("Should not be virtuous.") - } - - ta.Add(vtx1) - - if ta.IsVirtuous(tx0) { - t.Fatalf("Should not be virtuous.") - } else if ta.IsVirtuous(tx1) { - t.Fatalf("Should not be virtuous.") - } -} - -func TestAvalancheQuiesce(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, - Alpha: 1, - BetaVirtuous: 1, - BetaRogue: 1, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID(), GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - tx2 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx2.Ins.Add(utxos[1]) - - vtx2 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx2}, - height: 2, - status: choices.Processing, - } - - ta.Add(vtx0) - - if ta.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - ta.Add(vtx1) - - if !ta.Quiesce() { - t.Fatalf("Should quiesce") - } - - ta.Add(vtx2) - - if ta.Quiesce() { - t.Fatalf("Shouldn't quiesce") - } - - sm := make(ids.UniqueBag) - sm.Add(0, vtx2.id) - ta.RecordPoll(sm) - - if !ta.Quiesce() { - t.Fatalf("Should quiesce") - } -} - -func TestAvalancheOrphans(t *testing.T) { - params := Parameters{ - Parameters: snowball.Parameters{ - Metrics: prometheus.NewRegistry(), 
- K: 1, - Alpha: 1, - BetaVirtuous: math.MaxInt32, - BetaRogue: math.MaxInt32, - ConcurrentRepolls: 1, - }, - Parents: 2, - BatchSize: 1, - } - vts := []Vertex{&Vtx{ - id: GenerateID(), - status: choices.Accepted, - }, &Vtx{ - id: GenerateID(), - status: choices.Accepted, - }} - utxos := []ids.ID{GenerateID(), GenerateID()} - - ta := Topological{} - ta.Initialize(snow.DefaultContextTest(), params, vts) - - tx0 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx0.Ins.Add(utxos[0]) - - vtx0 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx0}, - height: 1, - status: choices.Processing, - } - - tx1 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx1.Ins.Add(utxos[0]) - - vtx1 := &Vtx{ - dependencies: vts, - id: GenerateID(), - txs: []snowstorm.Tx{tx1}, - height: 1, - status: choices.Processing, - } - - tx2 := &snowstorm.TestTx{ - Identifier: GenerateID(), - Stat: choices.Processing, - } - tx2.Ins.Add(utxos[1]) - - vtx2 := &Vtx{ - dependencies: []Vertex{vtx0}, - id: GenerateID(), - txs: []snowstorm.Tx{tx2}, - height: 2, - status: choices.Processing, - } - - ta.Add(vtx0) - - if orphans := ta.Orphans(); orphans.Len() != 0 { - t.Fatalf("Wrong number of orphans") - } - - ta.Add(vtx1) - - if orphans := ta.Orphans(); orphans.Len() != 0 { - t.Fatalf("Wrong number of orphans") - } - - ta.Add(vtx2) - - if orphans := ta.Orphans(); orphans.Len() != 0 { - t.Fatalf("Wrong number of orphans") - } - - sm := make(ids.UniqueBag) - sm.Add(0, vtx1.id) - ta.RecordPoll(sm) - - if orphans := ta.Orphans(); orphans.Len() != 1 { - t.Fatalf("Wrong number of orphans") - } else if !orphans.Contains(tx2.ID()) { - t.Fatalf("Wrong orphan") - } -} +func TestTopological(t *testing.T) { ConsensusTest(t, TopologicalFactory{}) } diff --git a/snow/consensus/avalanche/vertex_test.go b/snow/consensus/avalanche/vertex_test.go index f7aee5a..0270af5 100644 --- a/snow/consensus/avalanche/vertex_test.go +++ b/snow/consensus/avalanche/vertex_test.go @@ -19,7 +19,8 @@ type Vtx struct { height uint64 status choices.Status - bytes []byte + Validity error + bytes []byte } func (v *Vtx) ID() ids.ID { return v.id } @@ -28,9 +29,8 @@ func (v *Vtx) Parents() []Vertex { return v.dependencies } func (v *Vtx) Height() uint64 { return v.height } func (v *Vtx) Txs() []snowstorm.Tx { return v.txs } func (v *Vtx) Status() choices.Status { return v.status } -func (v *Vtx) Live() {} -func (v *Vtx) Accept() error { v.status = choices.Accepted; return nil } -func (v *Vtx) Reject() error { v.status = choices.Rejected; return nil } +func (v *Vtx) Accept() error { v.status = choices.Accepted; return v.Validity } +func (v *Vtx) Reject() error { v.status = choices.Rejected; return v.Validity } func (v *Vtx) Bytes() []byte { return v.bytes } type sortVts []*Vtx From 12297cb0d24733ad33132597590eb9f7ea85533e Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 22 Jun 2020 00:08:11 -0400 Subject: [PATCH 136/183] Clarify that the snowball byzantine struct is only for testing --- snow/consensus/snowball/byzantine.go | 48 -------------------- snow/consensus/snowball/byzantine_test.go | 54 ----------------------- snow/consensus/snowball/consensus_test.go | 40 +++++++++++++++++ 3 files changed, 40 insertions(+), 102 deletions(-) delete mode 100644 snow/consensus/snowball/byzantine.go delete mode 100644 snow/consensus/snowball/byzantine_test.go diff --git a/snow/consensus/snowball/byzantine.go b/snow/consensus/snowball/byzantine.go deleted file mode 100644 index 
88fda59..0000000 --- a/snow/consensus/snowball/byzantine.go +++ /dev/null @@ -1,48 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. - -package snowball - -import ( - "github.com/ava-labs/gecko/ids" -) - -// ByzantineFactory implements Factory by returning a byzantine struct -type ByzantineFactory struct{} - -// New implements Factory -func (ByzantineFactory) New() Consensus { return &Byzantine{} } - -// Byzantine is a naive implementation of a multi-choice snowball instance -type Byzantine struct { - // params contains all the configurations of a snowball instance - params Parameters - - // Hardcode the preference - preference ids.ID -} - -// Initialize implements the Consensus interface -func (b *Byzantine) Initialize(params Parameters, choice ids.ID) { - b.params = params - b.preference = choice -} - -// Parameters implements the Consensus interface -func (b *Byzantine) Parameters() Parameters { return b.params } - -// Add implements the Consensus interface -func (b *Byzantine) Add(choice ids.ID) {} - -// Preference implements the Consensus interface -func (b *Byzantine) Preference() ids.ID { return b.preference } - -// RecordPoll implements the Consensus interface -func (b *Byzantine) RecordPoll(votes ids.Bag) {} - -// RecordUnsuccessfulPoll implements the Consensus interface -func (b *Byzantine) RecordUnsuccessfulPoll() {} - -// Finalized implements the Consensus interface -func (b *Byzantine) Finalized() bool { return true } -func (b *Byzantine) String() string { return b.preference.String() } diff --git a/snow/consensus/snowball/byzantine_test.go b/snow/consensus/snowball/byzantine_test.go deleted file mode 100644 index cee357b..0000000 --- a/snow/consensus/snowball/byzantine_test.go +++ /dev/null @@ -1,54 +0,0 @@ -// (c) 2019-2020, Ava Labs, Inc. All rights reserved. -// See the file LICENSE for licensing terms. 
- -package snowball - -import ( - "testing" - - "github.com/ava-labs/gecko/ids" - "github.com/prometheus/client_golang/prometheus" -) - -func TestByzantine(t *testing.T) { - params := Parameters{ - Metrics: prometheus.NewRegistry(), - K: 1, Alpha: 1, BetaVirtuous: 3, BetaRogue: 5, - } - - byzFactory := ByzantineFactory{} - byz := byzFactory.New() - byz.Initialize(params, Blue) - - if ret := byz.Parameters(); ret != params { - t.Fatalf("Should have returned the correct params") - } - - byz.Add(Green) - - if pref := byz.Preference(); !pref.Equals(Blue) { - t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) - } - - oneGreen := ids.Bag{} - oneGreen.Add(Green) - byz.RecordPoll(oneGreen) - - if pref := byz.Preference(); !pref.Equals(Blue) { - t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) - } - - byz.RecordUnsuccessfulPoll() - - if pref := byz.Preference(); !pref.Equals(Blue) { - t.Fatalf("Wrong preference, expected %s returned %s", Blue, pref) - } - - if final := byz.Finalized(); !final { - t.Fatalf("Should be marked as accepted") - } - - if str := byz.String(); str != Blue.String() { - t.Fatalf("Wrong string, expected %s returned %s", Blue, str) - } -} diff --git a/snow/consensus/snowball/consensus_test.go b/snow/consensus/snowball/consensus_test.go index 922f606..304bc19 100644 --- a/snow/consensus/snowball/consensus_test.go +++ b/snow/consensus/snowball/consensus_test.go @@ -11,6 +11,46 @@ import ( "github.com/ava-labs/gecko/ids" ) +// ByzantineFactory implements Factory by returning a byzantine struct +type ByzantineFactory struct{} + +// New implements Factory +func (ByzantineFactory) New() Consensus { return &Byzantine{} } + +// Byzantine is a naive implementation of a multi-choice snowball instance +type Byzantine struct { + // params contains all the configurations of a snowball instance + params Parameters + + // Hardcode the preference + preference ids.ID +} + +// Initialize implements the Consensus interface +func (b *Byzantine) Initialize(params Parameters, choice ids.ID) { + b.params = params + b.preference = choice +} + +// Parameters implements the Consensus interface +func (b *Byzantine) Parameters() Parameters { return b.params } + +// Add implements the Consensus interface +func (b *Byzantine) Add(choice ids.ID) {} + +// Preference implements the Consensus interface +func (b *Byzantine) Preference() ids.ID { return b.preference } + +// RecordPoll implements the Consensus interface +func (b *Byzantine) RecordPoll(votes ids.Bag) {} + +// RecordUnsuccessfulPoll implements the Consensus interface +func (b *Byzantine) RecordUnsuccessfulPoll() {} + +// Finalized implements the Consensus interface +func (b *Byzantine) Finalized() bool { return true } +func (b *Byzantine) String() string { return b.preference.String() } + var ( Red = ids.Empty.Prefix(0) Blue = ids.Empty.Prefix(1) From c88c85ea9b0df236ebbbce78b1c7a6e30ffc7507 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 22 Jun 2020 00:14:19 -0400 Subject: [PATCH 137/183] Minor cleanup in snowball consensus --- snow/consensus/snowball/flat.go | 2 +- snow/consensus/snowball/nnary_snowflake.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/snow/consensus/snowball/flat.go b/snow/consensus/snowball/flat.go index 21663c4..464e525 100644 --- a/snow/consensus/snowball/flat.go +++ b/snow/consensus/snowball/flat.go @@ -34,7 +34,7 @@ func (f *Flat) Parameters() Parameters { return f.params } // RecordPoll implements the Consensus interface func (f *Flat) RecordPoll(votes 
ids.Bag) { if pollMode, numVotes := votes.Mode(); numVotes >= f.params.Alpha { - f.nnarySnowball.RecordSuccessfulPoll(pollMode) + f.RecordSuccessfulPoll(pollMode) } else { f.RecordUnsuccessfulPoll() } diff --git a/snow/consensus/snowball/nnary_snowflake.go b/snow/consensus/snowball/nnary_snowflake.go index 8b461f0..ab580f9 100644 --- a/snow/consensus/snowball/nnary_snowflake.go +++ b/snow/consensus/snowball/nnary_snowflake.go @@ -51,7 +51,7 @@ func (sf *nnarySnowflake) RecordSuccessfulPoll(choice ids.ID) { return // This instace is already decided. } - if preference := sf.nnarySlush.Preference(); preference.Equals(choice) { + if preference := sf.Preference(); preference.Equals(choice) { sf.confidence++ } else { // confidence is set to 1 because there has already been 1 successful From 5cb106d349bb0ce9b9337a3bf9360c966438755b Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 22 Jun 2020 10:53:10 -0400 Subject: [PATCH 138/183] make staking cert/key read-only --- staking/gen_staker_key.go | 23 +++++++++++++++++------ 1 file changed, 17 insertions(+), 6 deletions(-) diff --git a/staking/gen_staker_key.go b/staking/gen_staker_key.go index 8969ea3..37142c4 100644 --- a/staking/gen_staker_key.go +++ b/staking/gen_staker_key.go @@ -40,20 +40,27 @@ func GenerateStakingKeyCert(keyPath, certPath string) error { return fmt.Errorf("couldn't create certificate: %w", err) } - // Write cert to disk - if err := os.MkdirAll(filepath.Dir(certPath), 0755); err != nil { - return fmt.Errorf("couldn't create path for key/cert: %w", err) + // Ensure directory where key/cert will live exist + if err := os.MkdirAll(filepath.Dir(certPath), 0700); err != nil { + return fmt.Errorf("couldn't create path for cert: %w", err) + } else if err := os.MkdirAll(filepath.Dir(keyPath), 0700); err != nil { + return fmt.Errorf("couldn't create path for key: %w", err) } - certOut, err := os.Create(certPath) + + // Write cert to disk + certFile, err := os.Create(certPath) if err != nil { return fmt.Errorf("couldn't create cert file: %w", err) } - if err := pem.Encode(certOut, &pem.Block{Type: "CERTIFICATE", Bytes: certBytes}); err != nil { + if err := pem.Encode(certFile, &pem.Block{Type: "CERTIFICATE", Bytes: certBytes}); err != nil { return fmt.Errorf("couldn't write cert file: %w", err) } - if err := certOut.Close(); err != nil { + if err := certFile.Close(); err != nil { return fmt.Errorf("couldn't close cert file: %w", err) } + if err := os.Chmod(certPath, 0400); err != nil { // Make cert read-only + return fmt.Errorf("couldn't change permissions on cert: %w", err) + } // Write key to disk keyOut, err := os.Create(keyPath) @@ -70,5 +77,9 @@ func GenerateStakingKeyCert(keyPath, certPath string) error { if err := keyOut.Close(); err != nil { return fmt.Errorf("couldn't close key file: %w", err) } + if err := os.Chmod(keyPath, 0400); err != nil { // Make key read-only + return fmt.Errorf("couldn't change permissions on key") + } + return nil } From 38f7e236473beea8d112875a98d1a69e6c44ce40 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 22 Jun 2020 13:05:47 -0400 Subject: [PATCH 139/183] disable keystore and admin APIs by default --- main/params.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/main/params.go b/main/params.go index 1e526b3..877d406 100644 --- a/main/params.go +++ b/main/params.go @@ -190,7 +190,7 @@ func init() { consensusIP := fs.String("public-ip", "", "Public IP of this node") // HTTP Server: - httpHost := fs.String("http-host", "", "Address of the HTTP server") + httpHost := 
fs.String("http-host", "127.0.0.1", "Address of the HTTP server") httpPort := fs.Uint("http-port", 9650, "Port of the HTTP server") fs.BoolVar(&Config.EnableHTTPS, "http-tls-enabled", false, "Upgrade the HTTP server to HTTPs") fs.StringVar(&Config.HTTPSKeyFile, "http-tls-key-file", "", "TLS private key file for the HTTPs server") @@ -225,9 +225,9 @@ func init() { fs.IntVar(&Config.ConsensusParams.ConcurrentRepolls, "snow-concurrent-repolls", 1, "Minimum number of concurrent polls for finalizing consensus") // Enable/Disable APIs: - fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", true, "If true, this node exposes the Admin API") + fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", false, "If true, this node exposes the Admin API") fs.BoolVar(&Config.InfoAPIEnabled, "api-info-enabled", true, "If true, this node exposes the Info API") - fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") + fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", false, "If true, this node exposes the Keystore API") fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") fs.BoolVar(&Config.HealthAPIEnabled, "api-health-enabled", true, "If true, this node exposes the Health API") fs.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") From c7356a581df7e063a570d43fb0f1782a17b1fc20 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 22 Jun 2020 13:06:18 -0400 Subject: [PATCH 140/183] open HTTP port iff HTTP server not listening on localhost --- main/main.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/main/main.go b/main/main.go index 98cb581..43fe1b9 100644 --- a/main/main.go +++ b/main/main.go @@ -71,8 +71,10 @@ func main() { mapper := nat.NewDefaultMapper(log, Config.Nat, nat.TCP, "gecko") defer mapper.UnmapAllPorts() - mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port) - mapper.MapPort(Config.HTTPPort, Config.HTTPPort) + mapper.MapPort(Config.StakingIP.Port, Config.StakingIP.Port) // Open staking port + if Config.HTTPHost != "127.0.0.1" && Config.HTTPHost != "localhost" { // Open HTTP port iff HTTP server not listening on localhost + mapper.MapPort(Config.HTTPPort, Config.HTTPPort) + } node := node.Node{} From fc40ad802f42e2c6db83443531ab40651360f6de Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 22 Jun 2020 13:18:14 -0400 Subject: [PATCH 141/183] lock, mem and CPU profiles write to a fixed filename --- api/admin/performance.go | 21 +++++++++++++++------ api/admin/service.go | 33 +++++++++------------------------ 2 files changed, 24 insertions(+), 30 deletions(-) diff --git a/api/admin/performance.go b/api/admin/performance.go index bf2a460..6035b08 100644 --- a/api/admin/performance.go +++ b/api/admin/performance.go @@ -10,6 +10,15 @@ import ( "runtime/pprof" ) +const ( + // Name of file that CPU profile is written to when StartCPUProfiler called + cpuProfileFile = "cpu.profile" + // Name of file that memory profile is written to when MemoryProfile called + memProfileFile = "mem.profile" + // Name of file that lock profile is written to + lockProfileFile = "lock.profile" +) + var ( errCPUProfilerRunning = errors.New("cpu profiler already running") errCPUProfilerNotRunning = errors.New("cpu profiler doesn't exist") @@ -20,12 +29,12 @@ var ( type Performance struct{ cpuProfileFile *os.File } // StartCPUProfiler starts measuring the cpu utilization of this node -func (p *Performance) 
StartCPUProfiler(filename string) error { +func (p *Performance) StartCPUProfiler() error { if p.cpuProfileFile != nil { return errCPUProfilerRunning } - file, err := os.Create(filename) + file, err := os.Create(cpuProfileFile) if err != nil { return err } @@ -52,8 +61,8 @@ func (p *Performance) StopCPUProfiler() error { } // MemoryProfile dumps the current memory utilization of this node -func (p *Performance) MemoryProfile(filename string) error { - file, err := os.Create(filename) +func (p *Performance) MemoryProfile() error { + file, err := os.Create(memProfileFile) if err != nil { return err } @@ -66,8 +75,8 @@ func (p *Performance) MemoryProfile(filename string) error { } // LockProfile dumps the current lock statistics of this node -func (p *Performance) LockProfile(filename string) error { - file, err := os.Create(filename) +func (p *Performance) LockProfile() error { + file, err := os.Create(lockProfileFile) if err != nil { return err } diff --git a/api/admin/service.go b/api/admin/service.go index 3d61730..c5059f0 100644 --- a/api/admin/service.go +++ b/api/admin/service.go @@ -39,21 +39,16 @@ func NewService(log logging.Logger, chainManager chains.Manager, peers network.N return &common.HTTPHandler{Handler: newServer} } -// StartCPUProfilerArgs are the arguments for calling StartCPUProfiler -type StartCPUProfilerArgs struct { - Filename string `json:"filename"` -} - // StartCPUProfilerReply are the results from calling StartCPUProfiler type StartCPUProfilerReply struct { Success bool `json:"success"` } // StartCPUProfiler starts a cpu profile writing to the specified file -func (service *Admin) StartCPUProfiler(_ *http.Request, args *StartCPUProfilerArgs, reply *StartCPUProfilerReply) error { - service.log.Info("Admin: StartCPUProfiler called with %s", args.Filename) +func (service *Admin) StartCPUProfiler(_ *http.Request, args *struct{}, reply *StartCPUProfilerReply) error { + service.log.Info("Admin: StartCPUProfiler called") reply.Success = true - return service.performance.StartCPUProfiler(args.Filename) + return service.performance.StartCPUProfiler() } // StopCPUProfilerReply are the results from calling StopCPUProfiler @@ -68,26 +63,16 @@ func (service *Admin) StopCPUProfiler(_ *http.Request, _ *struct{}, reply *StopC return service.performance.StopCPUProfiler() } -// MemoryProfileArgs are the arguments for calling MemoryProfile -type MemoryProfileArgs struct { - Filename string `json:"filename"` -} - // MemoryProfileReply are the results from calling MemoryProfile type MemoryProfileReply struct { Success bool `json:"success"` } // MemoryProfile runs a memory profile writing to the specified file -func (service *Admin) MemoryProfile(_ *http.Request, args *MemoryProfileArgs, reply *MemoryProfileReply) error { - service.log.Info("Admin: MemoryProfile called with %s", args.Filename) +func (service *Admin) MemoryProfile(_ *http.Request, args *struct{}, reply *MemoryProfileReply) error { + service.log.Info("Admin: MemoryProfile called") reply.Success = true - return service.performance.MemoryProfile(args.Filename) -} - -// LockProfileArgs are the arguments for calling LockProfile -type LockProfileArgs struct { - Filename string `json:"filename"` + return service.performance.MemoryProfile() } // LockProfileReply are the results from calling LockProfile @@ -96,10 +81,10 @@ type LockProfileReply struct { } // LockProfile runs a mutex profile writing to the specified file -func (service *Admin) LockProfile(_ *http.Request, args *LockProfileArgs, reply *LockProfileReply) error { - 
service.log.Info("Admin: LockProfile called with %s", args.Filename) +func (service *Admin) LockProfile(_ *http.Request, args *struct{}, reply *LockProfileReply) error { + service.log.Info("Admin: LockProfile called") reply.Success = true - return service.performance.LockProfile(args.Filename) + return service.performance.LockProfile() } // AliasArgs are the arguments for calling Alias From 3a854ebdecd21a1220a0fe94f3ea7ba21f980d71 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 22 Jun 2020 15:20:35 -0400 Subject: [PATCH 142/183] handler engine gets/sets --- snow/networking/router/handler.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/snow/networking/router/handler.go b/snow/networking/router/handler.go index 9d45baf..03a55f1 100644 --- a/snow/networking/router/handler.go +++ b/snow/networking/router/handler.go @@ -54,6 +54,12 @@ func (h *Handler) Initialize( // Context of this Handler func (h *Handler) Context() *snow.Context { return h.engine.Context() } +// Engine returns the engine this handler dispatches to +func (h *Handler) Engine() common.Engine { return h.engine } + +// SetEngine sets the engine this handler dispatches to +func (h *Handler) SetEngine(engine common.Engine) { h.engine = engine } + // Dispatch waits for incoming messages from the network // and, when they arrive, sends them to the consensus engine func (h *Handler) Dispatch() { From 473bef24b1d44279b9a3b1975c55064d18746fe9 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 22 Jun 2020 15:50:52 -0400 Subject: [PATCH 143/183] removed duplicated batch writes, fixed tests --- database/versiondb/db.go | 3 --- vms/avm/export_tx_test.go | 11 +++++++---- vms/avm/import_tx_test.go | 11 +++++++---- vms/platformvm/vm_test.go | 6 +++--- 4 files changed, 17 insertions(+), 14 deletions(-) diff --git a/database/versiondb/db.go b/database/versiondb/db.go index 7223c55..b2b5be1 100644 --- a/database/versiondb/db.go +++ b/database/versiondb/db.go @@ -234,9 +234,6 @@ func (db *Database) commitBatch() (database.Batch, error) { return nil, err } } - if err := db.batch.Write(); err != nil { - return nil, err - } return db.batch, nil } diff --git a/vms/avm/export_tx_test.go b/vms/avm/export_tx_test.go index 75b359f..96c7733 100644 --- a/vms/avm/export_tx_test.go +++ b/vms/avm/export_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" @@ -117,9 +118,10 @@ func TestIssueExportTx(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) + baseDB := memdb.New() sm := &atomic.SharedMemory{} - sm.Initialize(logging.NoLog{}, memdb.New()) + sm.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, baseDB)) ctx := snow.DefaultContextTest() ctx.NetworkID = networkID @@ -138,7 +140,7 @@ func TestIssueExportTx(t *testing.T) { } err := vm.Initialize( ctx, - memdb.New(), + prefixdb.New([]byte{1}, baseDB), genesisBytes, issuer, []*common.Fx{{ @@ -273,9 +275,10 @@ func TestClearForceAcceptedExportTx(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) + baseDB := memdb.New() sm := &atomic.SharedMemory{} - sm.Initialize(logging.NoLog{}, memdb.New()) + sm.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, baseDB)) ctx := snow.DefaultContextTest() ctx.NetworkID = networkID @@ -294,7 +297,7 @@ func TestClearForceAcceptedExportTx(t 
*testing.T) { } err := vm.Initialize( ctx, - memdb.New(), + prefixdb.New([]byte{1}, baseDB), genesisBytes, issuer, []*common.Fx{{ diff --git a/vms/avm/import_tx_test.go b/vms/avm/import_tx_test.go index e510aff..750d402 100644 --- a/vms/avm/import_tx_test.go +++ b/vms/avm/import_tx_test.go @@ -9,6 +9,7 @@ import ( "github.com/ava-labs/gecko/chains/atomic" "github.com/ava-labs/gecko/database/memdb" + "github.com/ava-labs/gecko/database/prefixdb" "github.com/ava-labs/gecko/ids" "github.com/ava-labs/gecko/snow" "github.com/ava-labs/gecko/snow/engine/common" @@ -106,9 +107,10 @@ func TestIssueImportTx(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) + baseDB := memdb.New() sm := &atomic.SharedMemory{} - sm.Initialize(logging.NoLog{}, memdb.New()) + sm.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, baseDB)) ctx := snow.DefaultContextTest() ctx.NetworkID = networkID @@ -127,7 +129,7 @@ func TestIssueImportTx(t *testing.T) { } err := vm.Initialize( ctx, - memdb.New(), + prefixdb.New([]byte{1}, baseDB), genesisBytes, issuer, []*common.Fx{{ @@ -265,9 +267,10 @@ func TestForceAcceptImportTx(t *testing.T) { genesisBytes := BuildGenesisTest(t) issuer := make(chan common.Message, 1) + baseDB := memdb.New() sm := &atomic.SharedMemory{} - sm.Initialize(logging.NoLog{}, memdb.New()) + sm.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, baseDB)) ctx := snow.DefaultContextTest() ctx.NetworkID = networkID @@ -285,7 +288,7 @@ func TestForceAcceptImportTx(t *testing.T) { err := vm.Initialize( ctx, - memdb.New(), + prefixdb.New([]byte{1}, baseDB), genesisBytes, issuer, []*common.Fx{{ diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index dcee89a..82989eb 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -137,7 +137,7 @@ func defaultVM() *VM { vm.validators.PutValidatorSet(DefaultSubnetID, defaultSubnet) vm.clock.Set(defaultGenesisTime) - db := memdb.New() + db := prefixdb.New([]byte{0}, memdb.New()) msgChan := make(chan common.Message, 1) ctx := defaultContext() ctx.Lock.Lock() @@ -1189,7 +1189,7 @@ func TestAtomicImport(t *testing.T) { key := keys[0] sm := &atomic.SharedMemory{} - sm.Initialize(logging.NoLog{}, memdb.New()) + sm.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, vm.DB.GetDatabase())) vm.Ctx.SharedMemory = sm.NewBlockchainSharedMemory(vm.Ctx.ChainID) @@ -1282,7 +1282,7 @@ func TestOptimisticAtomicImport(t *testing.T) { key := keys[0] sm := &atomic.SharedMemory{} - sm.Initialize(logging.NoLog{}, memdb.New()) + sm.Initialize(logging.NoLog{}, prefixdb.New([]byte{0}, vm.DB.GetDatabase())) vm.Ctx.SharedMemory = sm.NewBlockchainSharedMemory(vm.Ctx.ChainID) From fc15e3cfe69eb46d21dfb40753bbcffa0ef81c43 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 22 Jun 2020 16:35:42 -0400 Subject: [PATCH 144/183] prevent potential memory leaks --- database/encdb/db.go | 6 +++++- database/memdb/db.go | 9 ++++++--- database/prefixdb/db.go | 6 +++++- database/rpcdb/db_client.go | 6 +++++- 4 files changed, 21 insertions(+), 6 deletions(-) diff --git a/database/encdb/db.go b/database/encdb/db.go index fe33fa7..ddf47e0 100644 --- a/database/encdb/db.go +++ b/database/encdb/db.go @@ -17,6 +17,10 @@ import ( "github.com/ava-labs/gecko/utils/hashing" ) +const ( + minBatchSize = 32 +) + // Database encrypts all values that are provided type Database struct { lock sync.RWMutex @@ -201,7 +205,7 @@ func (b *batch) Write() error { // Reset resets the batch for reuse. 
func (b *batch) Reset() { - b.writes = b.writes[:0] + b.writes = make([]keyValue, 0, minBatchSize) b.Batch.Reset() } diff --git a/database/memdb/db.go b/database/memdb/db.go index de0cae3..5bbd3a2 100644 --- a/database/memdb/db.go +++ b/database/memdb/db.go @@ -13,8 +13,11 @@ import ( "github.com/ava-labs/gecko/utils" ) -// DefaultSize is the default initial size of the memory database -const DefaultSize = 1 << 10 +const ( + // DefaultSize is the default initial size of the memory database + DefaultSize = 1 << 10 + minBatchSize = 32 +) // Database is an ephemeral key-value store that implements the Database // interface. @@ -191,7 +194,7 @@ func (b *batch) Write() error { // Reset implements the Batch interface func (b *batch) Reset() { - b.writes = b.writes[:0] + b.writes = make([]keyValue, 0, minBatchSize) b.size = 0 } diff --git a/database/prefixdb/db.go b/database/prefixdb/db.go index 34bc50d..a413846 100644 --- a/database/prefixdb/db.go +++ b/database/prefixdb/db.go @@ -12,6 +12,10 @@ import ( "github.com/ava-labs/gecko/utils/hashing" ) +const ( + minBatchSize = 32 +) + // Database partitions a database into a sub-database by prefixing all keys with // a unique value. type Database struct { @@ -199,7 +203,7 @@ func (b *batch) Write() error { // Reset resets the batch for reuse. func (b *batch) Reset() { - b.writes = b.writes[:0] + b.writes = make([]keyValue, 0, minBatchSize) b.Batch.Reset() } diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go index dc3f60b..f1a3abc 100644 --- a/database/rpcdb/db_client.go +++ b/database/rpcdb/db_client.go @@ -14,6 +14,10 @@ import ( "github.com/ava-labs/gecko/utils" ) +const ( + minBatchSize = 32 +) + var ( errClosed = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrClosed) errNotFound = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrNotFound) @@ -180,7 +184,7 @@ func (b *batch) Write() error { } func (b *batch) Reset() { - b.writes = b.writes[:0] + b.writes = make([]keyValue, 0, minBatchSize) b.size = 0 } From c9aa8eedc2a17b6067c7d1df32940ea132710ef1 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 22 Jun 2020 16:50:31 -0400 Subject: [PATCH 145/183] pre-allocate arrays --- ids/short_set.go | 18 +++++++++++++----- snow/validators/set.go | 7 ++++--- vms/platformvm/vm.go | 8 +++++--- 3 files changed, 22 insertions(+), 11 deletions(-) diff --git a/ids/short_set.go b/ids/short_set.go index 6977863..90766cd 100644 --- a/ids/short_set.go +++ b/ids/short_set.go @@ -57,15 +57,23 @@ func (ids *ShortSet) Remove(idList ...ShortID) { // Clear empties this set func (ids *ShortSet) Clear() { *ids = nil } -// CappedList returns a list of length at most [size]. Size should be >= 0 +// CappedList returns a list of length at most [size]. +// Size should be >= 0. If size < 0, returns empty list. 
func (ids ShortSet) CappedList(size int) []ShortID { - idList := make([]ShortID, size)[:0] + if size < 0 { + return make([]ShortID, 0, 0) + } + if l := ids.Len(); l < size { + size = l + } + i := 0 + idList := make([]ShortID, size) for id := range ids { - if size <= 0 { + if i >= size { break } - size-- - idList = append(idList, NewShortID(id)) + idList[i] = NewShortID(id) + i++ } return idList } diff --git a/snow/validators/set.go b/snow/validators/set.go index 50210bf..c33395f 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -71,9 +71,10 @@ func (s *set) Set(vdrs []Validator) { } func (s *set) set(vdrs []Validator) { - s.vdrMap = make(map[[20]byte]int, len(vdrs)) - s.vdrSlice = s.vdrSlice[:0] - s.sampler.Weights = s.sampler.Weights[:0] + lenVdrs := len(vdrs) + s.vdrMap = make(map[[20]byte]int, lenVdrs) + s.vdrSlice = make([]Validator, 0, lenVdrs) + s.sampler.Weights = make([]uint64, 0, lenVdrs) for _, vdr := range vdrs { s.add(vdr) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index baff040..01bb6a4 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -18,6 +18,7 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowman" "github.com/ava-labs/gecko/snow/engine/common" "github.com/ava-labs/gecko/snow/validators" + "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/utils/crypto" "github.com/ava-labs/gecko/utils/formatting" "github.com/ava-labs/gecko/utils/logging" @@ -26,7 +27,6 @@ import ( "github.com/ava-labs/gecko/utils/units" "github.com/ava-labs/gecko/utils/wrappers" "github.com/ava-labs/gecko/vms/components/ava" - "github.com/ava-labs/gecko/utils/codec" "github.com/ava-labs/gecko/vms/components/core" "github.com/ava-labs/gecko/vms/secp256k1fx" ) @@ -808,9 +808,11 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { validator.Wght = weight } - vdrList := make([]validators.Validator, len(vdrMap))[:0] + vdrList := make([]validators.Validator, len(vdrMap), len(vdrMap)) + i := 0 for _, validator := range vdrMap { - vdrList = append(vdrList, validator) + vdrList[i] = validator + i++ } return vdrList } From 5b6debbabad459dce10611e71fea7a58c3a33660 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 22 Jun 2020 18:08:20 -0400 Subject: [PATCH 146/183] added regression test --- database/versiondb/db_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/database/versiondb/db_test.go b/database/versiondb/db_test.go index 70cf8ff..0c284e2 100644 --- a/database/versiondb/db_test.go +++ b/database/versiondb/db_test.go @@ -299,6 +299,10 @@ func TestCommitBatch(t *testing.T) { if err := db.Put(key1, value1); err != nil { t.Fatalf("Unexpected error on db.Put: %s", err) + } else if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("Unexpected result of db.Has: %v", has) } batch, err := db.CommitBatch() @@ -307,7 +311,11 @@ func TestCommitBatch(t *testing.T) { } db.Abort() - if err := batch.Write(); err != nil { + if has, err := baseDB.Has(key1); err != nil { + t.Fatalf("Unexpected error on db.Has: %s", err) + } else if has { + t.Fatalf("Unexpected result of db.Has: %v", has) + } else if err := batch.Write(); err != nil { t.Fatalf("Unexpected error on batch.Write: %s", err) } From 7ef37af0d666abaa791a407be1a2a8142c9c5737 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Mon, 22 Jun 2020 18:14:35 -0400 Subject: [PATCH 147/183] changed test to enforce abortions --- database/versiondb/db_test.go | 2 +- 1 
file changed, 1 insertion(+), 1 deletion(-) diff --git a/database/versiondb/db_test.go b/database/versiondb/db_test.go index 0c284e2..345655a 100644 --- a/database/versiondb/db_test.go +++ b/database/versiondb/db_test.go @@ -311,7 +311,7 @@ func TestCommitBatch(t *testing.T) { } db.Abort() - if has, err := baseDB.Has(key1); err != nil { + if has, err := db.Has(key1); err != nil { t.Fatalf("Unexpected error on db.Has: %s", err) } else if has { t.Fatalf("Unexpected result of db.Has: %v", has) From 998f4bff40b89cc474c4ac276ce3f0d34e1a2e9e Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 13:03:23 -0400 Subject: [PATCH 148/183] add comments; remove unnceccessary batch write; avoid possible memory leak; reset batch after write --- database/versiondb/db.go | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/database/versiondb/db.go b/database/versiondb/db.go index 7223c55..050de0a 100644 --- a/database/versiondb/db.go +++ b/database/versiondb/db.go @@ -195,6 +195,7 @@ func (db *Database) Commit() error { if err := batch.Write(); err != nil { return err } + batch.Reset() db.abort() return nil } @@ -209,9 +210,10 @@ func (db *Database) Abort() { func (db *Database) abort() { db.mem = make(map[string]valueDelete, memdb.DefaultSize) } -// CommitBatch returns a batch that will commit all pending writes to the -// underlying database. The returned batch should be written before future calls -// to this DB unless the batch will never be written. +// CommitBatch returns a batch that contains all uncommitted puts/deletes. +// Calling Write() on the returned batch causes the puts/deletes to be +// written to the underlying database. The returned batch should be written before +// future calls to this DB unless the batch will never be written. 
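A hedged usage sketch of the behavior this comment describes and the regression test above exercises; setup and error handling are kept minimal and the function name is illustrative:

package example

import "github.com/ava-labs/gecko/database/versiondb"

// Puts stay in the versiondb layer until the batch returned by CommitBatch
// is written; Abort only clears the in-memory layer.
func commitViaBatch(db *versiondb.Database) error {
	if err := db.Put([]byte("key"), []byte("value")); err != nil {
		return err // staged in memory, not yet in the underlying database
	}
	batch, err := db.CommitBatch() // stage the pending writes into a batch
	if err != nil {
		return err
	}
	db.Abort()           // discard the in-memory layer; the batch keeps its writes
	return batch.Write() // only now do the writes reach the underlying database
}
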
func (db *Database) CommitBatch() (database.Batch, error) { db.lock.Lock() defer db.lock.Unlock() @@ -219,6 +221,8 @@ func (db *Database) CommitBatch() (database.Batch, error) { return db.commitBatch() } +// Put all of the puts/deletes in memory into db.batch +// and return the batch func (db *Database) commitBatch() (database.Batch, error) { if db.mem == nil { return nil, database.ErrClosed @@ -234,9 +238,6 @@ func (db *Database) commitBatch() (database.Batch, error) { return nil, err } } - if err := db.batch.Write(); err != nil { - return nil, err - } return db.batch, nil } @@ -249,6 +250,7 @@ func (db *Database) Close() error { if db.mem == nil { return database.ErrClosed } + db.batch = nil db.mem = nil db.db = nil return nil @@ -303,7 +305,7 @@ func (b *batch) Write() error { // Reset implements the Database interface func (b *batch) Reset() { - b.writes = b.writes[:0] + b.writes = make([]keyValue, 0) b.size = 0 } From f92fa88d242ac7d7db38ef9066948926c03d659d Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 13:04:10 -0400 Subject: [PATCH 149/183] commit db after parsing tx to avoid memory leak --- vms/avm/vm.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/vms/avm/vm.go b/vms/avm/vm.go index 715ce95..026760c 100644 --- a/vms/avm/vm.go +++ b/vms/avm/vm.go @@ -492,10 +492,10 @@ func (vm *VM) parseTx(b []byte) (*UniqueTx, error) { if err := vm.state.SetTx(tx.ID(), tx.Tx); err != nil { return nil, err } - if err := tx.setStatus(choices.Processing); err != nil { return nil, err } + return tx, vm.db.Commit() } return tx, nil From 55079aa893e91f4d15dab6342a4d02fb1aef0dd3 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 15:01:55 -0400 Subject: [PATCH 150/183] add CappedList for ids.Set and use it in fetchANeededVtx --- ids/set.go | 23 +++++++++++++- ids/set_test.go | 43 +++++++++++++++++++++++++++ snow/engine/avalanche/bootstrapper.go | 2 +- 3 files changed, 66 insertions(+), 2 deletions(-) diff --git a/ids/set.go b/ids/set.go index c3aa024..d632949 100644 --- a/ids/set.go +++ b/ids/set.go @@ -78,7 +78,7 @@ func (ids *Set) Clear() { *ids = nil } // List converts this set into a list func (ids Set) List() []ID { - idList := make([]ID, ids.Len(), ids.Len()) + idList := make([]ID, ids.Len()) i := 0 for id := range ids { idList[i] = NewID(id) @@ -87,6 +87,27 @@ func (ids Set) List() []ID { return idList } +// CappedList returns a list of length at most [size]. +// Size should be >= 0. If size < 0, returns nil. 
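A small illustrative example of the semantics documented here, mirroring the new unit test added in this patch; the helper name is invented:

package example

import "github.com/ava-labs/gecko/ids"

// CappedList never returns more than the requested number of elements, and
// never more than the set actually contains.
func sampleIDs() []ids.ID {
	s := ids.Set{}
	s.Add(ids.Empty)
	s.Add(ids.NewID([32]byte{1}))
	return s.CappedList(1) // one element; CappedList(5) would return both
}
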
+func (ids Set) CappedList(size int) []ID { + if size < 0 { + return nil + } + if l := ids.Len(); l < size { + size = l + } + i := 0 + idList := make([]ID, size) + for id := range ids { + if i >= size { + break + } + idList[i] = NewID(id) + i++ + } + return idList +} + // Equals returns true if the sets contain the same elements func (ids Set) Equals(oIDs Set) bool { if ids.Len() != oIDs.Len() { diff --git a/ids/set_test.go b/ids/set_test.go index 3c7ab15..b4e05db 100644 --- a/ids/set_test.go +++ b/ids/set_test.go @@ -55,3 +55,46 @@ func TestSet(t *testing.T) { t.Fatalf("Sets overlap") } } + +func TestSetCappedList(t *testing.T) { + set := Set{} + + id := Empty + + if list := set.CappedList(0); len(list) != 0 { + t.Fatalf("List should have been empty but was %v", list) + } + + set.Add(id) + + if list := set.CappedList(0); len(list) != 0 { + t.Fatalf("List should have been empty but was %v", list) + } else if list := set.CappedList(1); len(list) != 1 { + t.Fatalf("List should have had length %d but had %d", 1, len(list)) + } else if returnedID := list[0]; !id.Equals(returnedID) { + t.Fatalf("List should have been %s but was %s", id, returnedID) + } else if list := set.CappedList(2); len(list) != 1 { + t.Fatalf("List should have had length %d but had %d", 1, len(list)) + } else if returnedID := list[0]; !id.Equals(returnedID) { + t.Fatalf("List should have been %s but was %s", id, returnedID) + } + + id2 := NewID([32]byte{1}) + set.Add(id2) + + if list := set.CappedList(0); len(list) != 0 { + t.Fatalf("List should have been empty but was %v", list) + } else if list := set.CappedList(1); len(list) != 1 { + t.Fatalf("List should have had length %d but had %d", 1, len(list)) + } else if returnedID := list[0]; !id.Equals(returnedID) && !id2.Equals(returnedID) { + t.Fatalf("List should have been %s but was %s", id, returnedID) + } else if list := set.CappedList(2); len(list) != 2 { + t.Fatalf("List should have had length %d but had %d", 2, len(list)) + } else if list := set.CappedList(3); len(list) != 2 { + t.Fatalf("List should have had length %d but had %d", 2, len(list)) + } else if returnedID := list[0]; !id.Equals(returnedID) && !id2.Equals(returnedID) { + t.Fatalf("list contains unexpected element %s", returnedID) + } else if returnedID := list[1]; !id.Equals(returnedID) && !id2.Equals(returnedID) { + t.Fatalf("list contains unexpected element %s", returnedID) + } +} diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 749694e..352f40b 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -111,7 +111,7 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { // Calls fetch for a pending vertex if there are any func (b *bootstrapper) fetchANeededVtx() error { if b.needToFetch.Len() > 0 { - return b.fetch(b.needToFetch.List()[0]) + return b.fetch(b.needToFetch.CappedList(1)[0]) } return nil } From 7f5693dfd33fc31839c28ca5d94f44429b7218bc Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 15:08:15 -0400 Subject: [PATCH 151/183] reduce MaxTimeFetchingAncestors from 100ms to 50ms --- snow/engine/common/bootstrapper.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/snow/engine/common/bootstrapper.go b/snow/engine/common/bootstrapper.go index 8c9c745..f1f58db 100644 --- a/snow/engine/common/bootstrapper.go +++ b/snow/engine/common/bootstrapper.go @@ -24,7 +24,7 @@ const ( var ( // MaxTimeFetchingAncestors is the maximum amount of time to spend fetching vertices 
during a call to GetAncestors - MaxTimeFetchingAncestors = 100 * time.Millisecond + MaxTimeFetchingAncestors = 50 * time.Millisecond ) // Bootstrapper implements the Engine interface. From 6c6136d5512f89ee99c796fb74757052dd6da79e Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 16:44:02 -0400 Subject: [PATCH 152/183] only downsize underlying arrays if they're much too large --- database/common.go | 14 ++++++++++++++ database/encdb/db.go | 10 +++++----- database/memdb/db.go | 9 ++++++--- database/prefixdb/db.go | 10 +++++----- database/rpcdb/db_client.go | 10 +++++----- database/versiondb/db.go | 6 +++++- ids/short_set.go | 4 ++-- snow/validators/set.go | 35 +++++++++++++++++++++++++++++++++-- vms/platformvm/vm.go | 2 +- 9 files changed, 76 insertions(+), 24 deletions(-) create mode 100644 database/common.go diff --git a/database/common.go b/database/common.go new file mode 100644 index 0000000..26b0531 --- /dev/null +++ b/database/common.go @@ -0,0 +1,14 @@ +package database + +const ( + // MaxExcessCapacityFactor ... + // If, when a batch is reset, the cap(batch)/len(batch) > MaxExcessCapacityFactor, + // the underlying array's capacity will be reduced by a factor of capacityReductionFactor. + // Higher value for MaxExcessCapacityFactor --> less aggressive array downsizing --> less memory allocations + // but more unnecessary data in the underlying array that can't be garbage collected. + // Higher value for CapacityReductionFactor --> more aggressive array downsizing --> more memory allocations + // but less unnecessary data in the underlying array that can't be garbage collected. + MaxExcessCapacityFactor = 4 + // CapacityReductionFactor ... + CapacityReductionFactor = 2 +) diff --git a/database/encdb/db.go b/database/encdb/db.go index ddf47e0..8f0d8e3 100644 --- a/database/encdb/db.go +++ b/database/encdb/db.go @@ -17,10 +17,6 @@ import ( "github.com/ava-labs/gecko/utils/hashing" ) -const ( - minBatchSize = 32 -) - // Database encrypts all values that are provided type Database struct { lock sync.RWMutex @@ -205,7 +201,11 @@ func (b *batch) Write() error { // Reset resets the batch for reuse. func (b *batch) Reset() { - b.writes = make([]keyValue, 0, minBatchSize) + if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { + b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + } else { + b.writes = b.writes[:0] + } b.Batch.Reset() } diff --git a/database/memdb/db.go b/database/memdb/db.go index 5bbd3a2..94ba395 100644 --- a/database/memdb/db.go +++ b/database/memdb/db.go @@ -15,8 +15,7 @@ import ( const ( // DefaultSize is the default initial size of the memory database - DefaultSize = 1 << 10 - minBatchSize = 32 + DefaultSize = 1 << 10 ) // Database is an ephemeral key-value store that implements the Database @@ -194,7 +193,11 @@ func (b *batch) Write() error { // Reset implements the Batch interface func (b *batch) Reset() { - b.writes = make([]keyValue, 0, minBatchSize) + if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { + b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + } else { + b.writes = b.writes[:0] + } b.size = 0 } diff --git a/database/prefixdb/db.go b/database/prefixdb/db.go index a413846..7f606b2 100644 --- a/database/prefixdb/db.go +++ b/database/prefixdb/db.go @@ -12,10 +12,6 @@ import ( "github.com/ava-labs/gecko/utils/hashing" ) -const ( - minBatchSize = 32 -) - // Database partitions a database into a sub-database by prefixing all keys with // a unique value. 
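As an aside, a minimal sketch of the partitioning pattern the test changes earlier in this series rely on: one base database carved into independent keyspaces by prefix. Names and prefixes here are illustrative:

package example

import (
	"github.com/ava-labs/gecko/database/memdb"
	"github.com/ava-labs/gecko/database/prefixdb"
)

// Two prefixed databases over the same base never see each other's keys.
func partition() {
	baseDB := memdb.New()
	sharedMemoryDB := prefixdb.New([]byte{0}, baseDB) // e.g. shared-memory keyspace
	vmDB := prefixdb.New([]byte{1}, baseDB)           // e.g. the VM's keyspace
	_ = sharedMemoryDB
	_ = vmDB
}
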
type Database struct { @@ -203,7 +199,11 @@ func (b *batch) Write() error { // Reset resets the batch for reuse. func (b *batch) Reset() { - b.writes = make([]keyValue, 0, minBatchSize) + if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { + b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + } else { + b.writes = b.writes[:0] + } b.Batch.Reset() } diff --git a/database/rpcdb/db_client.go b/database/rpcdb/db_client.go index f1a3abc..401e404 100644 --- a/database/rpcdb/db_client.go +++ b/database/rpcdb/db_client.go @@ -14,10 +14,6 @@ import ( "github.com/ava-labs/gecko/utils" ) -const ( - minBatchSize = 32 -) - var ( errClosed = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrClosed) errNotFound = fmt.Sprintf("rpc error: code = Unknown desc = %s", database.ErrNotFound) @@ -184,7 +180,11 @@ func (b *batch) Write() error { } func (b *batch) Reset() { - b.writes = make([]keyValue, 0, minBatchSize) + if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { + b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + } else { + b.writes = b.writes[:0] + } b.size = 0 } diff --git a/database/versiondb/db.go b/database/versiondb/db.go index 050de0a..a1f9a18 100644 --- a/database/versiondb/db.go +++ b/database/versiondb/db.go @@ -305,7 +305,11 @@ func (b *batch) Write() error { // Reset implements the Database interface func (b *batch) Reset() { - b.writes = make([]keyValue, 0) + if cap(b.writes) > len(b.writes)*database.MaxExcessCapacityFactor { + b.writes = make([]keyValue, 0, cap(b.writes)/database.CapacityReductionFactor) + } else { + b.writes = b.writes[:0] + } b.size = 0 } diff --git a/ids/short_set.go b/ids/short_set.go index 90766cd..9bcd37d 100644 --- a/ids/short_set.go +++ b/ids/short_set.go @@ -58,10 +58,10 @@ func (ids *ShortSet) Remove(idList ...ShortID) { func (ids *ShortSet) Clear() { *ids = nil } // CappedList returns a list of length at most [size]. -// Size should be >= 0. If size < 0, returns empty list. +// Size should be >= 0. If size < 0, returns nil. func (ids ShortSet) CappedList(size int) []ShortID { if size < 0 { - return make([]ShortID, 0, 0) + return nil } if l := ids.Len(); l < size { size = l diff --git a/snow/validators/set.go b/snow/validators/set.go index c33395f..4fddf98 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -13,6 +13,19 @@ import ( "github.com/ava-labs/gecko/utils/random" ) +const ( + // maxExcessCapacityFactor ... + // If, when the validator set is reset, cap(set)/len(set) > MaxExcessCapacityFactor, + // the underlying arrays' capacities will be reduced by a factor of capacityReductionFactor. + // Higher value for maxExcessCapacityFactor --> less aggressive array downsizing --> less memory allocations + // but more unnecessary data in the underlying array that can't be garbage collected. + // Higher value for capacityReductionFactor --> more aggressive array downsizing --> more memory allocations + // but less unnecessary data in the underlying array that can't be garbage collected. + maxExcessCapacityFactor = 4 + // CapacityReductionFactor ... 
+ capacityReductionFactor = 2 +) + // Set of validators that can be sampled type Set interface { fmt.Stringer @@ -72,9 +85,27 @@ func (s *set) Set(vdrs []Validator) { func (s *set) set(vdrs []Validator) { lenVdrs := len(vdrs) + // If the underlying arrays are much larger than necessary, resize them to + // allow garbage collection of unused memory + if cap(s.vdrSlice) > len(s.vdrSlice)*maxExcessCapacityFactor { + newCap := cap(s.vdrSlice) / capacityReductionFactor + if newCap < lenVdrs { + newCap = lenVdrs + } + s.vdrSlice = make([]Validator, 0, newCap) + } else { + s.vdrSlice = s.vdrSlice[:0] + } + if cap(s.sampler.Weights) > len(s.sampler.Weights)*maxExcessCapacityFactor { + newCap := cap(s.sampler.Weights) / capacityReductionFactor + if newCap < lenVdrs { + newCap = lenVdrs + } + s.sampler.Weights = make([]uint64, 0, newCap) + } else { + s.sampler.Weights = s.sampler.Weights[:0] + } s.vdrMap = make(map[[20]byte]int, lenVdrs) - s.vdrSlice = make([]Validator, 0, lenVdrs) - s.sampler.Weights = make([]uint64, 0, lenVdrs) for _, vdr := range vdrs { s.add(vdr) diff --git a/vms/platformvm/vm.go b/vms/platformvm/vm.go index 01bb6a4..8b9350f 100644 --- a/vms/platformvm/vm.go +++ b/vms/platformvm/vm.go @@ -808,7 +808,7 @@ func (vm *VM) getValidators(validatorEvents *EventHeap) []validators.Validator { validator.Wght = weight } - vdrList := make([]validators.Validator, len(vdrMap), len(vdrMap)) + vdrList := make([]validators.Validator, len(vdrMap)) i := 0 for _, validator := range vdrMap { vdrList[i] = validator From 8ce7bda92afb35b663b2db8a0aba34e422eef276 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 16:54:03 -0400 Subject: [PATCH 153/183] cleanup --- snow/validators/set.go | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/snow/validators/set.go b/snow/validators/set.go index 4fddf98..edaecd7 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -93,17 +93,11 @@ func (s *set) set(vdrs []Validator) { newCap = lenVdrs } s.vdrSlice = make([]Validator, 0, newCap) - } else { - s.vdrSlice = s.vdrSlice[:0] - } - if cap(s.sampler.Weights) > len(s.sampler.Weights)*maxExcessCapacityFactor { - newCap := cap(s.sampler.Weights) / capacityReductionFactor - if newCap < lenVdrs { - newCap = lenVdrs - } s.sampler.Weights = make([]uint64, 0, newCap) } else { + s.vdrSlice = s.vdrSlice[:0] s.sampler.Weights = s.sampler.Weights[:0] + } s.vdrMap = make(map[[20]byte]int, lenVdrs) From 875b2d0cab12be0bdefb7561e60ee8046e1ac3ad Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 16:54:25 -0400 Subject: [PATCH 154/183] remove errant newline --- snow/validators/set.go | 1 - 1 file changed, 1 deletion(-) diff --git a/snow/validators/set.go b/snow/validators/set.go index edaecd7..610a85f 100644 --- a/snow/validators/set.go +++ b/snow/validators/set.go @@ -97,7 +97,6 @@ func (s *set) set(vdrs []Validator) { } else { s.vdrSlice = s.vdrSlice[:0] s.sampler.Weights = s.sampler.Weights[:0] - } s.vdrMap = make(map[[20]byte]int, lenVdrs) From fa11fecbb0a0ddde986d858e112017e3bd507b6d Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 17:15:25 -0400 Subject: [PATCH 155/183] pre-allocate map capacity in consensus --- snow/consensus/avalanche/topological.go | 18 +++++++++++------- snow/consensus/snowman/topological.go | 6 +++++- 2 files changed, 16 insertions(+), 8 deletions(-) diff --git a/snow/consensus/avalanche/topological.go b/snow/consensus/avalanche/topological.go index d786a0f..b8d128e 100644 --- a/snow/consensus/avalanche/topological.go +++ 
b/snow/consensus/avalanche/topological.go @@ -10,6 +10,10 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowstorm" ) +const ( + minMapSize = 16 +) + // TopologicalFactory implements Factory by returning a topological struct type TopologicalFactory struct{} @@ -65,12 +69,12 @@ func (ta *Topological) Initialize(ctx *snow.Context, params Parameters, frontier ta.ctx.Log.Error("%s", err) } - ta.nodes = make(map[[32]byte]Vertex) + ta.nodes = make(map[[32]byte]Vertex, minMapSize) ta.cg = &snowstorm.Directed{} ta.cg.Initialize(ctx, params.Parameters) - ta.frontier = make(map[[32]byte]Vertex) + ta.frontier = make(map[[32]byte]Vertex, minMapSize) for _, vtx := range frontier { ta.frontier[vtx.ID().Key()] = vtx } @@ -159,7 +163,7 @@ func (ta *Topological) Finalized() bool { return ta.cg.Finalized() } // the non-transitively applied votes. Also returns the list of leaf nodes. func (ta *Topological) calculateInDegree( responses ids.UniqueBag) (map[[32]byte]kahnNode, []ids.ID) { - kahns := make(map[[32]byte]kahnNode) + kahns := make(map[[32]byte]kahnNode, minMapSize) leaves := ids.Set{} for _, vote := range responses.List() { @@ -233,7 +237,7 @@ func (ta *Topological) pushVotes( kahnNodes map[[32]byte]kahnNode, leaves []ids.ID) ids.Bag { votes := make(ids.UniqueBag) - txConflicts := make(map[[32]byte]ids.Set) + txConflicts := make(map[[32]byte]ids.Set, minMapSize) for len(leaves) > 0 { newLeavesSize := len(leaves) - 1 @@ -443,9 +447,9 @@ func (ta *Topological) updateFrontiers() error { ta.preferred.Clear() ta.virtuous.Clear() ta.orphans.Clear() - ta.frontier = make(map[[32]byte]Vertex) - ta.preferenceCache = make(map[[32]byte]bool) - ta.virtuousCache = make(map[[32]byte]bool) + ta.frontier = make(map[[32]byte]Vertex, minMapSize) + ta.preferenceCache = make(map[[32]byte]bool, minMapSize) + ta.virtuousCache = make(map[[32]byte]bool, minMapSize) ta.orphans.Union(ta.cg.Virtuous()) // Initially, nothing is preferred diff --git a/snow/consensus/snowman/topological.go b/snow/consensus/snowman/topological.go index 6f98751..51612db 100644 --- a/snow/consensus/snowman/topological.go +++ b/snow/consensus/snowman/topological.go @@ -9,6 +9,10 @@ import ( "github.com/ava-labs/gecko/snow/consensus/snowball" ) +const ( + minMapSize = 16 +) + // TopologicalFactory implements Factory by returning a topological struct type TopologicalFactory struct{} @@ -183,7 +187,7 @@ func (ts *Topological) Finalized() bool { return len(ts.blocks) == 1 } // the non-transitively applied votes. Also returns the list of leaf blocks. 
func (ts *Topological) calculateInDegree( votes ids.Bag) (map[[32]byte]kahnNode, []ids.ID) { - kahns := make(map[[32]byte]kahnNode) + kahns := make(map[[32]byte]kahnNode, minMapSize) leaves := ids.Set{} for _, vote := range votes.List() { From 3d374a73db1e55e95a2dc51620d70c4057436a51 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 23 Jun 2020 17:30:45 -0400 Subject: [PATCH 156/183] enable keystore by default --- main/params.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main/params.go b/main/params.go index 877d406..4d55919 100644 --- a/main/params.go +++ b/main/params.go @@ -227,7 +227,7 @@ func init() { // Enable/Disable APIs: fs.BoolVar(&Config.AdminAPIEnabled, "api-admin-enabled", false, "If true, this node exposes the Admin API") fs.BoolVar(&Config.InfoAPIEnabled, "api-info-enabled", true, "If true, this node exposes the Info API") - fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", false, "If true, this node exposes the Keystore API") + fs.BoolVar(&Config.KeystoreAPIEnabled, "api-keystore-enabled", true, "If true, this node exposes the Keystore API") fs.BoolVar(&Config.MetricsAPIEnabled, "api-metrics-enabled", true, "If true, this node exposes the Metrics API") fs.BoolVar(&Config.HealthAPIEnabled, "api-health-enabled", true, "If true, this node exposes the Health API") fs.BoolVar(&Config.IPCEnabled, "api-ipcs-enabled", false, "If true, IPCs can be opened") From 7591e93e08182bb24f5314d28973341a311f8ffd Mon Sep 17 00:00:00 2001 From: Determinant Date: Tue, 23 Jun 2020 17:36:20 -0400 Subject: [PATCH 157/183] remove go-plugin logging; use coreth 0.2.5 --- go.mod | 2 +- go.sum | 2 ++ scripts/build.sh | 2 +- vms/rpcchainvm/factory.go | 5 +++++ 4 files changed, 9 insertions(+), 2 deletions(-) diff --git a/go.mod b/go.mod index a8a8f39..c8d28c2 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/AppsFlyer/go-sundheit v0.2.0 github.com/allegro/bigcache v1.2.1 // indirect github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f // indirect - github.com/ava-labs/coreth v0.2.4 // Added manually; don't delete + github.com/ava-labs/coreth v0.2.5 // Added manually; don't delete github.com/ava-labs/go-ethereum v1.9.3 // indirect github.com/deckarep/golang-set v1.7.1 // indirect github.com/decred/dcrd/dcrec/secp256k1 v1.0.3 diff --git a/go.sum b/go.sum index 3f809ed..fb197cd 100644 --- a/go.sum +++ b/go.sum @@ -22,6 +22,8 @@ github.com/aristanetworks/goarista v0.0.0-20200520141224-0f14e646773f/go.mod h1: github.com/aristanetworks/splunk-hec-go v0.3.3/go.mod h1:1VHO9r17b0K7WmOlLb9nTk/2YanvOEnLMUgsFrxBROc= github.com/ava-labs/coreth v0.2.4 h1:MhnbuRyMcij7WU4+frayp40quc44AMPc4IrxXhmucWw= github.com/ava-labs/coreth v0.2.4/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js= +github.com/ava-labs/coreth v0.2.5 h1:2Al753rpPHvvZfcz7w96YbKhGFvrcZzsIZ/sIp0A0Ao= +github.com/ava-labs/coreth v0.2.5/go.mod h1:pGolKipwq5vGIY2IBBcBkMYrqniXMsS5SBn+BBi4+Js= github.com/ava-labs/go-ethereum v1.9.3 h1:GmnMZ/dlvVAPFmWBzEpRJX49pUAymPfoASLNRJqR0AY= github.com/ava-labs/go-ethereum v1.9.3/go.mod h1:a+agc6fXfZFsPZCylA3ry4Y8CLCqLKg3Rc23NXZ9aw8= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= diff --git a/scripts/build.sh b/scripts/build.sh index 9bee1cf..a0a59d6 100755 --- a/scripts/build.sh +++ b/scripts/build.sh @@ -15,7 +15,7 @@ GECKO_PATH=$( cd "$( dirname "${BASH_SOURCE[0]}" )"; cd .. 
&& pwd ) # Directory BUILD_DIR=$GECKO_PATH/build # Where binaries go PLUGIN_DIR="$BUILD_DIR/plugins" # Where plugin binaries (namely coreth) go -CORETH_VER="0.2.4" # Should match coreth version in go.mod +CORETH_VER="0.2.5" # Should match coreth version in go.mod CORETH_PATH="$GOPATH/pkg/mod/github.com/ava-labs/coreth@v$CORETH_VER" # Build Gecko diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go index a48bb01..9b7db9c 100644 --- a/vms/rpcchainvm/factory.go +++ b/vms/rpcchainvm/factory.go @@ -5,6 +5,8 @@ package rpcchainvm import ( "errors" + "io/ioutil" + "log" "os/exec" "github.com/hashicorp/go-plugin" @@ -31,6 +33,9 @@ func (f *Factory) New(ctx *snow.Context) (interface{}, error) { }, } if ctx != nil { + // disable go-plugin logging (since it is not controlled by Gecko's own + // logging facility) + log.SetOutput(ioutil.Discard) config.Stderr = ctx.Log config.SyncStdout = ctx.Log config.SyncStderr = ctx.Log From 1d4c36846237e3b38c12537364e8708968527291 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 18:23:22 -0400 Subject: [PATCH 158/183] added local path to plugin --- main/params.go | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/main/params.go b/main/params.go index 4d55919..53e7b01 100644 --- a/main/params.go +++ b/main/params.go @@ -35,17 +35,19 @@ const ( // Results of parsing the CLI var ( - Config = node.Config{} - Err error - defaultNetworkName = genesis.TestnetName - defaultDbDir = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "db")) - defaultStakingKeyPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.key")) - defaultStakingCertPath = os.ExpandEnv(filepath.Join("$HOME", ".gecko", "staking", "staker.crt")) + Config = node.Config{} + Err error + defaultNetworkName = genesis.TestnetName - defaultPluginDirs = []string{ - "./build/plugins", - "./plugins", - os.ExpandEnv(filepath.Join("$HOME", ".gecko", "plugins")), + homeDir = os.ExpandEnv("$HOME") + defaultDbDir = filepath.Join(homeDir, ".gecko", "db") + defaultStakingKeyPath = filepath.Join(homeDir, ".gecko", "staking", "staker.key") + defaultStakingCertPath = filepath.Join(homeDir, ".gecko", "staking", "staker.crt") + defaultPluginDirs = []string{ + filepath.Join(".", "build", "plugins"), + filepath.Join(".", "plugins"), + filepath.Join("/", "usr", "local", "lib", "gecko"), + filepath.Join(homeDir, ".gecko", "plugins"), } ) From d40fbe8f75721bcd58b9de949a330ffe6657e9e3 Mon Sep 17 00:00:00 2001 From: Determinant Date: Tue, 23 Jun 2020 18:33:57 -0400 Subject: [PATCH 159/183] improve plugin logging --- go.mod | 1 + vms/rpcchainvm/factory.go | 23 ++++++++++++++--------- 2 files changed, 15 insertions(+), 9 deletions(-) diff --git a/go.mod b/go.mod index c8d28c2..5afd406 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/gorilla/mux v1.7.4 github.com/gorilla/rpc v1.2.0 github.com/gorilla/websocket v1.4.2 + github.com/hashicorp/go-hclog v0.0.0-20180709165350-ff2cf002a8dd github.com/hashicorp/go-plugin v1.3.0 github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/huin/goupnp v1.0.0 diff --git a/vms/rpcchainvm/factory.go b/vms/rpcchainvm/factory.go index 9b7db9c..5b2c2ce 100644 --- a/vms/rpcchainvm/factory.go +++ b/vms/rpcchainvm/factory.go @@ -5,13 +5,12 @@ package rpcchainvm import ( "errors" + "github.com/ava-labs/gecko/snow" + "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-plugin" "io/ioutil" "log" "os/exec" - - "github.com/hashicorp/go-plugin" - - "github.com/ava-labs/gecko/snow" ) var ( @@ 
-33,12 +32,18 @@ func (f *Factory) New(ctx *snow.Context) (interface{}, error) { }, } if ctx != nil { - // disable go-plugin logging (since it is not controlled by Gecko's own - // logging facility) - log.SetOutput(ioutil.Discard) + log.SetOutput(ctx.Log) config.Stderr = ctx.Log - config.SyncStdout = ctx.Log - config.SyncStderr = ctx.Log + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: ctx.Log, + Level: hclog.Info, + }) + } else { + log.SetOutput(ioutil.Discard) + config.Stderr = ioutil.Discard + config.Logger = hclog.New(&hclog.LoggerOptions{ + Output: ioutil.Discard, + }) } client := plugin.NewClient(config) From 8c7934515c8e1ce1cb080b19ea02baf27ce65140 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 19:41:22 -0400 Subject: [PATCH 160/183] removed mutually recursive functions for fetching --- snow/engine/avalanche/bootstrapper.go | 105 ++++++++++---------------- 1 file changed, 40 insertions(+), 65 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 352f40b..d8f1d6b 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -42,18 +42,16 @@ type bootstrapper struct { metrics common.Bootstrapper - // true if all of the vertices in the original accepted frontier have been processed - processedStartingAcceptedFrontier bool - // number of vertices fetched so far numFetched uint32 // tracks which validators were asked for which containers in which requests outstandingRequests common.Requests - // IDs of vertices that we will send a GetAncestors request for once we are not at the - // max number of outstanding requests - // Invariant: The intersection of needToFetch and outstandingRequests is empty + // IDs of vertices that we will send a GetAncestors request for once we are + // not at the max number of outstanding requests + // Invariant: The intersection of needToFetch and outstandingRequests is + // empty needToFetch ids.Set // Contains IDs of vertices that have recently been processed @@ -108,49 +106,36 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { return acceptedVtxIDs } -// Calls fetch for a pending vertex if there are any -func (b *bootstrapper) fetchANeededVtx() error { - if b.needToFetch.Len() > 0 { - return b.fetch(b.needToFetch.CappedList(1)[0]) - } - return nil -} +// Fetch vertices and their ancestors from the set of vertices that are needed +// to be fetched. +func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error { + b.needToFetch.Add(vtxIDs...) + for b.needToFetch.Len() > 0 && b.outstandingRequests.Len() < common.MaxOutstandingRequests { + vtxID := b.needToFetch.CappedList(1)[0] + b.needToFetch.Remove(vtxID) -// Get vertex [vtxID] and its ancestors. -// If [vtxID] has already been requested or is already fetched, and there are -// unrequested vertices, requests one such vertex instead of [vtxID] -func (b *bootstrapper) fetch(vtxID ids.ID) error { - // Make sure we haven't already requested this block - if b.outstandingRequests.Contains(vtxID) { - return b.fetchANeededVtx() - } - - // Make sure we don't already have this vertex - if _, err := b.State.GetVertex(vtxID); err == nil { - if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier { - return b.finish() + // Make sure we haven't already requested this vertex + if b.outstandingRequests.Contains(vtxID) { + continue } - b.needToFetch.Remove(vtxID) // we have this vertex. no need to request it. 
- return b.fetchANeededVtx() - } - // If we're already at maximum number of outstanding requests, queue for later - if b.outstandingRequests.Len() >= common.MaxOutstandingRequests { - b.needToFetch.Add(vtxID) - return nil - } + // Make sure we don't already have this vertex + if _, err := b.State.GetVertex(vtxID); err == nil { + continue + } - validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to - if len(validators) == 0 { - return fmt.Errorf("Dropping request for %s as there are no validators", vtxID) - } - validatorID := validators[0].ID() - b.RequestID++ + validators := b.BootstrapConfig.Validators.Sample(1) // validator to send request to + if len(validators) == 0 { + return fmt.Errorf("Dropping request for %s as there are no validators", vtxID) + } + validatorID := validators[0].ID() + b.RequestID++ - b.outstandingRequests.Add(validatorID, b.RequestID, vtxID) - b.needToFetch.Remove(vtxID) // maintains invariant that intersection with outstandingRequests is empty - b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors - return nil + b.outstandingRequests.Add(validatorID, b.RequestID, vtxID) + b.needToFetch.Remove(vtxID) // maintains invariant that intersection with outstandingRequests is empty + b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors + } + return b.finish() } // Process vertices @@ -164,14 +149,17 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { for toProcess.Len() > 0 { vtx := toProcess.Pop() + vtxID := vtx.ID() + switch vtx.Status() { case choices.Unknown: - if err := b.fetch(vtx.ID()); err != nil { - return err - } + b.fetch(vtxID) case choices.Rejected: + b.needToFetch.Remove(vtxID) return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID()) case choices.Processing: + b.needToFetch.Remove(vtxID) + if err := b.VtxBlocked.Push(&vertexJob{ log: b.BootstrapConfig.Context.Log, numAccepted: b.numBSVtx, @@ -216,10 +204,7 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { return err } - if numPending := b.outstandingRequests.Len(); numPending == 0 && b.processedStartingAcceptedFrontier { - return b.finish() - } - return nil + return b.fetch() } // MultiPut handles the receipt of multiple containers. Should be received in response to a GetAncestors message to [vdr] @@ -263,11 +248,6 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte } } - // Now there is one less outstanding request; send another if needed - if err := b.fetchANeededVtx(); err != nil { - return err - } - return b.process(processVertices...) } @@ -293,24 +273,19 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error { for _, vtxID := range acceptedContainerIDs.List() { if vtx, err := b.State.GetVertex(vtxID); err == nil { storedVtxs = append(storedVtxs, vtx) - } else if err := b.fetch(vtxID); err != nil { - return err + } else { + b.needToFetch.Add(vtxID) } } if err := b.process(storedVtxs...); err != nil { return err } - b.processedStartingAcceptedFrontier = true - - if numPending := b.outstandingRequests.Len(); numPending == 0 { - return b.finish() - } - return nil + return b.fetch() } // Finish bootstrapping func (b *bootstrapper) finish() error { - if b.finished { + if b.finished || b.outstandingRequests.Len() > 0 || b.needToFetch.Len() > 0 { return nil } b.BootstrapConfig.Context.Log.Info("finished fetching vertices. 
executing transaction state transitions...") From 16f006edc98aa0421fc0bb7b38c519a858042730 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 19:43:03 -0400 Subject: [PATCH 161/183] Removed no longer upheld invariant --- snow/engine/avalanche/bootstrapper.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index d8f1d6b..a9c4e64 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -50,8 +50,6 @@ type bootstrapper struct { // IDs of vertices that we will send a GetAncestors request for once we are // not at the max number of outstanding requests - // Invariant: The intersection of needToFetch and outstandingRequests is - // empty needToFetch ids.Set // Contains IDs of vertices that have recently been processed From 26edbc5e6ecfce15d9c1f49e50f3a5fd3a9aa823 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 19:57:44 -0400 Subject: [PATCH 162/183] cleaned up avalanche bootstrapping --- snow/engine/avalanche/bootstrapper.go | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index a9c4e64..1af48bf 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -151,7 +151,7 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { switch vtx.Status() { case choices.Unknown: - b.fetch(vtxID) + b.needToFetch.Add(vtxID) case choices.Rejected: b.needToFetch.Remove(vtxID) return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID()) @@ -275,10 +275,7 @@ func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error { b.needToFetch.Add(vtxID) } } - if err := b.process(storedVtxs...); err != nil { - return err - } - return b.fetch() + return b.process(storedVtxs...) 
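A self-contained sketch of the single fetch-loop shape this refactor converges on: every entry point queues missing vertices, and one loop issues requests only while under the outstanding-request cap. The types and names below are invented for illustration and are not Gecko APIs:

package example

type fetcher struct {
	needToFetch map[string]bool // vertex IDs we still have to request
	outstanding int             // requests currently in flight
	maxInFlight int             // cap on in-flight requests
}

// Queue the given IDs, then drain the pending set while under the cap. A real
// implementation would send a GetAncestors request where noted and decrement
// outstanding when the response (or a timeout) arrives.
func (f *fetcher) fetch(ids ...string) {
	for _, id := range ids {
		f.needToFetch[id] = true
	}
	for id := range f.needToFetch {
		if f.outstanding >= f.maxInFlight {
			break
		}
		delete(f.needToFetch, id)
		f.outstanding++ // send GetAncestors(id) here
	}
}
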
} // Finish bootstrapping From 6ba9d2264d62ec89c7cb9cad144df10cf84590c6 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 20:29:03 -0400 Subject: [PATCH 163/183] version bump to 0.5.7 --- node/node.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index bfeccc8..e281d58 100644 --- a/node/node.go +++ b/node/node.go @@ -57,7 +57,7 @@ var ( genesisHashKey = []byte("genesisID") // Version is the version of this code - Version = version.NewDefaultVersion("avalanche", 0, 5, 6) + Version = version.NewDefaultVersion("avalanche", 0, 5, 7) versionParser = version.NewDefaultParser() ) From a61d1638a381b3eef357eabcefd782b101eab6b4 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 20:58:35 -0400 Subject: [PATCH 164/183] only log to info based on the beacons --- network/network.go | 5 +++++ network/network_test.go | 13 +++++++++++++ network/peer.go | 9 +++++++-- node/node.go | 21 +++++++++++++++------ 4 files changed, 40 insertions(+), 8 deletions(-) diff --git a/network/network.go b/network/network.go index 86abac4..bce1216 100644 --- a/network/network.go +++ b/network/network.go @@ -105,6 +105,7 @@ type network struct { serverUpgrader Upgrader clientUpgrader Upgrader vdrs validators.Set // set of current validators in the AVAnet + beacons validators.Set // set of beacons in the AVAnet router router.Router // router must be thread safe nodeID uint32 @@ -159,6 +160,7 @@ func NewDefaultNetwork( serverUpgrader, clientUpgrader Upgrader, vdrs validators.Set, + beacons validators.Set, router router.Router, ) Network { return NewNetwork( @@ -174,6 +176,7 @@ func NewDefaultNetwork( serverUpgrader, clientUpgrader, vdrs, + beacons, router, defaultInitialReconnectDelay, defaultMaxReconnectDelay, @@ -207,6 +210,7 @@ func NewNetwork( serverUpgrader, clientUpgrader Upgrader, vdrs validators.Set, + beacons validators.Set, router router.Router, initialReconnectDelay, maxReconnectDelay time.Duration, @@ -236,6 +240,7 @@ func NewNetwork( serverUpgrader: serverUpgrader, clientUpgrader: clientUpgrader, vdrs: vdrs, + beacons: beacons, router: router, nodeID: rand.Uint32(), initialReconnectDelay: initialReconnectDelay, diff --git a/network/network_test.go b/network/network_test.go index 0fd7053..c230402 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -197,6 +197,7 @@ func TestNewDefaultNetwork(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net) @@ -280,6 +281,7 @@ func TestEstablishConnection(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net0) @@ -297,6 +299,7 @@ func TestEstablishConnection(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net1) @@ -419,6 +422,7 @@ func TestDoubleTrack(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net0) @@ -436,6 +440,7 @@ func TestDoubleTrack(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net1) @@ -559,6 +564,7 @@ func TestDoubleClose(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net0) @@ -576,6 +582,7 @@ func TestDoubleClose(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net1) @@ -704,6 +711,7 @@ func TestRemoveHandlers(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net0) @@ -721,6 +729,7 @@ func TestRemoveHandlers(t *testing.T) { 
serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net1) @@ -858,6 +867,7 @@ func TestTrackConnected(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net0) @@ -875,6 +885,7 @@ func TestTrackConnected(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net1) @@ -999,6 +1010,7 @@ func TestTrackConnectedRace(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net0) @@ -1016,6 +1028,7 @@ func TestTrackConnectedRace(t *testing.T) { serverUpgrader, clientUpgrader, vdrs, + vdrs, handler, ) assert.NotNil(t, net1) diff --git a/network/peer.go b/network/peer.go index 409d7f6..cf2f37f 100644 --- a/network/peer.go +++ b/network/peer.go @@ -470,8 +470,13 @@ func (p *peer) version(msg Msg) { } if p.net.version.Before(peerVersion) { - p.net.log.Info("peer attempting to connect with newer version %s. You may want to update your client", - peerVersion) + if p.net.beacons.Contains(p.id) { + p.net.log.Info("beacon attempting to connect with newer version %s. You may want to update your client", + peerVersion) + } else { + p.net.log.Debug("peer attempting to connect with newer version %s. You may want to update your client", + peerVersion) + } } if err := p.net.version.Compatible(peerVersion); err != nil { diff --git a/node/node.go b/node/node.go index e281d58..b24d414 100644 --- a/node/node.go +++ b/node/node.go @@ -93,6 +93,9 @@ type Node struct { // Net runs the networking stack Net network.Network + // this node's initial connections to the network + beacons validators.Set + // current validators of the network vdrs validators.Manager @@ -165,6 +168,7 @@ func (n *Node) initNetworking() error { serverUpgrader, clientUpgrader, defaultSubnetValidators, + n.beacons, n.Config.ConsensusRouter, ) @@ -278,6 +282,14 @@ func (n *Node) initNodeID() error { return nil } +// Create the IDs of the peers this node should first connect to +func (n *Node) initBeacons() { + n.beacons = validators.NewSet() + for _, peer := range n.Config.BootstrapPeers { + n.beacons.Add(validators.NewValidator(peer.ID, 1)) + } +} + // Create the vmManager and register the following vms: // AVM, Simple Payments DAG, Simple Payments Chain // The Platform VM is registered in initStaking because @@ -360,11 +372,6 @@ func (n *Node) initChains() error { return err } - beacons := validators.NewSet() - for _, peer := range n.Config.BootstrapPeers { - beacons.Add(validators.NewValidator(peer.ID, 1)) - } - genesisBytes, err := genesis.Genesis(n.Config.NetworkID) if err != nil { return err @@ -376,7 +383,7 @@ func (n *Node) initChains() error { SubnetID: platformvm.DefaultSubnetID, GenesisData: genesisBytes, // Specifies other chains to create VMAlias: platformvm.ID.String(), - CustomBeacons: beacons, + CustomBeacons: n.beacons, }) return nil @@ -551,6 +558,8 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg return fmt.Errorf("problem initializing staker ID: %w", err) } + n.initBeacons() + // Start HTTP APIs n.initAPIServer() // Start the API Server n.initKeystoreAPI() // Start the Keystore API From 63ea29064d6a1616ff490481e5b56aa4cc6f12c0 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Tue, 23 Jun 2020 21:07:35 -0400 Subject: [PATCH 165/183] Added some more numbers to interesting logs in bootstrapping --- snow/engine/avalanche/bootstrapper.go | 4 +++- snow/engine/snowman/bootstrapper.go | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git 
a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index 1af48bf..d9c07a4 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -283,7 +283,8 @@ func (b *bootstrapper) finish() error { if b.finished || b.outstandingRequests.Len() > 0 || b.needToFetch.Len() > 0 { return nil } - b.BootstrapConfig.Context.Log.Info("finished fetching vertices. executing transaction state transitions...") + b.BootstrapConfig.Context.Log.Info("finished fetching %d vertices. executing transaction state transitions...", + b.numFetched) if err := b.executeAll(b.TxBlocked, b.numBSBlockedTx); err != nil { return err @@ -325,5 +326,6 @@ func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) b.BootstrapConfig.Context.Log.Info("executed %d operations", numExecuted) } } + b.BootstrapConfig.Context.Log.Info("executed %d operations", numExecuted) return nil } diff --git a/snow/engine/snowman/bootstrapper.go b/snow/engine/snowman/bootstrapper.go index e811b81..5ee73bc 100644 --- a/snow/engine/snowman/bootstrapper.go +++ b/snow/engine/snowman/bootstrapper.go @@ -227,7 +227,8 @@ func (b *bootstrapper) finish() error { if b.finished { return nil } - b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching blocks. executing state transitions...") + b.BootstrapConfig.Context.Log.Info("bootstrapping finished fetching %d blocks. executing state transitions...", + b.numFetched) if err := b.executeAll(b.Blocked, b.numBlocked); err != nil { return err @@ -265,5 +266,6 @@ func (b *bootstrapper) executeAll(jobs *queue.Jobs, numBlocked prometheus.Gauge) b.BootstrapConfig.Context.Log.Info("executed %d blocks", numExecuted) } } + b.BootstrapConfig.Context.Log.Info("executed %d blocks", numExecuted) return nil } From c0629af46ef970cf6a038468cd24180372b1390d Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 24 Jun 2020 09:51:34 -0400 Subject: [PATCH 166/183] add comments --- snow/engine/avalanche/bootstrapper.go | 57 +++++++++++++++------------ 1 file changed, 31 insertions(+), 26 deletions(-) diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go index d9c07a4..231e9e4 100644 --- a/snow/engine/avalanche/bootstrapper.go +++ b/snow/engine/avalanche/bootstrapper.go @@ -86,14 +86,15 @@ func (b *bootstrapper) Initialize(config BootstrapConfig) error { return nil } -// CurrentAcceptedFrontier ... +// CurrentAcceptedFrontier returns the set of vertices that this node has accepted +// that have no accepted children func (b *bootstrapper) CurrentAcceptedFrontier() ids.Set { acceptedFrontier := ids.Set{} acceptedFrontier.Add(b.State.Edge()...) return acceptedFrontier } -// FilterAccepted ... +// FilterAccepted returns the IDs of vertices in [containerIDs] that this node has accepted func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { acceptedVtxIDs := ids.Set{} for _, vtxID := range containerIDs.List() { @@ -104,8 +105,9 @@ func (b *bootstrapper) FilterAccepted(containerIDs ids.Set) ids.Set { return acceptedVtxIDs } -// Fetch vertices and their ancestors from the set of vertices that are needed -// to be fetched. +// Add the vertices in [vtxIDs] to the set of vertices that we need to fetch, +// and then fetch vertices (and their ancestors) until either there are no more +// to fetch or we are at the maximum number of outstanding requests. func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error { b.needToFetch.Add(vtxIDs...) 
for b.needToFetch.Len() > 0 && b.outstandingRequests.Len() < common.MaxOutstandingRequests { @@ -130,35 +132,38 @@ func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error { b.RequestID++ b.outstandingRequests.Add(validatorID, b.RequestID, vtxID) - b.needToFetch.Remove(vtxID) // maintains invariant that intersection with outstandingRequests is empty + b.needToFetch.Remove(vtxID) b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors } return b.finish() } -// Process vertices +// Process the vertices in [vtxs]. func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { + // Vertices that we need to process. Store them in a heap for de-deduplication + // and so we always process vertices further down in the DAG first. This helps + // to reduce the number of repeated DAG traversals. toProcess := newMaxVertexHeap() for _, vtx := range vtxs { - if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process if we haven't already + if _, ok := b.processedCache.Get(vtx.ID()); !ok { // only process a vertex if we haven't already toProcess.Push(vtx) } } - for toProcess.Len() > 0 { - vtx := toProcess.Pop() + for toProcess.Len() > 0 { // While there are unprocessed vertices + vtx := toProcess.Pop() // Get an unknown vertex or one furthest down the DAG vtxID := vtx.ID() switch vtx.Status() { case choices.Unknown: - b.needToFetch.Add(vtxID) + b.needToFetch.Add(vtxID) // We don't have this vertex locally. Mark that we need to fetch it. case choices.Rejected: - b.needToFetch.Remove(vtxID) + b.needToFetch.Remove(vtxID) // We have this vertex locally. Mark that we don't need to fetch it. return fmt.Errorf("tried to accept %s even though it was previously rejected", vtx.ID()) case choices.Processing: b.needToFetch.Remove(vtxID) - if err := b.VtxBlocked.Push(&vertexJob{ + if err := b.VtxBlocked.Push(&vertexJob{ // Add to queue of vertices to execute when bootstrapping finishes. log: b.BootstrapConfig.Context.Log, numAccepted: b.numBSVtx, numDropped: b.numBSDroppedVtx, @@ -172,7 +177,7 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { } else { b.BootstrapConfig.Context.Log.Verbo("couldn't push to vtxBlocked: %s", err) } - for _, tx := range vtx.Txs() { + for _, tx := range vtx.Txs() { // Add transactions to queue of transactions to execute when bootstrapping finishes. 
if err := b.TxBlocked.Push(&txJob{ log: b.BootstrapConfig.Context.Log, numAccepted: b.numBSTx, @@ -184,12 +189,12 @@ func (b *bootstrapper) process(vtxs ...avalanche.Vertex) error { b.BootstrapConfig.Context.Log.Verbo("couldn't push to txBlocked: %s", err) } } - for _, parent := range vtx.Parents() { - if _, ok := b.processedCache.Get(parent.ID()); !ok { // already processed this + for _, parent := range vtx.Parents() { // Process the parents of this vertex (traverse up the DAG) + if _, ok := b.processedCache.Get(parent.ID()); !ok { // But only if we haven't processed the parent toProcess.Push(parent) } } - if vtx.Height()%stripeDistance < stripeWidth { + if vtx.Height()%stripeDistance < stripeWidth { // See comment for stripeDistance b.processedCache.Put(vtx.ID(), nil) } } @@ -218,12 +223,12 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte // Make sure this is in response to a request we made neededVtxID, needed := b.outstandingRequests.Remove(vdr, requestID) - if !needed { // this message isn't in response to a request we made + if !needed { // this message isn't in response to a request we made, or is in response to a request that timed out b.BootstrapConfig.Context.Log.Debug("received unexpected MultiPut from %s with ID %d", vdr, requestID) return nil } - neededVtx, err := b.State.ParseVertex(vtxs[0]) // the vertex we requested + neededVtx, err := b.State.ParseVertex(vtxs[0]) // first vertex should be the one we requested in GetAncestors request if err != nil { b.BootstrapConfig.Context.Log.Debug("Failed to parse requested vertex %s: %w", neededVtxID, err) b.BootstrapConfig.Context.Log.Verbo("vertex: %s", formatting.DumpBytes{Bytes: vtxs[0]}) @@ -233,7 +238,7 @@ func (b *bootstrapper) MultiPut(vdr ids.ShortID, requestID uint32, vtxs [][]byte return b.fetch(neededVtxID) } - processVertices := make([]avalanche.Vertex, 1, len(vtxs)) + processVertices := make([]avalanche.Vertex, 1, len(vtxs)) // Process all of the vertices in this message processVertices[0] = neededVtx for _, vtxBytes := range vtxs[1:] { // Parse/persist all the vertices @@ -260,38 +265,38 @@ func (b *bootstrapper) GetAncestorsFailed(vdr ids.ShortID, requestID uint32) err return b.fetch(vtxID) } -// ForceAccepted ... +// ForceAccepted starts bootstrapping. Process the vertices in [accepterContainerIDs]. func (b *bootstrapper) ForceAccepted(acceptedContainerIDs ids.Set) error { if err := b.VM.Bootstrapping(); err != nil { return fmt.Errorf("failed to notify VM that bootstrapping has started: %w", err) } - storedVtxs := make([]avalanche.Vertex, 0, acceptedContainerIDs.Len()) + toProcess := make([]avalanche.Vertex, 0, acceptedContainerIDs.Len()) for _, vtxID := range acceptedContainerIDs.List() { if vtx, err := b.State.GetVertex(vtxID); err == nil { - storedVtxs = append(storedVtxs, vtx) + toProcess = append(toProcess, vtx) // Process this vertex. } else { - b.needToFetch.Add(vtxID) + b.needToFetch.Add(vtxID) // We don't have this vertex. Mark that we have to fetch it. } } - return b.process(storedVtxs...) + return b.process(toProcess...) } // Finish bootstrapping func (b *bootstrapper) finish() error { + // If there are outstanding requests for vertices or we still need to fetch vertices, we can't finish if b.finished || b.outstandingRequests.Len() > 0 || b.needToFetch.Len() > 0 { return nil } + b.BootstrapConfig.Context.Log.Info("finished fetching %d vertices. 
executing transaction state transitions...", b.numFetched)
-
 	if err := b.executeAll(b.TxBlocked, b.numBSBlockedTx); err != nil {
 		return err
 	}
 
 	b.BootstrapConfig.Context.Log.Info("executing vertex state transitions...")
-
 	if err := b.executeAll(b.VtxBlocked, b.numBSBlockedVtx); err != nil {
 		return err
 	}

From 5c618d6c1bc7901bfa06e4458ffa85d6c769fed3 Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 24 Jun 2020 10:09:36 -0400
Subject: [PATCH 167/183] lower log level

---
 nat/nat.go | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nat/nat.go b/nat/nat.go
index 8c351c7..0cae334 100644
--- a/nat/nat.go
+++ b/nat/nat.go
@@ -109,7 +109,7 @@ func (dev *Mapper) keepPortMapping(mappedPort chan<- uint16, protocol string,
 			dev.log.Error("Renewing port mapping from external port %d to internal port %d failed with %s",
 				intPort, extPort, err)
 		} else {
-			dev.log.Info("Renewed port mapping from external port %d to internal port %d.",
+			dev.log.Debug("Renewed port mapping from external port %d to internal port %d.",
 				intPort, extPort)
 		}

From eefaed4b1ad9816841a56bd3f0bd1b64f2fe26d7 Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Wed, 24 Jun 2020 11:41:22 -0400
Subject: [PATCH 168/183] added error reporting to nat.Map

---
 main/main.go   | 12 +++++++-----
 main/params.go |  4 ++--
 nat/nat.go     |  9 +++++++--
 3 files changed, 16 insertions(+), 9 deletions(-)

diff --git a/main/main.go b/main/main.go
index 71de46f..250c1e0 100644
--- a/main/main.go
+++ b/main/main.go
@@ -67,13 +67,15 @@ func main() {
 	mapper := nat.NewPortMapper(log, Config.Nat)
 	defer mapper.UnmapAllPorts()
 
-	Config.StakingIP.Port = mapper.Map("TCP", Config.StakingLocalPort, "gecko-staking") // Open staking port
-	if Config.HTTPHost != "127.0.0.1" && Config.HTTPHost != "localhost" { // Open HTTP port iff HTTP server not listening on localhost
-		mapper.Map("TCP", Config.HTTPPort, "gecko-http")
+	port, err := mapper.Map("TCP", Config.StakingLocalPort, "gecko-staking") // Open staking port
+	if err == nil {
+		Config.StakingIP.Port = port
+	} else {
+		log.Warn("NAT traversal has failed. The node will be able to connect to fewer nodes.")
 	}
 
-	if Config.StakingIP.IsZero() {
-		log.Warn("NAT traversal has failed. The node will be able to connect to less nodes.")
+	if Config.HTTPHost != "127.0.0.1" && Config.HTTPHost != "localhost" { // Open HTTP port iff HTTP server not listening on localhost
+		_, _ = mapper.Map("TCP", Config.HTTPPort, "gecko-http")
 	}
 
 	node := node.Node{}
diff --git a/main/params.go b/main/params.go
index 03b8459..47ea5f8 100644
--- a/main/params.go
+++ b/main/params.go
@@ -284,16 +284,16 @@ func init() {
 		Config.DB = memdb.New()
 	}
 
-	Config.Nat = nat.GetRouter()
-
 	var ip net.IP
 	// If public IP is not specified, get it using shell command dig
 	if *consensusIP == "" {
+		Config.Nat = nat.GetRouter()
 		ip, err = Config.Nat.ExternalIP()
 		if err != nil {
 			ip = net.IPv4zero // Couldn't get my IP...set to 0.0.0.0
 		}
 	} else {
+		Config.Nat = nat.NewNoRouter()
 		ip = net.ParseIP(*consensusIP)
 	}
diff --git a/nat/nat.go b/nat/nat.go
index 8c351c7..bd79fee 100644
--- a/nat/nat.go
+++ b/nat/nat.go
@@ -4,6 +4,7 @@
 package nat
 
 import (
+	"errors"
 	"net"
 	"sync"
 	"time"
@@ -63,12 +64,16 @@ func NewPortMapper(log logging.Logger, r Router) Mapper {
 // Map sets up port mapping using given protocol, internal and external ports
 // and returns the final port mapped. It returns 0 if mapping failed after the
 // maximum number of retries
-func (dev *Mapper) Map(protocol string, intPort uint16, desc string) uint16 {
+func (dev *Mapper) Map(protocol string, intPort uint16, desc string) (uint16, error) {
 	mappedPort := make(chan uint16)
 
 	go dev.keepPortMapping(mappedPort, protocol, intPort, desc)
 
-	return <-mappedPort
+	port := <-mappedPort
+	if port == 0 {
+		return 0, errors.New("failed to map port")
+	}
+	return port, nil
 }
 
 // keepPortMapping runs in the background to keep a port mapped. It renews the

From 481f3c41eca27d169574297898969b948b4330f8 Mon Sep 17 00:00:00 2001
From: StephenButtolph
Date: Wed, 24 Jun 2020 12:01:45 -0400
Subject: [PATCH 169/183] removed duplicated call

---
 snow/engine/avalanche/bootstrapper.go | 1 -
 1 file changed, 1 deletion(-)

diff --git a/snow/engine/avalanche/bootstrapper.go b/snow/engine/avalanche/bootstrapper.go
index 231e9e4..200f84f 100644
--- a/snow/engine/avalanche/bootstrapper.go
+++ b/snow/engine/avalanche/bootstrapper.go
@@ -132,7 +132,6 @@ func (b *bootstrapper) fetch(vtxIDs ...ids.ID) error {
 		b.RequestID++
 		b.outstandingRequests.Add(validatorID, b.RequestID, vtxID)
-		b.needToFetch.Remove(vtxID)
 		b.BootstrapConfig.Sender.GetAncestors(validatorID, b.RequestID, vtxID) // request vertex and ancestors
 	}
 	return b.finish()

From 3ca3a7377ab909276147d74b125aee803a82c8ac Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 24 Jun 2020 12:28:16 -0400
Subject: [PATCH 170/183] return 503 rather than 500 when unhealthy

---
 api/health/service.go | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/api/health/service.go b/api/health/service.go
index 27a15f7..b59cf7e 100644
--- a/api/health/service.go
+++ b/api/health/service.go
@@ -39,11 +39,11 @@ func (h *Health) Handler() *common.HTTPHandler {
 	newServer.RegisterCodec(codec, "application/json;charset=UTF-8")
 	newServer.RegisterService(h, "health")
 	handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
-		if r.Method == http.MethodGet { // GET request --> return 200 if getLiveness returns true, else 500
+		if r.Method == http.MethodGet { // GET request --> return 200 if getLiveness returns true, else 503
 			if _, healthy := h.health.Results(); healthy {
 				w.WriteHeader(http.StatusOK)
 			} else {
-				w.WriteHeader(http.StatusInternalServerError)
+				w.WriteHeader(http.StatusServiceUnavailable)
 			}
 		} else {
 			newServer.ServeHTTP(w, r) // Other request --> use JSON RPC

From 4c759890562f5472a923f9e34aa6d81466be889c Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 24 Jun 2020 13:27:10 -0400
Subject: [PATCH 171/183] don't ignore errors when adding routes; improve logging

---
 node/node.go | 107 +++++++++++++++++++++++++++++++++------------------
 1 file changed, 69 insertions(+), 38 deletions(-)

diff --git a/node/node.go b/node/node.go
index b24d414..31a1151 100644
--- a/node/node.go
+++ b/node/node.go
@@ -443,66 +443,83 @@ func (n *Node) initSharedMemory() {
 // initKeystoreAPI initializes the keystore service
 // Assumes n.APIServer is already set
-func (n *Node) initKeystoreAPI() {
-	n.Log.Info("initializing Keystore API")
+func (n *Node) initKeystoreAPI() error {
+	n.Log.Info("initializing keystore")
 	keystoreDB := prefixdb.New([]byte("keystore"), n.DB)
 	n.keystoreServer.Initialize(n.Log, keystoreDB)
 	keystoreHandler := n.keystoreServer.CreateHandler()
-	if n.Config.KeystoreAPIEnabled {
-		n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog)
+	if !n.Config.KeystoreAPIEnabled {
+		n.Log.Info("skipping keystore API initialization because it has been disabled")
+		return nil
 	}
+	n.Log.Info("initializing keystore API")
+	return n.APIServer.AddRoute(keystoreHandler, &sync.RWMutex{}, "keystore", "", n.HTTPLog)
+
 }
 
 // initMetricsAPI initializes the Metrics API
 // Assumes n.APIServer is already set
-func (n *Node) initMetricsAPI() {
-	n.Log.Info("initializing Metrics API")
+func (n *Node) initMetricsAPI() error {
+	n.Log.Info("initializing metrics")
 	registry, handler := metrics.NewService()
 	if n.Config.MetricsAPIEnabled {
-		n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog)
+		n.Log.Info("initializing metrics API")
+		if err := n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog); err != nil {
+			return err
+		}
+	} else {
+		n.Log.Info("skipping metrics API initialization because it has been disabled")
 	}
 	n.Config.ConsensusParams.Metrics = registry
+	return nil
 }
 
 // initAdminAPI initializes the Admin API service
 // Assumes n.log, n.chainManager, and n.ValidatorAPI already initialized
-func (n *Node) initAdminAPI() {
-	if n.Config.AdminAPIEnabled {
-		n.Log.Info("initializing Admin API")
-		service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
-		n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
+func (n *Node) initAdminAPI() error {
+	if !n.Config.AdminAPIEnabled {
+		n.Log.Info("skipping admin API initialization because it has been disabled")
+		return nil
 	}
+	n.Log.Info("initializing admin API")
+	service := admin.NewService(Version, n.ID, n.Config.NetworkID, n.Log, n.chainManager, n.Net, &n.APIServer)
+	return n.APIServer.AddRoute(service, &sync.RWMutex{}, "admin", "", n.HTTPLog)
 }
 
-func (n *Node) initInfoAPI() {
-	if n.Config.InfoAPIEnabled {
-		n.Log.Info("initializing Info API")
-		service := info.NewService(n.Log, Version, n.ID, n.Config.NetworkID, n.chainManager, n.Net)
-		n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "", n.HTTPLog)
+func (n *Node) initInfoAPI() error {
+	if !n.Config.InfoAPIEnabled {
+		n.Log.Info("skipping info API initialization because it has been disabled")
+		return nil
 	}
+	n.Log.Info("initializing info API")
+	service := info.NewService(n.Log, Version, n.ID, n.Config.NetworkID, n.chainManager, n.Net)
+	return n.APIServer.AddRoute(service, &sync.RWMutex{}, "info", "", n.HTTPLog)
+
 }
 
 // initHealthAPI initializes the Health API service
-// Assumes n.Log, n.ConsensusAPI, and n.ValidatorAPI already initialized
-func (n *Node) initHealthAPI() {
+// Assumes n.Log, n.Net, n.APIServer, n.HTTPLog already initialized
+func (n *Node) initHealthAPI() error {
 	if !n.Config.HealthAPIEnabled {
-		return
+		n.Log.Info("skipping health API initialization because it has been disabled")
+		return nil
 	}
-
 	n.Log.Info("initializing Health API")
 	service := health.NewService(n.Log)
 	service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute)
-	n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
+	return n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog)
 }
 
 // initIPCAPI initializes the IPC API service
 // Assumes n.log and n.chainManager already initialized
-func (n *Node) initIPCAPI() {
-	if n.Config.IPCEnabled {
-		n.Log.Info("initializing IPC API")
-		service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer)
-		n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog)
+func (n *Node) initIPCAPI() error {
+	if !n.Config.IPCEnabled {
+		n.Log.Info("skipping ipc API initialization because it has been disabled")
+		return nil
 	}
+	n.Log.Info("initializing ipc API")
+	service := ipcs.NewService(n.Log, n.chainManager, n.DecisionDispatcher, &n.APIServer)
+	return n.APIServer.AddRoute(service, &sync.RWMutex{}, "ipcs", "", n.HTTPLog)
 }
 
 // Give chains and VMs aliases as specified by the genesis information
@@ -561,9 +578,13 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
 	n.initBeacons()
 
 	// Start HTTP APIs
-	n.initAPIServer()   // Start the API Server
-	n.initKeystoreAPI() // Start the Keystore API
-	n.initMetricsAPI()  // Start the Metrics API
+	n.initAPIServer() // Start the API Server
+	if err := n.initKeystoreAPI(); err != nil { // Start the Keystore API
+		return fmt.Errorf("couldn't initialize keystore API: %w", err)
+	}
+	if err := n.initMetricsAPI(); err != nil { // Start the Metrics API
+		return fmt.Errorf("couldn't initialize metrics API: %w", err)
+	}
 
 	// initialize shared memory
 	n.initSharedMemory()
@@ -579,15 +600,25 @@ func (n *Node) Initialize(Config *Config, logger logging.Logger, logFactory logg
 	n.initEventDispatcher() // Set up the event dispatcher
 	n.initChainManager()    // Set up the chain manager
 
-	n.initAdminAPI()  // Start the Admin API
-	n.initInfoAPI()   // Start the Info API
-	n.initHealthAPI() // Start the Health API
-	n.initIPCAPI()    // Start the IPC API
-
-	if err := n.initAliases(); err != nil { // Set up aliases
-		return err
+	if err := n.initAdminAPI(); err != nil { // Start the Admin API
+		return fmt.Errorf("couldn't initialize admin API: %w", err)
 	}
-	return n.initChains() // Start the Platform chain
+	if err := n.initInfoAPI(); err != nil { // Start the Info API
+		return fmt.Errorf("couldn't initialize info API: %w", err)
+	}
+	if err := n.initHealthAPI(); err != nil { // Start the Health API
+		return fmt.Errorf("couldn't initialize health API: %w", err)
+	}
+	if err := n.initIPCAPI(); err != nil { // Start the IPC API
+		return fmt.Errorf("couldn't initialize ipc API: %w", err)
+	}
+	if err := n.initAliases(); err != nil { // Set up aliases
+		return fmt.Errorf("couldn't initialize aliases: %w", err)
+	}
+	if err := n.initChains(); err != nil { // Start the Platform chain
+		return fmt.Errorf("couldn't initialize chains: %w", err)
+	}
+	return nil
 }
 
 // Shutdown this node

From c4605b2f2b23511fcbafd9b35ff6cb79dfdd0b4d Mon Sep 17 00:00:00 2001
From: Dan Laine
Date: Wed, 24 Jun 2020 16:49:11 -0400
Subject: [PATCH 172/183] add IsBootstrapped method to engine and chain manager

---
 chains/manager.go                   | 18 +++++++++++++++++-
 snow/engine/avalanche/transitive.go |  5 +++++
 snow/engine/common/engine.go        |  3 +++
 snow/engine/common/test_engine.go   |  8 ++++++++
 snow/engine/snowman/transitive.go   |  5 +++++
 5 files changed, 38 insertions(+), 1 deletion(-)

diff --git a/chains/manager.go b/chains/manager.go
index 78470b4..2751250 100644
--- a/chains/manager.go
+++ b/chains/manager.go
@@ -76,6 +76,9 @@ type Manager interface {
 	// Add an alias to a chain
 	Alias(ids.ID, string) error
 
+	// Returns true iff the chain with the given ID exists and is finished bootstrapping
+	IsBootstrapped(ids.ID) bool
+
 	Shutdown()
 }
@@ -114,6 +117,10 @@ type manager struct {
 	keystore     *keystore.Keystore
 	sharedMemory *atomic.SharedMemory
 
+	// Key: Chain's ID
+	// Value: The chain
+	chains map[[32]byte]common.Engine
+
 	unblocked     bool
 	blockedChains []ChainParameters
 }
@@ -165,6 +172,7 @@ func New(
 		server:       server,
 		keystore:     keystore,
 		sharedMemory: sharedMemory,
+		chains:       make(map[[32]byte]common.Engine),
 	}
 	m.Initialize()
 	return m
@@ -454,7 +462,7 @@ func (m *manager) createAvalancheChain(
 			eng: &engine,
 		})
 	}
-
+
m.chains[ctx.ChainID.Key()] = &engine return nil } @@ -546,9 +554,17 @@ func (m *manager) createSnowmanChain( eng: &engine, }) } + m.chains[ctx.ChainID.Key()] = &engine return nil } +func (m *manager) IsBootstrapped(id ids.ID) bool { + if chain, exists := m.chains[id.Key()]; exists && chain.IsBootstrapped() { + return true + } + return false +} + // Shutdown stops all the chains func (m *manager) Shutdown() { m.chainRouter.Shutdown() } diff --git a/snow/engine/avalanche/transitive.go b/snow/engine/avalanche/transitive.go index 7412276..de714c7 100644 --- a/snow/engine/avalanche/transitive.go +++ b/snow/engine/avalanche/transitive.go @@ -521,3 +521,8 @@ func (t *Transitive) sendRequest(vdr ids.ShortID, vtxID ids.ID) { t.numVtxRequests.Set(float64(t.vtxReqs.Len())) // Tracks performance statistics } + +// IsBootstrapped returns true iff this chain is done bootstrapping +func (t *Transitive) IsBootstrapped() bool { + return t.bootstrapped +} diff --git a/snow/engine/common/engine.go b/snow/engine/common/engine.go index 3be916d..7ca5622 100644 --- a/snow/engine/common/engine.go +++ b/snow/engine/common/engine.go @@ -14,6 +14,9 @@ type Engine interface { // Return the context of the chain this engine is working on Context() *snow.Context + + // Returns true iff the chain is done bootstrapping + IsBootstrapped() bool } // Handler defines the functions that are acted on the node diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index cf63df8..6e41dce 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -15,6 +15,7 @@ import ( type EngineTest struct { T *testing.T + Bootstrapped, CantStartup, CantGossip, CantShutdown, @@ -58,6 +59,8 @@ var _ Engine = &EngineTest{} // Default ... func (e *EngineTest) Default(cant bool) { + e.Bootstrapped = cant + e.CantStartup = cant e.CantGossip = cant e.CantShutdown = cant @@ -354,3 +357,8 @@ func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerI } return nil } + +// IsBootstrapped ... 
+func (e *EngineTest) IsBootstrapped() bool { + return e.Bootstrapped +} diff --git a/snow/engine/snowman/transitive.go b/snow/engine/snowman/transitive.go index ab4a881..6b57a8f 100644 --- a/snow/engine/snowman/transitive.go +++ b/snow/engine/snowman/transitive.go @@ -647,3 +647,8 @@ func (t *Transitive) deliver(blk snowman.Block) error { t.numBlockedBlk.Set(float64(t.pending.Len())) return t.errs.Err } + +// IsBootstrapped returns true iff this chain is done bootstrapping +func (t *Transitive) IsBootstrapped() bool { + return t.bootstrapped +} From fde0ce73279ed991b154bca5862190d62457d654 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 24 Jun 2020 18:21:09 -0400 Subject: [PATCH 173/183] add isBootstrapped method to info API --- api/info/service.go | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/api/info/service.go b/api/info/service.go index 2c903e2..51324b1 100644 --- a/api/info/service.go +++ b/api/info/service.go @@ -4,6 +4,7 @@ package info import ( + "fmt" "net/http" "github.com/gorilla/rpc/v2" @@ -129,3 +130,31 @@ func (service *Info) Peers(_ *http.Request, _ *struct{}, reply *PeersReply) erro reply.Peers = service.networking.Peers() return nil } + +// IsBootstrappedArgs are the arguments for calling IsBootstrapped +type IsBootstrappedArgs struct { + // Alias of the chain + // Can also be the string representation of the chain's ID + Chain string `json:"chain"` +} + +// IsBootstrappedResponse are the results from calling IsBootstrapped +type IsBootstrappedResponse struct { + // True iff the chain exists and is done bootstrapping + IsBootstrapped bool `json:"isBootstrapped"` +} + +// IsBootstrapped returns nil and sets [reply.IsBootstrapped] == true iff [args.Chain] exists and is done bootstrapping +// Returns an error if the chain doesn't exist +func (service *Info) IsBootstrapped(_ *http.Request, args *IsBootstrappedArgs, reply *IsBootstrappedResponse) error { + service.log.Info("Info: IsBootstrapped called") + if args.Chain == "" { + return fmt.Errorf("argument 'chain' not given") + } + chainID, err := service.chainManager.Lookup(args.Chain) + if err != nil { + return fmt.Errorf("there is no chain with alias/ID '%s'", args.Chain) + } + reply.IsBootstrapped = service.chainManager.IsBootstrapped(chainID) + return nil +} From 76d82288e16d93e486a22c10a910f4e43ef6c921 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Wed, 24 Jun 2020 18:28:33 -0400 Subject: [PATCH 174/183] add isBootstrapped to mockManager --- chains/mock_manager.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/chains/mock_manager.go b/chains/mock_manager.go index 7c0f86b..33c5535 100644 --- a/chains/mock_manager.go +++ b/chains/mock_manager.go @@ -35,3 +35,6 @@ func (mm MockManager) Alias(ids.ID, string) error { return nil } // Shutdown ... func (mm MockManager) Shutdown() {} + +// IsBootstrapped ... 
+func (mm MockManager) IsBootstrapped(ids.ID) bool { return false } From ebf1ae152b5dafcbdb81941a2f96457cabe01041 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Mon, 29 Jun 2020 18:07:48 -0400 Subject: [PATCH 175/183] grab context of chain before calling isBootstrapped() to avoid race condition --- chains/manager.go | 23 +++++++++++++---------- 1 file changed, 13 insertions(+), 10 deletions(-) diff --git a/chains/manager.go b/chains/manager.go index 2751250..114cbd9 100644 --- a/chains/manager.go +++ b/chains/manager.go @@ -119,7 +119,7 @@ type manager struct { // Key: Chain's ID // Value: The chain - chains map[[32]byte]common.Engine + chains map[[32]byte]*router.Handler unblocked bool blockedChains []ChainParameters @@ -138,7 +138,7 @@ func New( decisionEvents *triggers.EventDispatcher, consensusEvents *triggers.EventDispatcher, db database.Database, - router router.Router, + rtr router.Router, net network.Network, consensusParams avacon.Parameters, validators validators.Manager, @@ -152,7 +152,7 @@ func New( timeoutManager.Initialize(requestTimeout) go log.RecoverAndPanic(timeoutManager.Dispatch) - router.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout) + rtr.Initialize(log, &timeoutManager, gossipFrequency, shutdownTimeout) m := &manager{ stakingEnabled: stakingEnabled, @@ -162,7 +162,7 @@ func New( decisionEvents: decisionEvents, consensusEvents: consensusEvents, db: db, - chainRouter: router, + chainRouter: rtr, net: net, timeoutManager: &timeoutManager, consensusParams: consensusParams, @@ -172,7 +172,7 @@ func New( server: server, keystore: keystore, sharedMemory: sharedMemory, - chains: make(map[[32]byte]common.Engine), + chains: make(map[[32]byte]*router.Handler), } m.Initialize() return m @@ -462,7 +462,7 @@ func (m *manager) createAvalancheChain( eng: &engine, }) } - m.chains[ctx.ChainID.Key()] = &engine + m.chains[ctx.ChainID.Key()] = handler return nil } @@ -554,15 +554,18 @@ func (m *manager) createSnowmanChain( eng: &engine, }) } - m.chains[ctx.ChainID.Key()] = &engine + m.chains[ctx.ChainID.Key()] = handler return nil } func (m *manager) IsBootstrapped(id ids.ID) bool { - if chain, exists := m.chains[id.Key()]; exists && chain.IsBootstrapped() { - return true + chain, exists := m.chains[id.Key()] + if !exists { + return false } - return false + chain.Context().Lock.Lock() + defer chain.Context().Lock.Unlock() + return chain.Engine().IsBootstrapped() } // Shutdown stops all the chains From efaf2f147ab6d845829c04ac7cda946f4132deff Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 30 Jun 2020 13:28:56 -0400 Subject: [PATCH 176/183] change Check to an interface and create implementing type check --- api/health/checks.go | 44 +++++++++++++++++++++++++++---------------- api/health/service.go | 14 +++++++------- 2 files changed, 35 insertions(+), 23 deletions(-) diff --git a/api/health/checks.go b/api/health/checks.go index d11544b..262f0f6 100644 --- a/api/health/checks.go +++ b/api/health/checks.go @@ -20,36 +20,48 @@ type CheckFn func() (interface{}, error) // Check defines a single health check that we want to monitor and consider as // part of our wider healthiness -type Check struct { +type Check interface { // Name is the identifier for this check and must be unique among all Checks - Name string + Name() string - // CheckFn is the function to call to perform the the health check - CheckFn CheckFn + // Execute performs the health check. It returns nil if the check passes. 
+ // It can also return additional information to marshal and display to the caller + Execute() (interface{}, error) // ExecutionPeriod is the duration to wait between executions of this Check - ExecutionPeriod time.Duration + ExecutionPeriod() time.Duration // InitialDelay is the duration to wait before executing the first time - InitialDelay time.Duration + InitialDelay() time.Duration // InitiallyPassing is whether or not to consider the Check healthy before the // initial execution - InitiallyPassing bool + InitiallyPassing() bool } -// gosundheitCheck implements the health.Check interface backed by a CheckFn -type gosundheitCheck struct { - name string - checkFn CheckFn +// check implements the Check interface +type check struct { + name string + checkFn CheckFn + executionPeriod, initialDelay time.Duration + initiallyPassing bool } -// Name implements the health.Check interface by returning a unique name -func (c gosundheitCheck) Name() string { return c.name } +// Name is the identifier for this check and must be unique among all Checks +func (c check) Name() string { return c.name } -// Execute implements the health.Check interface by executing the checkFn and -// returning the results -func (c gosundheitCheck) Execute() (interface{}, error) { return c.checkFn() } +// Execute performs the health check. It returns nil if the check passes. +// It can also return additional information to marshal and display to the caller +func (c check) Execute() (interface{}, error) { return c.checkFn() } + +// ExecutionPeriod is the duration to wait between executions of this Check +func (c check) ExecutionPeriod() time.Duration { return c.executionPeriod } + +// InitialDelay is the duration to wait before executing the first time +func (c check) InitialDelay() time.Duration { return c.initialDelay } + +// InitiallyPassing is whether or not to consider the Check healthy before the initial execution +func (c check) InitiallyPassing() bool { return c.initiallyPassing } // Heartbeater provides a getter to the most recently observed heartbeat type Heartbeater interface { diff --git a/api/health/service.go b/api/health/service.go index b59cf7e..9c2e479 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -17,7 +17,7 @@ import ( ) // defaultCheckOpts is a Check whose properties represent a default Check -var defaultCheckOpts = Check{ExecutionPeriod: time.Minute} +var defaultCheckOpts = check{executionPeriod: time.Minute} // Health observes a set of vital signs and makes them available through an HTTP // API. 
@@ -61,18 +61,18 @@ func (h *Health) RegisterHeartbeat(name string, hb Heartbeater, max time.Duratio // RegisterCheckFunc adds a Check with default options and the given CheckFn func (h *Health) RegisterCheckFunc(name string, checkFn CheckFn) error { check := defaultCheckOpts - check.Name = name - check.CheckFn = checkFn + check.name = name + check.checkFn = checkFn return h.RegisterCheck(check) } // RegisterCheck adds the given Check func (h *Health) RegisterCheck(c Check) error { return h.health.RegisterCheck(&health.Config{ - InitialDelay: c.InitialDelay, - ExecutionPeriod: c.ExecutionPeriod, - InitiallyPassing: c.InitiallyPassing, - Check: gosundheitCheck{c.Name, c.CheckFn}, + InitialDelay: c.InitialDelay(), + ExecutionPeriod: c.ExecutionPeriod(), + InitiallyPassing: c.InitiallyPassing(), + Check: c, }) } From 1be948f38c211a90556acab0217b0c50b9658712 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 30 Jun 2020 14:31:30 -0400 Subject: [PATCH 177/183] add health check to ensure default subnet is bootstrapped. add monotonicHealthCheck type --- api/health/checks.go | 18 ++++++++++++++++++ api/health/service.go | 9 +++++++++ node/node.go | 25 +++++++++++++++++++++++++ 3 files changed, 52 insertions(+) diff --git a/api/health/checks.go b/api/health/checks.go index 262f0f6..8e56dc7 100644 --- a/api/health/checks.go +++ b/api/health/checks.go @@ -63,6 +63,24 @@ func (c check) InitialDelay() time.Duration { return c.initialDelay } // InitiallyPassing is whether or not to consider the Check healthy before the initial execution func (c check) InitiallyPassing() bool { return c.initiallyPassing } +// monotonicCheck is a check that will run until it passes once, and after that it will +// always pass without performing any logic. Used for bootstrapping, for example. 
+type monotonicCheck struct { + passed bool + check +} + +func (mc monotonicCheck) Execute() (interface{}, error) { + if mc.passed { + return nil, nil + } + details, pass := mc.Execute() + if pass == nil { + mc.passed = true + } + return details, pass +} + // Heartbeater provides a getter to the most recently observed heartbeat type Heartbeater interface { GetHeartbeat() int64 diff --git a/api/health/service.go b/api/health/service.go index 9c2e479..522ad62 100644 --- a/api/health/service.go +++ b/api/health/service.go @@ -66,6 +66,15 @@ func (h *Health) RegisterCheckFunc(name string, checkFn CheckFn) error { return h.RegisterCheck(check) } +// RegisterMonotonicCheckFunc adds a Check with default options and the given CheckFn +// After it passes once, its logic (checkFunc) is never run again; it just passes +func (h *Health) RegisterMonotonicCheckFunc(name string, checkFn CheckFn) error { + check := monotonicCheck{check: defaultCheckOpts} + check.name = name + check.checkFn = checkFn + return h.RegisterCheck(check) +} + // RegisterCheck adds the given Check func (h *Health) RegisterCheck(c Check) error { return h.health.RegisterCheck(&health.Config{ diff --git a/node/node.go b/node/node.go index 31a1151..f05edc4 100644 --- a/node/node.go +++ b/node/node.go @@ -7,6 +7,7 @@ import ( "crypto/tls" "crypto/x509" "encoding/pem" + "errors" "fmt" "io/ioutil" "net" @@ -507,6 +508,30 @@ func (n *Node) initHealthAPI() error { n.Log.Info("initializing Health API") service := health.NewService(n.Log) service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute) + isBootstrappedFunc := func() (interface{}, error) { + pChainID, err := n.chainManager.Lookup("P") + if err != nil { + return nil, errors.New("P-Chain not created") + } else if !n.chainManager.IsBootstrapped(pChainID) { + return nil, errors.New("P-Chain not bootstrapped") + } + xChainID, err := n.chainManager.Lookup("X") + if err != nil { + return nil, errors.New("X-Chain not created") + } else if !n.chainManager.IsBootstrapped(xChainID) { + return nil, errors.New("X-Chain not bootstrapped") + } + cChainID, err := n.chainManager.Lookup("C") + if err != nil { + return nil, errors.New("C-Chain not created") + } else if !n.chainManager.IsBootstrapped(cChainID) { + return nil, errors.New("C-Chain not bootstrapped") + } + return nil, nil + } + if err := service.RegisterMonotonicCheckFunc("defaultChainsBootstrapped", isBootstrappedFunc); err != nil { + return err + } return n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog) } From d3a4dcffef6dac0e78c83e8e3f1d7410c49adf86 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 30 Jun 2020 16:51:41 -0400 Subject: [PATCH 178/183] add comment; handle error on health check registration --- node/node.go | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/node/node.go b/node/node.go index f05edc4..1d41e27 100644 --- a/node/node.go +++ b/node/node.go @@ -507,7 +507,9 @@ func (n *Node) initHealthAPI() error { } n.Log.Info("initializing Health API") service := health.NewService(n.Log) - service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute) + if err := service.RegisterHeartbeat("network.validators.heartbeat", n.Net, 5*time.Minute); err != nil { + return fmt.Errorf("couldn't register heartbeat health check: %w", err) + } isBootstrappedFunc := func() (interface{}, error) { pChainID, err := n.chainManager.Lookup("P") if err != nil { @@ -529,6 +531,7 @@ func (n *Node) initHealthAPI() error { } return nil, nil } + // Passes if 
the P, X and C chains are finished bootstrapping if err := service.RegisterMonotonicCheckFunc("defaultChainsBootstrapped", isBootstrappedFunc); err != nil { return err } From 1d2e1eb00eeed593eabbe55c9d8409ad56b58f1a Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 30 Jun 2020 16:55:05 -0400 Subject: [PATCH 179/183] style --- node/node.go | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/node/node.go b/node/node.go index 1d41e27..49598a5 100644 --- a/node/node.go +++ b/node/node.go @@ -511,20 +511,17 @@ func (n *Node) initHealthAPI() error { return fmt.Errorf("couldn't register heartbeat health check: %w", err) } isBootstrappedFunc := func() (interface{}, error) { - pChainID, err := n.chainManager.Lookup("P") - if err != nil { + if pChainID, err := n.chainManager.Lookup("P"); err != nil { return nil, errors.New("P-Chain not created") } else if !n.chainManager.IsBootstrapped(pChainID) { return nil, errors.New("P-Chain not bootstrapped") } - xChainID, err := n.chainManager.Lookup("X") - if err != nil { + if xChainID, err := n.chainManager.Lookup("X"); err != nil { return nil, errors.New("X-Chain not created") } else if !n.chainManager.IsBootstrapped(xChainID) { return nil, errors.New("X-Chain not bootstrapped") } - cChainID, err := n.chainManager.Lookup("C") - if err != nil { + if cChainID, err := n.chainManager.Lookup("C"); err != nil { return nil, errors.New("C-Chain not created") } else if !n.chainManager.IsBootstrapped(cChainID) { return nil, errors.New("C-Chain not bootstrapped") From d92420d4f4b5de410d4801c7f122dddd6b46e008 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Tue, 30 Jun 2020 17:13:06 -0400 Subject: [PATCH 180/183] fix typo causing infinite loop --- api/health/checks.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/api/health/checks.go b/api/health/checks.go index 8e56dc7..20dd6b1 100644 --- a/api/health/checks.go +++ b/api/health/checks.go @@ -74,7 +74,7 @@ func (mc monotonicCheck) Execute() (interface{}, error) { if mc.passed { return nil, nil } - details, pass := mc.Execute() + details, pass := mc.check.Execute() if pass == nil { mc.passed = true } From fa4cd10efe1fcb610e0bc74a58be07a07eaace49 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 3 Jul 2020 13:45:54 -0400 Subject: [PATCH 181/183] address PR comments --- node/node.go | 20 +++++++++----------- snow/engine/common/test_engine.go | 15 ++++++++++++--- 2 files changed, 21 insertions(+), 14 deletions(-) diff --git a/node/node.go b/node/node.go index 49598a5..ea40c77 100644 --- a/node/node.go +++ b/node/node.go @@ -461,18 +461,16 @@ func (n *Node) initKeystoreAPI() error { // initMetricsAPI initializes the Metrics API // Assumes n.APIServer is already set func (n *Node) initMetricsAPI() error { - n.Log.Info("initializing metrics") registry, handler := metrics.NewService() - if n.Config.MetricsAPIEnabled { - n.Log.Info("initializing metrics API") - if err := n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog); err != nil { - return err - } - } else { - n.Log.Info("skipping metrics API initialization because it has been disabled") - } + // It is assumed by components of the system that the Metrics interface is + // non-nil. So, it is set regardless of if the metrics API is available or not. 
n.Config.ConsensusParams.Metrics = registry - return nil + if !n.Config.MetricsAPIEnabled { + n.Log.Info("skipping metrics API initialization because it has been disabled") + return nil + } + n.Log.Info("initializing metrics API") + return n.APIServer.AddRoute(handler, &sync.RWMutex{}, "metrics", "", n.HTTPLog) } // initAdminAPI initializes the Admin API service @@ -529,7 +527,7 @@ func (n *Node) initHealthAPI() error { return nil, nil } // Passes if the P, X and C chains are finished bootstrapping - if err := service.RegisterMonotonicCheckFunc("defaultChainsBootstrapped", isBootstrappedFunc); err != nil { + if err := service.RegisterMonotonicCheckFunc("chains.default.bootstrapped", isBootstrappedFunc); err != nil { return err } return n.APIServer.AddRoute(service.Handler(), &sync.RWMutex{}, "health", "", n.HTTPLog) diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index 6e41dce..d64e479 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -15,7 +15,7 @@ import ( type EngineTest struct { T *testing.T - Bootstrapped, + CantIsBootstrapped, CantStartup, CantGossip, CantShutdown, @@ -44,6 +44,7 @@ type EngineTest struct { CantQueryFailed, CantChits bool + IsBootstrappedF func() bool ContextF func() *snow.Context StartupF, GossipF, ShutdownF func() error NotifyF func(Message) error @@ -59,7 +60,7 @@ var _ Engine = &EngineTest{} // Default ... func (e *EngineTest) Default(cant bool) { - e.Bootstrapped = cant + e.CantIsBootstrapped = cant e.CantStartup = cant e.CantGossip = cant @@ -360,5 +361,13 @@ func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerI // IsBootstrapped ... func (e *EngineTest) IsBootstrapped() bool { - return e.Bootstrapped + if e.IsBootstrappedF != nil { + return e.IsBootstrappedF() + } else if e.CantIsBootstrapped { + if e.T != nil { + e.T.Fatalf("Unexpectedly called IsBootstrapped") + } + panic("Unexpectedly called IsBootstrapped") + } + return e.IsBootstrappedF() } From a6317bd60f1e9fe87716000d515a14dceea009e4 Mon Sep 17 00:00:00 2001 From: Dan Laine Date: Fri, 3 Jul 2020 15:18:39 -0400 Subject: [PATCH 182/183] fix nil pointer error --- snow/engine/common/test_engine.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index d64e479..ce44abb 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -367,7 +367,7 @@ func (e *EngineTest) IsBootstrapped() bool { if e.T != nil { e.T.Fatalf("Unexpectedly called IsBootstrapped") } - panic("Unexpectedly called IsBootstrapped") + return false } - return e.IsBootstrappedF() + return false } From 18350cf3e3bdd744214a361532991601488c6b57 Mon Sep 17 00:00:00 2001 From: StephenButtolph Date: Fri, 3 Jul 2020 17:07:39 -0400 Subject: [PATCH 183/183] nit cleanup --- snow/engine/common/test_engine.go | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/snow/engine/common/test_engine.go b/snow/engine/common/test_engine.go index ce44abb..2f4c511 100644 --- a/snow/engine/common/test_engine.go +++ b/snow/engine/common/test_engine.go @@ -363,11 +363,9 @@ func (e *EngineTest) Chits(validatorID ids.ShortID, requestID uint32, containerI func (e *EngineTest) IsBootstrapped() bool { if e.IsBootstrappedF != nil { return e.IsBootstrappedF() - } else if e.CantIsBootstrapped { - if e.T != nil { - e.T.Fatalf("Unexpectedly called IsBootstrapped") - } - return false + } + if e.CantIsBootstrapped && e.T != nil { + 
e.T.Fatalf("Unexpectedly called IsBootstrapped") } return false }
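
Usage sketch (illustrative only, not part of the patch series): the changes above expose bootstrap status through the new info.isBootstrapped JSON-RPC method and make the health route answer plain GET requests with 200 when healthy and 503 while any registered check (such as chains.default.bootstrapped) fails. The Go snippet below shows how a client might poll both. The base URL, port 9650, and the /ext/info and /ext/health routes are assumptions taken from the node's default API configuration, not something these patches define.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

// isBootstrapped calls the info.isBootstrapped method added in PATCH 173
// using the JSON-RPC 2.0 request shape expected by the gorilla/rpc codec.
func isBootstrapped(baseURL, chain string) (bool, error) {
	reqBody, err := json.Marshal(map[string]interface{}{
		"jsonrpc": "2.0",
		"id":      1,
		"method":  "info.isBootstrapped",
		"params":  map[string]string{"chain": chain},
	})
	if err != nil {
		return false, err
	}
	resp, err := http.Post(baseURL+"/ext/info", "application/json", bytes.NewReader(reqBody))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	var reply struct {
		Result struct {
			IsBootstrapped bool `json:"isBootstrapped"`
		} `json:"result"`
		Error *struct {
			Message string `json:"message"`
		} `json:"error"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&reply); err != nil {
		return false, err
	}
	if reply.Error != nil {
		return false, fmt.Errorf("rpc error: %s", reply.Error.Message)
	}
	return reply.Result.IsBootstrapped, nil
}

func main() {
	base := "http://127.0.0.1:9650" // assumed default HTTP host and port

	// Ask about each of the default chains, mirroring the health check added in PATCH 177.
	for _, chain := range []string{"P", "X", "C"} {
		ok, err := isBootstrapped(base, chain)
		fmt.Printf("chain %s bootstrapped=%v err=%v\n", chain, ok, err)
	}

	// After PATCH 170, a GET on the health route returns 200 when healthy and 503 otherwise,
	// so a load balancer or script only needs to look at the status code.
	if resp, err := http.Get(base + "/ext/health"); err == nil {
		fmt.Println("health status:", resp.StatusCode)
		resp.Body.Close()
	}
}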