From 75182f72056b92b73ed164e4dccfc867072d82d3 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Thu, 28 Dec 2017 11:17:15 -0600 Subject: [PATCH 01/33] change directory for each call, not only for each test Fixes #1026 --- consensus/wal_generator.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index b4efb5a9..73ad3e7f 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -18,6 +18,7 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" auto "github.com/tendermint/tmlibs/autofile" + cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" ) @@ -128,7 +129,7 @@ func makeAddrs() (string, string, string) { // getConfig returns a config for test cases func getConfig() *cfg.Config { pathname := makePathname() - c := cfg.ResetTestRoot(pathname) + c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt())) // and we use random ports to run in parallel tm, rpc, grpc := makeAddrs() From f55135578c5b6a5aa06012c17cb41ff20b00975f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 14:27:37 -0500 Subject: [PATCH 02/33] state: move methods to funcs --- consensus/replay.go | 2 +- evidence/pool.go | 12 +- rpc/core/blocks.go | 3 +- rpc/core/consensus.go | 3 +- state/db.go | 199 +++++++++++++++++++++++++++++++++ state/execution.go | 77 +++++++++---- state/state.go | 248 ++---------------------------------------- state/state_test.go | 41 ++++--- types/services.go | 9 -- 9 files changed, 299 insertions(+), 295 deletions(-) create mode 100644 state/db.go diff --git a/consensus/replay.go b/consensus/replay.go index 209ea597..a9aaeefc 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -301,7 +301,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp } else if appBlockHeight == storeBlockHeight { // We ran Commit, but didn't 
save the state, so replayBlock with mock app - abciResponses, err := h.state.LoadABCIResponses(storeBlockHeight) + abciResponses, err := sm.LoadABCIResponses(h.state.DB(), storeBlockHeight) if err != nil { return nil, err } diff --git a/evidence/pool.go b/evidence/pool.go index 2296ac02..1965d063 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -9,22 +9,24 @@ import ( // EvidencePool maintains a pool of valid evidence // in an EvidenceStore. type EvidencePool struct { - params types.EvidenceParams logger log.Logger - state types.State // TODO: update this on commit! evidenceStore *EvidenceStore + chainID string + lastBlockHeight int64 + params types.EvidenceParams + // never close evidenceChan chan types.Evidence } -func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state types.State) *EvidencePool { +func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state *types.State) *EvidencePool { evpool := &EvidencePool{ params: params, logger: log.NewNopLogger(), evidenceStore: evidenceStore, - state: state, + state: *state, evidenceChan: make(chan types.Evidence), } return evpool @@ -56,7 +58,7 @@ func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) { // TODO: check if we already have evidence for this // validator at this height so we dont get spammed - priority, err := evpool.state.VerifyEvidence(evidence) + priority, err := sm.VerifyEvidence(evpool.state, evidence) if err != nil { // TODO: if err is just that we cant find it cuz we pruned, ignore. 
// TODO: if its actually bad evidence, punish peer diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 43edcd35..8b0ee459 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -4,6 +4,7 @@ import ( "fmt" ctypes "github.com/tendermint/tendermint/rpc/core/types" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" ) @@ -337,7 +338,7 @@ func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { // load the results state := consensusState.GetState() - results, err := state.LoadABCIResponses(height) + results, err := sm.LoadABCIResponses(state.DB(), height) if err != nil { return nil, err } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index e358c487..eedcce27 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -4,6 +4,7 @@ import ( cm "github.com/tendermint/tendermint/consensus" cstypes "github.com/tendermint/tendermint/consensus/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -50,7 +51,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { } state := consensusState.GetState() - validators, err := state.LoadValidators(height) + validators, err := sm.LoadValidators(state.DB(), height) if err != nil { return nil, err } diff --git a/state/db.go b/state/db.go new file mode 100644 index 00000000..5174b9cb --- /dev/null +++ b/state/db.go @@ -0,0 +1,199 @@ +package state + +import ( + "bytes" + "fmt" + + abci "github.com/tendermint/abci/types" + wire "github.com/tendermint/go-wire" + "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" + dbm "github.com/tendermint/tmlibs/db" +) + +//------------------------------------------------------------------------ + +// ABCIResponses retains the responses +// of the various ABCI calls during block processing. 
+// It is persisted to disk for each height before calling Commit. +type ABCIResponses struct { + DeliverTx []*abci.ResponseDeliverTx + EndBlock *abci.ResponseEndBlock +} + +// NewABCIResponses returns a new ABCIResponses +func NewABCIResponses(block *types.Block) *ABCIResponses { + return &ABCIResponses{ + DeliverTx: make([]*abci.ResponseDeliverTx, block.NumTxs), + } +} + +// Bytes serializes the ABCIResponse using go-wire +func (a *ABCIResponses) Bytes() []byte { + return wire.BinaryBytes(*a) +} + +func (a *ABCIResponses) ResultsHash() []byte { + results := types.NewResults(a.DeliverTx) + return results.Hash() +} + +// LoadABCIResponses loads the ABCIResponses for the given height from the database. +// This is useful for recovering from crashes where we called app.Commit and before we called +// s.Save(). It can also be used to produce Merkle proofs of the result of txs. +func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { + buf := db.Get(calcABCIResponsesKey(height)) + if len(buf) == 0 { + return nil, ErrNoABCIResponsesForHeight{height} + } + + abciResponses := new(ABCIResponses) + r, n, err := bytes.NewReader(buf), new(int), new(error) + wire.ReadBinaryPtr(abciResponses, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has + changed: %v\n`, *err)) + } + // TODO: ensure that buf is completely read. + + return abciResponses, nil +} + +// SaveABCIResponses persists the ABCIResponses to the database. +// This is useful in case we crash after app.Commit and before s.Save(). +// Responses are indexed by height so they can also be loaded later to produce Merkle proofs. 
+func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { + db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) +} + +//----------------------------------------------------------------------------- + +// ValidatorsInfo represents the latest validator set, or the last height it changed +type ValidatorsInfo struct { + ValidatorSet *types.ValidatorSet + LastHeightChanged int64 +} + +// Bytes serializes the ValidatorsInfo using go-wire +func (valInfo *ValidatorsInfo) Bytes() []byte { + return wire.BinaryBytes(*valInfo) +} + +// LoadValidators loads the ValidatorSet for a given height. +// Returns ErrNoValSetForHeight if the validator set can't be found for this height. +func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { + valInfo := loadValidatorsInfo(db, height) + if valInfo == nil { + return nil, ErrNoValSetForHeight{height} + } + + if valInfo.ValidatorSet == nil { + valInfo = loadValidatorsInfo(db, valInfo.LastHeightChanged) + if valInfo == nil { + cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as + last changed from height %d`, valInfo.LastHeightChanged, height)) + } + } + + return valInfo.ValidatorSet, nil +} + +func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { + buf := db.Get(calcValidatorsKey(height)) + if len(buf) == 0 { + return nil + } + + v := new(ValidatorsInfo) + r, n, err := bytes.NewReader(buf), new(int), new(error) + wire.ReadBinaryPtr(v, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: + %v\n`, *err)) + } + // TODO: ensure that buf is completely read. + + return v +} + +// saveValidatorsInfo persists the validator set for the next block to disk. +// It should be called from s.Save(), right before the state itself is persisted. 
+// If the validator set did not change after processing the latest block, +// only the last height for which the validators changed is persisted. +func saveValidatorsInfo(db dbm.DB, nextHeight, changeHeight int64, valSet *types.ValidatorSet) { + valInfo := &ValidatorsInfo{ + LastHeightChanged: changeHeight, + } + if changeHeight == nextHeight { + valInfo.ValidatorSet = valSet + } + db.SetSync(calcValidatorsKey(nextHeight), valInfo.Bytes()) +} + +//----------------------------------------------------------------------------- + +// ConsensusParamsInfo represents the latest consensus params, or the last height it changed +type ConsensusParamsInfo struct { + ConsensusParams types.ConsensusParams + LastHeightChanged int64 +} + +// Bytes serializes the ConsensusParamsInfo using go-wire +func (params ConsensusParamsInfo) Bytes() []byte { + return wire.BinaryBytes(params) +} + +// LoadConsensusParams loads the ConsensusParams for a given height. +func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) { + empty := types.ConsensusParams{} + + paramsInfo := loadConsensusParamsInfo(db, height) + if paramsInfo == nil { + return empty, ErrNoConsensusParamsForHeight{height} + } + + if paramsInfo.ConsensusParams == empty { + paramsInfo = loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) + if paramsInfo == nil { + cmn.PanicSanity(fmt.Sprintf(`Couldn't find consensus params at height %d as + last changed from height %d`, paramsInfo.LastHeightChanged, height)) + } + } + + return paramsInfo.ConsensusParams, nil +} + +func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo { + buf := db.Get(calcConsensusParamsKey(height)) + if len(buf) == 0 { + return nil + } + + paramsInfo := new(ConsensusParamsInfo) + r, n, err := bytes.NewReader(buf), new(int), new(error) + wire.ReadBinaryPtr(paramsInfo, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been 
corrupted or its spec has changed: + %v\n`, *err)) + } + // TODO: ensure that buf is completely read. + + return paramsInfo +} + +// saveConsensusParamsInfo persists the consensus params for the next block to disk. +// It should be called from s.Save(), right before the state itself is persisted. +// If the consensus params did not change after processing the latest block, +// only the last height for which they changed is persisted. +func saveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) { + paramsInfo := &ConsensusParamsInfo{ + LastHeightChanged: changeHeight, + } + if changeHeight == nextHeight { + paramsInfo.ConsensusParams = params + } + db.SetSync(calcConsensusParamsKey(nextHeight), paramsInfo.Bytes()) +} diff --git a/state/execution.go b/state/execution.go index c9686152..b56f61f9 100644 --- a/state/execution.go +++ b/state/execution.go @@ -209,28 +209,9 @@ func changeInVotingPowerMoreOrEqualToOneThird(currentSet *types.ValidatorSet, up return false, nil } -// return a bit array of validators that signed the last commit -// NOTE: assumes commits have already been authenticated -/* function is currently unused -func commitBitArrayFromBlock(block *types.Block) *cmn.BitArray { - signed := cmn.NewBitArray(len(block.LastCommit.Precommits)) - for i, precommit := range block.LastCommit.Precommits { - if precommit != nil { - signed.SetIndex(i, true) // val_.LastCommitHeight = block.Height - 1 - } - } - return signed -} -*/ - //----------------------------------------------------- // Validate block -// ValidateBlock validates the block against the state. -func (s *State) ValidateBlock(block *types.Block) error { - return s.validateBlock(block) -} - // MakeBlock builds a block with the given txs and commit from the current state. 
func (s *State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { // build base block @@ -248,7 +229,12 @@ func (s *State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (* return block, block.MakePartSet(s.ConsensusParams.BlockGossip.BlockPartSizeBytes) } -func (s *State) validateBlock(b *types.Block) error { +// ValidateBlock validates the block against the state. +func (s State) ValidateBlock(block *types.Block) error { + return s.validateBlock(block) +} + +func (s State) validateBlock(b *types.Block) error { // validate internal consistency if err := b.ValidateBasic(); err != nil { return err @@ -310,7 +296,7 @@ func (s *State) validateBlock(b *types.Block) error { } for _, ev := range b.Evidence.Evidence { - if _, err := s.VerifyEvidence(ev); err != nil { + if _, err := VerifyEvidence(s, ev); err != nil { return types.NewEvidenceInvalidErr(ev, err) } } @@ -318,10 +304,57 @@ func (s *State) validateBlock(b *types.Block) error { return nil } +// VerifyEvidence verifies the evidence fully by checking it is internally +// consistent and corresponds to an existing or previous validator. +// It returns the priority of this evidence, or an error. +// NOTE: return error may be ErrNoValSetForHeight, in which case the validator set +// for the evidence height could not be loaded. +func VerifyEvidence(s State, evidence types.Evidence) (priority int64, err error) { + height := s.LastBlockHeight + evidenceAge := height - evidence.Height() + maxAge := s.ConsensusParams.EvidenceParams.MaxAge + if evidenceAge > maxAge { + return priority, fmt.Errorf("Evidence from height %d is too old. 
Min height is %d", + evidence.Height(), height-maxAge) + } + + if err := evidence.Verify(s.ChainID); err != nil { + return priority, err + } + + // The address must have been an active validator at the height + ev := evidence + height, addr, idx := ev.Height(), ev.Address(), ev.Index() + valset, err := LoadValidators(s.db, height) + if err != nil { + // XXX/TODO: what do we do if we can't load the valset? + // eg. if we have pruned the state or height is too high? + return priority, err + } + valIdx, val := valset.GetByAddress(addr) + if val == nil { + return priority, fmt.Errorf("Address %X was not a validator at height %d", addr, height) + } else if idx != valIdx { + return priority, fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx) + } + + priority = val.VotingPower + return priority, nil +} + //----------------------------------------------------------------------------- // ApplyBlock validates & executes the block, updates state w/ ABCI responses, // then commits and updates the mempool atomically, then saves state. +// BlockExecutor provides the context and accessories for properly executing a block. +type BlockExecutor struct { + txEventPublisher types.TxEventPublisher + proxyApp proxy.AppConnConsensus + + mempool types.Mempool + evpool types.EvidencePool +} + // ApplyBlock validates the block against the state, executes it against the app, // commits it, and saves the block and state. It's the only function that needs to be called // from outside this package to process and commit an entire block. 
@@ -337,7 +370,7 @@ func (s *State) ApplyBlock(txEventPublisher types.TxEventPublisher, proxyAppConn fail.Fail() // XXX // save the results before we commit - s.SaveABCIResponses(block.Height, abciResponses) + SaveABCIResponses(s.db, block.Height, abciResponses) fail.Fail() // XXX diff --git a/state/state.go b/state/state.go index 773b46fc..1aaddeb3 100644 --- a/state/state.go +++ b/state/state.go @@ -4,11 +4,8 @@ import ( "bytes" "fmt" "io/ioutil" - "sync" "time" - abci "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" @@ -44,9 +41,7 @@ func calcABCIResponsesKey(height int64) []byte { // but the fields should only be changed by calling state.SetBlockAndValidators. // NOTE: not goroutine-safe. type State struct { - // mtx for writing to db - mtx sync.Mutex - db dbm.DB + db dbm.DB // Immutable ChainID string @@ -82,6 +77,10 @@ type State struct { logger log.Logger } +func (s *State) DB() dbm.DB { + return s.db +} + // GetState loads the most recent state from the database, // or creates a new one from the given genesisFile and persists the result // to the database. @@ -157,150 +156,13 @@ func (s *State) Copy() *State { // Save persists the State to the database. func (s *State) Save() { - s.mtx.Lock() - defer s.mtx.Unlock() - - s.saveValidatorsInfo() - s.saveConsensusParamsInfo() - s.db.SetSync(stateKey, s.Bytes()) -} - -// SaveABCIResponses persists the ABCIResponses to the database. -// This is useful in case we crash after app.Commit and before s.Save(). -// Responses are indexed by height so they can also be loaded later to produce Merkle proofs. -func (s *State) SaveABCIResponses(height int64, abciResponses *ABCIResponses) { - s.db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) -} - -// LoadABCIResponses loads the ABCIResponses for the given height from the database. 
-// This is useful for recovering from crashes where we called app.Commit and before we called -// s.Save(). It can also be used to produce Merkle proofs of the result of txs. -func (s *State) LoadABCIResponses(height int64) (*ABCIResponses, error) { - buf := s.db.Get(calcABCIResponsesKey(height)) - if len(buf) == 0 { - return nil, ErrNoABCIResponsesForHeight{height} - } - - abciResponses := new(ABCIResponses) - r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(abciResponses, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has - changed: %v\n`, *err)) - } - // TODO: ensure that buf is completely read. - - return abciResponses, nil -} - -// LoadValidators loads the ValidatorSet for a given height. -// Returns ErrNoValSetForHeight if the validator set can't be found for this height. -func (s *State) LoadValidators(height int64) (*types.ValidatorSet, error) { - valInfo := s.loadValidatorsInfo(height) - if valInfo == nil { - return nil, ErrNoValSetForHeight{height} - } - - if valInfo.ValidatorSet == nil { - valInfo = s.loadValidatorsInfo(valInfo.LastHeightChanged) - if valInfo == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as - last changed from height %d`, valInfo.LastHeightChanged, height)) - } - } - - return valInfo.ValidatorSet, nil -} - -func (s *State) loadValidatorsInfo(height int64) *ValidatorsInfo { - buf := s.db.Get(calcValidatorsKey(height)) - if len(buf) == 0 { - return nil - } - - v := new(ValidatorsInfo) - r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(v, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, *err)) - } - // TODO: ensure that buf is completely read. 
- - return v -} - -// saveValidatorsInfo persists the validator set for the next block to disk. -// It should be called from s.Save(), right before the state itself is persisted. -// If the validator set did not change after processing the latest block, -// only the last height for which the validators changed is persisted. -func (s *State) saveValidatorsInfo() { - changeHeight := s.LastHeightValidatorsChanged nextHeight := s.LastBlockHeight + 1 - valInfo := &ValidatorsInfo{ - LastHeightChanged: changeHeight, - } - if changeHeight == nextHeight { - valInfo.ValidatorSet = s.Validators - } - s.db.SetSync(calcValidatorsKey(nextHeight), valInfo.Bytes()) -} -// LoadConsensusParams loads the ConsensusParams for a given height. -func (s *State) LoadConsensusParams(height int64) (types.ConsensusParams, error) { - empty := types.ConsensusParams{} - - paramsInfo := s.loadConsensusParamsInfo(height) - if paramsInfo == nil { - return empty, ErrNoConsensusParamsForHeight{height} - } - - if paramsInfo.ConsensusParams == empty { - paramsInfo = s.loadConsensusParamsInfo(paramsInfo.LastHeightChanged) - if paramsInfo == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find consensus params at height %d as - last changed from height %d`, paramsInfo.LastHeightChanged, height)) - } - } - - return paramsInfo.ConsensusParams, nil -} - -func (s *State) loadConsensusParamsInfo(height int64) *ConsensusParamsInfo { - buf := s.db.Get(calcConsensusParamsKey(height)) - if len(buf) == 0 { - return nil - } - - paramsInfo := new(ConsensusParamsInfo) - r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(paramsInfo, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed: - %v\n`, *err)) - } - // TODO: ensure that buf is completely read. - - return paramsInfo -} - -// saveConsensusParamsInfo persists the consensus params for the next block to disk. 
-// It should be called from s.Save(), right before the state itself is persisted. -// If the consensus params did not change after processing the latest block, -// only the last height for which they changed is persisted. -func (s *State) saveConsensusParamsInfo() { - changeHeight := s.LastHeightConsensusParamsChanged - nextHeight := s.LastBlockHeight + 1 - paramsInfo := &ConsensusParamsInfo{ - LastHeightChanged: changeHeight, - } - if changeHeight == nextHeight { - paramsInfo.ConsensusParams = s.ConsensusParams - } - s.db.SetSync(calcConsensusParamsKey(nextHeight), paramsInfo.Bytes()) + // persist everything to db + db := s.db + saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) + saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) + db.SetSync(stateKey, s.Bytes()) } // Equals returns true if the States are identical. @@ -383,96 +245,6 @@ func (s *State) GetValidators() (last *types.ValidatorSet, current *types.Valida return s.LastValidators, s.Validators } -// VerifyEvidence verifies the evidence fully by checking it is internally -// consistent and corresponds to an existing or previous validator. -// It returns the priority of this evidence, or an error. -// NOTE: return error may be ErrNoValSetForHeight, in which case the validator set -// for the evidence height could not be loaded. -func (s *State) VerifyEvidence(evidence types.Evidence) (priority int64, err error) { - evidenceAge := s.LastBlockHeight - evidence.Height() - maxAge := s.ConsensusParams.EvidenceParams.MaxAge - if evidenceAge > maxAge { - return priority, fmt.Errorf("Evidence from height %d is too old. 
Min height is %d", - evidence.Height(), s.LastBlockHeight-maxAge) - } - - if err := evidence.Verify(s.ChainID); err != nil { - return priority, err - } - - // The address must have been an active validator at the height - ev := evidence - height, addr, idx := ev.Height(), ev.Address(), ev.Index() - valset, err := s.LoadValidators(height) - if err != nil { - // XXX/TODO: what do we do if we can't load the valset? - // eg. if we have pruned the state or height is too high? - return priority, err - } - valIdx, val := valset.GetByAddress(addr) - if val == nil { - return priority, fmt.Errorf("Address %X was not a validator at height %d", addr, height) - } else if idx != valIdx { - return priority, fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx) - } - - priority = val.VotingPower - return priority, nil -} - -//------------------------------------------------------------------------ - -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. 
-type ABCIResponses struct { - DeliverTx []*abci.ResponseDeliverTx - EndBlock *abci.ResponseEndBlock -} - -// NewABCIResponses returns a new ABCIResponses -func NewABCIResponses(block *types.Block) *ABCIResponses { - return &ABCIResponses{ - DeliverTx: make([]*abci.ResponseDeliverTx, block.NumTxs), - } -} - -// Bytes serializes the ABCIResponse using go-wire -func (a *ABCIResponses) Bytes() []byte { - return wire.BinaryBytes(*a) -} - -func (a *ABCIResponses) ResultsHash() []byte { - results := types.NewResults(a.DeliverTx) - return results.Hash() -} - -//----------------------------------------------------------------------------- - -// ValidatorsInfo represents the latest validator set, or the last height it changed -type ValidatorsInfo struct { - ValidatorSet *types.ValidatorSet - LastHeightChanged int64 -} - -// Bytes serializes the ValidatorsInfo using go-wire -func (valInfo *ValidatorsInfo) Bytes() []byte { - return wire.BinaryBytes(*valInfo) -} - -//----------------------------------------------------------------------------- - -// ConsensusParamsInfo represents the latest consensus params, or the last height it changed -type ConsensusParamsInfo struct { - ConsensusParams types.ConsensusParams - LastHeightChanged int64 -} - -// Bytes serializes the ConsensusParamsInfo using go-wire -func (params ConsensusParamsInfo) Bytes() []byte { - return wire.BinaryBytes(params) -} - //------------------------------------------------------------------------ // Genesis diff --git a/state/state_test.go b/state/state_test.go index b1adc0d0..486ad24a 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -88,8 +88,8 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { }, }} - state.SaveABCIResponses(block.Height, abciResponses) - loadedAbciResponses, err := state.LoadABCIResponses(block.Height) + SaveABCIResponses(state.db, block.Height, abciResponses) + loadedAbciResponses, err := LoadABCIResponses(state.db, block.Height) assert.Nil(err) assert.Equal(abciResponses, 
loadedAbciResponses, cmn.Fmt(`ABCIResponses don't match: Got %v, Expected %v`, loadedAbciResponses, @@ -142,7 +142,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { // query all before, should return error for i := range cases { h := int64(i + 1) - res, err := state.LoadABCIResponses(h) + res, err := LoadABCIResponses(state.db, h) assert.Error(err, "%d: %#v", i, res) } @@ -153,13 +153,13 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { DeliverTx: tc.added, EndBlock: &abci.ResponseEndBlock{}, } - state.SaveABCIResponses(h, responses) + SaveABCIResponses(state.db, h, responses) } // query all before, should return expected value for i, tc := range cases { h := int64(i + 1) - res, err := state.LoadABCIResponses(h) + res, err := LoadABCIResponses(state.db, h) assert.NoError(err, "%d", i) assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i) } @@ -173,30 +173,32 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { assert := assert.New(t) // can't load anything for height 0 - v, err := state.LoadValidators(0) + v, err := LoadValidators(state.db, 0) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") // should be able to load for height 1 - v, err = state.LoadValidators(1) + v, err = LoadValidators(state.db, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // increment height, save; should be able to load for next height state.LastBlockHeight++ - state.saveValidatorsInfo() - v, err = state.LoadValidators(state.LastBlockHeight + 1) + nextHeight := state.LastBlockHeight + 1 + saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + v, err = LoadValidators(state.db, nextHeight) assert.Nil(err, "expected no err") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // increment height, save; should be able to load for next height state.LastBlockHeight += 10 - state.saveValidatorsInfo() - v, 
err = state.LoadValidators(state.LastBlockHeight + 1) + nextHeight = state.LastBlockHeight + 1 + saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + v, err = LoadValidators(state.db, nextHeight) assert.Nil(err, "expected no err") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // should be able to load for next next height - _, err = state.LoadValidators(state.LastBlockHeight + 2) + _, err = LoadValidators(state.db, state.LastBlockHeight+2) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height") } @@ -225,7 +227,8 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { header, parts, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) err := state.SetBlockAndValidators(header, parts, responses) assert.Nil(t, err) - state.saveValidatorsInfo() + nextHeight := state.LastBlockHeight + 1 + saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) } // on each change height, increment the power by one. 
@@ -243,7 +246,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := state.LoadValidators(int64(i + 1)) + v, err := LoadValidators(state.db, int64(i+1)) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -268,9 +271,10 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { header, parts, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) err := state.SetBlockAndValidators(header, parts, responses) require.Nil(t, err) - state.saveValidatorsInfo() + nextHeight := state.LastBlockHeight + 1 + saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err := state.LoadValidators(height + 1) + v, err := LoadValidators(state.db, height+1) assert.Nil(t, err) assert.Equal(t, valSetSize, v.Size()) @@ -323,7 +327,8 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { header, parts, responses := makeHeaderPartsResponsesParams(state, i, cp) err := state.SetBlockAndValidators(header, parts, responses) require.Nil(t, err) - state.saveConsensusParamsInfo() + nextHeight := state.LastBlockHeight + 1 + saveConsensusParamsInfo(state.db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) } // make all the test cases by using the same params until after the change @@ -341,7 +346,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { } for _, testCase := range testCases { - p, err := state.LoadConsensusParams(testCase.height) + p, err := LoadConsensusParams(state.db, testCase.height) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) assert.Equal(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) diff --git a/types/services.go b/types/services.go index 787b1b99..a901898f 100644 --- a/types/services.go +++ b/types/services.go @@ -70,15 
+70,6 @@ type BlockStore interface { SaveBlock(block *Block, blockParts *PartSet, seenCommit *Commit) } -//------------------------------------------------------ -// state - -// State defines the stateful interface used to verify evidence. -// UNSTABLE -type State interface { - VerifyEvidence(Evidence) (priority int64, err error) -} - //------------------------------------------------------ // evidence pool From c915719f85f229e58de13876d414bbb63987bc6e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 17:50:16 -0500 Subject: [PATCH 03/33] *State->State; SetBlockAndValidators->NextState --- evidence/pool.go | 2 +- state/db.go | 41 +++++++++++++ state/execution.go | 4 +- state/state.go | 140 +++++++++++---------------------------------- 4 files changed, 76 insertions(+), 111 deletions(-) diff --git a/evidence/pool.go b/evidence/pool.go index 1965d063..381801df 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -21,7 +21,7 @@ type EvidencePool struct { evidenceChan chan types.Evidence } -func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state *types.State) *EvidencePool { +func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state types.State) *EvidencePool { evpool := &EvidencePool{ params: params, logger: log.NewNopLogger(), diff --git a/state/db.go b/state/db.go index 5174b9cb..08da59cb 100644 --- a/state/db.go +++ b/state/db.go @@ -11,6 +11,47 @@ import ( dbm "github.com/tendermint/tmlibs/db" ) +// GetState loads the most recent state from the database, +// or creates a new one from the given genesisFile and persists the result +// to the database. +func GetState(stateDB dbm.DB, genesisFile string) (*State, error) { + state := LoadState(stateDB) + if state == nil { + var err error + state, err = MakeGenesisStateFromFile(stateDB, genesisFile) + if err != nil { + return nil, err + } + state.Save() + } + + return state, nil +} + +// LoadState loads the State from the database. 
+func LoadState(db dbm.DB) *State { + return loadState(db, stateKey) +} + +func loadState(db dbm.DB, key []byte) *State { + buf := db.Get(key) + if len(buf) == 0 { + return nil + } + + s := &State{db: db} + r, n, err := bytes.NewReader(buf), new(int), new(error) + wire.ReadBinaryPtr(&s, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: + %v\n`, *err)) + } + // TODO: ensure that buf is completely read. + + return s +} + //------------------------------------------------------------------------ // ABCIResponses retains the responses diff --git a/state/execution.go b/state/execution.go index b56f61f9..d05e043d 100644 --- a/state/execution.go +++ b/state/execution.go @@ -213,7 +213,7 @@ func changeInVotingPowerMoreOrEqualToOneThird(currentSet *types.ValidatorSet, up // Validate block // MakeBlock builds a block with the given txs and commit from the current state. -func (s *State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { +func (s State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { // build base block block := types.MakeBlock(height, txs, commit) @@ -309,7 +309,7 @@ func (s State) validateBlock(b *types.Block) error { // It returns the priority of this evidence, or an error. // NOTE: return error may be ErrNoValSetForHeight, in which case the validator set // for the evidence height could not be loaded. 
-func VerifyEvidence(s State, evidence types.Evidence) (priority int64, err error) { +func (s State) VerifyEvidence(evidence types.Evidence) (priority int64, err error) { height := s.LastBlockHeight evidenceAge := height - evidence.Height() maxAge := s.ConsensusParams.EvidenceParams.MaxAge diff --git a/state/state.go b/state/state.go index 1aaddeb3..ca9cf16b 100644 --- a/state/state.go +++ b/state/state.go @@ -8,7 +8,6 @@ import ( cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" wire "github.com/tendermint/go-wire" @@ -38,16 +37,13 @@ func calcABCIResponsesKey(height int64) []byte { // It keeps all information necessary to validate new blocks, // including the last validator set and the consensus params. // All fields are exposed so the struct can be easily serialized, -// but the fields should only be changed by calling state.SetBlockAndValidators. +// but none of them should be mutated directly. +// Instead, use state.Copy() ro state.NextState(...). // NOTE: not goroutine-safe. type State struct { - db dbm.DB - // Immutable ChainID string - // Exposed fields are updated by SetBlockAndValidators. - // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) LastBlockHeight int64 LastBlockTotalTx int64 @@ -73,65 +69,11 @@ type State struct { // The latest AppHash we've received from calling abci.Commit() AppHash []byte - - logger log.Logger -} - -func (s *State) DB() dbm.DB { - return s.db -} - -// GetState loads the most recent state from the database, -// or creates a new one from the given genesisFile and persists the result -// to the database. -func GetState(stateDB dbm.DB, genesisFile string) (*State, error) { - state := LoadState(stateDB) - if state == nil { - var err error - state, err = MakeGenesisStateFromFile(stateDB, genesisFile) - if err != nil { - return nil, err - } - state.Save() - } - - return state, nil -} - -// LoadState loads the State from the database. 
-func LoadState(db dbm.DB) *State { - return loadState(db, stateKey) -} - -func loadState(db dbm.DB, key []byte) *State { - buf := db.Get(key) - if len(buf) == 0 { - return nil - } - - s := &State{db: db} - r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(&s, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, *err)) - } - // TODO: ensure that buf is completely read. - - return s -} - -// SetLogger sets the logger on the State. -func (s *State) SetLogger(l log.Logger) { - s.logger = l } // Copy makes a copy of the State for mutating. -func (s *State) Copy() *State { +func (s State) Copy() State { return &State{ - db: s.db, - ChainID: s.ChainID, LastBlockHeight: s.LastBlockHeight, @@ -149,36 +91,30 @@ func (s *State) Copy() *State { AppHash: s.AppHash, LastResultsHash: s.LastResultsHash, - - logger: s.logger, } } -// Save persists the State to the database. -func (s *State) Save() { +// Save persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. +func (s State) Save(db dbm.DB) { nextHeight := s.LastBlockHeight + 1 - - // persist everything to db - db := s.db saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) db.SetSync(stateKey, s.Bytes()) } // Equals returns true if the States are identical. -func (s *State) Equals(s2 *State) bool { +func (s State) Equals(s2 State) bool { return bytes.Equal(s.Bytes(), s2.Bytes()) } // Bytes serializes the State using go-wire. -func (s *State) Bytes() []byte { +func (s State) Bytes() []byte { return wire.BinaryBytes(s) } -// SetBlockAndValidators mutates State variables -// to update block and validators after running EndBlock. 
-func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, - abciResponses *ABCIResponses) error { +// NextState returns a new State updated according to the header and responses. +func (s State) NextState(header *types.Header, blockPartsHeader types.PartSetHeader, + abciResponses *ABCIResponses) (State, error) { // copy the valset so we can apply changes from EndBlock // and update s.LastValidators and s.Validators @@ -186,13 +122,14 @@ func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader typ nextValSet := prevValSet.Copy() // update the validator set with the latest abciResponses + lastHeightValsChanged := s.LastHeightValidatorsChanged if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) if err != nil { return fmt.Errorf("Error changing validator set: %v", err) } // change results from this height but only applies to the next height - s.LastHeightValidatorsChanged = header.Height + 1 + lastHeightValsChanged = header.Height + 1 } // Update validator accums and set state variables @@ -200,6 +137,7 @@ func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader typ // update the params with the latest abciResponses nextParams := s.ConsensusParams + lastHeightParamsChanged := s.LastHeightConsensusParamsChanged if abciResponses.EndBlock.ConsensusParamUpdates != nil { // NOTE: must not mutate s.ConsensusParams nextParams = s.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates) @@ -208,40 +146,27 @@ func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader typ return fmt.Errorf("Error updating consensus params: %v", err) } // change results from this height but only applies to the next height - s.LastHeightConsensusParamsChanged = header.Height + 1 + lastHeightParamsChanged = header.Height + 1 } - s.setBlockAndValidators(header.Height, - header.NumTxs, - 
types.BlockID{header.Hash(), blockPartsHeader}, - header.Time, - nextValSet, - nextParams, - abciResponses.ResultsHash()) - return nil -} - -func (s *State) setBlockAndValidators(height int64, - newTxs int64, blockID types.BlockID, blockTime time.Time, - valSet *types.ValidatorSet, - params types.ConsensusParams, - resultsHash []byte) { - - s.LastBlockHeight = height - s.LastBlockTotalTx += newTxs - s.LastBlockID = blockID - s.LastBlockTime = blockTime - - s.LastValidators = s.Validators.Copy() - s.Validators = valSet - - s.ConsensusParams = params - - s.LastResultsHash = resultsHash + return State{ + ChainID: s.ChainID, + LastBlockHeight: header.Height, + LastBlockTotalTx: s.LastBlockTotalTx + header.NumTxs, + LastBlockID: types.BlockID{header.Hash(), blockPartsHeader}, + LastBlockTime: header.Time, + Validators: nextValSet, + LastValidators: s.Validators.Copy(), + LastHeightValidatorsChanged: lastHeightValsChanged, + ConsensusParams: nextParams, + LastHeightConsensusParamsChanged: lastHeightParamsChanged, + LastResultsHash: abciResponses.ResultsHash(), + AppHash: nil, + } } // GetValidators returns the last and current validator sets. -func (s *State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { +func (s State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { return s.LastValidators, s.Validators } @@ -252,12 +177,12 @@ func (s *State) GetValidators() (last *types.ValidatorSet, current *types.Valida // file. // // Used during replay and in tests. -func MakeGenesisStateFromFile(db dbm.DB, genDocFile string) (*State, error) { +func MakeGenesisStateFromFile(genDocFile string) (*State, error) { genDoc, err := MakeGenesisDocFromFile(genDocFile) if err != nil { return nil, err } - return MakeGenesisState(db, genDoc) + return MakeGenesisState(genDoc) } // MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. 
@@ -274,7 +199,7 @@ func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { } // MakeGenesisState creates state from types.GenesisDoc. -func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) (*State, error) { +func MakeGenesisState(genDoc *types.GenesisDoc) (*State, error) { err := genDoc.ValidateAndComplete() if err != nil { return nil, fmt.Errorf("Error in genesis file: %v", err) @@ -295,7 +220,6 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) (*State, error) { } return &State{ - db: db, ChainID: genDoc.ChainID, From 9e6d0887574136fcde247885d7a3032d4eaa96f3 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 19:21:16 -0500 Subject: [PATCH 04/33] state: BlockExecutor --- state/db.go | 6 +- state/execution.go | 152 ++++++++++++++++++++++++++------------------- state/state.go | 18 +++--- 3 files changed, 103 insertions(+), 73 deletions(-) diff --git a/state/db.go b/state/db.go index 08da59cb..08dd61d1 100644 --- a/state/db.go +++ b/state/db.go @@ -18,11 +18,11 @@ func GetState(stateDB dbm.DB, genesisFile string) (*State, error) { state := LoadState(stateDB) if state == nil { var err error - state, err = MakeGenesisStateFromFile(stateDB, genesisFile) + state, err = MakeGenesisStateFromFile(genesisFile) if err != nil { return nil, err } - state.Save() + state.Save(stateDB, state.AppHash) } return state, nil @@ -39,7 +39,7 @@ func loadState(db dbm.DB, key []byte) *State { return nil } - s := &State{db: db} + s := new(State) r, n, err := bytes.NewReader(buf), new(int), new(error) wire.ReadBinaryPtr(&s, r, 0, n, err) if *err != nil { diff --git a/state/execution.go b/state/execution.go index d05e043d..969c3328 100644 --- a/state/execution.go +++ b/state/execution.go @@ -10,23 +10,22 @@ import ( crypto "github.com/tendermint/go-crypto" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" ) 
//-------------------------------------------------- // Execute the block -// ValExecBlock executes the block, but does NOT mutate State. +// ValExecBlock executes the block and returns the responses. It does NOT mutate State. // + validates the block // + executes block.Txs on the proxyAppConn -func (s *State) ValExecBlock(txEventPublisher types.TxEventPublisher, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) { - // Validate the block. +func (blockExec *BlockExecutor) ValExecBlock(s State, block *types.Block) (*ABCIResponses, error) { if err := s.validateBlock(block); err != nil { return nil, ErrInvalidBlock(err) } - // Execute the block txs - abciResponses, err := execBlockOnProxyApp(txEventPublisher, proxyAppConn, block, s.logger, s.LastValidators) + abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block) if err != nil { // There was some error in proxyApp // TODO Report error and wait for proxyApp to be available. @@ -38,8 +37,7 @@ func (s *State) ValExecBlock(txEventPublisher types.TxEventPublisher, proxyAppCo // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set -// TODO: Generate a bitmap or otherwise store tx validity in state. 
-func execBlockOnProxyApp(txEventPublisher types.TxEventPublisher, proxyAppConn proxy.AppConnConsensus, block *types.Block, logger log.Logger, lastValidators *types.ValidatorSet) (*ABCIResponses, error) { +func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 txIndex := 0 @@ -59,17 +57,6 @@ func execBlockOnProxyApp(txEventPublisher types.TxEventPublisher, proxyAppConn p logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log) invalidTxs++ } - - // NOTE: if we count we can access the tx from the block instead of - // pulling it from the req - tx := types.Tx(req.GetDeliverTx().Tx) - txEventPublisher.PublishEventTx(types.EventDataTx{types.TxResult{ - Height: block.Height, - Index: uint32(txIndex), - Tx: tx, - Result: *txRes, - }}) - abciResponses.DeliverTx[txIndex] = txRes txIndex++ } @@ -296,41 +283,52 @@ func (s State) validateBlock(b *types.Block) error { } for _, ev := range b.Evidence.Evidence { - if _, err := VerifyEvidence(s, ev); err != nil { + if err := VerifyEvidence(s, ev); err != nil { return types.NewEvidenceInvalidErr(ev, err) } + /* // Needs a db ... + valset, err := LoadValidators(s.db, ev.Height()) + if err != nil { + // XXX/TODO: what do we do if we can't load the valset? + // eg. if we have pruned the state or height is too high? + return err + } + if err := VerifyEvidenceValidator(valSet, ev); err != nil { + return types.NewEvidenceInvalidErr(ev, err) + } + */ } return nil } +// XXX: What's cheaper (ie. what should be checked first): +// evidence internal validity (ie. sig checks) or validator existed (fetch historical val set from db) + // VerifyEvidence verifies the evidence fully by checking it is internally -// consistent and corresponds to an existing or previous validator. -// It returns the priority of this evidence, or an error. 
-// NOTE: return error may be ErrNoValSetForHeight, in which case the validator set -// for the evidence height could not be loaded. -func (s State) VerifyEvidence(evidence types.Evidence) (priority int64, err error) { +// consistent and sufficiently recent. +func VerifyEvidence(s State, evidence types.Evidence) error { height := s.LastBlockHeight + evidenceAge := height - evidence.Height() maxAge := s.ConsensusParams.EvidenceParams.MaxAge if evidenceAge > maxAge { - return priority, fmt.Errorf("Evidence from height %d is too old. Min height is %d", + return fmt.Errorf("Evidence from height %d is too old. Min height is %d", evidence.Height(), height-maxAge) } if err := evidence.Verify(s.ChainID); err != nil { - return priority, err + return err } + return nil +} +// VerifyEvidenceValidator returns the voting power of the validator at the height of the evidence. +// It returns an error if the validator did not exist or does not match that loaded from the historical validator set. +func VerifyEvidenceValidator(valset *types.ValidatorSet, evidence types.Evidence) (priority int64, err error) { // The address must have been an active validator at the height ev := evidence height, addr, idx := ev.Height(), ev.Address(), ev.Index() - valset, err := LoadValidators(s.db, height) - if err != nil { - // XXX/TODO: what do we do if we can't load the valset? - // eg. if we have pruned the state or height is too high? - return priority, err - } valIdx, val := valset.GetByAddress(addr) if val == nil { return priority, fmt.Errorf("Address %X was not a validator at height %d", addr, height) @@ -348,6 +346,9 @@ func (s State) VerifyEvidence(evidence types.Evidence) (priority int64, err erro // BlockExecutor provides the context and accessories for properly executing a block. 
type BlockExecutor struct { + db dbm.DB + logger log.Logger + txEventPublisher types.TxEventPublisher proxyApp proxy.AppConnConsensus @@ -355,81 +356,106 @@ type BlockExecutor struct { evpool types.EvidencePool } +func NewBlockExecutor(db dbm.DB, logger log.Logger, txEventer types.TxEventPublisher, proxyApp proxy.AppConnConsensus, + mempool types.Mempool, evpool types.EvidencePool) *BlockExecutor { + return &BlockExecutor{ + db, + logger, + txEventer, + proxyApp, + mempool, + evpool, + } +} + // ApplyBlock validates the block against the state, executes it against the app, // commits it, and saves the block and state. It's the only function that needs to be called // from outside this package to process and commit an entire block. -func (s *State) ApplyBlock(txEventPublisher types.TxEventPublisher, proxyAppConn proxy.AppConnConsensus, - block *types.Block, partsHeader types.PartSetHeader, - mempool types.Mempool, evpool types.EvidencePool) error { +// It takes a blockID to avoid recomputing the parts hash. 
+func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block *types.Block) (State, error) { - abciResponses, err := s.ValExecBlock(txEventPublisher, proxyAppConn, block) + abciResponses, err := blockExec.ValExecBlock(s, block) if err != nil { - return fmt.Errorf("Exec failed for application: %v", err) + return s, fmt.Errorf("Exec failed for application: %v", err) } + // TODO: Fire events + /* + tx := types.Tx(req.GetDeliverTx().Tx) + txEventPublisher.PublishEventTx(types.EventDataTx{types.TxResult{ + Height: block.Height, + Index: uint32(txIndex), + Tx: tx, + Result: *txRes, + }}) + */ + fail.Fail() // XXX // save the results before we commit - SaveABCIResponses(s.db, block.Height, abciResponses) + SaveABCIResponses(blockExec.db, block.Height, abciResponses) fail.Fail() // XXX - // now update the block and validators - err = s.SetBlockAndValidators(block.Header, partsHeader, abciResponses) + // update the state with the block and responses + s, err = s.NextState(blockID, block.Header, abciResponses) if err != nil { - return fmt.Errorf("Commit failed for application: %v", err) + return s, fmt.Errorf("Commit failed for application: %v", err) } // lock mempool, commit state, update mempoool - err = s.CommitStateUpdateMempool(proxyAppConn, block, mempool) + appHash, err := blockExec.Commit(block) if err != nil { - return fmt.Errorf("Commit failed for application: %v", err) + return s, fmt.Errorf("Commit failed for application: %v", err) } fail.Fail() // XXX - evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) - // save the state and the validators - s.Save() + s.Save(blockExec.db, appHash) - return nil + return s, nil } -// CommitStateUpdateMempool locks the mempool, runs the ABCI Commit message, and updates the mempool. +// Commit locks the mempool, runs the ABCI Commit message, and updates the mempool. +// It returns the result of calling abci.Commit (the AppHash), and an error. 
// The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed // against committed state before new txs are run in the mempool, lest they be invalid. -func (s *State) CommitStateUpdateMempool(proxyAppConn proxy.AppConnConsensus, block *types.Block, mempool types.Mempool) error { - mempool.Lock() - defer mempool.Unlock() +func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { + blockExec.mempool.Lock() + defer blockExec.mempool.Unlock() // Commit block, get hash back - res, err := proxyAppConn.CommitSync() + res, err := blockExec.proxyApp.CommitSync() if err != nil { - s.logger.Error("Client error during proxyAppConn.CommitSync", "err", err) - return err + blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err) + return nil, err } if res.IsErr() { - s.logger.Error("Error in proxyAppConn.CommitSync", "err", res) - return res + blockExec.logger.Error("Error in proxyAppConn.CommitSync", "err", res) + return nil, res } if res.Log != "" { - s.logger.Debug("Commit.Log: " + res.Log) + blockExec.logger.Debug("Commit.Log: " + res.Log) } - s.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "hash", res.Data) + blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "hash", res.Data) - // Set the state's new AppHash - s.AppHash = res.Data + // Update evpool + blockExec.evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) // Update mempool. - return mempool.Update(block.Height, block.Txs) + if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil { + return nil, err + } + + return res.Data, nil } // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). 
-func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger, lastValidators *types.ValidatorSet) ([]byte, error) { - _, err := execBlockOnProxyApp(types.NopEventBus{}, appConnConsensus, block, logger, lastValidators) +func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, logger log.Logger) ([]byte, error) { + _, err := execBlockOnProxyApp(logger, appConnConsensus, block) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err diff --git a/state/state.go b/state/state.go index ca9cf16b..723a3255 100644 --- a/state/state.go +++ b/state/state.go @@ -73,7 +73,7 @@ type State struct { // Copy makes a copy of the State for mutating. func (s State) Copy() State { - return &State{ + return State{ ChainID: s.ChainID, LastBlockHeight: s.LastBlockHeight, @@ -95,7 +95,9 @@ func (s State) Copy() State { } // Save persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. -func (s State) Save(db dbm.DB) { +// It sets the given appHash on the state before persisting. +func (s State) Save(db dbm.DB, appHash []byte) { + s.AppHash = appHash nextHeight := s.LastBlockHeight + 1 saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) @@ -113,7 +115,7 @@ func (s State) Bytes() []byte { } // NextState returns a new State updated according to the header and responses. 
-func (s State) NextState(header *types.Header, blockPartsHeader types.PartSetHeader, +func (s State) NextState(blockID types.BlockID, header *types.Header, abciResponses *ABCIResponses) (State, error) { // copy the valset so we can apply changes from EndBlock @@ -126,7 +128,7 @@ func (s State) NextState(header *types.Header, blockPartsHeader types.PartSetHea if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) if err != nil { - return fmt.Errorf("Error changing validator set: %v", err) + return s, fmt.Errorf("Error changing validator set: %v", err) } // change results from this height but only applies to the next height lastHeightValsChanged = header.Height + 1 @@ -143,17 +145,19 @@ func (s State) NextState(header *types.Header, blockPartsHeader types.PartSetHea nextParams = s.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates) err := nextParams.Validate() if err != nil { - return fmt.Errorf("Error updating consensus params: %v", err) + return s, fmt.Errorf("Error updating consensus params: %v", err) } // change results from this height but only applies to the next height lastHeightParamsChanged = header.Height + 1 } + // NOTE: the AppHash has not been populated. + // It will be filled on state.Save. return State{ ChainID: s.ChainID, LastBlockHeight: header.Height, LastBlockTotalTx: s.LastBlockTotalTx + header.NumTxs, - LastBlockID: types.BlockID{header.Hash(), blockPartsHeader}, + LastBlockID: blockID, LastBlockTime: header.Time, Validators: nextValSet, LastValidators: s.Validators.Copy(), @@ -162,7 +166,7 @@ func (s State) NextState(header *types.Header, blockPartsHeader types.PartSetHea LastHeightConsensusParamsChanged: lastHeightParamsChanged, LastResultsHash: abciResponses.ResultsHash(), AppHash: nil, - } + }, nil } // GetValidators returns the last and current validator sets. 
From f82b7e2a133909e59387a39580d5dcc26922ec61 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 20:03:48 -0500 Subject: [PATCH 05/33] state: re-order funcs. fix tests --- state/db.go | 45 +++-- state/execution.go | 394 +++++++++++++++------------------------ state/execution_test.go | 84 ++------- state/state.go | 111 +++-------- state/state_test.go | 106 +++++------ state/validation.go | 136 ++++++++++++++ state/validation_test.go | 64 +++++++ 7 files changed, 479 insertions(+), 461 deletions(-) create mode 100644 state/validation.go create mode 100644 state/validation_test.go diff --git a/state/db.go b/state/db.go index 08dd61d1..32f62584 100644 --- a/state/db.go +++ b/state/db.go @@ -11,37 +11,50 @@ import ( dbm "github.com/tendermint/tmlibs/db" ) +//------------------------------------------------------------------------ + +func calcValidatorsKey(height int64) []byte { + return []byte(cmn.Fmt("validatorsKey:%v", height)) +} + +func calcConsensusParamsKey(height int64) []byte { + return []byte(cmn.Fmt("consensusParamsKey:%v", height)) +} + +func calcABCIResponsesKey(height int64) []byte { + return []byte(cmn.Fmt("abciResponsesKey:%v", height)) +} + // GetState loads the most recent state from the database, // or creates a new one from the given genesisFile and persists the result // to the database. -func GetState(stateDB dbm.DB, genesisFile string) (*State, error) { +func GetState(stateDB dbm.DB, genesisFile string) (State, error) { state := LoadState(stateDB) - if state == nil { + if state.IsEmpty() { var err error state, err = MakeGenesisStateFromFile(genesisFile) if err != nil { - return nil, err + return state, err } - state.Save(stateDB, state.AppHash) + SaveState(stateDB, state, state.AppHash) } return state, nil } // LoadState loads the State from the database. 
-func LoadState(db dbm.DB) *State { +func LoadState(db dbm.DB) State { return loadState(db, stateKey) } -func loadState(db dbm.DB, key []byte) *State { +func loadState(db dbm.DB, key []byte) (state State) { buf := db.Get(key) if len(buf) == 0 { - return nil + return state } - s := new(State) r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(&s, r, 0, n, err) + wire.ReadBinaryPtr(&state, r, 0, n, err) if *err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: @@ -49,7 +62,17 @@ func loadState(db dbm.DB, key []byte) *State { } // TODO: ensure that buf is completely read. - return s + return state +} + +// SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. +// It sets the given appHash on the state before persisting. +func SaveState(db dbm.DB, s State, appHash []byte) { + s.AppHash = appHash + nextHeight := s.LastBlockHeight + 1 + saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) + saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) + db.SetSync(stateKey, s.Bytes()) } //------------------------------------------------------------------------ @@ -104,7 +127,7 @@ func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { // SaveABCIResponses persists the ABCIResponses to the database. // This is useful in case we crash after app.Commit and before s.Save(). // Responses are indexed by height so they can also be loaded later to produce Merkle proofs. 
-func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { +func saveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) } diff --git a/state/execution.go b/state/execution.go index 969c3328..88ee1127 100644 --- a/state/execution.go +++ b/state/execution.go @@ -1,7 +1,6 @@ package state import ( - "bytes" "errors" "fmt" @@ -14,27 +13,119 @@ import ( "github.com/tendermint/tmlibs/log" ) -//-------------------------------------------------- -// Execute the block +//----------------------------------------------------------------------------- +// BlockExecutor handles block execution and state updates. +// It exposes ApplyBlock(), which validates & executes the block, updates state w/ ABCI responses, +// then commits and updates the mempool atomically, then saves state. -// ValExecBlock executes the block and returns the responses. It does NOT mutate State. -// + validates the block -// + executes block.Txs on the proxyAppConn -func (blockExec *BlockExecutor) ValExecBlock(s State, block *types.Block) (*ABCIResponses, error) { - if err := s.validateBlock(block); err != nil { - return nil, ErrInvalidBlock(err) +// BlockExecutor provides the context and accessories for properly executing a block. +type BlockExecutor struct { + db dbm.DB + logger log.Logger + + txEventPublisher types.TxEventPublisher + proxyApp proxy.AppConnConsensus + + mempool types.Mempool + evpool types.EvidencePool +} + +// NewBlockExecutor returns a new BlockExecutor. +func NewBlockExecutor(db dbm.DB, logger log.Logger, + txEventer types.TxEventPublisher, proxyApp proxy.AppConnConsensus, + mempool types.Mempool, evpool types.EvidencePool) *BlockExecutor { + return &BlockExecutor{ + db, + logger, + txEventer, + proxyApp, + mempool, + evpool, + } +} + +// ApplyBlock validates the block against the state, executes it against the app, +// commits it, and saves the block and state. 
It's the only function that needs to be called +// from outside this package to process and commit an entire block. +// It takes a blockID to avoid recomputing the parts hash. +func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block *types.Block) (State, error) { + + if err := validateBlock(s, block); err != nil { + return s, ErrInvalidBlock(err) } abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block) if err != nil { - // There was some error in proxyApp - // TODO Report error and wait for proxyApp to be available. - return nil, ErrProxyAppConn(err) + return s, ErrProxyAppConn(err) } - return abciResponses, nil + fireEvents(blockExec.txEventPublisher, block, abciResponses) + + fail.Fail() // XXX + + // save the results before we commit + saveABCIResponses(blockExec.db, block.Height, abciResponses) + + fail.Fail() // XXX + + // update the state with the block and responses + s, err = updateState(s, blockID, block.Header, abciResponses) + if err != nil { + return s, fmt.Errorf("Commit failed for application: %v", err) + } + + // lock mempool, commit state, update mempoool + appHash, err := blockExec.Commit(block) + if err != nil { + return s, fmt.Errorf("Commit failed for application: %v", err) + } + + fail.Fail() // XXX + + // save the state and the validators + SaveState(blockExec.db, s, appHash) + + return s, nil } +// Commit locks the mempool, runs the ABCI Commit message, and updates the mempool. +// It returns the result of calling abci.Commit (the AppHash), and an error. +// The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed +// against committed state before new txs are run in the mempool, lest they be invalid. 
+func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { + blockExec.mempool.Lock() + defer blockExec.mempool.Unlock() + + // Commit block, get hash back + res, err := blockExec.proxyApp.CommitSync() + if err != nil { + blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err) + return nil, err + } + if res.IsErr() { + blockExec.logger.Error("Error in proxyAppConn.CommitSync", "err", res) + return nil, res + } + if res.Log != "" { + blockExec.logger.Debug("Commit.Log: " + res.Log) + } + + blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "hash", res.Data) + + // Update evpool + blockExec.evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) + + // Update mempool. + if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil { + return nil, err + } + + return res.Data, nil +} + +//--------------------------------------------------------- +// Helper functions for executing blocks and updating state + // Executes block's transactions on proxyAppConn. // Returns a list of transaction results and updates to the validator set func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, block *types.Block) (*ABCIResponses, error) { @@ -196,189 +287,62 @@ func changeInVotingPowerMoreOrEqualToOneThird(currentSet *types.ValidatorSet, up return false, nil } -//----------------------------------------------------- -// Validate block +// updateState returns a new State updated according to the header and responses. +func updateState(s State, blockID types.BlockID, header *types.Header, + abciResponses *ABCIResponses) (State, error) { -// MakeBlock builds a block with the given txs and commit from the current state. 
-func (s State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { - // build base block - block := types.MakeBlock(height, txs, commit) + // copy the valset so we can apply changes from EndBlock + // and update s.LastValidators and s.Validators + prevValSet := s.Validators.Copy() + nextValSet := prevValSet.Copy() - // fill header with state data - block.ChainID = s.ChainID - block.TotalTxs = s.LastBlockTotalTx + block.NumTxs - block.LastBlockID = s.LastBlockID - block.ValidatorsHash = s.Validators.Hash() - block.AppHash = s.AppHash - block.ConsensusHash = s.ConsensusParams.Hash() - block.LastResultsHash = s.LastResultsHash - - return block, block.MakePartSet(s.ConsensusParams.BlockGossip.BlockPartSizeBytes) -} - -// ValidateBlock validates the block against the state. -func (s State) ValidateBlock(block *types.Block) error { - return s.validateBlock(block) -} - -func (s State) validateBlock(b *types.Block) error { - // validate internal consistency - if err := b.ValidateBasic(); err != nil { - return err - } - - // validate basic info - if b.ChainID != s.ChainID { - return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", s.ChainID, b.ChainID) - } - if b.Height != s.LastBlockHeight+1 { - return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", s.LastBlockHeight+1, b.Height) - } - /* TODO: Determine bounds for Time - See blockchain/reactor "stopSyncingDurationMinutes" - - if !b.Time.After(lastBlockTime) { - return errors.New("Invalid Block.Header.Time") - } - */ - - // validate prev block info - if !b.LastBlockID.Equals(s.LastBlockID) { - return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", s.LastBlockID, b.LastBlockID) - } - newTxs := int64(len(b.Data.Txs)) - if b.TotalTxs != s.LastBlockTotalTx+newTxs { - return fmt.Errorf("Wrong Block.Header.TotalTxs. 
Expected %v, got %v", s.LastBlockTotalTx+newTxs, b.TotalTxs) - } - - // validate app info - if !bytes.Equal(b.AppHash, s.AppHash) { - return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", s.AppHash, b.AppHash) - } - if !bytes.Equal(b.ConsensusHash, s.ConsensusParams.Hash()) { - return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", s.ConsensusParams.Hash(), b.ConsensusHash) - } - if !bytes.Equal(b.LastResultsHash, s.LastResultsHash) { - return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", s.LastResultsHash, b.LastResultsHash) - } - if !bytes.Equal(b.ValidatorsHash, s.Validators.Hash()) { - return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", s.Validators.Hash(), b.ValidatorsHash) - } - - // Validate block LastCommit. - if b.Height == 1 { - if len(b.LastCommit.Precommits) != 0 { - return errors.New("Block at height 1 (first block) should have no LastCommit precommits") - } - } else { - if len(b.LastCommit.Precommits) != s.LastValidators.Size() { - return fmt.Errorf("Invalid block commit size. Expected %v, got %v", - s.LastValidators.Size(), len(b.LastCommit.Precommits)) - } - err := s.LastValidators.VerifyCommit( - s.ChainID, s.LastBlockID, b.Height-1, b.LastCommit) + // update the validator set with the latest abciResponses + lastHeightValsChanged := s.LastHeightValidatorsChanged + if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { + err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) if err != nil { - return err + return s, fmt.Errorf("Error changing validator set: %v", err) } + // change results from this height but only applies to the next height + lastHeightValsChanged = header.Height + 1 } - for _, ev := range b.Evidence.Evidence { - if err := VerifyEvidence(s, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) - } - /* // Needs a db ... 
- valset, err := LoadValidators(s.db, ev.Height()) + // Update validator accums and set state variables + nextValSet.IncrementAccum(1) + + // update the params with the latest abciResponses + nextParams := s.ConsensusParams + lastHeightParamsChanged := s.LastHeightConsensusParamsChanged + if abciResponses.EndBlock.ConsensusParamUpdates != nil { + // NOTE: must not mutate s.ConsensusParams + nextParams = s.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates) + err := nextParams.Validate() if err != nil { - // XXX/TODO: what do we do if we can't load the valset? - // eg. if we have pruned the state or height is too high? - return err + return s, fmt.Errorf("Error updating consensus params: %v", err) } - if err := VerifyEvidenceValidator(valSet, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) - } - */ + // change results from this height but only applies to the next height + lastHeightParamsChanged = header.Height + 1 } - return nil + // NOTE: the AppHash has not been populated. + // It will be filled on state.Save. + return State{ + ChainID: s.ChainID, + LastBlockHeight: header.Height, + LastBlockTotalTx: s.LastBlockTotalTx + header.NumTxs, + LastBlockID: blockID, + LastBlockTime: header.Time, + Validators: nextValSet, + LastValidators: s.Validators.Copy(), + LastHeightValidatorsChanged: lastHeightValsChanged, + ConsensusParams: nextParams, + LastHeightConsensusParamsChanged: lastHeightParamsChanged, + LastResultsHash: abciResponses.ResultsHash(), + AppHash: nil, + }, nil } -// XXX: What's cheaper (ie. what should be checked first): -// evidence internal validity (ie. sig checks) or validator existed (fetch historical val set from db) - -// VerifyEvidence verifies the evidence fully by checking it is internally -// consistent and sufficiently recent. 
-func VerifyEvidence(s State, evidence types.Evidence) error { - height := s.LastBlockHeight - - evidenceAge := height - evidence.Height() - maxAge := s.ConsensusParams.EvidenceParams.MaxAge - if evidenceAge > maxAge { - return fmt.Errorf("Evidence from height %d is too old. Min height is %d", - evidence.Height(), height-maxAge) - } - - if err := evidence.Verify(s.ChainID); err != nil { - return err - } - return nil -} - -// VerifyEvidenceValidator returns the voting power of the validator at the height of the evidence. -// It returns an error if the validator did not exist or does not match that loaded from the historical validator set. -func VerifyEvidenceValidator(valset *types.ValidatorSet, evidence types.Evidence) (priority int64, err error) { - // The address must have been an active validator at the height - ev := evidence - height, addr, idx := ev.Height(), ev.Address(), ev.Index() - valIdx, val := valset.GetByAddress(addr) - if val == nil { - return priority, fmt.Errorf("Address %X was not a validator at height %d", addr, height) - } else if idx != valIdx { - return priority, fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx) - } - - priority = val.VotingPower - return priority, nil -} - -//----------------------------------------------------------------------------- -// ApplyBlock validates & executes the block, updates state w/ ABCI responses, -// then commits and updates the mempool atomically, then saves state. - -// BlockExecutor provides the context and accessories for properly executing a block. 
-type BlockExecutor struct { - db dbm.DB - logger log.Logger - - txEventPublisher types.TxEventPublisher - proxyApp proxy.AppConnConsensus - - mempool types.Mempool - evpool types.EvidencePool -} - -func NewBlockExecutor(db dbm.DB, logger log.Logger, txEventer types.TxEventPublisher, proxyApp proxy.AppConnConsensus, - mempool types.Mempool, evpool types.EvidencePool) *BlockExecutor { - return &BlockExecutor{ - db, - logger, - txEventer, - proxyApp, - mempool, - evpool, - } -} - -// ApplyBlock validates the block against the state, executes it against the app, -// commits it, and saves the block and state. It's the only function that needs to be called -// from outside this package to process and commit an entire block. -// It takes a blockID to avoid recomputing the parts hash. -func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block *types.Block) (State, error) { - - abciResponses, err := blockExec.ValExecBlock(s, block) - if err != nil { - return s, fmt.Errorf("Exec failed for application: %v", err) - } - +func fireEvents(txEventPublisher types.TxEventPublisher, block *types.Block, abciResponses *ABCIResponses) { // TODO: Fire events /* tx := types.Tx(req.GetDeliverTx().Tx) @@ -389,68 +353,10 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block Result: *txRes, }}) */ - - fail.Fail() // XXX - - // save the results before we commit - SaveABCIResponses(blockExec.db, block.Height, abciResponses) - - fail.Fail() // XXX - - // update the state with the block and responses - s, err = s.NextState(blockID, block.Header, abciResponses) - if err != nil { - return s, fmt.Errorf("Commit failed for application: %v", err) - } - - // lock mempool, commit state, update mempoool - appHash, err := blockExec.Commit(block) - if err != nil { - return s, fmt.Errorf("Commit failed for application: %v", err) - } - - fail.Fail() // XXX - - // save the state and the validators - s.Save(blockExec.db, appHash) - - return s, nil } -// 
Commit locks the mempool, runs the ABCI Commit message, and updates the mempool. -// It returns the result of calling abci.Commit (the AppHash), and an error. -// The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed -// against committed state before new txs are run in the mempool, lest they be invalid. -func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { - blockExec.mempool.Lock() - defer blockExec.mempool.Unlock() - - // Commit block, get hash back - res, err := blockExec.proxyApp.CommitSync() - if err != nil { - blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err) - return nil, err - } - if res.IsErr() { - blockExec.logger.Error("Error in proxyAppConn.CommitSync", "err", res) - return nil, res - } - if res.Log != "" { - blockExec.logger.Debug("Commit.Log: " + res.Log) - } - - blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "hash", res.Data) - - // Update evpool - blockExec.evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) - - // Update mempool. - if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil { - return nil, err - } - - return res.Data, nil -} +//---------------------------------------------------------------------------------------------------- +// Execute block without state. TODO: eliminate // ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. // It returns the application root hash (result of abci.Commit). 
diff --git a/state/execution_test.go b/state/execution_test.go index 7cda5c1d..1a63d3ed 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -23,64 +23,6 @@ var ( nTxsPerBlock = 10 ) -func TestValidateBlock(t *testing.T) { - state := state() - state.SetLogger(log.TestingLogger()) - - // proper block must pass - block := makeBlock(state, 1) - err := state.ValidateBlock(block) - require.NoError(t, err) - - // wrong chain fails - block = makeBlock(state, 1) - block.ChainID = "not-the-real-one" - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong height fails - block = makeBlock(state, 1) - block.Height += 10 - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong total tx fails - block = makeBlock(state, 1) - block.TotalTxs += 10 - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong blockid fails - block = makeBlock(state, 1) - block.LastBlockID.PartsHeader.Total += 10 - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong app hash fails - block = makeBlock(state, 1) - block.AppHash = []byte("wrong app hash") - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong consensus hash fails - block = makeBlock(state, 1) - block.ConsensusHash = []byte("wrong consensus hash") - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong results hash fails - block = makeBlock(state, 1) - block.LastResultsHash = []byte("wrong results hash") - err = state.ValidateBlock(block) - require.Error(t, err) - - // wrong validators hash fails - block = makeBlock(state, 1) - block.ValidatorsHash = []byte("wrong validators hash") - err = state.ValidateBlock(block) - require.Error(t, err) -} - func TestApplyBlock(t *testing.T) { cc := proxy.NewLocalClientCreator(dummy.NewDummyApplication()) proxyApp := proxy.NewAppConns(cc, nil) @@ -88,15 +30,16 @@ func TestApplyBlock(t *testing.T) { require.Nil(t, err) defer proxyApp.Stop() - state := state() - 
state.SetLogger(log.TestingLogger()) + state, stateDB := state(), dbm.NewMemDB() - block := makeBlock(state, 1) - - err = state.ApplyBlock(types.NopEventBus{}, proxyApp.Consensus(), - block, block.MakePartSet(testPartSize).Header(), + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), + types.NopEventBus{}, proxyApp.Consensus(), types.MockMempool{}, types.MockEvidencePool{}) + block := makeBlock(state, 1) + blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} + + state, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) // TODO check state and mempool @@ -112,15 +55,14 @@ func TestBeginBlockAbsentValidators(t *testing.T) { defer proxyApp.Stop() state := state() - state.SetLogger(log.TestingLogger()) // there were 2 validators - val1PrivKey := crypto.GenPrivKeyEd25519() + /*val1PrivKey := crypto.GenPrivKeyEd25519() val2PrivKey := crypto.GenPrivKeyEd25519() lastValidators := types.NewValidatorSet([]*types.Validator{ types.NewValidator(val1PrivKey.PubKey(), 10), types.NewValidator(val2PrivKey.PubKey(), 5), - }) + })*/ prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} @@ -141,7 +83,7 @@ func TestBeginBlockAbsentValidators(t *testing.T) { lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits} block, _ := state.MakeBlock(2, makeTxs(2), lastCommit) - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), lastValidators) + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger()) require.Nil(t, err, tc.desc) // -> app must receive an index of the absent validator @@ -159,8 +101,8 @@ func makeTxs(height int64) (txs []types.Tx) { return txs } -func state() *State { - s, _ := MakeGenesisState(dbm.NewMemDB(), &types.GenesisDoc{ +func state() State { + s, _ := MakeGenesisState(&types.GenesisDoc{ ChainID: chainID, Validators: []types.GenesisValidator{ {privKey.PubKey(), 10000, "test"}, @@ -170,7 +112,7 @@ func state() *State { return s 
} -func makeBlock(state *State, height int64) *types.Block { +func makeBlock(state State, height int64) *types.Block { block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit)) return block } diff --git a/state/state.go b/state/state.go index 723a3255..ed8a2013 100644 --- a/state/state.go +++ b/state/state.go @@ -6,9 +6,6 @@ import ( "io/ioutil" "time" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - wire "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/types" @@ -19,18 +16,6 @@ var ( stateKey = []byte("stateKey") ) -func calcValidatorsKey(height int64) []byte { - return []byte(cmn.Fmt("validatorsKey:%v", height)) -} - -func calcConsensusParamsKey(height int64) []byte { - return []byte(cmn.Fmt("consensusParamsKey:%v", height)) -} - -func calcABCIResponsesKey(height int64) []byte { - return []byte(cmn.Fmt("abciResponsesKey:%v", height)) -} - //----------------------------------------------------------------------------- // State is a short description of the latest committed block of the Tendermint consensus. @@ -94,16 +79,6 @@ func (s State) Copy() State { } } -// Save persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. -// It sets the given appHash on the state before persisting. -func (s State) Save(db dbm.DB, appHash []byte) { - s.AppHash = appHash - nextHeight := s.LastBlockHeight + 1 - saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) - saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) - db.SetSync(stateKey, s.Bytes()) -} - // Equals returns true if the States are identical. func (s State) Equals(s2 State) bool { return bytes.Equal(s.Bytes(), s2.Bytes()) @@ -114,59 +89,9 @@ func (s State) Bytes() []byte { return wire.BinaryBytes(s) } -// NextState returns a new State updated according to the header and responses. 
-func (s State) NextState(blockID types.BlockID, header *types.Header, - abciResponses *ABCIResponses) (State, error) { - - // copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators - prevValSet := s.Validators.Copy() - nextValSet := prevValSet.Copy() - - // update the validator set with the latest abciResponses - lastHeightValsChanged := s.LastHeightValidatorsChanged - if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { - err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) - if err != nil { - return s, fmt.Errorf("Error changing validator set: %v", err) - } - // change results from this height but only applies to the next height - lastHeightValsChanged = header.Height + 1 - } - - // Update validator accums and set state variables - nextValSet.IncrementAccum(1) - - // update the params with the latest abciResponses - nextParams := s.ConsensusParams - lastHeightParamsChanged := s.LastHeightConsensusParamsChanged - if abciResponses.EndBlock.ConsensusParamUpdates != nil { - // NOTE: must not mutate s.ConsensusParams - nextParams = s.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates) - err := nextParams.Validate() - if err != nil { - return s, fmt.Errorf("Error updating consensus params: %v", err) - } - // change results from this height but only applies to the next height - lastHeightParamsChanged = header.Height + 1 - } - - // NOTE: the AppHash has not been populated. - // It will be filled on state.Save. 
- return State{ - ChainID: s.ChainID, - LastBlockHeight: header.Height, - LastBlockTotalTx: s.LastBlockTotalTx + header.NumTxs, - LastBlockID: blockID, - LastBlockTime: header.Time, - Validators: nextValSet, - LastValidators: s.Validators.Copy(), - LastHeightValidatorsChanged: lastHeightValsChanged, - ConsensusParams: nextParams, - LastHeightConsensusParamsChanged: lastHeightParamsChanged, - LastResultsHash: abciResponses.ResultsHash(), - AppHash: nil, - }, nil +// IsEmpty returns true if the State is equal to the empty State. +func (s State) IsEmpty() bool { + return s.LastBlockHeight == 0 // XXX can't compare to Empty } // GetValidators returns the last and current validator sets. @@ -174,6 +99,26 @@ func (s State) GetValidators() (last *types.ValidatorSet, current *types.Validat return s.LastValidators, s.Validators } +//------------------------------------------------------------------------ +// Create a block from the latest state + +// MakeBlock builds a block with the given txs and commit from the current state. +func (s State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { + // build base block + block := types.MakeBlock(height, txs, commit) + + // fill header with state data + block.ChainID = s.ChainID + block.TotalTxs = s.LastBlockTotalTx + block.NumTxs + block.LastBlockID = s.LastBlockID + block.ValidatorsHash = s.Validators.Hash() + block.AppHash = s.AppHash + block.ConsensusHash = s.ConsensusParams.Hash() + block.LastResultsHash = s.LastResultsHash + + return block, block.MakePartSet(s.ConsensusParams.BlockGossip.BlockPartSizeBytes) +} + //------------------------------------------------------------------------ // Genesis @@ -181,10 +126,10 @@ func (s State) GetValidators() (last *types.ValidatorSet, current *types.Validat // file. // // Used during replay and in tests. 
-func MakeGenesisStateFromFile(genDocFile string) (*State, error) { +func MakeGenesisStateFromFile(genDocFile string) (State, error) { genDoc, err := MakeGenesisDocFromFile(genDocFile) if err != nil { - return nil, err + return State{}, err } return MakeGenesisState(genDoc) } @@ -203,10 +148,10 @@ func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { } // MakeGenesisState creates state from types.GenesisDoc. -func MakeGenesisState(genDoc *types.GenesisDoc) (*State, error) { +func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { err := genDoc.ValidateAndComplete() if err != nil { - return nil, fmt.Errorf("Error in genesis file: %v", err) + return State{}, fmt.Errorf("Error in genesis file: %v", err) } // Make validators slice @@ -223,7 +168,7 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (*State, error) { } } - return &State{ + return State{ ChainID: genDoc.ChainID, diff --git a/state/state_test.go b/state/state_test.go index 486ad24a..cbd3c813 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -14,19 +14,17 @@ import ( cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" ) // setupTestCase does setup common to all test cases -func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, *State) { +func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { config := cfg.ResetTestRoot("state_") stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir()) state, err := GetState(stateDB, config.GenesisFile()) assert.NoError(t, err, "expected no error on GetState") - state.SetLogger(log.TestingLogger()) tearDown := func(t *testing.T) {} @@ -59,7 +57,7 @@ func TestStateSaveLoad(t *testing.T) { assert := assert.New(t) state.LastBlockHeight++ - state.Save() + SaveState(stateDB, state, state.AppHash) loadedState := LoadState(stateDB) 
assert.True(state.Equals(loadedState), @@ -69,7 +67,7 @@ func TestStateSaveLoad(t *testing.T) { // TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. func TestABCIResponsesSaveLoad1(t *testing.T) { - tearDown, _, state := setupTestCase(t) + tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) // nolint: vetshadow assert := assert.New(t) @@ -88,8 +86,8 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { }, }} - SaveABCIResponses(state.db, block.Height, abciResponses) - loadedAbciResponses, err := LoadABCIResponses(state.db, block.Height) + saveABCIResponses(stateDB, block.Height, abciResponses) + loadedAbciResponses, err := LoadABCIResponses(stateDB, block.Height) assert.Nil(err) assert.Equal(abciResponses, loadedAbciResponses, cmn.Fmt(`ABCIResponses don't match: Got %v, Expected %v`, loadedAbciResponses, @@ -98,7 +96,7 @@ func TestABCIResponsesSaveLoad1(t *testing.T) { // TestResultsSaveLoad tests saving and loading abci results. func TestABCIResponsesSaveLoad2(t *testing.T) { - tearDown, _, state := setupTestCase(t) + tearDown, stateDB, _ := setupTestCase(t) defer tearDown(t) // nolint: vetshadow assert := assert.New(t) @@ -142,7 +140,7 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { // query all before, should return error for i := range cases { h := int64(i + 1) - res, err := LoadABCIResponses(state.db, h) + res, err := LoadABCIResponses(stateDB, h) assert.Error(err, "%d: %#v", i, res) } @@ -153,13 +151,13 @@ func TestABCIResponsesSaveLoad2(t *testing.T) { DeliverTx: tc.added, EndBlock: &abci.ResponseEndBlock{}, } - SaveABCIResponses(state.db, h, responses) + saveABCIResponses(stateDB, h, responses) } // query all before, should return expected value for i, tc := range cases { h := int64(i + 1) - res, err := LoadABCIResponses(state.db, h) + res, err := LoadABCIResponses(stateDB, h) assert.NoError(err, "%d", i) assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i) } @@ -167,56 +165,57 @@ func TestABCIResponsesSaveLoad2(t 
*testing.T) { // TestValidatorSimpleSaveLoad tests saving and loading validators. func TestValidatorSimpleSaveLoad(t *testing.T) { - tearDown, _, state := setupTestCase(t) + tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) // nolint: vetshadow assert := assert.New(t) // can't load anything for height 0 - v, err := LoadValidators(state.db, 0) + v, err := LoadValidators(stateDB, 0) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") // should be able to load for height 1 - v, err = LoadValidators(state.db, 1) + v, err = LoadValidators(stateDB, 1) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // increment height, save; should be able to load for next height state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(state.db, nextHeight) + saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + v, err = LoadValidators(stateDB, nextHeight) assert.Nil(err, "expected no err") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // increment height, save; should be able to load for next height state.LastBlockHeight += 10 nextHeight = state.LastBlockHeight + 1 - saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(state.db, nextHeight) + saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + v, err = LoadValidators(stateDB, nextHeight) assert.Nil(err, "expected no err") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // should be able to load for next next height - _, err = LoadValidators(state.db, state.LastBlockHeight+2) + _, err = LoadValidators(stateDB, state.LastBlockHeight+2) assert.IsType(ErrNoValSetForHeight{}, 
err, "expected err at unknown height") } // TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. func TestOneValidatorChangesSaveLoad(t *testing.T) { - tearDown, _, state := setupTestCase(t) + tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) // change vals at these heights changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} N := len(changeHeights) - // build the validator history by running SetBlockAndValidators + // build the validator history by running updateState // with the right validator set for each height highestHeight := changeHeights[N-1] + 5 changeIndex := 0 _, val := state.Validators.GetByIndex(0) power := val.VotingPower + var err error for i := int64(1); i < highestHeight; i++ { // when we get to a change height, // use the next pubkey @@ -224,11 +223,11 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { changeIndex++ power += 1 } - header, parts, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) - err := state.SetBlockAndValidators(header, parts, responses) + header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) + state, err = updateState(state, blockID, header, responses) assert.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) } // on each change height, increment the power by one. @@ -246,7 +245,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := LoadValidators(state.db, int64(i+1)) + v, err := LoadValidators(stateDB, int64(i+1)) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) @@ -260,21 +259,22 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { // changes. 
func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 - tearDown, _, state := setupTestCase(t) + tearDown, stateDB, state := setupTestCase(t) state.Validators = genValSet(valSetSize) - state.Save() + SaveState(stateDB, state, state.AppHash) defer tearDown(t) const height = 1 pubkey := crypto.GenPrivKeyEd25519().PubKey() // swap the first validator with a new one ^^^ (validator set size stays the same) - header, parts, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) - err := state.SetBlockAndValidators(header, parts, responses) + header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey) + var err error + state, err = updateState(state, blockID, header, responses) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(state.db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) + saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err := LoadValidators(state.db, height+1) + v, err := LoadValidators(stateDB, height+1) assert.Nil(t, err) assert.Equal(t, valSetSize, v.Size()) @@ -296,7 +296,7 @@ func genValSet(size int) *types.ValidatorSet { // TestConsensusParamsChangesSaveLoad tests saving and loading consensus params // with changes. 
func TestConsensusParamsChangesSaveLoad(t *testing.T) { - tearDown, _, state := setupTestCase(t) + tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) // change vals at these heights @@ -312,11 +312,12 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { params[i].BlockSize.MaxBytes += i } - // build the params history by running SetBlockAndValidators + // build the params history by running updateState // with the right params set for each height highestHeight := changeHeights[N-1] + 5 changeIndex := 0 cp := params[changeIndex] + var err error for i := int64(1); i < highestHeight; i++ { // when we get to a change height, // use the next params @@ -324,11 +325,12 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { changeIndex++ cp = params[changeIndex] } - header, parts, responses := makeHeaderPartsResponsesParams(state, i, cp) - err := state.SetBlockAndValidators(header, parts, responses) + header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp) + state, err = updateState(state, blockID, header, responses) + require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - saveConsensusParamsInfo(state.db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) + saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) } // make all the test cases by using the same params until after the change @@ -346,7 +348,7 @@ func TestConsensusParamsChangesSaveLoad(t *testing.T) { } for _, testCase := range testCases { - p, err := LoadConsensusParams(state.db, testCase.height) + p, err := LoadConsensusParams(stateDB, testCase.height) assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) assert.Equal(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at height %d`, testCase.height)) @@ -421,15 +423,15 @@ func TestLessThanOneThirdOfVotingPowerPerBlockEnforced(t *testing.T) { } for i, tc := range testCases { - tearDown, _, state := 
setupTestCase(t) + tearDown, stateDB, state := setupTestCase(t) state.Validators = genValSet(tc.initialValSetSize) - state.Save() + SaveState(stateDB, state, state.AppHash) height := state.LastBlockHeight + 1 block := makeBlock(state, height) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: tc.valUpdatesFn(state.Validators)}, } - err := state.SetBlockAndValidators(block.Header, types.PartSetHeader{}, abciResponses) + state, err := updateState(state, types.BlockID{block.Hash(), types.PartSetHeader{}}, block.Header, abciResponses) if tc.shouldErr { assert.Error(t, err, "#%d", i) } else { @@ -489,8 +491,8 @@ func TestApplyUpdates(t *testing.T) { } } -func makeHeaderPartsResponsesValPubKeyChange(state *State, height int64, - pubkey crypto.PubKey) (*types.Header, types.PartSetHeader, *ABCIResponses) { +func makeHeaderPartsResponsesValPubKeyChange(state State, height int64, + pubkey crypto.PubKey) (*types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ @@ -508,11 +510,11 @@ func makeHeaderPartsResponsesValPubKeyChange(state *State, height int64, } } - return block.Header, types.PartSetHeader{}, abciResponses + return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses } -func makeHeaderPartsResponsesValPowerChange(state *State, height int64, - power int64) (*types.Header, types.PartSetHeader, *ABCIResponses) { +func makeHeaderPartsResponsesValPowerChange(state State, height int64, + power int64) (*types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ @@ -529,17 +531,17 @@ func makeHeaderPartsResponsesValPowerChange(state *State, height int64, } } - return block.Header, types.PartSetHeader{}, abciResponses + return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses } -func makeHeaderPartsResponsesParams(state *State, height int64, - params types.ConsensusParams) 
(*types.Header, types.PartSetHeader, *ABCIResponses) { +func makeHeaderPartsResponsesParams(state State, height int64, + params types.ConsensusParams) (*types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, } - return block.Header, types.PartSetHeader{}, abciResponses + return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses } type paramsChangeTestCase struct { @@ -547,13 +549,13 @@ type paramsChangeTestCase struct { params types.ConsensusParams } -func makeHeaderPartsResults(state *State, height int64, - results []*abci.ResponseDeliverTx) (*types.Header, types.PartSetHeader, *ABCIResponses) { +func makeHeaderPartsResults(state State, height int64, + results []*abci.ResponseDeliverTx) (*types.Header, types.BlockID, *ABCIResponses) { block := makeBlock(state, height) abciResponses := &ABCIResponses{ DeliverTx: results, EndBlock: &abci.ResponseEndBlock{}, } - return block.Header, types.PartSetHeader{}, abciResponses + return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses } diff --git a/state/validation.go b/state/validation.go new file mode 100644 index 00000000..69200840 --- /dev/null +++ b/state/validation.go @@ -0,0 +1,136 @@ +package state + +import ( + "bytes" + "errors" + "fmt" + + "github.com/tendermint/tendermint/types" +) + +//----------------------------------------------------- +// Validate block + +// ValidateBlock validates the block against the state. +func ValidateBlock(s State, block *types.Block) error { + return validateBlock(s, block) +} + +func validateBlock(s State, b *types.Block) error { + // validate internal consistency + if err := b.ValidateBasic(); err != nil { + return err + } + + // validate basic info + if b.ChainID != s.ChainID { + return fmt.Errorf("Wrong Block.Header.ChainID. 
Expected %v, got %v", s.ChainID, b.ChainID) + } + if b.Height != s.LastBlockHeight+1 { + return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", s.LastBlockHeight+1, b.Height) + } + /* TODO: Determine bounds for Time + See blockchain/reactor "stopSyncingDurationMinutes" + + if !b.Time.After(lastBlockTime) { + return errors.New("Invalid Block.Header.Time") + } + */ + + // validate prev block info + if !b.LastBlockID.Equals(s.LastBlockID) { + return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", s.LastBlockID, b.LastBlockID) + } + newTxs := int64(len(b.Data.Txs)) + if b.TotalTxs != s.LastBlockTotalTx+newTxs { + return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", s.LastBlockTotalTx+newTxs, b.TotalTxs) + } + + // validate app info + if !bytes.Equal(b.AppHash, s.AppHash) { + return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %v", s.AppHash, b.AppHash) + } + if !bytes.Equal(b.ConsensusHash, s.ConsensusParams.Hash()) { + return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %v", s.ConsensusParams.Hash(), b.ConsensusHash) + } + if !bytes.Equal(b.LastResultsHash, s.LastResultsHash) { + return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %v", s.LastResultsHash, b.LastResultsHash) + } + if !bytes.Equal(b.ValidatorsHash, s.Validators.Hash()) { + return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %v", s.Validators.Hash(), b.ValidatorsHash) + } + + // Validate block LastCommit. + if b.Height == 1 { + if len(b.LastCommit.Precommits) != 0 { + return errors.New("Block at height 1 (first block) should have no LastCommit precommits") + } + } else { + if len(b.LastCommit.Precommits) != s.LastValidators.Size() { + return fmt.Errorf("Invalid block commit size. 
Expected %v, got %v", + s.LastValidators.Size(), len(b.LastCommit.Precommits)) + } + err := s.LastValidators.VerifyCommit( + s.ChainID, s.LastBlockID, b.Height-1, b.LastCommit) + if err != nil { + return err + } + } + + for _, ev := range b.Evidence.Evidence { + if err := VerifyEvidence(s, ev); err != nil { + return types.NewEvidenceInvalidErr(ev, err) + } + /* // Needs a db ... + valset, err := LoadValidators(s.db, ev.Height()) + if err != nil { + // XXX/TODO: what do we do if we can't load the valset? + // eg. if we have pruned the state or height is too high? + return err + } + if err := VerifyEvidenceValidator(valSet, ev); err != nil { + return types.NewEvidenceInvalidErr(ev, err) + } + */ + } + + return nil +} + +// XXX: What's cheaper (ie. what should be checked first): +// evidence internal validity (ie. sig checks) or validator existed (fetch historical val set from db) + +// VerifyEvidence verifies the evidence fully by checking it is internally +// consistent and sufficiently recent. +func VerifyEvidence(s State, evidence types.Evidence) error { + height := s.LastBlockHeight + + evidenceAge := height - evidence.Height() + maxAge := s.ConsensusParams.EvidenceParams.MaxAge + if evidenceAge > maxAge { + return fmt.Errorf("Evidence from height %d is too old. Min height is %d", + evidence.Height(), height-maxAge) + } + + if err := evidence.Verify(s.ChainID); err != nil { + return err + } + return nil +} + +// VerifyEvidenceValidator returns the voting power of the validator at the height of the evidence. +// It returns an error if the validator did not exist or does not match that loaded from the historical validator set. 
+func VerifyEvidenceValidator(valset *types.ValidatorSet, evidence types.Evidence) (priority int64, err error) { + // The address must have been an active validator at the height + ev := evidence + height, addr, idx := ev.Height(), ev.Address(), ev.Index() + valIdx, val := valset.GetByAddress(addr) + if val == nil { + return priority, fmt.Errorf("Address %X was not a validator at height %d", addr, height) + } else if idx != valIdx { + return priority, fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx) + } + + priority = val.VotingPower + return priority, nil +} diff --git a/state/validation_test.go b/state/validation_test.go new file mode 100644 index 00000000..a8e4d42e --- /dev/null +++ b/state/validation_test.go @@ -0,0 +1,64 @@ +package state + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func _TestValidateBlock(t *testing.T) { + state := state() + + // proper block must pass + block := makeBlock(state, 1) + err := ValidateBlock(state, block) + require.NoError(t, err) + + // wrong chain fails + block = makeBlock(state, 1) + block.ChainID = "not-the-real-one" + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong height fails + block = makeBlock(state, 1) + block.Height += 10 + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong total tx fails + block = makeBlock(state, 1) + block.TotalTxs += 10 + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong blockid fails + block = makeBlock(state, 1) + block.LastBlockID.PartsHeader.Total += 10 + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong app hash fails + block = makeBlock(state, 1) + block.AppHash = []byte("wrong app hash") + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong consensus hash fails + block = makeBlock(state, 1) + block.ConsensusHash = []byte("wrong consensus hash") + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong 
results hash fails + block = makeBlock(state, 1) + block.LastResultsHash = []byte("wrong results hash") + err = ValidateBlock(state, block) + require.Error(t, err) + + // wrong validators hash fails + block = makeBlock(state, 1) + block.ValidatorsHash = []byte("wrong validators hash") + err = ValidateBlock(state, block) + require.Error(t, err) +} From bac60f2067cfd08f09e02e060bab520bb51f3c3c Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 20:40:36 -0500 Subject: [PATCH 06/33] blockchain: update for new state --- blockchain/reactor.go | 67 +++++++++++++++++++++++--------------- blockchain/reactor_test.go | 19 +++++++---- 2 files changed, 52 insertions(+), 34 deletions(-) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index f985e284..c8e794a1 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -4,11 +4,11 @@ import ( "bytes" "errors" "reflect" + "sync" "time" wire "github.com/tendermint/go-wire" "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" @@ -34,29 +34,33 @@ const ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(*sm.State, int) + SwitchToConsensus(sm.State, int) } // BlockchainReactor handles long-term catchup syncing. 
type BlockchainReactor struct { p2p.BaseReactor - state *sm.State - proxyAppConn proxy.AppConnConsensus // same as consensus.proxyAppConn - store *BlockStore - pool *BlockPool - fastSync bool - requestsCh chan BlockRequest - timeoutsCh chan string + mtx sync.Mutex + params types.ConsensusParams - eventBus *types.EventBus + // immutable + initialState sm.State + + blockExec *sm.BlockExecutor + store *BlockStore + pool *BlockPool + fastSync bool + requestsCh chan BlockRequest + timeoutsCh chan string } // NewBlockchainReactor returns new reactor instance. -func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, store *BlockStore, fastSync bool) *BlockchainReactor { +func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore, fastSync bool) *BlockchainReactor { if state.LastBlockHeight != store.Height() { cmn.PanicSanity(cmn.Fmt("state (%v) and store (%v) height mismatch", state.LastBlockHeight, store.Height())) } + requestsCh := make(chan BlockRequest, defaultChannelCapacity) timeoutsCh := make(chan string, defaultChannelCapacity) pool := NewBlockPool( @@ -65,8 +69,9 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, timeoutsCh, ) bcR := &BlockchainReactor{ - state: state, - proxyAppConn: proxyAppConn, + params: state.ConsensusParams, + initialState: state, + blockExec: blockExec, store: store, pool: pool, fastSync: fastSync, @@ -183,7 +188,16 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // maxMsgSize returns the maximum allowable size of a // message on the blockchain reactor. 
func (bcR *BlockchainReactor) maxMsgSize() int { - return bcR.state.ConsensusParams.BlockSize.MaxBytes + 2 + bcR.mtx.Lock() + defer bcR.mtx.Unlock() + return bcR.params.BlockSize.MaxBytes + 2 +} + +// updateConsensusParams updates the internal consensus params +func (bcR *BlockchainReactor) updateConsensusParams(params types.ConsensusParams) { + bcR.mtx.Lock() + defer bcR.mtx.Unlock() + bcR.params = params } // Handle messages from the poolReactor telling the reactor what to do. @@ -197,7 +211,8 @@ func (bcR *BlockchainReactor) poolRoutine() { blocksSynced := 0 - chainID := bcR.state.ChainID + chainID := bcR.initialState.ChainID + state := bcR.initialState lastHundred := time.Now() lastRate := 0.0 @@ -236,7 +251,7 @@ FOR_LOOP: bcR.pool.Stop() conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) - conR.SwitchToConsensus(bcR.state, blocksSynced) + conR.SwitchToConsensus(state, blocksSynced) break FOR_LOOP } @@ -251,14 +266,15 @@ FOR_LOOP: // We need both to sync the first block. break SYNC_LOOP } - firstParts := first.MakePartSet(bcR.state.ConsensusParams.BlockPartSizeBytes) + firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes) firstPartsHeader := firstParts.Header() + firstID := types.BlockID{first.Hash(), firstPartsHeader} // Finally, verify the first block using the second's commit // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := bcR.state.Validators.VerifyCommit( - chainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit) + err := state.Validators.VerifyCommit( + chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("Error in validation", "err", err) bcR.pool.RedoRequest(first.Height) @@ -272,15 +288,17 @@ FOR_LOOP: // NOTE: we could improve performance if we // didn't make the app commit to disk every block // ... 
but we would need a way to get the hash without it persisting - err := bcR.state.ApplyBlock(bcR.eventBus, bcR.proxyAppConn, - first, firstPartsHeader, - types.MockMempool{}, types.MockEvidencePool{}) // TODO unmock! + var err error + state, err = bcR.blockExec.ApplyBlock(state, firstID, first) if err != nil { // TODO This is bad, are we zombie? cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) } blocksSynced += 1 + // update the consensus params + bcR.updateConsensusParams(state.ConsensusParams) + if blocksSynced%100 == 0 { lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, @@ -302,11 +320,6 @@ func (bcR *BlockchainReactor) BroadcastStatusRequest() error { return nil } -// SetEventBus sets event bus. -func (bcR *BlockchainReactor) SetEventBus(b *types.EventBus) { - bcR.eventBus = b -} - //----------------------------------------------------------------------------- // Messages diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 36cdc080..ecb4a9e6 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -14,14 +14,14 @@ import ( "github.com/tendermint/tendermint/types" ) -func makeStateAndBlockStore(logger log.Logger) (*sm.State, *BlockStore) { +func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { config := cfg.ResetTestRoot("blockchain_reactor_test") blockStore := NewBlockStore(dbm.NewMemDB()) // Get State - state, _ := sm.GetState(dbm.NewMemDB(), config.GenesisFile()) - state.SetLogger(logger.With("module", "state")) - state.Save() + stateDB := dbm.NewMemDB() + state, _ := sm.GetState(stateDB, config.GenesisFile()) + sm.SaveState(stateDB, state, state.AppHash) return state, blockStore } @@ -31,7 +31,10 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe // Make the blockchainReactor itself fastSync := true - bcReactor := 
NewBlockchainReactor(state.Copy(), nil, blockStore, fastSync) + blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), + nil, nil, types.MockMempool{}, types.MockEvidencePool{}) + + bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) bcReactor.SetLogger(logger.With("module", "blockchain")) // Next: we need to set a switch in order for peers to be added in @@ -51,7 +54,7 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe func TestNoBlockMessageResponse(t *testing.T) { maxBlockHeight := int64(20) - bcr := newBlockchainReactor(log.NewNopLogger(), maxBlockHeight) + bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight) bcr.Start() defer bcr.Stop() @@ -71,6 +74,8 @@ func TestNoBlockMessageResponse(t *testing.T) { {100, false}, } + // receive a request message from peer, + // wait to hear response for _, tt := range tests { reqBlockMsg := &bcBlockRequestMessage{tt.height} reqBlockBytes := wire.BinaryBytes(struct{ BlockchainMessage }{reqBlockMsg}) @@ -104,7 +109,7 @@ func makeTxs(height int64) (txs []types.Tx) { return txs } -func makeBlock(height int64, state *sm.State) *types.Block { +func makeBlock(height int64, state sm.State) *types.Block { block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit)) return block } From 0acca7fe69a95f33f4521aaf0b0aca270f7cbf4d Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 22:09:48 -0500 Subject: [PATCH 07/33] final updates for state --- blockchain/reactor_test.go | 2 +- consensus/common_test.go | 36 +++++++++---------- consensus/reactor.go | 2 +- consensus/replay.go | 69 +++++++++++++++++++++--------------- consensus/replay_file.go | 22 +++++++----- consensus/replay_test.go | 71 ++++++++++++++++++-------------------- consensus/state.go | 39 ++++++++++----------- consensus/wal_generator.go | 8 ++--- evidence/pool.go | 21 ++++++----- node/node.go | 38 +++++++++++--------- rpc/core/blocks.go | 3 +- 
rpc/core/consensus.go | 3 +- rpc/core/pipe.go | 8 ++++- state/db.go | 6 ++-- state/execution.go | 21 ++++++----- state/state.go | 2 +- state/state_test.go | 6 ++-- 17 files changed, 192 insertions(+), 165 deletions(-) diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index ecb4a9e6..f58b8394 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -21,7 +21,7 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { // Get State stateDB := dbm.NewMemDB() state, _ := sm.GetState(stateDB, config.GenesisFile()) - sm.SaveState(stateDB, state, state.AppHash) + sm.SaveState(stateDB, state) return state, blockStore } diff --git a/consensus/common_test.go b/consensus/common_test.go index 6598c15e..ba3564aa 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -235,16 +235,16 @@ func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} { //------------------------------------------------------------------------------- // consensus states -func newConsensusState(state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { +func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { return newConsensusStateWithConfig(config, state, pv, app) } -func newConsensusStateWithConfig(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { +func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { blockDB := dbm.NewMemDB() return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) } -func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state *sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState { +func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB 
dbm.DB) *ConsensusState { // Get BlockStore blockStore := bc.NewBlockStore(blockDB) @@ -264,7 +264,10 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state *sm. evpool := types.MockEvidencePool{} // Make ConsensusReactor - cs := NewConsensusState(thisConfig.Consensus, state, proxyAppConnCon, blockStore, mempool, evpool) + stateDB := dbm.NewMemDB() // XXX !! + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), + nil, proxyAppConnCon, mempool, evpool) + cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger()) cs.SetPrivValidator(pv) @@ -284,9 +287,7 @@ func loadPrivValidator(config *cfg.Config) *types.PrivValidatorFS { } func fixedConsensusStateDummy(config *cfg.Config, logger log.Logger) *ConsensusState { - stateDB := dbm.NewMemDB() - state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile()) - state.SetLogger(logger.With("module", "state")) + state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) privValidator := loadPrivValidator(config) cs := newConsensusState(state, privValidator, dummy.NewDummyApplication()) cs.SetLogger(logger) @@ -354,10 +355,9 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou css := make([]*ConsensusState, nValidators) logger := consensusLogger() for i := 0; i < nValidators; i++ { - db := dbm.NewMemDB() // each state needs its own db - state, _ := sm.MakeGenesisState(db, genDoc) - state.SetLogger(logger.With("module", "state", "validator", i)) - state.Save() + stateDB := dbm.NewMemDB() // each state needs its own db + state, _ := sm.MakeGenesisState(genDoc) + sm.SaveState(stateDB, state) thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) for _, opt := range configOpts { opt(thisConfig) @@ -380,10 +380,9 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF css := make([]*ConsensusState, nPeers) logger := consensusLogger() for i := 0; i < nPeers; i++ 
{ - db := dbm.NewMemDB() // each state needs its own db - state, _ := sm.MakeGenesisState(db, genDoc) - state.SetLogger(logger.With("module", "state", "validator", i)) - state.Save() + stateDB := dbm.NewMemDB() // each state needs its own db + state, _ := sm.MakeGenesisState(genDoc) + sm.SaveState(stateDB, state) thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal var privVal types.PrivValidator @@ -437,12 +436,11 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G }, privValidators } -func randGenesisState(numValidators int, randPower bool, minPower int64) (*sm.State, []*types.PrivValidatorFS) { +func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []*types.PrivValidatorFS) { genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) + s0, _ := sm.MakeGenesisState(genDoc) db := dbm.NewMemDB() - s0, _ := sm.MakeGenesisState(db, genDoc) - s0.SetLogger(log.TestingLogger().With("module", "state")) - s0.Save() + sm.SaveState(db, s0) return s0, privValidators } diff --git a/consensus/reactor.go b/consensus/reactor.go index eb752ee1..9b3393e9 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -82,7 +82,7 @@ func (conR *ConsensusReactor) OnStop() { // SwitchToConsensus switches from fast_sync mode to consensus mode. 
// It resets the state, turns off fast_sync, and starts the consensus state-machine -func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State, blocksSynced int) { +func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int) { conR.Logger.Info("SwitchToConsensus") conR.conS.reconstructLastCommit(state) // NOTE: The line below causes broadcastNewRoundStepRoutine() to diff --git a/consensus/replay.go b/consensus/replay.go index a9aaeefc..55c29178 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -13,6 +13,7 @@ import ( abci "github.com/tendermint/abci/types" //auto "github.com/tendermint/tmlibs/autofile" cmn "github.com/tendermint/tmlibs/common" + dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" "github.com/tendermint/tendermint/proxy" @@ -186,15 +187,16 @@ func makeHeightSearchFunc(height int64) auto.SearchFunc { // we were last and using the WAL to recover there type Handshaker struct { - state *sm.State - store types.BlockStore - logger log.Logger + stateDB dbm.DB + initialState sm.State + store types.BlockStore + logger log.Logger nBlocks int // number of blocks applied to the state } -func NewHandshaker(state *sm.State, store types.BlockStore) *Handshaker { - return &Handshaker{state, store, log.NewNopLogger(), 0} +func NewHandshaker(stateDB dbm.DB, state sm.State, store types.BlockStore) *Handshaker { + return &Handshaker{stateDB, state, store, log.NewNopLogger(), 0} } func (h *Handshaker) SetLogger(l log.Logger) { @@ -224,7 +226,7 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // TODO: check version // replay blocks up to the latest in the blockstore - _, err = h.ReplayBlocks(appHash, blockHeight, proxyApp) + _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) if err != nil { return fmt.Errorf("Error on replay: %v", err) } @@ -238,15 +240,15 @@ func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error { // Replay all blocks since appBlockHeight and 
ensure the result matches the current state. // Returns the final AppHash or an error -func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) { +func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) { storeBlockHeight := h.store.Height() - stateBlockHeight := h.state.LastBlockHeight + stateBlockHeight := state.LastBlockHeight h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight) // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain if appBlockHeight == 0 { - validators := types.TM2PB.Validators(h.state.Validators) + validators := types.TM2PB.Validators(state.Validators) if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{validators}); err != nil { return nil, err } @@ -254,7 +256,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp // First handle edge cases and constraints on the storeBlockHeight if storeBlockHeight == 0 { - return appHash, h.checkAppHash(appHash) + return appHash, checkAppHash(state, appHash) } else if storeBlockHeight < appBlockHeight { // the app should never be ahead of the store (but this is under app's control) @@ -269,6 +271,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) } + var err error // Now either store is equal to state, or one ahead. // For each, consider all cases of where the app could be, given app <= store if storeBlockHeight == stateBlockHeight { @@ -276,11 +279,11 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp // Either the app is asking for replay, or we're all synced up. 
if appBlockHeight < storeBlockHeight { // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(proxyApp, appBlockHeight, storeBlockHeight, false) + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) } else if appBlockHeight == storeBlockHeight { // We're good! - return appHash, h.checkAppHash(appHash) + return appHash, checkAppHash(state, appHash) } } else if storeBlockHeight == stateBlockHeight+1 { @@ -289,7 +292,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp if appBlockHeight < stateBlockHeight { // the app is further behind than it should be, so replay blocks // but leave the last block to go through the WAL - return h.replayBlocks(proxyApp, appBlockHeight, storeBlockHeight, true) + return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) } else if appBlockHeight == stateBlockHeight { // We haven't run Commit (both the state and app are one block behind), @@ -297,17 +300,19 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp // NOTE: We could instead use the cs.WAL on cs.Start, // but we'd have to allow the WAL to replay a block that wrote it's ENDHEIGHT h.logger.Info("Replay last block using real app") - return h.replayBlock(storeBlockHeight, proxyApp.Consensus()) + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + return state.AppHash, err } else if appBlockHeight == storeBlockHeight { // We ran Commit, but didn't save the state, so replayBlock with mock app - abciResponses, err := sm.LoadABCIResponses(h.state.DB(), storeBlockHeight) + abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) if err != nil { return nil, err } mockApp := newMockProxyApp(appHash, abciResponses) h.logger.Info("Replay last block using mock app") - return h.replayBlock(storeBlockHeight, mockApp) + state, err = h.replayBlock(state, storeBlockHeight, 
mockApp) + return state.AppHash, err } } @@ -316,7 +321,7 @@ func (h *Handshaker) ReplayBlocks(appHash []byte, appBlockHeight int64, proxyApp return nil, nil } -func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { +func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { // App is further behind than it should be, so we need to replay blocks. // We replay all blocks from appBlockHeight+1. // @@ -336,7 +341,7 @@ func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, store for i := appBlockHeight + 1; i <= finalBlock; i++ { h.logger.Info("Applying block", "height", i) block := h.store.LoadBlock(i) - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.state.LastValidators) + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger) if err != nil { return nil, err } @@ -346,33 +351,41 @@ func (h *Handshaker) replayBlocks(proxyApp proxy.AppConns, appBlockHeight, store if mutateState { // sync the final block - return h.replayBlock(storeBlockHeight, proxyApp.Consensus()) + state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) + if err != nil { + return nil, err + } + appHash = state.AppHash } - return appHash, h.checkAppHash(appHash) + return appHash, checkAppHash(state, appHash) } // ApplyBlock on the proxyApp with the last block. 
-func (h *Handshaker) replayBlock(height int64, proxyApp proxy.AppConnConsensus) ([]byte, error) { +func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { mempool := types.MockMempool{} evpool := types.MockEvidencePool{} block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) - if err := h.state.ApplyBlock(types.NopEventBus{}, proxyApp, - block, meta.BlockID.PartsHeader, mempool, evpool); err != nil { - return nil, err + blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, + types.NopEventBus{}, proxyApp, mempool, evpool) + + var err error + state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + if err != nil { + return sm.State{}, err } h.nBlocks += 1 - return h.state.AppHash, nil + return state, nil } -func (h *Handshaker) checkAppHash(appHash []byte) error { - if !bytes.Equal(h.state.AppHash, appHash) { - panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. Got %X, expected %X", appHash, h.state.AppHash).Error()) +func checkAppHash(state sm.State, appHash []byte) error { + if !bytes.Equal(state.AppHash, appHash) { + panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. 
Got %X, expected %X", appHash, state.AppHash).Error()) } return nil } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 4db58ada..d832abad 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -18,6 +18,7 @@ import ( "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tmlibs/log" ) const ( @@ -104,11 +105,11 @@ type playback struct { count int // how many lines/msgs into the file are we // replays can be reset to beginning - fileName string // so we can close/reopen the file - genesisState *sm.State // so the replay session knows where to restart from + fileName string // so we can close/reopen the file + genesisState sm.State // so the replay session knows where to restart from } -func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState *sm.State) *playback { +func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.State) *playback { return &playback{ cs: cs, fp: fp, @@ -123,7 +124,7 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error { pb.cs.Stop() pb.cs.Wait() - newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.proxyAppConn, + newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, pb.cs.blockStore, pb.cs.mempool, pb.cs.evpool) newCS.SetEventBus(pb.cs.eventBus) newCS.startForReplay() @@ -285,14 +286,14 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo // Get State stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir()) - state, err := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile()) + state, err := sm.MakeGenesisStateFromFile(config.GenesisFile()) if err != nil { cmn.Exit(err.Error()) } // Create proxyAppConn connection (consensus, mempool, query) clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) - proxyApp := 
proxy.NewAppConns(clientCreator, NewHandshaker(state, blockStore)) + proxyApp := proxy.NewAppConns(clientCreator, NewHandshaker(stateDB, state, blockStore)) err = proxyApp.Start() if err != nil { cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err)) @@ -303,8 +304,13 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err)) } - consensusState := NewConsensusState(csConfig, state.Copy(), proxyApp.Consensus(), - blockStore, types.MockMempool{}, types.MockEvidencePool{}) + mempool, evpool := types.MockMempool{}, types.MockEvidencePool{} + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), + nil, proxyApp.Consensus(), + mempool, evpool) + + consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, + blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) return consensusState diff --git a/consensus/replay_test.go b/consensus/replay_test.go index f1a060ec..4647ff3d 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -54,7 +54,6 @@ func init() { func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { logger := log.TestingLogger() state, _ := sm.GetState(stateDB, consensusReplayConfig.GenesisFile()) - state.SetLogger(logger.With("module", "state")) privValidator := loadPrivValidator(consensusReplayConfig) cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB) cs.SetLogger(logger) @@ -98,22 +97,22 @@ func sendTxs(cs *ConsensusState, ctx context.Context) { func TestWALCrash(t *testing.T) { testCases := []struct { name string - initFn func(*ConsensusState, context.Context) + initFn func(dbm.DB, *ConsensusState, context.Context) heightToStop int64 }{ {"empty block", - func(cs *ConsensusState, ctx context.Context) {}, + func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {}, 1}, {"block with a 
smaller part size", - func(cs *ConsensusState, ctx context.Context) { + func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) { // XXX: is there a better way to change BlockPartSizeBytes? cs.state.ConsensusParams.BlockPartSizeBytes = 512 - cs.state.Save() + sm.SaveState(stateDB, cs.state) go sendTxs(cs, ctx) }, 1}, {"many non-empty blocks", - func(cs *ConsensusState, ctx context.Context) { + func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) { go sendTxs(cs, ctx) }, 3}, @@ -126,7 +125,7 @@ func TestWALCrash(t *testing.T) { } } -func crashWALandCheckLiveness(t *testing.T, initFn func(*ConsensusState, context.Context), heightToStop int64) { +func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) { walPaniced := make(chan error) crashingWal := &crashingWAL{panicCh: walPaniced, heightToStop: heightToStop} @@ -139,8 +138,7 @@ LOOP: // create consensus state from a clean slate logger := log.NewNopLogger() stateDB := dbm.NewMemDB() - state, _ := sm.MakeGenesisStateFromFile(stateDB, consensusReplayConfig.GenesisFile()) - state.SetLogger(logger.With("module", "state")) + state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) blockDB := dbm.NewMemDB() cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB) @@ -148,7 +146,7 @@ LOOP: // start sending transactions ctx, cancel := context.WithCancel(context.Background()) - initFn(cs, ctx) + initFn(stateDB, cs, ctx) // clean up WAL file from the previous iteration walFile := cs.config.WalFile() @@ -344,12 +342,13 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { t.Fatalf(err.Error()) } - state, store := stateAndStore(config, privVal.GetPubKey()) + stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) store.chain = chain store.commits = commits // run the chain 
through state.ApplyBlock to build up the tendermint state - latestAppHash := buildTMStateFromChain(config, state, chain, mode) + state = buildTMStateFromChain(config, stateDB, state, chain, mode) + latestAppHash := state.AppHash // make a new client creator dummyApp := dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "2")) @@ -358,12 +357,12 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { // run nBlocks against a new client to build up the app state. // use a throwaway tendermint state proxyApp := proxy.NewAppConns(clientCreator2, nil) - state, _ := stateAndStore(config, privVal.GetPubKey()) - buildAppStateFromChain(proxyApp, state, chain, nBlocks, mode) + stateDB, state, _ := stateAndStore(config, privVal.GetPubKey()) + buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode) } // now start the app using the handshake - it should sync - handshaker := NewHandshaker(state, store) + handshaker := NewHandshaker(stateDB, state, store) proxyApp := proxy.NewAppConns(clientCreator2, handshaker) if err := proxyApp.Start(); err != nil { t.Fatalf("Error starting proxy app connections: %v", err) @@ -393,16 +392,21 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { } } -func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) { +func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { testPartSize := st.ConsensusParams.BlockPartSizeBytes - err := st.ApplyBlock(types.NopEventBus{}, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), + types.NopEventBus{}, proxyApp.Consensus(), mempool, evpool) + + blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()} + newState, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) } + return newState } -func buildAppStateFromChain(proxyApp proxy.AppConns, - state *sm.State, chain 
[]*types.Block, nBlocks int, mode uint) { +func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, + state sm.State, chain []*types.Block, nBlocks int, mode uint) { // start a new app without handshake, play nBlocks blocks if err := proxyApp.Start(); err != nil { panic(err) @@ -418,24 +422,24 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, case 0: for i := 0; i < nBlocks; i++ { block := chain[i] - applyBlock(state, block, proxyApp) + state = applyBlock(stateDB, state, block, proxyApp) } case 1, 2: for i := 0; i < nBlocks-1; i++ { block := chain[i] - applyBlock(state, block, proxyApp) + state = applyBlock(stateDB, state, block, proxyApp) } if mode == 2 { // update the dummy height and apphash // as if we ran commit but not - applyBlock(state, chain[nBlocks-1], proxyApp) + state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) } } } -func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.Block, mode uint) []byte { +func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State { // run the whole chain against this client to build up the tendermint state clientCreator := proxy.NewLocalClientCreator(dummy.NewPersistentDummyApplication(path.Join(config.DBDir(), "1"))) proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock)) @@ -449,31 +453,26 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B panic(err) } - var latestAppHash []byte - switch mode { case 0: // sync right up for _, block := range chain { - applyBlock(state, block, proxyApp) + state = applyBlock(stateDB, state, block, proxyApp) } - latestAppHash = state.AppHash case 1, 2: // sync up to the penultimate as if we stored the block. 
// whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { - applyBlock(state, block, proxyApp) + state = applyBlock(stateDB, state, block, proxyApp) } // apply the final block to a state copy so we can // get the right next appHash but keep the state back - stateCopy := state.Copy() - applyBlock(stateCopy, chain[len(chain)-1], proxyApp) - latestAppHash = stateCopy.AppHash + applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) } - return latestAppHash + return state } //-------------------------- @@ -587,13 +586,11 @@ func readPieceFromWAL(msg *TimedWALMessage) interface{} { } // fresh state and mock store -func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBlockStore) { +func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (dbm.DB, sm.State, *mockBlockStore) { stateDB := dbm.NewMemDB() - state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile()) - state.SetLogger(log.TestingLogger().With("module", "state")) - + state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) store := NewMockBlockStore(config, state.ConsensusParams) - return state, store + return stateDB, state, store } //---------------------------------- diff --git a/consensus/state.go b/consensus/state.go index 5e83e6a5..477d872b 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -17,7 +17,6 @@ import ( cfg "github.com/tendermint/tendermint/config" cstypes "github.com/tendermint/tendermint/consensus/types" - "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -75,15 +74,16 @@ type ConsensusState struct { privValidator types.PrivValidator // for signing votes // services for creating and executing blocks - proxyAppConn proxy.AppConnConsensus - blockStore types.BlockStore - mempool types.Mempool - evpool types.EvidencePool + // TODO: encapsulate all of this in one "BlockManager" + blockExec *sm.BlockExecutor + blockStore 
types.BlockStore + mempool types.Mempool + evpool types.EvidencePool // internal state mtx sync.Mutex cstypes.RoundState - state *sm.State // State until height-1. + state sm.State // State until height-1. // state changes may be triggered by msgs from peers, // msgs from ourself, or by timeouts @@ -114,10 +114,10 @@ type ConsensusState struct { } // NewConsensusState returns a new ConsensusState. -func NewConsensusState(config *cfg.ConsensusConfig, state *sm.State, proxyAppConn proxy.AppConnConsensus, blockStore types.BlockStore, mempool types.Mempool, evpool types.EvidencePool) *ConsensusState { +func NewConsensusState(config *cfg.ConsensusConfig, state sm.State, blockExec *sm.BlockExecutor, blockStore types.BlockStore, mempool types.Mempool, evpool types.EvidencePool) *ConsensusState { cs := &ConsensusState{ config: config, - proxyAppConn: proxyAppConn, + blockExec: blockExec, blockStore: blockStore, mempool: mempool, peerMsgQueue: make(chan msgInfo, msgQueueSize), @@ -162,7 +162,7 @@ func (cs *ConsensusState) String() string { } // GetState returns a copy of the chain state. -func (cs *ConsensusState) GetState() *sm.State { +func (cs *ConsensusState) GetState() sm.State { cs.mtx.Lock() defer cs.mtx.Unlock() return cs.state.Copy() @@ -399,7 +399,7 @@ func (cs *ConsensusState) sendInternalMessage(mi msgInfo) { // Reconstruct LastCommit from SeenCommit, which we saved along with the block, // (which happens even before saving the state) -func (cs *ConsensusState) reconstructLastCommit(state *sm.State) { +func (cs *ConsensusState) reconstructLastCommit(state sm.State) { if state.LastBlockHeight == 0 { return } @@ -422,12 +422,12 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) { // Updates ConsensusState and increments height to match that of state. // The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. 
-func (cs *ConsensusState) updateToState(state *sm.State) { +func (cs *ConsensusState) updateToState(state sm.State) { if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v", cs.Height, state.LastBlockHeight)) } - if cs.state != nil && cs.state.LastBlockHeight+1 != cs.Height { + if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { // This might happen when someone else is mutating cs.state. // Someone forgot to pass in state.Copy() somewhere?! cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", @@ -437,7 +437,7 @@ func (cs *ConsensusState) updateToState(state *sm.State) { // If state isn't further out than cs.state, just ignore. // This happens when SwitchToConsensus() is called in the reactor. // We don't want to reset e.g. the Votes. - if cs.state != nil && (state.LastBlockHeight <= cs.state.LastBlockHeight) { + if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) { cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1) return } @@ -922,7 +922,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { } // Validate proposal block - err := cs.state.ValidateBlock(cs.ProposalBlock) + err := sm.ValidateBlock(cs.state, cs.ProposalBlock) if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) @@ -1030,7 +1030,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { if cs.ProposalBlock.HashesTo(blockID.Hash) { cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. 
- if err := cs.state.ValidateBlock(cs.ProposalBlock); err != nil { + if err := sm.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round @@ -1165,7 +1165,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) { if !block.HashesTo(blockID.Hash) { cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) } - if err := cs.state.ValidateBlock(block); err != nil { + if err := sm.ValidateBlock(cs.state, block); err != nil { cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err)) } @@ -1204,13 +1204,12 @@ func (cs *ConsensusState) finalizeCommit(height int64) { // and an event cache for txs stateCopy := cs.state.Copy() txEventBuffer := types.NewTxEventBuffer(cs.eventBus, int(block.NumTxs)) + cs.blockExec.SetTxEventPublisher(txEventBuffer) // Execute and commit the block, update and save the state, and update the mempool. - // All calls to the proxyAppConn come here. // NOTE: the block.AppHash wont reflect these txs until the next block - err := stateCopy.ApplyBlock(txEventBuffer, cs.proxyAppConn, - block, blockParts.Header(), - cs.mempool, cs.evpool) + var err error + stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block) if err != nil { cs.Logger.Error("Error on ApplyBlock. Did the application crash? 
Please restart tendermint", "err", err) err := cmn.Kill() diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 73ad3e7f..fe9066b3 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -47,13 +47,12 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { } stateDB := db.NewMemDB() blockStoreDB := db.NewMemDB() - state, err := sm.MakeGenesisState(stateDB, genDoc) - state.SetLogger(logger.With("module", "state")) + state, err := sm.MakeGenesisState(genDoc) if err != nil { return nil, errors.Wrap(err, "failed to make genesis state") } blockStore := bc.NewBlockStore(blockStoreDB) - handshaker := NewHandshaker(state, blockStore) + handshaker := NewHandshaker(stateDB, state, blockStore) proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker) proxyApp.SetLogger(logger.With("module", "proxy")) if err := proxyApp.Start(); err != nil { @@ -68,7 +67,8 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { defer eventBus.Stop() mempool := types.MockMempool{} evpool := types.MockEvidencePool{} - consensusState := NewConsensusState(config.Consensus, state.Copy(), proxyApp.Consensus(), blockStore, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), nil, proxyApp.Consensus(), mempool, evpool) + consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) if privValidator != nil { diff --git a/evidence/pool.go b/evidence/pool.go index 381801df..2e7cd470 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -21,13 +21,13 @@ type EvidencePool struct { evidenceChan chan types.Evidence } -func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state types.State) *EvidencePool { +func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore) *EvidencePool { evpool := &EvidencePool{ params: params, logger: 
log.NewNopLogger(), evidenceStore: evidenceStore, - state: *state, - evidenceChan: make(chan types.Evidence), + // state: *state, + evidenceChan: make(chan types.Evidence), } return evpool } @@ -58,12 +58,15 @@ func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) { // TODO: check if we already have evidence for this // validator at this height so we dont get spammed - priority, err := sm.VerifyEvidence(evpool.state, evidence) - if err != nil { - // TODO: if err is just that we cant find it cuz we pruned, ignore. - // TODO: if its actually bad evidence, punish peer - return err - } + // TODO + var priority int64 + /* + priority, err := sm.VerifyEvidence(evpool.state, evidence) + if err != nil { + // TODO: if err is just that we cant find it cuz we pruned, ignore. + // TODO: if its actually bad evidence, punish peer + return err + }*/ added := evpool.evidenceStore.AddNewEvidence(evidence, priority) if !added { diff --git a/node/node.go b/node/node.go index 53eab6e0..15e4f043 100644 --- a/node/node.go +++ b/node/node.go @@ -102,7 +102,8 @@ type Node struct { trustMetricStore *trust.TrustMetricStore // trust metrics for all peers // services - eventBus *types.EventBus // pub/sub for services + eventBus *types.EventBus // pub/sub for services + stateDB dbm.DB blockStore *bc.BlockStore // store the blockchain to disk bcReactor *bc.BlockchainReactor // for fast-syncing mempoolReactor *mempl.MempoolReactor // for gossipping transactions @@ -148,21 +149,20 @@ func NewNode(config *cfg.Config, saveGenesisDoc(stateDB, genDoc) } - stateLogger := logger.With("module", "state") state := sm.LoadState(stateDB) - if state == nil { - state, err = sm.MakeGenesisState(stateDB, genDoc) + if state.IsEmpty() { + state, err = sm.MakeGenesisState(genDoc) if err != nil { return nil, err } - state.Save() + sm.SaveState(stateDB, state) } - state.SetLogger(stateLogger) // Create the proxyApp, which manages connections (consensus, mempool, query) - // and sync tendermint 
and the app by replaying any necessary blocks + // and sync tendermint and the app by performing a handshake + // and replaying any necessary blocks consensusLogger := logger.With("module", "consensus") - handshaker := consensus.NewHandshaker(state, blockStore) + handshaker := consensus.NewHandshaker(stateDB, state, blockStore) handshaker.SetLogger(consensusLogger) proxyApp := proxy.NewAppConns(clientCreator, handshaker) proxyApp.SetLogger(logger.With("module", "proxy")) @@ -172,7 +172,6 @@ func NewNode(config *cfg.Config, // reload the state (it may have been updated by the handshake) state = sm.LoadState(stateDB) - state.SetLogger(stateLogger) // Generate node PrivKey privKey := crypto.GenPrivKeyEd25519() @@ -194,10 +193,6 @@ func NewNode(config *cfg.Config, consensusLogger.Info("This node is not a validator") } - // Make BlockchainReactor - bcReactor := bc.NewBlockchainReactor(state.Copy(), proxyApp.Consensus(), blockStore, fastSync) - bcReactor.SetLogger(logger.With("module", "blockchain")) - // Make MempoolReactor mempoolLogger := logger.With("module", "mempool") mempool := mempl.NewMempool(config.Mempool, proxyApp.Mempool(), state.LastBlockHeight) @@ -216,14 +211,24 @@ func NewNode(config *cfg.Config, } evidenceLogger := logger.With("module", "evidence") evidenceStore := evidence.NewEvidenceStore(evidenceDB) - evidencePool := evidence.NewEvidencePool(state.ConsensusParams.EvidenceParams, evidenceStore, state.Copy()) + evidencePool := evidence.NewEvidencePool(state.ConsensusParams.EvidenceParams, evidenceStore) // , state.Copy()) evidencePool.SetLogger(evidenceLogger) evidenceReactor := evidence.NewEvidenceReactor(evidencePool) evidenceReactor.SetLogger(evidenceLogger) + blockExecLogger := logger.With("module", "state") + // make block executor for consensus and blockchain reactors to execute blocks + blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, + nil, proxyApp.Consensus(), + mempool, evidencePool) + + // Make BlockchainReactor + bcReactor := 
bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + bcReactor.SetLogger(logger.With("module", "blockchain")) + // Make ConsensusReactor consensusState := consensus.NewConsensusState(config.Consensus, state.Copy(), - proxyApp.Consensus(), blockStore, mempool, evidencePool) + blockExec, blockStore, mempool, evidencePool) consensusState.SetLogger(consensusLogger) if privValidator != nil { consensusState.SetPrivValidator(privValidator) @@ -291,7 +296,6 @@ func NewNode(config *cfg.Config, eventBus.SetLogger(logger.With("module", "events")) // services which will be publishing and/or subscribing for messages (events) - bcReactor.SetEventBus(eventBus) consensusReactor.SetEventBus(eventBus) // Transaction indexing @@ -333,6 +337,7 @@ func NewNode(config *cfg.Config, addrBook: addrBook, trustMetricStore: trustMetricStore, + stateDB: stateDB, blockStore: blockStore, bcReactor: bcReactor, mempoolReactor: mempoolReactor, @@ -429,6 +434,7 @@ func (n *Node) AddListener(l p2p.Listener) { // ConfigureRPC sets all variables in rpccore so they will serve // rpc calls from this node func (n *Node) ConfigureRPC() { + rpccore.SetStateDB(n.stateDB) rpccore.SetBlockStore(n.blockStore) rpccore.SetConsensusState(n.consensusState) rpccore.SetMempool(n.mempoolReactor.Mempool) diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index 8b0ee459..853bb0f7 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -337,8 +337,7 @@ func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { } // load the results - state := consensusState.GetState() - results, err := sm.LoadABCIResponses(state.DB(), height) + results, err := sm.LoadABCIResponses(stateDB, height) if err != nil { return nil, err } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index eedcce27..65c9fc36 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -50,8 +50,7 @@ func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { return nil, err } - state := 
consensusState.GetState() - validators, err := sm.LoadValidators(state.DB(), height) + validators, err := sm.LoadValidators(stateDB, height) if err != nil { return nil, err } diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 325625c7..927d7cca 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -11,6 +11,7 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" ) @@ -20,7 +21,7 @@ var subscribeTimeout = 5 * time.Second // These interfaces are used by RPC and must be thread safe type Consensus interface { - GetState() *sm.State + GetState() sm.State GetValidators() (int64, []*types.Validator) GetRoundState() *cstypes.RoundState } @@ -43,6 +44,7 @@ var ( proxyAppQuery proxy.AppConnQuery // interfaces defined in types and above + stateDB dbm.DB blockStore types.BlockStore mempool types.Mempool evidencePool types.EvidencePool @@ -60,6 +62,10 @@ var ( logger log.Logger ) +func SetStateDB(db dbm.DB) { + stateDB = db +} + func SetBlockStore(bs types.BlockStore) { blockStore = bs } diff --git a/state/db.go b/state/db.go index 32f62584..fbe99863 100644 --- a/state/db.go +++ b/state/db.go @@ -36,7 +36,7 @@ func GetState(stateDB dbm.DB, genesisFile string) (State, error) { if err != nil { return state, err } - SaveState(stateDB, state, state.AppHash) + SaveState(stateDB, state) } return state, nil @@ -66,9 +66,7 @@ func loadState(db dbm.DB, key []byte) (state State) { } // SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. -// It sets the given appHash on the state before persisting. 
-func SaveState(db dbm.DB, s State, appHash []byte) { - s.AppHash = appHash +func SaveState(db dbm.DB, s State) { nextHeight := s.LastBlockHeight + 1 saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) diff --git a/state/execution.go b/state/execution.go index 88ee1127..ce45e4c4 100644 --- a/state/execution.go +++ b/state/execution.go @@ -30,6 +30,10 @@ type BlockExecutor struct { evpool types.EvidencePool } +func (blockExec *BlockExecutor) SetTxEventPublisher(txEventPublisher types.TxEventPublisher) { + blockExec.txEventPublisher = txEventPublisher +} + // NewBlockExecutor returns a new BlockExecutor. func NewBlockExecutor(db dbm.DB, logger log.Logger, txEventer types.TxEventPublisher, proxyApp proxy.AppConnConsensus, @@ -82,8 +86,9 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block fail.Fail() // XXX - // save the state and the validators - SaveState(blockExec.db, s, appHash) + // update the app hash and save the state + s.AppHash = appHash + SaveState(blockExec.db, s) return s, nil } @@ -110,7 +115,7 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { blockExec.logger.Debug("Commit.Log: " + res.Log) } - blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "hash", res.Data) + blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "appHash", res.Data) // Update evpool blockExec.evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) @@ -343,16 +348,14 @@ func updateState(s State, blockID types.BlockID, header *types.Header, } func fireEvents(txEventPublisher types.TxEventPublisher, block *types.Block, abciResponses *ABCIResponses) { - // TODO: Fire events - /* - tx := types.Tx(req.GetDeliverTx().Tx) + for i, tx := range block.Data.Txs { txEventPublisher.PublishEventTx(types.EventDataTx{types.TxResult{ Height: 
block.Height, - Index: uint32(txIndex), + Index: uint32(i), Tx: tx, - Result: *txRes, + Result: *(abciResponses.DeliverTx[i]), }}) - */ + } } //---------------------------------------------------------------------------------------------------- diff --git a/state/state.go b/state/state.go index ed8a2013..7ffa56ed 100644 --- a/state/state.go +++ b/state/state.go @@ -91,7 +91,7 @@ func (s State) Bytes() []byte { // IsEmpty returns true if the State is equal to the empty State. func (s State) IsEmpty() bool { - return s.LastBlockHeight == 0 // XXX can't compare to Empty + return s.Validators == nil // XXX can't compare to Empty } // GetValidators returns the last and current validator sets. diff --git a/state/state_test.go b/state/state_test.go index cbd3c813..7e9ed2cf 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -57,7 +57,7 @@ func TestStateSaveLoad(t *testing.T) { assert := assert.New(t) state.LastBlockHeight++ - SaveState(stateDB, state, state.AppHash) + SaveState(stateDB, state) loadedState := LoadState(stateDB) assert.True(state.Equals(loadedState), @@ -261,7 +261,7 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { const valSetSize = 7 tearDown, stateDB, state := setupTestCase(t) state.Validators = genValSet(valSetSize) - SaveState(stateDB, state, state.AppHash) + SaveState(stateDB, state) defer tearDown(t) const height = 1 @@ -425,7 +425,7 @@ func TestLessThanOneThirdOfVotingPowerPerBlockEnforced(t *testing.T) { for i, tc := range testCases { tearDown, stateDB, state := setupTestCase(t) state.Validators = genValSet(tc.initialValSetSize) - SaveState(stateDB, state, state.AppHash) + SaveState(stateDB, state) height := state.LastBlockHeight + 1 block := makeBlock(state, height) abciResponses := &ABCIResponses{ From 537b0dfa1a0cb0048c6100b54c413c0877681c3a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 22:25:46 -0500 Subject: [PATCH 08/33] use NopEventBus --- blockchain/reactor_test.go | 2 +- consensus/common_test.go | 2 
+- consensus/replay_file.go | 2 +- consensus/wal_generator.go | 2 +- node/node.go | 2 +- 5 files changed, 5 insertions(+), 5 deletions(-) diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index f58b8394..2edff18b 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -32,7 +32,7 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe // Make the blockchainReactor itself fastSync := true blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), - nil, nil, types.MockMempool{}, types.MockEvidencePool{}) + types.NopEventBus{}, nil, types.MockMempool{}, types.MockEvidencePool{}) bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) bcReactor.SetLogger(logger.With("module", "blockchain")) diff --git a/consensus/common_test.go b/consensus/common_test.go index ba3564aa..1516365e 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -266,7 +266,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S // Make ConsensusReactor stateDB := dbm.NewMemDB() // XXX !! 
blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), - nil, proxyAppConnCon, mempool, evpool) + types.NopEventBus{}, proxyAppConnCon, mempool, evpool) cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger()) cs.SetPrivValidator(pv) diff --git a/consensus/replay_file.go b/consensus/replay_file.go index d832abad..4e490ccb 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -306,7 +306,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo mempool, evpool := types.MockMempool{}, types.MockEvidencePool{} blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), - nil, proxyApp.Consensus(), + types.NopEventBus{}, proxyApp.Consensus(), mempool, evpool) consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index fe9066b3..c4171b12 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -67,7 +67,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { defer eventBus.Stop() mempool := types.MockMempool{} evpool := types.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), nil, proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), types.NopEventBus{}, proxyApp.Consensus(), mempool, evpool) consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) diff --git a/node/node.go b/node/node.go index 15e4f043..603fafec 100644 --- a/node/node.go +++ b/node/node.go @@ -219,7 +219,7 @@ func NewNode(config *cfg.Config, blockExecLogger := logger.With("module", "state") // make block executor for consensus and blockchain reactors to execute blocks blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, - nil, proxyApp.Consensus(), + 
types.NopEventBus{}, proxyApp.Consensus(), mempool, evidencePool) // Make BlockchainReactor From 397251b0f4d7f63f447d9b2d76bcdde612bf8633 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Wed, 27 Dec 2017 22:39:58 -0500 Subject: [PATCH 09/33] fix evidence --- evidence/pool.go | 35 +++++++++++++++++++++++------------ evidence/pool_test.go | 11 +++-------- evidence/reactor_test.go | 3 +-- evidence/store_test.go | 6 ++++-- node/node.go | 2 +- 5 files changed, 32 insertions(+), 25 deletions(-) diff --git a/evidence/pool.go b/evidence/pool.go index 2e7cd470..cdc01398 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -3,6 +3,7 @@ package evidence import ( "github.com/tendermint/tmlibs/log" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -13,16 +14,16 @@ type EvidencePool struct { evidenceStore *EvidenceStore - chainID string - lastBlockHeight int64 - params types.EvidenceParams + state sm.State + params types.EvidenceParams // never close evidenceChan chan types.Evidence } -func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore) *EvidencePool { +func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state sm.State) *EvidencePool { evpool := &EvidencePool{ + state: state, params: params, logger: log.NewNopLogger(), evidenceStore: evidenceStore, @@ -58,15 +59,25 @@ func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) { // TODO: check if we already have evidence for this // validator at this height so we dont get spammed - // TODO + if err := sm.VerifyEvidence(evpool.state, evidence); err != nil { + return err + } + var priority int64 - /* - priority, err := sm.VerifyEvidence(evpool.state, evidence) - if err != nil { - // TODO: if err is just that we cant find it cuz we pruned, ignore. - // TODO: if its actually bad evidence, punish peer - return err - }*/ + /* // Needs a db ... + // TODO: if err is just that we cant find it cuz we pruned, ignore. 
+ // TODO: if its actually bad evidence, punish peer + + valset, err := LoadValidators(s.db, ev.Height()) + if err != nil { + // XXX/TODO: what do we do if we can't load the valset? + // eg. if we have pruned the state or height is too high? + return err + } + if err := VerifyEvidenceValidator(valSet, ev); err != nil { + return types.NewEvidenceInvalidErr(ev, err) + } + */ added := evpool.evidenceStore.AddNewEvidence(evidence, priority) if !added { diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 0997505c..d7b94e88 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -6,24 +6,19 @@ import ( "github.com/stretchr/testify/assert" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tmlibs/db" ) -type mockState struct{} - -func (m mockState) VerifyEvidence(ev types.Evidence) (int64, error) { - err := ev.Verify("") - return 10, err -} +var mockState = sm.State{} func TestEvidencePool(t *testing.T) { assert := assert.New(t) params := types.EvidenceParams{} store := NewEvidenceStore(dbm.NewMemDB()) - state := mockState{} - pool := NewEvidencePool(params, store, state) + pool := NewEvidencePool(params, store, mockState) goodEvidence := newMockGoodEvidence(5, 1, []byte("val1")) badEvidence := MockBadEvidence{goodEvidence} diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index fb83667c..fc4ea571 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -39,8 +39,7 @@ func makeAndConnectEvidenceReactors(config *cfg.Config, N int) []*EvidenceReacto params := types.EvidenceParams{} store := NewEvidenceStore(dbm.NewMemDB()) - state := mockState{} - pool := NewEvidencePool(params, store, state) + pool := NewEvidencePool(params, store, mockState) reactors[i] = NewEvidenceReactor(pool) reactors[i].SetLogger(logger.With("validator", i)) } diff --git a/evidence/store_test.go b/evidence/store_test.go index 7828d37b..192aabc2 100644 --- a/evidence/store_test.go +++ 
b/evidence/store_test.go @@ -113,12 +113,14 @@ func TestStorePriority(t *testing.T) { //------------------------------------------- const ( - evidenceTypeMock = byte(0x01) + evidenceTypeMockGood = byte(0x01) + evidenceTypeMockBad = byte(0x02) ) var _ = wire.RegisterInterface( struct{ types.Evidence }{}, - wire.ConcreteType{MockGoodEvidence{}, evidenceTypeMock}, + wire.ConcreteType{MockGoodEvidence{}, evidenceTypeMockGood}, + wire.ConcreteType{MockBadEvidence{}, evidenceTypeMockBad}, ) type MockGoodEvidence struct { diff --git a/node/node.go b/node/node.go index 603fafec..cd1e6320 100644 --- a/node/node.go +++ b/node/node.go @@ -211,7 +211,7 @@ func NewNode(config *cfg.Config, } evidenceLogger := logger.With("module", "evidence") evidenceStore := evidence.NewEvidenceStore(evidenceDB) - evidencePool := evidence.NewEvidencePool(state.ConsensusParams.EvidenceParams, evidenceStore) // , state.Copy()) + evidencePool := evidence.NewEvidencePool(state.ConsensusParams.EvidenceParams, evidenceStore, state.Copy()) evidencePool.SetLogger(evidenceLogger) evidenceReactor := evidence.NewEvidenceReactor(evidencePool) evidenceReactor.SetLogger(evidenceLogger) From 1d6f00859dd46beae036146782f0f088b2e50e63 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 28 Dec 2017 18:26:13 -0500 Subject: [PATCH 10/33] fixes from review --- blockchain/reactor_test.go | 12 ++++-------- consensus/common_test.go | 9 +++------ consensus/replay.go | 6 +----- consensus/replay_file.go | 4 +--- consensus/replay_test.go | 5 ++--- consensus/wal_generator.go | 2 +- evidence/pool.go | 3 +-- node/node.go | 15 +++++---------- state/execution.go | 35 +++++++++++++++++++++-------------- state/execution_test.go | 11 +---------- state/state.go | 2 +- state/state_test.go | 4 ++-- state/{db.go => store.go} | 29 +++++++++++++++++++++++++---- 13 files changed, 68 insertions(+), 69 deletions(-) rename state/{db.go => store.go} (90%) diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 
2edff18b..fcb8a6f8 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -10,6 +10,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/p2p" + "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -17,12 +18,7 @@ import ( func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { config := cfg.ResetTestRoot("blockchain_reactor_test") blockStore := NewBlockStore(dbm.NewMemDB()) - - // Get State - stateDB := dbm.NewMemDB() - state, _ := sm.GetState(stateDB, config.GenesisFile()) - sm.SaveState(stateDB, state) - + state, _ := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) return state, blockStore } @@ -31,8 +27,8 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainRe // Make the blockchainReactor itself fastSync := true - blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), - types.NopEventBus{}, nil, types.MockMempool{}, types.MockEvidencePool{}) + var nilApp proxy.AppConnConsensus + blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp, types.MockMempool{}, types.MockEvidencePool{}) bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) bcReactor.SetLogger(logger.With("module", "blockchain")) diff --git a/consensus/common_test.go b/consensus/common_test.go index 1516365e..eb574a21 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -265,8 +265,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S // Make ConsensusReactor stateDB := dbm.NewMemDB() // XXX !! 
- blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), - types.NopEventBus{}, proxyAppConnCon, mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger()) cs.SetPrivValidator(pv) @@ -356,8 +355,7 @@ func randConsensusNet(nValidators int, testName string, tickerFunc func() Timeou logger := consensusLogger() for i := 0; i < nValidators; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.MakeGenesisState(genDoc) - sm.SaveState(stateDB, state) + state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) for _, opt := range configOpts { opt(thisConfig) @@ -381,8 +379,7 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF logger := consensusLogger() for i := 0; i < nPeers; i++ { stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.MakeGenesisState(genDoc) - sm.SaveState(stateDB, state) + state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal var privVal types.PrivValidator diff --git a/consensus/replay.go b/consensus/replay.go index 55c29178..784e8bd6 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -363,14 +363,10 @@ func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBl // ApplyBlock on the proxyApp with the last block. 
func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { - mempool := types.MockMempool{} - evpool := types.MockEvidencePool{} - block := h.store.LoadBlock(height) meta := h.store.LoadBlockMeta(height) - blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, - types.NopEventBus{}, proxyApp, mempool, evpool) + blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, types.MockMempool{}, types.MockEvidencePool{}) var err error state, err = blockExec.ApplyBlock(state, meta.BlockID, block) diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 4e490ccb..26b8baeb 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -305,9 +305,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo } mempool, evpool := types.MockMempool{}, types.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), - types.NopEventBus{}, proxyApp.Consensus(), - mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 4647ff3d..c497ed54 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -53,7 +53,7 @@ func init() { func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { logger := log.TestingLogger() - state, _ := sm.GetState(stateDB, consensusReplayConfig.GenesisFile()) + state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile()) privValidator := loadPrivValidator(consensusReplayConfig) cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, dummy.NewDummyApplication(), blockDB) cs.SetLogger(logger) @@ -394,8 +394,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, 
mode uint) { func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { testPartSize := st.ConsensusParams.BlockPartSizeBytes - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), - types.NopEventBus{}, proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()} newState, err := blockExec.ApplyBlock(st, blkID, blk) diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index c4171b12..45609e56 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -67,7 +67,7 @@ func WALWithNBlocks(numBlocks int) (data []byte, err error) { defer eventBus.Stop() mempool := types.MockMempool{} evpool := types.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), types.NopEventBus{}, proxyApp.Consensus(), mempool, evpool) + blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetLogger(logger) consensusState.SetEventBus(eventBus) diff --git a/evidence/pool.go b/evidence/pool.go index cdc01398..4d706da8 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -27,8 +27,7 @@ func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, params: params, logger: log.NewNopLogger(), evidenceStore: evidenceStore, - // state: *state, - evidenceChan: make(chan types.Evidence), + evidenceChan: make(chan types.Evidence), } return evpool } diff --git a/node/node.go b/node/node.go index cd1e6320..2eea4ed2 100644 --- a/node/node.go +++ b/node/node.go @@ -138,6 +138,7 @@ func NewNode(config *cfg.Config, } // Get genesis doc + // TODO: move to state package? 
genDoc, err := loadGenesisDoc(stateDB) if err != nil { genDoc, err = genesisDocProvider() @@ -149,13 +150,9 @@ func NewNode(config *cfg.Config, saveGenesisDoc(stateDB, genDoc) } - state := sm.LoadState(stateDB) - if state.IsEmpty() { - state, err = sm.MakeGenesisState(genDoc) - if err != nil { - return nil, err - } - sm.SaveState(stateDB, state) + state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) + if err != nil { + return nil, err } // Create the proxyApp, which manages connections (consensus, mempool, query) @@ -218,9 +215,7 @@ func NewNode(config *cfg.Config, blockExecLogger := logger.With("module", "state") // make block executor for consensus and blockchain reactors to execute blocks - blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, - types.NopEventBus{}, proxyApp.Consensus(), - mempool, evidencePool) + blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool) // Make BlockchainReactor bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) diff --git a/state/execution.go b/state/execution.go index ce45e4c4..68cb13bd 100644 --- a/state/execution.go +++ b/state/execution.go @@ -20,34 +20,41 @@ import ( // BlockExecutor provides the context and accessories for properly executing a block. type BlockExecutor struct { - db dbm.DB - logger log.Logger + // save state, validators, consensus params, abci responses here + db dbm.DB + // execute the app against this + proxyApp proxy.AppConnConsensus + + // tx events txEventPublisher types.TxEventPublisher - proxyApp proxy.AppConnConsensus + // update these with block results after commit mempool types.Mempool evpool types.EvidencePool -} -func (blockExec *BlockExecutor) SetTxEventPublisher(txEventPublisher types.TxEventPublisher) { - blockExec.txEventPublisher = txEventPublisher + logger log.Logger } // NewBlockExecutor returns a new BlockExecutor. 
-func NewBlockExecutor(db dbm.DB, logger log.Logger, - txEventer types.TxEventPublisher, proxyApp proxy.AppConnConsensus, +func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool types.Mempool, evpool types.EvidencePool) *BlockExecutor { return &BlockExecutor{ - db, - logger, - txEventer, - proxyApp, - mempool, - evpool, + db: db, + proxyApp: proxyApp, + txEventPublisher: types.NopEventBus{}, + mempool: mempool, + evpool: evpool, + logger: logger, } } +// SetTxEventPublisher - set the transaction event publisher. If not called, +// it defaults to types.NopEventBus. +func (blockExec *BlockExecutor) SetTxEventPublisher(txEventPublisher types.TxEventPublisher) { + blockExec.txEventPublisher = txEventPublisher +} + // ApplyBlock validates the block against the state, executes it against the app, // commits it, and saves the block and state. It's the only function that needs to be called // from outside this package to process and commit an entire block. diff --git a/state/execution_test.go b/state/execution_test.go index 1a63d3ed..9db26911 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -32,8 +32,7 @@ func TestApplyBlock(t *testing.T) { state, stateDB := state(), dbm.NewMemDB() - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), - types.NopEventBus{}, proxyApp.Consensus(), + blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), types.MockMempool{}, types.MockEvidencePool{}) block := makeBlock(state, 1) @@ -56,14 +55,6 @@ func TestBeginBlockAbsentValidators(t *testing.T) { state := state() - // there were 2 validators - /*val1PrivKey := crypto.GenPrivKeyEd25519() - val2PrivKey := crypto.GenPrivKeyEd25519() - lastValidators := types.NewValidatorSet([]*types.Validator{ - types.NewValidator(val1PrivKey.PubKey(), 10), - types.NewValidator(val2PrivKey.PubKey(), 5), - })*/ - prevHash := state.LastBlockID.Hash prevParts := types.PartSetHeader{} prevBlockID := 
types.BlockID{prevHash, prevParts} diff --git a/state/state.go b/state/state.go index 7ffa56ed..575a1630 100644 --- a/state/state.go +++ b/state/state.go @@ -23,7 +23,7 @@ var ( // including the last validator set and the consensus params. // All fields are exposed so the struct can be easily serialized, // but none of them should be mutated directly. -// Instead, use state.Copy() ro state.NextState(...). +// Instead, use state.Copy() or state.NextState(...). // NOTE: not goroutine-safe. type State struct { // Immutable diff --git a/state/state_test.go b/state/state_test.go index 7e9ed2cf..61b3167b 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -23,8 +23,8 @@ import ( func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { config := cfg.ResetTestRoot("state_") stateDB := dbm.NewDB("state", config.DBBackend, config.DBDir()) - state, err := GetState(stateDB, config.GenesisFile()) - assert.NoError(t, err, "expected no error on GetState") + state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) + assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") tearDown := func(t *testing.T) {} diff --git a/state/db.go b/state/store.go similarity index 90% rename from state/db.go rename to state/store.go index fbe99863..de2d4d67 100644 --- a/state/db.go +++ b/state/store.go @@ -25,14 +25,31 @@ func calcABCIResponsesKey(height int64) []byte { return []byte(cmn.Fmt("abciResponsesKey:%v", height)) } -// GetState loads the most recent state from the database, -// or creates a new one from the given genesisFile and persists the result +// LoadStateFromDBOrGenesisFile loads the most recent state from the database, +// or creates a new one from the given genesisFilePath and persists the result // to the database. 
-func GetState(stateDB dbm.DB, genesisFile string) (State, error) { +func LoadStateFromDBOrGenesisFile(stateDB dbm.DB, genesisFilePath string) (State, error) { state := LoadState(stateDB) if state.IsEmpty() { var err error - state, err = MakeGenesisStateFromFile(genesisFile) + state, err = MakeGenesisStateFromFile(genesisFilePath) + if err != nil { + return state, err + } + SaveState(stateDB, state) + } + + return state, nil +} + +// LoadStateFromDBOrGenesisDoc loads the most recent state from the database, +// or creates a new one from the given genesisDoc and persists the result +// to the database. +func LoadStateFromDBOrGenesisDoc(stateDB dbm.DB, genesisDoc *types.GenesisDoc) (State, error) { + state := LoadState(stateDB) + if state.IsEmpty() { + var err error + state, err = MakeGenesisState(genesisDoc) if err != nil { return state, err } @@ -67,6 +84,10 @@ func loadState(db dbm.DB, key []byte) (state State) { // SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. 
func SaveState(db dbm.DB, s State) { + saveState(db, s, stateKey) +} + +func saveState(db dbm.DB, s State, key []byte) { nextHeight := s.LastBlockHeight + 1 saveValidatorsInfo(db, nextHeight, s.LastHeightValidatorsChanged, s.Validators) saveConsensusParamsInfo(db, nextHeight, s.LastHeightConsensusParamsChanged, s.ConsensusParams) From 8d8d63c94cc26a8b7456f9e85d869da4ab8fed6e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 28 Dec 2017 18:28:27 -0500 Subject: [PATCH 11/33] changelog --- CHANGELOG.md | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index f9f0809d..71fe223e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -3,9 +3,7 @@ ## Roadmap BREAKING CHANGES: -- Upgrade the header to support better proofs on validtors, results, evidence, and possibly more - Better support for injecting randomness -- Pass evidence/voteInfo through ABCI - Upgrade consensus for more real-time use of evidence FEATURES: @@ -32,6 +30,27 @@ BUG FIXES: BREAKING CHANGES: - [p2p] enable the Peer Exchange reactor by default - [types] add Timestamp field to Proposal/Vote +- [types] add new fields to Header: TotalTxs, ConsensusParamsHash, LastResultsHash, EvidenceHash +- [types] add Evidence to Block +- [types] simplify ValidateBasic +- [state] updates to support changes to the header +- [state] Enforce <1/3 of validator set can change at a time + +FEATURES: +- [state] Send indices of absent validators and addresses of byzantine validators in BeginBlock +- [state] Historical ConsensusParams and ABCIResponses +- [docs] Specification for the base Tendermint data structures. +- [evidence] New evidence reactor for gossiping and managing evidence +- [rpc] `/block_results?height=X` returns the DeliverTx results for a given height. 
+ +IMPROVEMENTS: +- [consensus] Better handling of corrupt WAL file + +BUG FIXES: +- [lite] fix race +- [state] validate block.Header.ValidatorsHash +- [p2p] allow seed addresses to be prefixed with eg. `tcp://` +- [cmd] fix `tendermint init` to ignore files that are there and generate files that aren't. ## 0.14.0 (December 11, 2017) From ae68fcb78a5400f0ab83a756ca8da28ea3c3cf6b Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 28 Dec 2017 18:58:05 -0500 Subject: [PATCH 12/33] move fireEvents to ApplyBlock --- blockchain/reactor.go | 1 - consensus/common_test.go | 2 +- consensus/state.go | 18 -------------- node/node.go | 1 + state/execution.go | 54 ++++++++++++++++++++++++++-------------- types/events.go | 7 ++++++ 6 files changed, 45 insertions(+), 38 deletions(-) diff --git a/blockchain/reactor.go b/blockchain/reactor.go index c8e794a1..d4b803dd 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -284,7 +284,6 @@ FOR_LOOP: bcR.store.SaveBlock(first, firstParts, second.LastCommit) - // TODO: should we be firing events? need to fire NewBlock events manually ... // NOTE: we could improve performance if we // didn't make the app commit to disk every block // ... but we would need a way to get the hash without it persisting diff --git a/consensus/common_test.go b/consensus/common_test.go index eb574a21..249e7732 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -264,7 +264,7 @@ func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.S evpool := types.MockEvidencePool{} // Make ConsensusReactor - stateDB := dbm.NewMemDB() // XXX !! 
+ stateDB := dbm.NewMemDB() blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) cs.SetLogger(log.TestingLogger()) diff --git a/consensus/state.go b/consensus/state.go index 477d872b..69858da0 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -1203,8 +1203,6 @@ func (cs *ConsensusState) finalizeCommit(height int64) { // Create a copy of the state for staging // and an event cache for txs stateCopy := cs.state.Copy() - txEventBuffer := types.NewTxEventBuffer(cs.eventBus, int(block.NumTxs)) - cs.blockExec.SetTxEventPublisher(txEventBuffer) // Execute and commit the block, update and save the state, and update the mempool. // NOTE: the block.AppHash wont reflect these txs until the next block @@ -1221,22 +1219,6 @@ func (cs *ConsensusState) finalizeCommit(height int64) { fail.Fail() // XXX - // Fire event for new block. - // NOTE: If we fail before firing, these events will never fire - // - // TODO: Either - // * Fire before persisting state, in ApplyBlock - // * Fire on start up if we haven't written any new WAL msgs - // Both options mean we may fire more than once. Is that fine ? - cs.eventBus.PublishEventNewBlock(types.EventDataNewBlock{block}) - cs.eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header}) - err = txEventBuffer.Flush() - if err != nil { - cs.Logger.Error("Failed to flush event buffer", "err", err) - } - - fail.Fail() // XXX - // NewHeightStep! 
cs.updateToState(stateCopy) diff --git a/node/node.go b/node/node.go index 2eea4ed2..fe51b941 100644 --- a/node/node.go +++ b/node/node.go @@ -291,6 +291,7 @@ func NewNode(config *cfg.Config, eventBus.SetLogger(logger.With("module", "events")) // services which will be publishing and/or subscribing for messages (events) + blockExec.SetEventBus(eventBus) consensusReactor.SetEventBus(eventBus) // Transaction indexing diff --git a/state/execution.go b/state/execution.go index 68cb13bd..8b05733a 100644 --- a/state/execution.go +++ b/state/execution.go @@ -26,8 +26,8 @@ type BlockExecutor struct { // execute the app against this proxyApp proxy.AppConnConsensus - // tx events - txEventPublisher types.TxEventPublisher + // events + eventBus types.BlockEventPublisher // update these with block results after commit mempool types.Mempool @@ -36,27 +36,29 @@ type BlockExecutor struct { logger log.Logger } -// NewBlockExecutor returns a new BlockExecutor. +// NewBlockExecutor returns a new BlockExecutor with a NopEventBus. +// Call SetEventBus to provide one. func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, mempool types.Mempool, evpool types.EvidencePool) *BlockExecutor { return &BlockExecutor{ - db: db, - proxyApp: proxyApp, - txEventPublisher: types.NopEventBus{}, - mempool: mempool, - evpool: evpool, - logger: logger, + db: db, + proxyApp: proxyApp, + eventBus: types.NopEventBus{}, + mempool: mempool, + evpool: evpool, + logger: logger, } } -// SetTxEventPublisher - set the transaction event publisher. If not called, -// it defaults to types.NopEventBus. -func (blockExec *BlockExecutor) SetTxEventPublisher(txEventPublisher types.TxEventPublisher) { - blockExec.txEventPublisher = txEventPublisher +// SetEventBus - sets the event bus for publishing block related events. +// If not called, it defaults to types.NopEventBus. 
+func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { + blockExec.eventBus = eventBus } // ApplyBlock validates the block against the state, executes it against the app, -// commits it, and saves the block and state. It's the only function that needs to be called +// fires the relevent events, commits the app, and saves the new state and responses. +// It's the only function that needs to be called // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block *types.Block) (State, error) { @@ -70,8 +72,6 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block return s, ErrProxyAppConn(err) } - fireEvents(blockExec.txEventPublisher, block, abciResponses) - fail.Fail() // XXX // save the results before we commit @@ -97,6 +97,12 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block s.AppHash = appHash SaveState(blockExec.db, s) + fail.Fail() // XXX + + // events are fired after everything else + // NOTE: if we crash between Commit and Save, events wont be fired during replay + fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses) + return s, nil } @@ -354,15 +360,27 @@ func updateState(s State, blockID types.BlockID, header *types.Header, }, nil } -func fireEvents(txEventPublisher types.TxEventPublisher, block *types.Block, abciResponses *ABCIResponses) { +// Fire NewBlock, NewBlockHeader. +// Fire TxEvent for every tx. +// NOTE: if Tendermint crashes before commit, some or all of these events may be published again. +func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses) { + // NOTE: do we still need this buffer ? 
+ txEventBuffer := types.NewTxEventBuffer(eventBus, int(block.NumTxs)) for i, tx := range block.Data.Txs { - txEventPublisher.PublishEventTx(types.EventDataTx{types.TxResult{ + txEventBuffer.PublishEventTx(types.EventDataTx{types.TxResult{ Height: block.Height, Index: uint32(i), Tx: tx, Result: *(abciResponses.DeliverTx[i]), }}) } + + eventBus.PublishEventNewBlock(types.EventDataNewBlock{block}) + eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header}) + err := txEventBuffer.Flush() + if err != nil { + logger.Error("Failed to flush event buffer", "err", err) + } } //---------------------------------------------------------------------------------------------------- diff --git a/types/events.go b/types/events.go index 5c41c6df..d6f7b012 100644 --- a/types/events.go +++ b/types/events.go @@ -175,6 +175,13 @@ func QueryForEvent(eventType string) tmpubsub.Query { return tmquery.MustParse(fmt.Sprintf("%s='%s'", EventTypeKey, eventType)) } +// BlockEventPublisher publishes all block related events +type BlockEventPublisher interface { + PublishEventNewBlock(block EventDataNewBlock) error + PublishEventNewBlockHeader(header EventDataNewBlockHeader) error + PublishEventTx(EventDataTx) error +} + type TxEventPublisher interface { PublishEventTx(EventDataTx) error } From 6112578d07c97033380ac4c5902c5bcb108d8340 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 28 Dec 2017 19:35:56 -0500 Subject: [PATCH 13/33] ValidateBlock is a method on blockExec --- consensus/state.go | 7 ++++--- node/node.go | 2 +- state/execution.go | 10 +++++++++- state/validation.go | 7 ++++--- 4 files changed, 18 insertions(+), 8 deletions(-) diff --git a/consensus/state.go b/consensus/state.go index 69858da0..518d81c5 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -153,6 +153,7 @@ func (cs *ConsensusState) SetLogger(l log.Logger) { // SetEventBus sets event bus. 
func (cs *ConsensusState) SetEventBus(b *types.EventBus) { cs.eventBus = b + cs.blockExec.SetEventBus(b) } // String returns a string. @@ -922,7 +923,7 @@ func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { } // Validate proposal block - err := sm.ValidateBlock(cs.state, cs.ProposalBlock) + err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) if err != nil { // ProposalBlock is invalid, prevote nil. logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) @@ -1030,7 +1031,7 @@ func (cs *ConsensusState) enterPrecommit(height int64, round int) { if cs.ProposalBlock.HashesTo(blockID.Hash) { cs.Logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) // Validate the block. - if err := sm.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { + if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) } cs.LockedRound = round @@ -1165,7 +1166,7 @@ func (cs *ConsensusState) finalizeCommit(height int64) { if !block.HashesTo(blockID.Hash) { cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) } - if err := sm.ValidateBlock(cs.state, block); err != nil { + if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil { cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err)) } diff --git a/node/node.go b/node/node.go index fe51b941..04b1fb14 100644 --- a/node/node.go +++ b/node/node.go @@ -291,7 +291,7 @@ func NewNode(config *cfg.Config, eventBus.SetLogger(logger.With("module", "events")) // services which will be publishing and/or subscribing for messages (events) - blockExec.SetEventBus(eventBus) + // consensusReactor will set it on consensusState and blockExecutor consensusReactor.SetEventBus(eventBus) // Transaction indexing diff --git a/state/execution.go b/state/execution.go index 8b05733a..b3acd711 100644 --- 
a/state/execution.go +++ b/state/execution.go @@ -56,6 +56,14 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) blockExec.eventBus = eventBus } +// ValidateBlock validates the given block against the given state. +// If the block is invalid, it returns an error. +// Validation does not mutate state, but does require historical information from the stateDB, +// ie. to verify evidence from a validator at an old height. +func (blockExec *BlockExecutor) ValidateBlock(s State, block *types.Block) error { + return validateBlock(blockExec.db, s, block) +} + // ApplyBlock validates the block against the state, executes it against the app, // fires the relevent events, commits the app, and saves the new state and responses. // It's the only function that needs to be called @@ -63,7 +71,7 @@ func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) // It takes a blockID to avoid recomputing the parts hash. func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block *types.Block) (State, error) { - if err := validateBlock(s, block); err != nil { + if err := blockExec.ValidateBlock(s, block); err != nil { return s, ErrInvalidBlock(err) } diff --git a/state/validation.go b/state/validation.go index 69200840..5c9197bc 100644 --- a/state/validation.go +++ b/state/validation.go @@ -6,17 +6,18 @@ import ( "fmt" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tmlibs/db" ) //----------------------------------------------------- // Validate block // ValidateBlock validates the block against the state. 
-func ValidateBlock(s State, block *types.Block) error { - return validateBlock(s, block) +func _ValidateBlock(s State, block *types.Block) error { + return validateBlock(dbm.NewMemDB(), s, block) } -func validateBlock(s State, b *types.Block) error { +func validateBlock(stateDB dbm.DB, s State, b *types.Block) error { // validate internal consistency if err := b.ValidateBasic(); err != nil { return err From cb845ebff5d3772bedf7536b1b14fda0d0a0310a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 28 Dec 2017 21:08:39 -0500 Subject: [PATCH 14/33] fix EvidencePool and VerifyEvidence --- evidence/pool.go | 63 ++++++++++++++++++++++++++-------------- evidence/pool_test.go | 39 +++++++++++++++++++++++-- evidence/reactor_test.go | 29 ++++++++++++------ node/node.go | 2 +- state/execution.go | 8 +++-- state/validation.go | 34 ++++++++-------------- state/validation_test.go | 24 ++++++++------- types/services.go | 8 ++--- 8 files changed, 134 insertions(+), 73 deletions(-) diff --git a/evidence/pool.go b/evidence/pool.go index 4d706da8..07c35134 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -1,6 +1,10 @@ package evidence import ( + "fmt" + "sync" + + dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" sm "github.com/tendermint/tendermint/state" @@ -14,17 +18,21 @@ type EvidencePool struct { evidenceStore *EvidenceStore - state sm.State - params types.EvidenceParams + // needed to load validators to verify evidence + stateDB dbm.DB + + // latest state + mtx sync.Mutex + state sm.State // never close evidenceChan chan types.Evidence } -func NewEvidencePool(params types.EvidenceParams, evidenceStore *EvidenceStore, state sm.State) *EvidencePool { +func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool { evpool := &EvidencePool{ - state: state, - params: params, + stateDB: stateDB, + state: sm.LoadState(stateDB), logger: log.NewNopLogger(), evidenceStore: evidenceStore, evidenceChan: make(chan types.Evidence), @@ 
-52,31 +60,44 @@ func (evpool *EvidencePool) PendingEvidence() []types.Evidence { return evpool.evidenceStore.PendingEvidence() } +// State returns the current state of the evpool. +func (evpool *EvidencePool) State() sm.State { + evpool.mtx.Lock() + defer evpool.mtx.Unlock() + return evpool.state +} + +// Update loads the latest +func (evpool *EvidencePool) Update(block *types.Block) { + evpool.mtx.Lock() + defer evpool.mtx.Unlock() + + state := sm.LoadState(evpool.stateDB) + if state.LastBlockHeight != block.Height { + panic(fmt.Sprintf("EvidencePool.Update: loaded state with height %d when block.Height=%d", state.LastBlockHeight, block.Height)) + } + evpool.state = state + + // NOTE: shouldn't need the mutex + evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) +} + // AddEvidence checks the evidence is valid and adds it to the pool. // Blocks on the EvidenceChan. func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) { + // TODO: check if we already have evidence for this // validator at this height so we dont get spammed - if err := sm.VerifyEvidence(evpool.state, evidence); err != nil { + if err := sm.VerifyEvidence(evpool.stateDB, evpool.State(), evidence); err != nil { return err } - var priority int64 - /* // Needs a db ... - // TODO: if err is just that we cant find it cuz we pruned, ignore. - // TODO: if its actually bad evidence, punish peer - - valset, err := LoadValidators(s.db, ev.Height()) - if err != nil { - // XXX/TODO: what do we do if we can't load the valset? - // eg. if we have pruned the state or height is too high? - return err - } - if err := VerifyEvidenceValidator(valSet, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) - } - */ + // fetch the validator and return its voting power as its priority + // TODO: something better ? 
+ valset, _ := sm.LoadValidators(evpool.stateDB, evidence.Height()) + _, val := valset.GetByAddress(evidence.Address()) + priority := val.VotingPower added := evpool.evidenceStore.AddNewEvidence(evidence, priority) if !added { diff --git a/evidence/pool_test.go b/evidence/pool_test.go index d7b94e88..f5b5205b 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -3,6 +3,7 @@ package evidence import ( "sync" "testing" + "time" "github.com/stretchr/testify/assert" @@ -13,14 +14,46 @@ import ( var mockState = sm.State{} +func initializeValidatorState(valAddr []byte, height int64) dbm.DB { + stateDB := dbm.NewMemDB() + + // create validator set and state + valSet := &types.ValidatorSet{ + Validators: []*types.Validator{ + {Address: valAddr}, + }, + } + state := sm.State{ + LastBlockHeight: 0, + LastBlockTime: time.Now(), + Validators: valSet, + LastHeightValidatorsChanged: 1, + ConsensusParams: types.ConsensusParams{ + EvidenceParams: types.EvidenceParams{ + MaxAge: 1000000, + }, + }, + } + + // save all states up to height + for i := int64(0); i < height; i++ { + state.LastBlockHeight = i + sm.SaveState(stateDB, state) + } + + return stateDB +} + func TestEvidencePool(t *testing.T) { assert := assert.New(t) - params := types.EvidenceParams{} + valAddr := []byte("val1") + height := int64(5) + stateDB := initializeValidatorState(valAddr, height) store := NewEvidenceStore(dbm.NewMemDB()) - pool := NewEvidencePool(params, store, mockState) + pool := NewEvidencePool(stateDB, store) - goodEvidence := newMockGoodEvidence(5, 1, []byte("val1")) + goodEvidence := newMockGoodEvidence(height, 0, valAddr) badEvidence := MockBadEvidence{goodEvidence} err := pool.AddEvidence(badEvidence) diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index fc4ea571..77c58734 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -32,14 +32,14 @@ func evidenceLogger() log.Logger { } // connect N evidence reactors through N switches -func 
makeAndConnectEvidenceReactors(config *cfg.Config, N int) []*EvidenceReactor { +func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*EvidenceReactor { + N := len(stateDBs) reactors := make([]*EvidenceReactor, N) logger := evidenceLogger() for i := 0; i < N; i++ { - params := types.EvidenceParams{} store := NewEvidenceStore(dbm.NewMemDB()) - pool := NewEvidencePool(params, store, mockState) + pool := NewEvidencePool(stateDBs[i], store) reactors[i] = NewEvidenceReactor(pool) reactors[i].SetLogger(logger.With("validator", i)) } @@ -98,10 +98,10 @@ func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, wg.Done() } -func sendEvidence(t *testing.T, evpool *EvidencePool, n int) types.EvidenceList { +func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) types.EvidenceList { evList := make([]types.Evidence, n) for i := 0; i < n; i++ { - ev := newMockGoodEvidence(int64(i), 2, []byte("val")) + ev := newMockGoodEvidence(int64(i+1), 0, valAddr) err := evpool.AddEvidence(ev) assert.Nil(t, err) evList[i] = ev @@ -110,17 +110,28 @@ func sendEvidence(t *testing.T, evpool *EvidencePool, n int) types.EvidenceList } var ( - NUM_EVIDENCE = 1000 + NUM_EVIDENCE = 1 TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow ) func TestReactorBroadcastEvidence(t *testing.T) { config := cfg.TestConfig() N := 7 - reactors := makeAndConnectEvidenceReactors(config, N) - // send a bunch of evidence to the first reactor's evpool + // create statedb for everyone + stateDBs := make([]dbm.DB, N) + valAddr := []byte("myval") + // we need validators saved for heights at least as high as we have evidence for + height := int64(NUM_EVIDENCE) + 10 + for i := 0; i < N; i++ { + stateDBs[i] = initializeValidatorState(valAddr, height) + } + + // make reactors from statedb + reactors := makeAndConnectEvidenceReactors(config, stateDBs) + + // send a bunch of valid evidence to the first reactor's evpool // and wait for them 
all to be received in the others - evList := sendEvidence(t, reactors[0].evpool, NUM_EVIDENCE) + evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE) waitForEvidence(t, evList, reactors) } diff --git a/node/node.go b/node/node.go index 04b1fb14..f922d832 100644 --- a/node/node.go +++ b/node/node.go @@ -208,7 +208,7 @@ func NewNode(config *cfg.Config, } evidenceLogger := logger.With("module", "evidence") evidenceStore := evidence.NewEvidenceStore(evidenceDB) - evidencePool := evidence.NewEvidencePool(state.ConsensusParams.EvidenceParams, evidenceStore, state.Copy()) + evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore) evidencePool.SetLogger(evidenceLogger) evidenceReactor := evidence.NewEvidenceReactor(evidencePool) evidenceReactor.SetLogger(evidenceLogger) diff --git a/state/execution.go b/state/execution.go index b3acd711..847ac131 100644 --- a/state/execution.go +++ b/state/execution.go @@ -107,6 +107,11 @@ func (blockExec *BlockExecutor) ApplyBlock(s State, blockID types.BlockID, block fail.Fail() // XXX + // Update evpool now that state is saved + // TODO: handle the crash/recover scenario + // ie. (may need to call Update for last block) + blockExec.evpool.Update(block) + // events are fired after everything else // NOTE: if we crash between Commit and Save, events wont be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses) @@ -138,9 +143,6 @@ func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) { blockExec.logger.Info("Committed state", "height", block.Height, "txs", block.NumTxs, "appHash", res.Data) - // Update evpool - blockExec.evpool.MarkEvidenceAsCommitted(block.Evidence.Evidence) - // Update mempool. 
if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil { return nil, err diff --git a/state/validation.go b/state/validation.go index 5c9197bc..dfca78ac 100644 --- a/state/validation.go +++ b/state/validation.go @@ -79,20 +79,9 @@ func validateBlock(stateDB dbm.DB, s State, b *types.Block) error { } for _, ev := range b.Evidence.Evidence { - if err := VerifyEvidence(s, ev); err != nil { + if err := VerifyEvidence(stateDB, s, ev); err != nil { return types.NewEvidenceInvalidErr(ev, err) } - /* // Needs a db ... - valset, err := LoadValidators(s.db, ev.Height()) - if err != nil { - // XXX/TODO: what do we do if we can't load the valset? - // eg. if we have pruned the state or height is too high? - return err - } - if err := VerifyEvidenceValidator(valSet, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) - } - */ } return nil @@ -103,7 +92,7 @@ func validateBlock(stateDB dbm.DB, s State, b *types.Block) error { // VerifyEvidence verifies the evidence fully by checking it is internally // consistent and sufficiently recent. -func VerifyEvidence(s State, evidence types.Evidence) error { +func VerifyEvidence(stateDB dbm.DB, s State, evidence types.Evidence) error { height := s.LastBlockHeight evidenceAge := height - evidence.Height() @@ -116,22 +105,23 @@ func VerifyEvidence(s State, evidence types.Evidence) error { if err := evidence.Verify(s.ChainID); err != nil { return err } - return nil -} -// VerifyEvidenceValidator returns the voting power of the validator at the height of the evidence. -// It returns an error if the validator did not exist or does not match that loaded from the historical validator set. -func VerifyEvidenceValidator(valset *types.ValidatorSet, evidence types.Evidence) (priority int64, err error) { + valset, err := LoadValidators(stateDB, evidence.Height()) + if err != nil { + // TODO: if err is just that we cant find it cuz we pruned, ignore. 
+ // TODO: if its actually bad evidence, punish peer + return err + } + // The address must have been an active validator at the height ev := evidence height, addr, idx := ev.Height(), ev.Address(), ev.Index() valIdx, val := valset.GetByAddress(addr) if val == nil { - return priority, fmt.Errorf("Address %X was not a validator at height %d", addr, height) + return fmt.Errorf("Address %X was not a validator at height %d", addr, height) } else if idx != valIdx { - return priority, fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx) + return fmt.Errorf("Address %X was validator %d at height %d, not %d", addr, valIdx, height, idx) } - priority = val.VotingPower - return priority, nil + return nil } diff --git a/state/validation_test.go b/state/validation_test.go index a8e4d42e..e0b7fe9e 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -4,61 +4,65 @@ import ( "testing" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tmlibs/log" ) -func _TestValidateBlock(t *testing.T) { +func TestValidateBlock(t *testing.T) { state := state() + blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil) + // proper block must pass block := makeBlock(state, 1) - err := ValidateBlock(state, block) + err := blockExec.ValidateBlock(state, block) require.NoError(t, err) // wrong chain fails block = makeBlock(state, 1) block.ChainID = "not-the-real-one" - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong height fails block = makeBlock(state, 1) block.Height += 10 - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong total tx fails block = makeBlock(state, 1) block.TotalTxs += 10 - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong blockid fails block = makeBlock(state, 1) 
block.LastBlockID.PartsHeader.Total += 10 - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong app hash fails block = makeBlock(state, 1) block.AppHash = []byte("wrong app hash") - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong consensus hash fails block = makeBlock(state, 1) block.ConsensusHash = []byte("wrong consensus hash") - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong results hash fails block = makeBlock(state, 1) block.LastResultsHash = []byte("wrong results hash") - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) // wrong validators hash fails block = makeBlock(state, 1) block.ValidatorsHash = []byte("wrong validators hash") - err = ValidateBlock(state, block) + err = blockExec.ValidateBlock(state, block) require.Error(t, err) } diff --git a/types/services.go b/types/services.go index a901898f..6900fae7 100644 --- a/types/services.go +++ b/types/services.go @@ -78,7 +78,7 @@ type BlockStore interface { type EvidencePool interface { PendingEvidence() []Evidence AddEvidence(Evidence) error - MarkEvidenceAsCommitted([]Evidence) + Update(*Block) } // MockMempool is an empty implementation of a Mempool, useful for testing. 
@@ -86,6 +86,6 @@ type EvidencePool interface { type MockEvidencePool struct { } -func (m MockEvidencePool) PendingEvidence() []Evidence { return nil } -func (m MockEvidencePool) AddEvidence(Evidence) error { return nil } -func (m MockEvidencePool) MarkEvidenceAsCommitted([]Evidence) {} +func (m MockEvidencePool) PendingEvidence() []Evidence { return nil } +func (m MockEvidencePool) AddEvidence(Evidence) error { return nil } +func (m MockEvidencePool) Update(*Block) {} From 444db4c24294123784b84984b34634bd42caa9a6 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Thu, 28 Dec 2017 22:10:23 -0500 Subject: [PATCH 15/33] metalinter --- state/execution.go | 2 +- state/validation.go | 5 ----- 2 files changed, 1 insertion(+), 6 deletions(-) diff --git a/state/execution.go b/state/execution.go index 847ac131..4ccf87d4 100644 --- a/state/execution.go +++ b/state/execution.go @@ -65,7 +65,7 @@ func (blockExec *BlockExecutor) ValidateBlock(s State, block *types.Block) error } // ApplyBlock validates the block against the state, executes it against the app, -// fires the relevent events, commits the app, and saves the new state and responses. +// fires the relevant events, commits the app, and saves the new state and responses. // It's the only function that needs to be called // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. diff --git a/state/validation.go b/state/validation.go index dfca78ac..fb3e8d13 100644 --- a/state/validation.go +++ b/state/validation.go @@ -12,11 +12,6 @@ import ( //----------------------------------------------------- // Validate block -// ValidateBlock validates the block against the state. 
-func _ValidateBlock(s State, block *types.Block) error { - return validateBlock(dbm.NewMemDB(), s, block) -} - func validateBlock(stateDB dbm.DB, s State, b *types.Block) error { // validate internal consistency if err := b.ValidateBasic(); err != nil { From 3b70c89e079b4264abaad5af356856d613616b70 Mon Sep 17 00:00:00 2001 From: Emmanuel Odeke Date: Thu, 28 Dec 2017 23:26:42 -0700 Subject: [PATCH 16/33] README: document the minimum Go version Solidify in writing, the minimum Go version that we support, as Go1.9. --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index c6ece82f..1a4cc00c 100644 --- a/README.md +++ b/README.md @@ -26,6 +26,12 @@ and securely replicates it on many machines. For more information, from introduction to install to application development, [Read The Docs](https://tendermint.readthedocs.io/en/master/). +## Minimum requirements + +Requirement|Notes +---|--- +Go version | Go1.9 or higher + ## Install To download pre-built binaries, see our [downloads page](https://tendermint.com/downloads). From abfdfe67e8df2c847d2e6a663dd69150231e778f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 11:02:41 -0500 Subject: [PATCH 17/33] test/p2p: add some timeouts --- test/p2p/basic/test.sh | 23 ++++++++++++++++++++++- 1 file changed, 22 insertions(+), 1 deletion(-) diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh index 93444792..63df62be 100644 --- a/test/p2p/basic/test.sh +++ b/test/p2p/basic/test.sh @@ -10,16 +10,25 @@ N=$1 # wait to be at height > 1 ################################################################### +# wait 60s per step per peer +MAX_SLEEP=60 + # wait for everyone to come online echo "Waiting for nodes to come online" for i in `seq 1 $N`; do addr=$(test/p2p/ip.sh $i):46657 curl -s $addr/status > /dev/null ERR=$? + COUNT=0 while [ "$ERR" != 0 ]; do - sleep 1 + sleep 1 curl -s $addr/status > /dev/null ERR=$? 
+ COUNT=$((COUNT+1)) + if [ "$COUNT" -gt "$MAX_SLEEP" ]; then + echo "Waited too long for node $i to come online" + exit 1 + fi done echo "... node $i is up" done @@ -32,18 +41,30 @@ for i in `seq 1 $N`; do # - assert everyone has N-1 other peers N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` + COUNT=0 while [ "$N_PEERS" != $N_1 ]; do echo "Waiting for node $i to connect to all peers ..." sleep 1 N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` + COUNT=$((COUNT+1)) + if [ "$COUNT" -gt "$MAX_SLEEP" ]; then + echo "Waited too long for node $i to connect to all peers" + exit 1 + fi done # - assert block height is greater than 1 BLOCK_HEIGHT=`curl -s $addr/status | jq .result.latest_block_height` + COUNT=0 while [ "$BLOCK_HEIGHT" -le 1 ]; do echo "Waiting for node $i to commit a block ..." sleep 1 BLOCK_HEIGHT=`curl -s $addr/status | jq .result.latest_block_height` + COUNT=$((COUNT+1)) + if [ "$COUNT" -gt "$MAX_SLEEP" ]; then + echo "Waited too long for node $i to commit a block" + exit 1 + fi done echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT" done From e97e0bacd10a0af98219a8b533b378dbd411ae0a Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 11:11:13 -0500 Subject: [PATCH 18/33] update glide --- glide.lock | 26 ++++++++++++-------------- glide.yaml | 4 ++-- 2 files changed, 14 insertions(+), 16 deletions(-) diff --git a/glide.lock b/glide.lock index bdddaf58..b8335886 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: bd982742a0aee39426f3866914a59f7c47e68dc8dedb87219e2099c0d9c19f7e -updated: 2017-12-21T17:45:25.372327218Z +hash: 072c8e685dd519c1f509da67379b70451a681bf3ef6cbd82900a1f68c55bbe16 +updated: 2017-12-29T11:08:17.355999228-05:00 imports: - name: github.com/btcsuite/btcd version: 2e60448ffcc6bf78332d1fe590260095f554dd78 @@ -64,19 +64,17 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 
8d7837e64d3c1ee4e54a880c5a920ab4316fc90a + version: 49d762b9817ba1c2e9d0c69183c2b4a8b8f1d934 - name: github.com/mitchellh/mapstructure version: 06020f85339e21b2478f756a78e295255ffa4d6a -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 - name: github.com/pelletier/go-toml - version: b8b5e7696574464b2f9bf303a7b37781bb52889f + version: 0131db6d737cfbbfb678f8b7d92e55e27ce46224 - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/rcrowley/go-metrics - version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c + version: e181e095bae94582363434144c61a9653aff6e50 - name: github.com/spf13/afero - version: 5660eeed305fe5f69c8fc6cf899132a459a97064 + version: 57afd63c68602b63ed976de00dd066ccb3c319db subpackages: - mem - name: github.com/spf13/cast @@ -105,7 +103,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/abci - version: e4b9f1abe794a2117a59738a1294e09b46d0fa00 + version: 5d5ea6869b91cadb55dbc4211ad7b326f053a33e subpackages: - client - example/code @@ -131,7 +129,7 @@ imports: subpackages: - iavl - name: github.com/tendermint/tmlibs - version: 662a886dc20600ce11e1d684a15b83b5813e7277 + version: 91b4b534ad78e442192c8175db92a06a51064064 subpackages: - autofile - cli @@ -146,7 +144,7 @@ imports: - pubsub/query - test - name: golang.org/x/crypto - version: d585fd2cc9195196078f516b69daff6744ef5e84 + version: 95a4943f35d008beabde8c11e5075a1b714e6419 subpackages: - curve25519 - nacl/box @@ -167,11 +165,11 @@ imports: - lex/httplex - trace - name: golang.org/x/sys - version: d818ba11af4465e00c1998bd3f8a55603b422290 + version: 83801418e1b59fb1880e363299581ee543af32ca subpackages: - unix - name: golang.org/x/text - version: eb22672bea55af56d225d4e35405f4d2e9f062a0 + version: e19ae1496984b1c655b8044a65c0300a3c878dd3 subpackages: - secure/bidirule - transform @@ -203,7 +201,7 @@ imports: - name: gopkg.in/go-playground/validator.v9 version: 
b1f51f36f1c98cc97f777d6fc9d4b05eaa0cabb5 - name: gopkg.in/yaml.v2 - version: eb3733d160e74a9c7e442f435eb3bea458e1d19f + version: 287cf08546ab5e7e37d55a84f7ed3fd1db036de5 testImports: - name: github.com/davecgh/go-spew version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 diff --git a/glide.yaml b/glide.yaml index b14a3b8a..b7846d64 100644 --- a/glide.yaml +++ b/glide.yaml @@ -18,7 +18,7 @@ import: - package: github.com/spf13/viper version: v1.0.0 - package: github.com/tendermint/abci - version: develop + version: v0.9.0 subpackages: - client - example/dummy @@ -34,7 +34,7 @@ import: subpackages: - iavl - package: github.com/tendermint/tmlibs - version: develop + version: v0.6.0 subpackages: - autofile - cli From 28bbeac7639694e11e03f167b44376f9a12c9b3e Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 11:26:55 -0500 Subject: [PATCH 19/33] state: send byzantine validators in BeginBlock --- evidence/pool_test.go | 4 +-- evidence/reactor_test.go | 2 +- evidence/store_test.go | 66 +++++++--------------------------------- state/execution.go | 9 +++++- state/execution_test.go | 49 ++++++++++++++++++++++++++++- types/evidence.go | 47 ++++++++++++++++++++++++++++ 6 files changed, 117 insertions(+), 60 deletions(-) diff --git a/evidence/pool_test.go b/evidence/pool_test.go index f5b5205b..97a29a27 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -53,8 +53,8 @@ func TestEvidencePool(t *testing.T) { store := NewEvidenceStore(dbm.NewMemDB()) pool := NewEvidencePool(stateDB, store) - goodEvidence := newMockGoodEvidence(height, 0, valAddr) - badEvidence := MockBadEvidence{goodEvidence} + goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr) + badEvidence := types.MockBadEvidence{goodEvidence} err := pool.AddEvidence(badEvidence) assert.NotNil(err) diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index 77c58734..11c63929 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -101,7 +101,7 @@ func 
_waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) types.EvidenceList { evList := make([]types.Evidence, n) for i := 0; i < n; i++ { - ev := newMockGoodEvidence(int64(i+1), 0, valAddr) + ev := types.NewMockGoodEvidence(int64(i+1), 0, valAddr) err := evpool.AddEvidence(ev) assert.Nil(t, err) evList[i] = ev diff --git a/evidence/store_test.go b/evidence/store_test.go index 192aabc2..180bee58 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -1,8 +1,6 @@ package evidence import ( - "bytes" - "fmt" "testing" "github.com/stretchr/testify/assert" @@ -20,7 +18,7 @@ func TestStoreAddDuplicate(t *testing.T) { store := NewEvidenceStore(db) priority := int64(10) - ev := newMockGoodEvidence(2, 1, []byte("val1")) + ev := types.NewMockGoodEvidence(2, 1, []byte("val1")) added := store.AddNewEvidence(ev, priority) assert.True(added) @@ -43,7 +41,7 @@ func TestStoreMark(t *testing.T) { assert.Equal(0, len(pendingEv)) priority := int64(10) - ev := newMockGoodEvidence(2, 1, []byte("val1")) + ev := types.NewMockGoodEvidence(2, 1, []byte("val1")) added := store.AddNewEvidence(ev, priority) assert.True(added) @@ -89,15 +87,15 @@ func TestStorePriority(t *testing.T) { // sorted by priority and then height cases := []struct { - ev MockGoodEvidence + ev types.MockGoodEvidence priority int64 }{ - {newMockGoodEvidence(2, 1, []byte("val1")), 17}, - {newMockGoodEvidence(5, 2, []byte("val2")), 15}, - {newMockGoodEvidence(10, 2, []byte("val2")), 13}, - {newMockGoodEvidence(100, 2, []byte("val2")), 11}, - {newMockGoodEvidence(90, 2, []byte("val2")), 11}, - {newMockGoodEvidence(80, 2, []byte("val2")), 11}, + {types.NewMockGoodEvidence(2, 1, []byte("val1")), 17}, + {types.NewMockGoodEvidence(5, 2, []byte("val2")), 15}, + {types.NewMockGoodEvidence(10, 2, []byte("val2")), 13}, + {types.NewMockGoodEvidence(100, 2, []byte("val2")), 11}, + {types.NewMockGoodEvidence(90, 2, 
[]byte("val2")), 11}, + {types.NewMockGoodEvidence(80, 2, []byte("val2")), 11}, } for _, c := range cases { @@ -119,48 +117,6 @@ const ( var _ = wire.RegisterInterface( struct{ types.Evidence }{}, - wire.ConcreteType{MockGoodEvidence{}, evidenceTypeMockGood}, - wire.ConcreteType{MockBadEvidence{}, evidenceTypeMockBad}, + wire.ConcreteType{types.MockGoodEvidence{}, evidenceTypeMockGood}, + wire.ConcreteType{types.MockBadEvidence{}, evidenceTypeMockBad}, ) - -type MockGoodEvidence struct { - Height_ int64 - Address_ []byte - Index_ int -} - -func newMockGoodEvidence(height int64, index int, address []byte) MockGoodEvidence { - return MockGoodEvidence{height, address, index} -} - -func (e MockGoodEvidence) Height() int64 { return e.Height_ } -func (e MockGoodEvidence) Address() []byte { return e.Address_ } -func (e MockGoodEvidence) Index() int { return e.Index_ } -func (e MockGoodEvidence) Hash() []byte { - return []byte(fmt.Sprintf("%d-%d", e.Height_, e.Index_)) -} -func (e MockGoodEvidence) Verify(chainID string) error { return nil } -func (e MockGoodEvidence) Equal(ev types.Evidence) bool { - e2 := ev.(MockGoodEvidence) - return e.Height_ == e2.Height_ && - bytes.Equal(e.Address_, e2.Address_) && - e.Index_ == e2.Index_ -} -func (e MockGoodEvidence) String() string { - return fmt.Sprintf("GoodEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_) -} - -type MockBadEvidence struct { - MockGoodEvidence -} - -func (e MockBadEvidence) Verify(chainID string) error { return fmt.Errorf("MockBadEvidence") } -func (e MockBadEvidence) Equal(ev types.Evidence) bool { - e2 := ev.(MockBadEvidence) - return e.Height_ == e2.Height_ && - bytes.Equal(e.Address_, e2.Address_) && - e.Index_ == e2.Index_ -} -func (e MockBadEvidence) String() string { - return fmt.Sprintf("BadEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_) -} diff --git a/state/execution.go b/state/execution.go index 4ccf87d4..921799b8 100644 --- a/state/execution.go +++ b/state/execution.go @@ -191,13 +191,20 @@ 
func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, } // TODO: determine which validators were byzantine + byzantineVals := make([]*abci.Evidence, len(block.Evidence.Evidence)) + for i, ev := range block.Evidence.Evidence { + byzantineVals[i] = &abci.Evidence{ + PubKey: ev.Address(), // XXX + Height: ev.Height(), + } + } // Begin block _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ Hash: block.Hash(), Header: types.TM2PB.Header(block.Header), AbsentValidators: absentVals, - ByzantineValidators: nil, + ByzantineValidators: byzantineVals, }) if err != nil { logger.Error("Error in proxyAppConn.BeginBlock", "err", err) diff --git a/state/execution_test.go b/state/execution_test.go index 9db26911..ffb10f17 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -82,6 +82,51 @@ func TestBeginBlockAbsentValidators(t *testing.T) { } } +// TestBeginBlockByzantineValidators ensures we send byzantine validators list. +func TestBeginBlockByzantineValidators(t *testing.T) { + app := &testApp{} + cc := proxy.NewLocalClientCreator(app) + proxyApp := proxy.NewAppConns(cc, nil) + err := proxyApp.Start() + require.Nil(t, err) + defer proxyApp.Stop() + + state := state() + + prevHash := state.LastBlockID.Hash + prevParts := types.PartSetHeader{} + prevBlockID := types.BlockID{prevHash, prevParts} + + height1, idx1, val1 := int64(8), 0, []byte("val1") + height2, idx2, val2 := int64(3), 1, []byte("val2") + ev1 := types.NewMockGoodEvidence(height1, idx1, val1) + ev2 := types.NewMockGoodEvidence(height2, idx2, val2) + + testCases := []struct { + desc string + evidence []types.Evidence + expectedByzantineValidators []*abci.Evidence + }{ + {"none byzantine", []types.Evidence{}, []*abci.Evidence{}}, + {"one byzantine", []types.Evidence{ev1}, []*abci.Evidence{{ev1.Address(), ev1.Height()}}}, + {"multiple byzantine", []types.Evidence{ev1, ev2}, []*abci.Evidence{ + {ev1.Address(), ev1.Height()}, + {ev2.Address(), ev2.Height()}}}, + } + 
+ for _, tc := range testCases { + lastCommit := &types.Commit{BlockID: prevBlockID} + + block, _ := state.MakeBlock(10, makeTxs(2), lastCommit) + block.Evidence.Evidence = tc.evidence + _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger()) + require.Nil(t, err, tc.desc) + + // -> app must receive an index of the byzantine validator + assert.Equal(t, tc.expectedByzantineValidators, app.ByzantineValidators, tc.desc) + } +} + //---------------------------------------------------------------------------- // make some bogus txs @@ -115,7 +160,8 @@ var _ abci.Application = (*testApp)(nil) type testApp struct { abci.BaseApplication - AbsentValidators []int32 + AbsentValidators []int32 + ByzantineValidators []*abci.Evidence } func NewDummyApplication() *testApp { @@ -128,6 +174,7 @@ func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { app.AbsentValidators = req.AbsentValidators + app.ByzantineValidators = req.ByzantineValidators return abci.ResponseBeginBlock{} } diff --git a/types/evidence.go b/types/evidence.go index 94bab1a3..3ae3e40b 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -167,3 +167,50 @@ func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool { // just check their hashes return bytes.Equal(merkle.SimpleHashFromBinary(dve), merkle.SimpleHashFromBinary(ev)) } + +//----------------------------------------------------------------- + +// UNSTABLE +type MockGoodEvidence struct { + Height_ int64 + Address_ []byte + Index_ int +} + +// UNSTABLE +func NewMockGoodEvidence(height int64, index int, address []byte) MockGoodEvidence { + return MockGoodEvidence{height, address, index} +} + +func (e MockGoodEvidence) Height() int64 { return e.Height_ } +func (e MockGoodEvidence) Address() []byte { return e.Address_ } +func (e MockGoodEvidence) Index() int { return e.Index_ } +func (e MockGoodEvidence) Hash() []byte { + return 
[]byte(fmt.Sprintf("%d-%d", e.Height_, e.Index_)) +} +func (e MockGoodEvidence) Verify(chainID string) error { return nil } +func (e MockGoodEvidence) Equal(ev Evidence) bool { + e2 := ev.(MockGoodEvidence) + return e.Height_ == e2.Height_ && + bytes.Equal(e.Address_, e2.Address_) && + e.Index_ == e2.Index_ +} +func (e MockGoodEvidence) String() string { + return fmt.Sprintf("GoodEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_) +} + +// UNSTABLE +type MockBadEvidence struct { + MockGoodEvidence +} + +func (e MockBadEvidence) Verify(chainID string) error { return fmt.Errorf("MockBadEvidence") } +func (e MockBadEvidence) Equal(ev Evidence) bool { + e2 := ev.(MockBadEvidence) + return e.Height_ == e2.Height_ && + bytes.Equal(e.Address_, e2.Address_) && + e.Index_ == e2.Index_ +} +func (e MockBadEvidence) String() string { + return fmt.Sprintf("BadEvidence: %d/%s/%d", e.Height_, e.Address_, e.Index_) +} From 60f95cd9ea1dbba01c25ca903f8bd76999fef7d9 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 11:28:44 -0500 Subject: [PATCH 20/33] changelog and version --- CHANGELOG.md | 1 + version/version.go | 4 ++-- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 71fe223e..5106698d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -50,6 +50,7 @@ BUG FIXES: - [lite] fix race - [state] validate block.Header.ValidatorsHash - [p2p] allow seed addresses to be prefixed with eg. `tcp://` +- [p2p] use consistent key to refer to peers so we dont try to connect to existing peers - [cmd] fix `tendermint init` to ignore files that are there and generate files that aren't. 
## 0.14.0 (December 11, 2017) diff --git a/version/version.go b/version/version.go index baec0b34..d328b41d 100644 --- a/version/version.go +++ b/version/version.go @@ -1,13 +1,13 @@ package version const Maj = "0" -const Min = "14" +const Min = "15" const Fix = "0" var ( // Version is the current version of Tendermint // Must be a string because scripts like dist.sh read this file. - Version = "0.14.0" + Version = "0.15.0" // GitCommit is the current HEAD set using ldflags. GitCommit string From ff99ca7cdf1e91ad70bdd0935ad485452daa7eaf Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 13:14:51 -0500 Subject: [PATCH 21/33] bump wal test timeout --- consensus/replay_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c497ed54..24255262 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -75,7 +75,7 @@ func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, require.NoError(t, err) select { case <-newBlockCh: - case <-time.After(10 * time.Second): + case <-time.After(60 * time.Second): t.Fatalf("Timed out waiting for new block (see trace above)") } } From 381fe19335ba8825e04d1d0fefa5cac709bb7178 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 29 Dec 2017 14:11:26 -0500 Subject: [PATCH 22/33] changelog date [ci skip] --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5106698d..cda15071 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -25,7 +25,7 @@ BUG FIXES: - Graceful handling/recovery for apps that have non-determinism or fail to halt - Graceful handling/recovery for violations of safety, or liveness -## 0.15.0 (TBD) +## 0.15.0 (December 29, 2017) BREAKING CHANGES: - [p2p] enable the Peer Exchange reactor by default From 96e0e4ab5a7b74a3918cf51d001b0800309afc51 Mon Sep 17 00:00:00 2001 From: Zarko Milosevic Date: Fri, 29 Dec 2017 22:12:04 +0100 Subject: 
[PATCH 23/33] Describe messages sent as part of consensus/gossip protocol --- docs/specification/new-spec/consensus.md | 197 +++++++++++++++++++++++ docs/specification/new-spec/encoding.md | 21 +++ 2 files changed, 218 insertions(+) create mode 100644 docs/specification/new-spec/consensus.md diff --git a/docs/specification/new-spec/consensus.md b/docs/specification/new-spec/consensus.md new file mode 100644 index 00000000..5c681056 --- /dev/null +++ b/docs/specification/new-spec/consensus.md @@ -0,0 +1,197 @@ +# Tendermint Consensus + +Tendermint consensus is a distributed protocol executed by validator processes to agree on +the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where +each round is a try to reach agreement on the next block. A round starts by having a dedicated +process (called proposer) suggesting to other processes what should be the next block with +the `ProposalMessage`. +The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote messages, prevote +and precommit votes). Note that a proposal message is just a suggestion what the next block should be; a +validator might vote with a `VoteMessage` for a different block. If in some round, enough number +of processes vote for the same block, then this block is committed and later added to the blockchain. +`ProposalMessage` and `VoteMessage` are signed by the private key of the validator. +The internals of the protocol and how it ensures safety and liveness properties are +explained [here](https://github.com/tendermint/spec). + +For efficiency reasons, validators in Tendermint consensus protocol do not agree directly on the block +as the block size is big, i.e., they don't embed the block inside `Proposal` and `VoteMessage`. Instead, they +reach agreement on the `BlockID` (see `BlockID` definition in [Blockchain](blockchain.md) section) +that uniquely identifies each block. 
The block itself is disseminated to validator processes using +peer-to-peer gossiping protocol. It starts by having a proposer first splitting a block into a +number of block parts, that are then gossiped between processes using `BlockPartMessage`. + +Validators in Tendermint communicate by peer-to-peer gossiping protocol. Each validator is connected +only to a subset of processes called peers. By the gossiping protocol, a validator send to its peers +all needed information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) so they can +reach agreement on some block, and also obtain the content of the chosen block (block parts). As part of +the gossiping protocol, processes also send auxiliary messages that inform peers about the +executed steps of the core consensus algorithm (`NewRoundStepMessage` and `CommitStepMessage`), and +also messages that inform peers what votes the process has seen (`HasVoteMessage`, +`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping protocol +to determine what messages a process should send to its peers. + +We now describe the content of each message exchanged during Tendermint consensus protocol. + +## ProposalMessage +ProposalMessage is sent when a new block is proposed. It is a suggestion of what the +next block in the blockchain should be. +``` +type ProposalMessage struct { + Proposal Proposal +} +``` +### Proposal +Proposal contains height and round for which this proposal is made, BlockID as a unique identifier of proposed +block, timestamp, and two fields (POLRound and POLBlockID) that are needed for termination of the consensus. +The message is signed by the validator private key. 
+ +``` +type Proposal struct { + Height int64 + Round int + Timestamp Time + BlockID BlockID + POLRound int + POLBlockID BlockID + Signature Signature +} +``` + +NOTE: In the current version of the Tendermint, the consensus value in proposal is represented with +PartSetHeader, and with BlockID in vote message. It should be aligned as suggested in this spec as +BlockID contains PartSetHeader. + +## VoteMessage +VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the current round). +Vote is defined in [Blockchain](blockchain.md) section and contains validator's information (validator address +and index), height and round for which the vote is sent, vote type, blockID if process vote for some +block (`nil` otherwise) and a timestamp when the vote is sent. The message is signed by the validator private key. +``` +type VoteMessage struct { + Vote Vote +} +``` + +## BlockPartMessage +BlockPartMessage is sent when gossipping a piece of the proposed block. It contains height, round +and the block part. + +``` +type BlockPartMessage struct { + Height int64 + Round int + Part Part +} +``` + +## ProposalHeartbeatMessage +ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions +to be able to create a next block proposal. + +``` +type ProposalHeartbeatMessage struct { + Heartbeat Heartbeat +} +``` + +### Heartbeat +Heartbeat contains validator information (address and index), +height, round and sequence number. It is signed by the private key of the validator. + +``` +type Heartbeat struct { + ValidatorAddress []byte + ValidatorIndex int + Height int64 + Round int + Sequence int + Signature Signature +} +``` + +## NewRoundStepMessage +NewRoundStepMessage is sent for every step transition during the core consensus algorithm execution. It is +used in the gossip part of the Tendermint protocol to inform peers about a current height/round/step +a process is in. 
+ +``` +type NewRoundStepMessage struct { + Height int64 + Round int + Step RoundStepType + SecondsSinceStartTime int + LastCommitRound int +} +``` + +## CommitStepMessage +CommitStepMessage is sent when an agreement on some block is reached. It contains height for which agreement +is reached, block parts header that describes the decided block and is used to obtain all block parts, +and a bit array of the block parts a process currently has, so its peers can know what parts +it is missing so they can send them. + +``` +type CommitStepMessage struct { + Height int64 + BlockID BlockID + BlockParts BitArray +} +``` + +TODO: We use BlockID instead of BlockPartsHeader (in current implementation) for symmetry. + +## ProposalPOLMessage +ProposalPOLMessage is sent when a previous block is re-proposed. +It is used to inform peers in what round the process learned for this block (ProposalPOLRound), +and what prevotes for the re-proposed block the process has. + +``` +type ProposalPOLMessage struct { + Height int64 + ProposalPOLRound int + ProposalPOL BitArray +} +``` + + +## HasVoteMessage +HasVoteMessage is sent to indicate that a particular vote has been received. It contains height, +round, vote type and the index of the validator that is the originator of the corresponding vote. + +``` +type HasVoteMessage struct { + Height int64 + Round int + Type byte + Index int +} +``` + +## VoteSetMaj23Message +VoteSetMaj23Message is sent to indicate that a process has seen +2/3 votes for some BlockID. +It contains height, round, vote type and the BlockID. + +``` +type VoteSetMaj23Message struct { + Height int64 + Round int + Type byte + BlockID BlockID +} +``` + +## VoteSetBitsMessage +VoteSetBitsMessage is sent to communicate the bit-array of votes a process has seen for a given +BlockID. It contains height, round, vote type, BlockID and a bit array of +the votes a process has. 
+ +``` +type VoteSetBitsMessage struct { + Height int64 + Round int + Type byte + BlockID BlockID + Votes BitArray +} +``` + diff --git a/docs/specification/new-spec/encoding.md b/docs/specification/new-spec/encoding.md index a7482e6c..02456d84 100644 --- a/docs/specification/new-spec/encoding.md +++ b/docs/specification/new-spec/encoding.md @@ -93,6 +93,17 @@ encode([]int{1, 2, 3, 4}) == [0x01, 0x04, 0x01, 0x01, 0x01, 0x02, 0x01, 0x encode([]string{"abc", "efg"}) == [0x01, 0x02, 0x01, 0x03, 0x61, 0x62, 0x63, 0x01, 0x03, 0x65, 0x66, 0x67] ``` +### BitArray +BitArray is encoded as an `int` of the number of bits, and with an array of `uint64` to encode +value of each array element. + +``` +type BitArray struct { + Bits int + Elems []uint64 +} +``` + ### Time Time is encoded as an `int64` of the number of nanoseconds since January 1, 1970, @@ -176,3 +187,13 @@ TMBIN encode an object and slice it into parts. ``` MakeParts(object, partSize) ``` + +### Part + +``` +type Part struct { + Index int + Bytes byte[] + Proof byte[] +} +``` From 4e834baa9a4cb7638539e6ce0bf30c51e3f6e3d3 Mon Sep 17 00:00:00 2001 From: Zach Date: Sun, 31 Dec 2017 13:54:50 +0000 Subject: [PATCH 24/33] docs: update ecosystem.rst (#1037) * docs: update ecosystem.rst * typo [ci skip] --- docs/ecosystem.rst | 117 ++------------------------------------------- 1 file changed, 5 insertions(+), 112 deletions(-) diff --git a/docs/ecosystem.rst b/docs/ecosystem.rst index 30ab9a35..39e6785e 100644 --- a/docs/ecosystem.rst +++ b/docs/ecosystem.rst @@ -1,122 +1,15 @@ Tendermint Ecosystem ==================== -Below are the many applications built using various pieces of the Tendermint stack. We thank the community for their contributions thus far and welcome the addition of new projects. Feel free to submit a pull request to add your project! 
+The growing list of applications built using various pieces of the Tendermint stack can be found at: -ABCI Applications ------------------ +* https://tendermint.com/ecosystem -Burrow -^^^^^^ +We thank the community for their contributions thus far and welcome the addition of new projects. A pull request can be submitted to `this file `__ to include your project. -Ethereum Virtual Machine augmented with native permissioning scheme and global key-value store, written in Go, authored by Monax Industries, and incubated `by Hyperledger `__. - -cb-ledger -^^^^^^^^^ - -Custodian Bank Ledger, integrating central banking with the blockchains of tomorrow, written in C++, and `authored by Block Finance `__. - -Clearchain -^^^^^^^^^^ - -Application to manage a distributed ledger for money transfers that support multi-currency accounts, written in Go, and `authored by Allession Treglia `__. - -Comit -^^^^^ - -Public service reporting and tracking, written in Go, and `authored by Zach Balder `__. - -Cosmos SDK -^^^^^^^^^^ - -A prototypical account based crypto currency state machine supporting plugins, written in Go, and `authored by Cosmos `__. - -Ethermint -^^^^^^^^^ - -The go-ethereum state machine run as a ABCI app, written in Go, `authored by Tendermint `__. - -IAVL -^^^^ - -Immutable AVL+ tree with Merkle proofs, Written in Go, `authored by Tendermint `__. - -Lotion -^^^^^^ - -A Javascript microframework for building blockchain applications with Tendermint, written in Javascript, `authored by Judd Keppel of Tendermint `__. See also `lotion-chat `__ and `lotion-coin `__ apps written using Lotion. - -MerkleTree -^^^^^^^^^^ - -Immutable AVL+ tree with Merkle proofs, Written in Java, `authored by jTendermint `__. - -Passchain -^^^^^^^^^ - -Passchain is a tool to securely store and share passwords, tokens and other short secrets, `authored by trusch `__. - -Passwerk -^^^^^^^^ - -Encrypted storage web-utility backed by Tendermint, written in Go, `authored by Rigel Rozanski `__. 
- -Py-Tendermint -^^^^^^^^^^^^^ - -A Python microframework for building blockchain applications with Tendermint, written in Python, `authored by Dave Bryson `__. - -Stratumn -^^^^^^^^ - -SDK for "Proof-of-Process" networks, written in Go, `authored by the Stratumn team `__. - -TMChat -^^^^^^ - -P2P chat using Tendermint, written in Java, `authored by wolfposd `__. - - -ABCI Servers ------------- - -+------------------------------------------------------------------+--------------------+--------------+ -| **Name** | **Author** | **Language** | -| | | | -+------------------------------------------------------------------+--------------------+--------------+ -| `abci `__ | Tendermint | Go | -+------------------------------------------------------------------+--------------------+--------------+ -| `js abci `__ | Tendermint | Javascript | -+------------------------------------------------------------------+--------------------+--------------+ -| `cpp-tmsp `__ | Martin Dyring | C++ | -+------------------------------------------------------------------+--------------------+--------------+ -| `c-abci `__ | ChainX | C | -+------------------------------------------------------------------+--------------------+--------------+ -| `jabci `__ | jTendermint | Java | -+------------------------------------------------------------------+--------------------+--------------+ -| `ocaml-tmsp `__ | Zach Balder | Ocaml | -+------------------------------------------------------------------+--------------------+--------------+ -| `abci_server `__ | Krzysztof Jurewicz | Erlang | -+------------------------------------------------------------------+--------------------+--------------+ -| `rust-tsp `__   | Adrian Brink | Rust       | -+------------------------------------------------------------------+--------------------+--------------+ -| `hs-abci `__ | Alberto Gonzalez | Haskell | -+------------------------------------------------------------------+--------------------+--------------+ -| 
`haskell-abci `__ | Christoper Goes | Haskell | -+------------------------------------------------------------------+--------------------+--------------+ -| `Spearmint `__ | Dennis Mckinnon | Javascript | -+------------------------------------------------------------------+--------------------+--------------+ -| `py-abci `__ | Dave Bryson | Python | -+------------------------------------------------------------------+--------------------+--------------+ - -Deployment Tools ----------------- +Other Tools +----------- See `deploy testnets <./deploy-testnets.html>`__ for information about all the tools built by Tendermint. We have Kubernetes, Ansible, and Terraform integrations. -Cloudsoft built `brooklyn-tendermint `__ for deploying a tendermint testnet in docker continers. It uses Clocker for Apache Brooklyn. - -Dev Tools ---------- - For upgrading from older to newer versions of tendermint and to migrate your chain data, see `tm-migrator `__ written by @hxzqlh. From 1acb12edf5e8aae084c0f6e9b257d7a2f9dbfe13 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 31 Dec 2017 17:07:08 -0500 Subject: [PATCH 25/33] p2p docs --- p2p/README.md | 119 ++--------------------------------------- p2p/docs/connection.md | 116 +++++++++++++++++++++++++++++++++++++++ p2p/docs/node.md | 53 ++++++++++++++++++ p2p/docs/peer.md | 105 ++++++++++++++++++++++++++++++++++++ p2p/docs/reputation.md | 23 ++++++++ 5 files changed, 302 insertions(+), 114 deletions(-) create mode 100644 p2p/docs/connection.md create mode 100644 p2p/docs/node.md create mode 100644 p2p/docs/peer.md create mode 100644 p2p/docs/reputation.md diff --git a/p2p/README.md b/p2p/README.md index d653b2ca..5d1f984c 100644 --- a/p2p/README.md +++ b/p2p/README.md @@ -4,119 +4,10 @@ `tendermint/tendermint/p2p` provides an abstraction around peer-to-peer communication.
-## MConnection +See: -`MConnection` is a multiplex connection: +- [docs/connection] for details on how connections and multiplexing work +- [docs/peer] for details on peer ID, handshakes, and peer exchange +- [docs/node] for details about different types of nodes and how they should work +- [docs/reputation] for details on how peer reputation is managed -__multiplex__ *noun* a system or signal involving simultaneous transmission of -several messages along a single channel of communication. - -Each `MConnection` handles message transmission on multiple abstract communication -`Channel`s. Each channel has a globally unique byte id. -The byte id and the relative priorities of each `Channel` are configured upon -initialization of the connection. - -The `MConnection` supports three packet types: Ping, Pong, and Msg. - -### Ping and Pong - -The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively - -When we haven't received any messages on an `MConnection` in a time `pingTimeout`, we send a ping message. -When a ping is received on the `MConnection`, a pong is sent in response. - -If a pong is not received in sufficient time, the peer's score should be decremented (TODO). - -### Msg - -Messages in channels are chopped into smaller msgPackets for multiplexing. - -``` -type msgPacket struct { - ChannelID byte - EOF byte // 1 means message ends here. - Bytes []byte -} -``` - -The msgPacket is serialized using go-wire, and prefixed with a 0x3. -The received `Bytes` of a sequential set of packets are appended together -until a packet with `EOF=1` is received, at which point the complete serialized message -is returned for processing by the corresponding channels `onReceive` function. - -### Multiplexing - -Messages are sent from a single `sendRoutine`, which loops over a select statement that results in the sending -of a ping, a pong, or a batch of data messages. 
The batch of data messages may include messages from multiple channels. -Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time. -Messages are chosen for a batch one a time from the channel with the lowest ratio of recently sent bytes to channel priority. - -## Sending Messages - -There are two methods for sending messages: -```go -func (m MConnection) Send(chID byte, msg interface{}) bool {} -func (m MConnection) TrySend(chID byte, msg interface{}) bool {} -``` - -`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued -for the channel with the given id byte `chID`. The message `msg` is serialized -using the `tendermint/wire` submodule's `WriteBinary()` reflection routine. - -`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's -queue is full. - -`Send()` and `TrySend()` are also exposed for each `Peer`. - -## Peer - -Each peer has one `MConnection` instance, and includes other information such as whether the connection -was outbound, whether the connection should be recreated if it closes, various identity information about the node, -and other higher level thread-safe data used by the reactors. - -## Switch/Reactor - -The `Switch` handles peer connections and exposes an API to receive incoming messages -on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -or more `Channels`. So while sending outgoing messages is typically performed on the peer, -incoming messages are received on the reactor. - -```go -// Declare a MyReactor reactor that handles messages on MyChannelID. 
-type MyReactor struct{} - -func (reactor MyReactor) GetChannels() []*ChannelDescriptor { - return []*ChannelDescriptor{ChannelDescriptor{ID:MyChannelID, Priority: 1}} -} - -func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) { - r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error) - msgString := ReadString(r, n, err) - fmt.Println(msgString) -} - -// Other Reactor methods omitted for brevity -... - -switch := NewSwitch([]Reactor{MyReactor{}}) - -... - -// Send a random message to all outbound connections -for _, peer := range switch.Peers().List() { - if peer.IsOutbound() { - peer.Send(MyChannelID, "Here's a random message") - } -} -``` - -### PexReactor/AddrBook - -A `PEXReactor` reactor implementation is provided to automate peer discovery. - -```go -book := p2p.NewAddrBook(addrBookFilePath) -pexReactor := p2p.NewPEXReactor(book) -... -switch := NewSwitch([]Reactor{pexReactor, myReactor, ...}) -``` diff --git a/p2p/docs/connection.md b/p2p/docs/connection.md new file mode 100644 index 00000000..72847fa1 --- /dev/null +++ b/p2p/docs/connection.md @@ -0,0 +1,116 @@ +## MConnection + +`MConnection` is a multiplex connection: + +__multiplex__ *noun* a system or signal involving simultaneous transmission of +several messages along a single channel of communication. + +Each `MConnection` handles message transmission on multiple abstract communication +`Channel`s. Each channel has a globally unique byte id. +The byte id and the relative priorities of each `Channel` are configured upon +initialization of the connection. + +The `MConnection` supports three packet types: Ping, Pong, and Msg. + +### Ping and Pong + +The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively + +When we haven't received any messages on an `MConnection` in a time `pingTimeout`, we send a ping message. +When a ping is received on the `MConnection`, a pong is sent in response. 
+ +If a pong is not received in sufficient time, the peer's score should be decremented (TODO). + +### Msg + +Messages in channels are chopped into smaller msgPackets for multiplexing. + +``` +type msgPacket struct { + ChannelID byte + EOF byte // 1 means message ends here. + Bytes []byte +} +``` + +The msgPacket is serialized using go-wire, and prefixed with a 0x3. +The received `Bytes` of a sequential set of packets are appended together +until a packet with `EOF=1` is received, at which point the complete serialized message +is returned for processing by the corresponding channels `onReceive` function. + +### Multiplexing + +Messages are sent from a single `sendRoutine`, which loops over a select statement that results in the sending +of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels. +Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time. +Messages are chosen for a batch one a time from the channel with the lowest ratio of recently sent bytes to channel priority. + +## Sending Messages + +There are two methods for sending messages: +```go +func (m MConnection) Send(chID byte, msg interface{}) bool {} +func (m MConnection) TrySend(chID byte, msg interface{}) bool {} +``` + +`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued +for the channel with the given id byte `chID`. The message `msg` is serialized +using the `tendermint/wire` submodule's `WriteBinary()` reflection routine. + +`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's +queue is full. + +`Send()` and `TrySend()` are also exposed for each `Peer`. 
+ +## Peer + +Each peer has one `MConnection` instance, and includes other information such as whether the connection +was outbound, whether the connection should be recreated if it closes, various identity information about the node, +and other higher level thread-safe data used by the reactors. + +## Switch/Reactor + +The `Switch` handles peer connections and exposes an API to receive incoming messages +on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one +or more `Channels`. So while sending outgoing messages is typically performed on the peer, +incoming messages are received on the reactor. + +```go +// Declare a MyReactor reactor that handles messages on MyChannelID. +type MyReactor struct{} + +func (reactor MyReactor) GetChannels() []*ChannelDescriptor { + return []*ChannelDescriptor{ChannelDescriptor{ID:MyChannelID, Priority: 1}} +} + +func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) { + r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error) + msgString := ReadString(r, n, err) + fmt.Println(msgString) +} + +// Other Reactor methods omitted for brevity +... + +switch := NewSwitch([]Reactor{MyReactor{}}) + +... + +// Send a random message to all outbound connections +for _, peer := range switch.Peers().List() { + if peer.IsOutbound() { + peer.Send(MyChannelID, "Here's a random message") + } +} +``` + +### PexReactor/AddrBook + +A `PEXReactor` reactor implementation is provided to automate peer discovery. + +```go +book := p2p.NewAddrBook(addrBookFilePath) +pexReactor := p2p.NewPEXReactor(book) +... +switch := NewSwitch([]Reactor{pexReactor, myReactor, ...}) +``` diff --git a/p2p/docs/node.md b/p2p/docs/node.md new file mode 100644 index 00000000..a8afc85c --- /dev/null +++ b/p2p/docs/node.md @@ -0,0 +1,53 @@ +# Tendermint Peer Discovery + +A Tendermint P2P network has different kinds of nodes with different requirements for connectivity to others. 
+This document describes what kind of nodes Tendermint should enable and how they should work. + +## Node startup options +--p2p.seed_mode // If present, this node operates in seed mode. It will kick incoming peers after sharing some peers. +--p2p.seeds “1.2.3.4:466656,2.3.4.5:4444” // Dials these seeds to get peers and disconnects. +--p2p.persistent_peers “1.2.3.4:46656,2.3.4.5:466656” // These connections will be auto-redialed. If dial_seeds and persistent intersect, the user will be WARNED that seeds may auto-close connections and the node may not be able to keep the connection persistent + +## Seeds + +Seeds are the first point of contact for a new node. +They return a list of known active peers and disconnect. + +Seeds should operate full nodes, and with the PEX reactor in a "crawler" mode +that continuously explores to validate the availability of peers. + +Seeds should only respond with some top percentile of the best peers it knows about. + +## New Full Node + +A new node has seeds hardcoded into the software, but they can also be set manually (config file or flags). +The new node must also have access to a recent block height, H, and hash, HASH. + +The node then queries some seeds for peers for its chain, +dials those peers, and runs the Tendermint protocols with those it successfully connects to. + +When the peer catches up to height H, it ensures the block hash matches HASH. + +## Restarted Full Node + +A node checks its address book on startup and attempts to connect to peers from there. +If it can't connect to any peers after some time, it falls back to the seeds to find more. + +## Validator Node + +A validator node is a node that interfaces with a validator signing key. +These nodes require the highest security, and should not accept incoming connections. +They should maintain outgoing connections to a controlled set of "Sentry Nodes" that serve +as their proxy shield to the rest of the network. 
+ +Validators that know and trust each other can accept incoming connections from one another and maintain direct private connectivity via VPN. + +## Sentry Node + +Sentry nodes are guardians of a validator node and provide it access to the rest of the network. +Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other. +They should always expect to have direct incoming connections from the validator node and its backup/s. +They do not report the validator node's address in the PEX. +They may be more strict about the quality of peers they keep. + +Sentry nodes belonging to validators that trust each other may wish to maintain persistent connections via VPN with one another, but only report each other sparingly in the PEX. diff --git a/p2p/docs/peer.md b/p2p/docs/peer.md new file mode 100644 index 00000000..15870ea7 --- /dev/null +++ b/p2p/docs/peer.md @@ -0,0 +1,105 @@ +# Tendermint Peers + +This document explains how Tendermint Peers are identified, how they connect to one another, +and how other peers are found. + +## Peer Identity + +Tendermint peers are expected to maintain long-term persistent identities in the form of a private key. +Each peer has an ID defined as `peer.ID == peer.PrivKey.Address()`, where `Address` uses the scheme defined in go-crypto. + +Peer ID's must come with some Proof-of-Work; that is, +they must satisfy `peer.PrivKey.Address() < target` for some difficulty target. +This ensures they are not too easy to generate. + +A single peer ID can have multiple IP addresses associated with - for simplicity, we only keep track +of the latest one. + +When attempting to connect to a peer, we use the PeerURL: `@:`. +We will attempt to connect to the peer at IP:PORT, and verify, +via authenticated encryption, that it is in possession of the private key +corresponding to ``. This prevents man-in-the-middle attacks on the peer layer. 
+ +Peers can also be connected to without specifying an ID, ie. `:`. +In this case, the peer cannot be authenticated and other means, such as a VPN, +must be used. + +## Connections + +All p2p connections use TCP. +Upon establishing a successful TCP connection with a peer, +two handhsakes are performed: one for authenticated encryption, and one for Tendermint versioning. +Both handshakes have configurable timeouts (they should complete quickly). + +### Authenticated Encryption Handshake + +Tendermint implements the Station-to-Station protocol +using ED25519 keys for Diffie-Helman key-exchange and NACL SecretBox for encryption. +It goes as follows: +- generate an emphemeral ED25519 keypair +- send the ephemeral public key to the peer +- wait to receive the peer's ephemeral public key +- compute the Diffie-Hellman shared secret using the peers ephemeral public key and our ephemeral private key +- generate nonces to use for encryption + - TODO +- all communications from now on are encrypted using the shared secret +- generate a common challenge to sign +- sign the common challenge with our persistent private key +- send the signed challenge and persistent public key to the peer +- wait to receive the signed challenge and persistent public key from the peer +- verify the signature in the signed challenge using the peers persistent public key + + +If this is an outgoing connection (we dialed the peer) and we used a peer ID, +then finally verify that the `peer.PubKey` corresponds to the peer ID we dialed, +ie. `peer.PubKey.Address() == `. + +The connection has now been authenticated. All traffic is encrypted. + +Note that only the dialer can authenticate the identity of the peer, +but this is what we care about since when we join the network we wish to +ensure we have reached the intended peer (and are not being MITMd). + + +### Peer Filter + +Before continuing, we check if the new peer has the same ID has ourselves or +an existing peer. If so, we disconnect. 
+ +We also check the peer's address and public key against +an optional whitelist which can be managed through the ABCI app - +if the whitelist is enabled and the peer is not on it, the connection is +terminated. + + +### Tendermint Version Handshake + +The Tendermint Version Handshake allows the peers to exchange their NodeInfo, which contains: + +``` +type NodeInfo struct { + PubKey crypto.PubKey `json:"pub_key"` + Moniker string `json:"moniker"` + Network string `json:"network"` + RemoteAddr string `json:"remote_addr"` + ListenAddr string `json:"listen_addr"` // accepting in + Version string `json:"version"` // major.minor.revision + Channels []int8 `json:"channels"` // active reactor channels + Other []string `json:"other"` // other application specific data +} +``` + +The connection is disconnected if: +- `peer.NodeInfo.PubKey != peer.PubKey` +- `peer.NodeInfo.Version` is not formatted as `X.X.X` where X are integers known as Major, Minor, and Revision +- `peer.NodeInfo.Version` Major is not the same as ours +- `peer.NodeInfo.Version` Minor is not the same as ours +- `peer.NodeInfo.Network` is not the same as ours + + +At this point, if we have not disconnected, the peer is valid and added to the switch, +so it is added to all reactors. + + +### Connection Activity + diff --git a/p2p/docs/reputation.md b/p2p/docs/reputation.md new file mode 100644 index 00000000..a2a995e5 --- /dev/null +++ b/p2p/docs/reputation.md @@ -0,0 +1,23 @@ + +# Peer Strategy + +Peers are managed using an address book and a trust metric. +The book keeps a record of vetted peers and unvetted peers. +When we need more peers, we pick them randomly from the addrbook with some +configurable bias for unvetted peers. When we’re asked for peers, we provide a random selection with no bias. + +The trust metric tracks the quality of the peers. +When a peer exceeds a certain quality for a certain amount of time, +it is marked as vetted in the addrbook. 
+If a vetted peer's quality degrades sufficiently, it is booted, and must prove itself from scratch. +If we need to make room for a new vetted peer, we move the lowest scoring vetted peer back to unvetted. +If we need to make room for a new unvetted peer, we remove the lowest scoring unvetted peer - +possibly only if its below some absolute minimum ? + +Peer quality is tracked in the connection and across the reactors. +Behaviours are defined as one of: + - fatal - something outright malicious. we should disconnect and remember them. + - bad - any kind of timeout, msgs that dont unmarshal, or fail other validity checks, or msgs we didn't ask for or arent expecting + - neutral - normal correct behaviour. unknown channels/msg types (version upgrades). + - good - some random majority of peers per reactor sending us useful messages + From cd15b677ec8339e0ac4992290aa103b524232b25 Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Mon, 1 Jan 2018 15:35:28 +0000 Subject: [PATCH 26/33] docs: add abci spec --- docs/abci-cli.rst | 2 +- docs/conf.py | 45 +++++++++++++++++++++++++++------------------ docs/index.rst | 1 + 3 files changed, 29 insertions(+), 19 deletions(-) diff --git a/docs/abci-cli.rst b/docs/abci-cli.rst index 9a5ba833..ae410568 100644 --- a/docs/abci-cli.rst +++ b/docs/abci-cli.rst @@ -289,4 +289,4 @@ its own pattern of messages. For more information, see the `application developers guide <./app-development.html>`__. For examples of running an ABCI app with Tendermint, see the `getting started -guide <./getting-started.html>`__. +guide <./getting-started.html>`__. Next is the ABCI specification. 
diff --git a/docs/conf.py b/docs/conf.py index d5c49355..92c5e120 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -171,29 +171,38 @@ texinfo_documents = [ 'Database'), ] -repo = "https://raw.githubusercontent.com/tendermint/tools/" -branch = "master" +# ---- customization ------------------------- -tools = "./tools" -assets = tools + "/assets" +tools_repo = "https://raw.githubusercontent.com/tendermint/tools/" +tools_branch = "master" -if os.path.isdir(tools) != True: - os.mkdir(tools) -if os.path.isdir(assets) != True: - os.mkdir(assets) +tools_dir = "./tools" +assets_dir = tools_dir + "/assets" -urllib.urlretrieve(repo+branch+'/ansible/README.rst', filename=tools+'/ansible.rst') -urllib.urlretrieve(repo+branch+'/ansible/assets/a_plus_t.png', filename=assets+'/a_plus_t.png') +if os.path.isdir(tools_dir) != True: + os.mkdir(tools_dir) +if os.path.isdir(assets_dir) != True: + os.mkdir(assets_dir) -urllib.urlretrieve(repo+branch+'/docker/README.rst', filename=tools+'/docker.rst') +urllib.urlretrieve(tools_repo+tools_branch+'/ansible/README.rst', filename=tools_dir+'/ansible.rst') +urllib.urlretrieve(tools_repo+tools_branch+'/ansible/assets/a_plus_t.png', filename=assets_dir+'/a_plus_t.png') -urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/README.rst', filename=tools+'/mintnet-kubernetes.rst') -urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets+'/gce1.png') -urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets+'/gce2.png') -urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets+'/statefulset.png') -urllib.urlretrieve(repo+branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets+'/t_plus_k.png') +urllib.urlretrieve(tools_repo+tools_branch+'/docker/README.rst', filename=tools_dir+'/docker.rst') -urllib.urlretrieve(repo+branch+'/terraform-digitalocean/README.rst', filename=tools+'/terraform-digitalocean.rst') 
-urllib.urlretrieve(repo+branch+'/tm-bench/README.rst', filename=tools+'/benchmarking-and-monitoring.rst') +urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/README.rst', filename=tools_dir+'/mintnet-kubernetes.rst') +urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce1.png', filename=assets_dir+'/gce1.png') +urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/gce2.png', filename=assets_dir+'/gce2.png') +urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/statefulset.png', filename=assets_dir+'/statefulset.png') +urllib.urlretrieve(tools_repo+tools_branch+'/mintnet-kubernetes/assets/t_plus_k.png', filename=assets_dir+'/t_plus_k.png') + +urllib.urlretrieve(tools_repo+tools_branch+'/terraform-digitalocean/README.rst', filename=tools_dir+'/terraform-digitalocean.rst') +urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.rst', filename=tools_dir+'/benchmarking-and-monitoring.rst') # the readme for below is included in tm-bench # urllib.urlretrieve('https://raw.githubusercontent.com/tendermint/tools/master/tm-monitor/README.rst', filename='tools/tm-monitor.rst') + +#### abci spec ################################# + +abci_repo = "https://raw.githubusercontent.com/tendermint/abci/" +abci_branch = "spec-docs" + +urllib.urlretrieve(abci_repo+abci_branch+'/specification.rst', filename='abci-spec.rst') diff --git a/docs/index.rst b/docs/index.rst index 3ad3c4c5..b32ba484 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -53,6 +53,7 @@ Tendermint 102 :maxdepth: 2 abci-cli.rst + abci-spec.rst app-architecture.rst app-development.rst how-to-read-logs.rst From bc71840f06af0aee7bc9ec17f2dc71e2bbd65f26 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Mon, 1 Jan 2018 15:59:53 -0500 Subject: [PATCH 27/33] more p2p docs --- p2p/README.md | 3 +- p2p/docs/config.md | 39 +++++++++ p2p/docs/node.md | 25 ++++-- p2p/docs/peer.md | 55 ++++++++----- p2p/docs/pex.md | 94 ++++++++++++++++++++++ 
p2p/docs/{reputation.md => trustmetric.md} | 13 +-- 6 files changed, 189 insertions(+), 40 deletions(-) create mode 100644 p2p/docs/config.md create mode 100644 p2p/docs/pex.md rename p2p/docs/{reputation.md => trustmetric.md} (51%) diff --git a/p2p/README.md b/p2p/README.md index 5d1f984c..a30b83b7 100644 --- a/p2p/README.md +++ b/p2p/README.md @@ -9,5 +9,6 @@ See: - [docs/connection] for details on how connections and multiplexing work - [docs/peer] for details on peer ID, handshakes, and peer exchange - [docs/node] for details about different types of nodes and how they should work -- [docs/reputation] for details on how peer reputation is managed +- [docs/pex] for details on peer discovery and exchange +- [docs/config] for details on some config options diff --git a/p2p/docs/config.md b/p2p/docs/config.md new file mode 100644 index 00000000..bc3c343c --- /dev/null +++ b/p2p/docs/config.md @@ -0,0 +1,39 @@ +# P2P Config + +Here we describe configuration options around the Peer Exchange. + +## Seed Mode + +`--p2p.seed_mode` + +The node operates in seed mode. It will kick incoming peers after sharing some peers. +It will continually crawl the network for peers. + +## Seeds + +`--p2p.seeds “1.2.3.4:466656,2.3.4.5:4444”` + +Dials these seeds when we need more peers. They will return a list of peers and then disconnect. +If we already have enough peers in the address book, we may never need to dial them. + +## Persistent Peers + +`--p2p.persistent_peers “1.2.3.4:46656,2.3.4.5:466656”` + +Dial these peers and auto-redial them if the connection fails. +These are intended to be trusted persistent peers that can help +anchor us in the p2p network. + +Note that the auto-redial uses exponential backoff and will give up +after a day of trying to connect. + +NOTE: If `dial_seeds` and `persistent_peers` intersect, +the user will be WARNED that seeds may auto-close connections +and the node may not be able to keep the connection persistent. 
+ +## Private Persistent Peers + +`--p2p.private_persistent_peers “1.2.3.4:46656,2.3.4.5:466656”` + +These are persistent peers that we do not add to the address book or +gossip to other peers. They stay private to us. diff --git a/p2p/docs/node.md b/p2p/docs/node.md index a8afc85c..9f9fc529 100644 --- a/p2p/docs/node.md +++ b/p2p/docs/node.md @@ -3,11 +3,6 @@ A Tendermint P2P network has different kinds of nodes with different requirements for connectivity to others. This document describes what kind of nodes Tendermint should enable and how they should work. -## Node startup options ---p2p.seed_mode // If present, this node operates in seed mode. It will kick incoming peers after sharing some peers. ---p2p.seeds “1.2.3.4:466656,2.3.4.5:4444” // Dials these seeds to get peers and disconnects. ---p2p.persistent_peers “1.2.3.4:46656,2.3.4.5:466656” // These connections will be auto-redialed. If dial_seeds and persistent intersect, the user will be WARNED that seeds may auto-close connections and the node may not be able to keep the connection persistent - ## Seeds Seeds are the first point of contact for a new node. @@ -17,22 +12,36 @@ Seeds should operate full nodes, and with the PEX reactor in a "crawler" mode that continuously explores to validate the availability of peers. Seeds should only respond with some top percentile of the best peers it knows about. +See [reputation] for details on peer quality. ## New Full Node -A new node has seeds hardcoded into the software, but they can also be set manually (config file or flags). -The new node must also have access to a recent block height, H, and hash, HASH. +A new node needs a few things to connect to the network: +- a list of seeds, which can be provided to Tendermint via config file or flags, +or hardcoded into the software by in-process apps +- a `ChainID`, also called `Network` at the p2p layer +- a recent block height, H, and hash, HASH for the blockchain. 
-The node then queries some seeds for peers for its chain, +The values `H` and `HASH` must be received and corroborated by means external to Tendermint, and specific to the user - ie. via the user's trusted social consensus. +This requirement to validate `H` and `HASH` out-of-band and via social consensus +is the essential difference in security models between Proof-of-Work and Proof-of-Stake blockchains. + +With the above, the node then queries some seeds for peers for its chain, dials those peers, and runs the Tendermint protocols with those it successfully connects to. When the peer catches up to height H, it ensures the block hash matches HASH. +If not, Tendermint will exit, and the user must try again - either they are connected +to bad peers or their social consensus was invalidated. ## Restarted Full Node A node checks its address book on startup and attempts to connect to peers from there. If it can't connect to any peers after some time, it falls back to the seeds to find more. +Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up +to the latest state of the blockchain, assuming they aren't too far behind. +If they are too far behind, they may need to validate a recent `H` and `HASH` out-of-band again. + ## Validator Node A validator node is a node that interfaces with a validator signing key. diff --git a/p2p/docs/peer.md b/p2p/docs/peer.md index 15870ea7..5281a702 100644 --- a/p2p/docs/peer.md +++ b/p2p/docs/peer.md @@ -10,19 +10,19 @@ Each peer has an ID defined as `peer.ID == peer.PrivKey.Address()`, where `Addre Peer ID's must come with some Proof-of-Work; that is, they must satisfy `peer.PrivKey.Address() < target` for some difficulty target. -This ensures they are not too easy to generate. +This ensures they are not too easy to generate. To begin, let `target == 2^240`. -A single peer ID can have multiple IP addresses associated with - for simplicity, we only keep track -of the latest one. 
+A single peer ID can have multiple IP addresses associated with it. +For simplicity, we only keep track of the latest one. When attempting to connect to a peer, we use the PeerURL: `@:`. We will attempt to connect to the peer at IP:PORT, and verify, via authenticated encryption, that it is in possession of the private key corresponding to ``. This prevents man-in-the-middle attacks on the peer layer. -Peers can also be connected to without specifying an ID, ie. `:`. -In this case, the peer cannot be authenticated and other means, such as a VPN, -must be used. +Peers can also be connected to without specifying an ID, ie. just `:`. +In this case, the peer must be authenticated out-of-band of Tendermint, +for instance via VPN ## Connections @@ -40,18 +40,27 @@ It goes as follows: - send the ephemeral public key to the peer - wait to receive the peer's ephemeral public key - compute the Diffie-Hellman shared secret using the peers ephemeral public key and our ephemeral private key -- generate nonces to use for encryption - - TODO -- all communications from now on are encrypted using the shared secret -- generate a common challenge to sign +- generate two nonces to use for encryption (sending and receiving) as follows: + - sort the ephemeral public keys in ascending order and concatenate them + - RIPEMD160 the result + - append 4 empty bytes (extending the hash to 24-bytes) + - the result is nonce1 + - flip the last bit of nonce1 to get nonce2 + - if we had the smaller ephemeral pubkey, use nonce1 for receiving, nonce2 for sending; + else the opposite +- all communications from now on are encrypted using the shared secret and the nonces, where each nonce +- we now have an encrypted channel, but still need to authenticate +increments by 2 every time it is used +- generate a common challenge to sign: + - SHA256 of the sorted (lowest first) and concatenated ephemeral pub keys - sign the common challenge with our persistent private key -- send the signed challenge and 
persistent public key to the peer
-- wait to receive the signed challenge and persistent public key from the peer
-- verify the signature in the signed challenge using the peers persistent public key
+- send the go-wire encoded persistent pubkey and signature to the peer
+- wait to receive the persistent public key and signature from the peer
+- verify the signature on the challenge using the peer's persistent public key
 
 
 If this is an outgoing connection (we dialed the peer) and we used a peer ID,
-then finally verify that the `peer.PubKey` corresponds to the peer ID we dialed,
+then finally verify that the peer's persistent public key corresponds to the peer ID we dialed,
 ie. `peer.PubKey.Address() == `.
 
 The connection has now been authenticated. All traffic is encrypted.
 
@@ -60,21 +69,20 @@ Note that only the dialer can authenticate the identity of the peer,
 but this is what we care about since when we join the network we wish to
 ensure we have reached the intended peer (and are not being MITMd).
 
-
 ### Peer Filter
 
-Before continuing, we check if the new peer has the same ID has ourselves or
+Before continuing, we check if the new peer has the same ID as ourselves or
 an existing peer. If so, we disconnect.
 
 We also check the peer's address and public key against
 an optional whitelist which can be managed through the ABCI app -
-if the whitelist is enabled and the peer is not on it, the connection is
+if the whitelist is enabled and the peer does not qualify, the connection is
 terminated.
### Tendermint Version Handshake -The Tendermint Version Handshake allows the peers to exchange their NodeInfo, which contains: +The Tendermint Version Handshake allows the peers to exchange their NodeInfo: ``` type NodeInfo struct { @@ -95,11 +103,16 @@ The connection is disconnected if: - `peer.NodeInfo.Version` Major is not the same as ours - `peer.NodeInfo.Version` Minor is not the same as ours - `peer.NodeInfo.Network` is not the same as ours +- `peer.Channels` does not intersect with our known Channels. -At this point, if we have not disconnected, the peer is valid and added to the switch, -so it is added to all reactors. +At this point, if we have not disconnected, the peer is valid. +It is added to the switch and hence all reactors via the `AddPeer` method. +Note that each reactor may handle multiple channels. +## Connection Activity -### Connection Activity +Once a peer is added, incoming messages for a given reactor are handled through +that reactor's `Receive` method, and output messages are sent directly by the Reactors +on each peer. A typical reactor maintains per-peer go-routine/s that handle this. diff --git a/p2p/docs/pex.md b/p2p/docs/pex.md new file mode 100644 index 00000000..a71b9717 --- /dev/null +++ b/p2p/docs/pex.md @@ -0,0 +1,94 @@ +# Peer Strategy and Exchange + +Here we outline the design of the AddressBook +and how it used by the Peer Exchange Reactor (PEX) to ensure we are connected +to good peers and to gossip peers to others. + +## Peer Types + +Certain peers are special in that they are specified by the user as `persistent`, +which means we auto-redial them if the connection fails. +Some such peers can additional be marked as `private`, which means +we will not gossip them to others. + +All others peers are tracked using an address book. + +## Discovery + +Peer discovery begins with a list of seeds. 
+When we have no peers, or have been unable to find enough peers from existing ones, +we dial a randomly selected seed to get a list of peers to dial. + +So long as we have less than `MaxPeers`, we periodically request additional peers +from each of our own. If sufficient time goes by and we still can't find enough peers, +we try the seeds again. + +## Address Book + +Peers are tracked via their ID (their PubKey.Address()). +For each ID, the address book keeps the most recent IP:PORT. +Peers are added to the address book from the PEX when they first connect to us or +when we hear about them from other peers. + +The address book is arranged in sets of buckets, and distinguishes between +vetted and unvetted peers. It keeps different sets of buckets for vetted and +unvetted peers. Buckets provide randomization over peer selection. + +A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets. + +## Vetting + +When a peer is first added, it is unvetted. +Marking a peer as vetted is outside the scope of the `p2p` package. +For Tendermint, a Peer becomes vetted once it has contributed sufficiently +at the consensus layer; ie. once it has sent us valid and not-yet-known +votes and/or block parts for `NumBlocksForVetted` blocks. +Other users of the p2p package can determine their own conditions for when a peer is marked vetted. + +If a peer becomes vetted but there are already too many vetted peers, +a randomly selected one of the vetted peers becomes unvetted. + +If a peer becomes unvetted (either a new peer, or one that was previously vetted), +a randomly selected one of the unvetted peers is removed from the address book. + +More fine-grained tracking of peer behaviour can be done using +a Trust Metric, but it's best to start with something simple. + +## Select Peers to Dial + +When we need more peers, we pick them randomly from the addrbook with some +configurable bias for unvetted peers. 
The bias should be lower when we have fewer peers,
+and can increase as we obtain more, ensuring that our first peers are more trustworthy,
+but always giving us the chance to discover new good peers.
+
+## Select Peers to Exchange
+
+When we’re asked for peers, we select them as follows:
+- select at most `maxGetSelection` peers
+- try to select at least `minGetSelection` peers - if we have less than that, select them all.
+- select a random, unbiased `getSelectionPercent` of the peers
+
+Send the selected peers. Note we select peers for sending without bias for vetted/unvetted.
+
+## Preventing Spam
+
+There are various cases where we decide a peer has misbehaved and we disconnect from them.
+When this happens, the peer is removed from the address book and blacklisted for
+some amount of time. We call this "Disconnect and Mark".
+Note that the bad behaviour may be detected outside the PEX reactor itself
+(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
+so it can remove and mark the peer.
+
+In the PEX, if a peer sends us unsolicited lists of peers,
+or if the peer sends too many requests for more peers in a given amount of time,
+we Disconnect and Mark.
+
+## Trust Metric
+
+The quality of peers can be tracked in more fine-grained detail using a
+Proportional-Integral-Derivative (PID) controller that incorporates
+current, past, and rate-of-change data to inform peer quality.
+
+While a PID trust metric has been implemented, it remains for future work
+to use it in the PEX.
+
diff --git a/p2p/docs/reputation.md b/p2p/docs/trustmetric.md
similarity index 51%
rename from p2p/docs/reputation.md
rename to p2p/docs/trustmetric.md
index a2a995e5..b0eaf96e 100644
--- a/p2p/docs/reputation.md
+++ b/p2p/docs/trustmetric.md
@@ -1,11 +1,4 @@
-# Peer Strategy
-
-Peers are managed using an address book and a trust metric.
-The book keeps a record of vetted peers and unvetted peers.
-When we need more peers, we pick them randomly from the addrbook with some -configurable bias for unvetted peers. When we’re asked for peers, we provide a random selection with no bias. - The trust metric tracks the quality of the peers. When a peer exceeds a certain quality for a certain amount of time, it is marked as vetted in the addrbook. @@ -17,7 +10,7 @@ possibly only if its below some absolute minimum ? Peer quality is tracked in the connection and across the reactors. Behaviours are defined as one of: - fatal - something outright malicious. we should disconnect and remember them. - - bad - any kind of timeout, msgs that dont unmarshal, or fail other validity checks, or msgs we didn't ask for or arent expecting - - neutral - normal correct behaviour. unknown channels/msg types (version upgrades). - - good - some random majority of peers per reactor sending us useful messages + - bad - any kind of timeout, msgs that dont unmarshal, or fail other validity checks, or msgs we didn't ask for or arent expecting + - neutral - normal correct behaviour. unknown channels/msg types (version upgrades). + - good - some random majority of peers per reactor sending us useful messages From a573b20888d0079dc771e9bcf1f206a247b1200a Mon Sep 17 00:00:00 2001 From: Zach Ramsay Date: Wed, 3 Jan 2018 01:23:38 +0000 Subject: [PATCH 28/33] docs: add counter/dummy code snippets closes https://github.com/tendermint/abci/issues/134 --- docs/abci-cli.rst | 87 ++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 82 insertions(+), 5 deletions(-) diff --git a/docs/abci-cli.rst b/docs/abci-cli.rst index ae410568..efbeb71b 100644 --- a/docs/abci-cli.rst +++ b/docs/abci-cli.rst @@ -53,7 +53,7 @@ Now run ``abci-cli`` to see the list of commands: -h, --help help for abci-cli -v, --verbose print the command and results as if it were a console session - Use "abci-cli [command] --help" for more information about a command. 
+ Use "abci-cli [command] --help" for more information about a command. Dummy - First Example @@ -66,14 +66,56 @@ The most important messages are ``deliver_tx``, ``check_tx``, and ``commit``, but there are others for convenience, configuration, and information purposes. -Let's start a dummy application, which was installed at the same time as -``abci-cli`` above. The dummy just stores transactions in a merkle tree: +We'll start a dummy application, which was installed at the same time as +``abci-cli`` above. The dummy just stores transactions in a merkle tree. + +Its code can be found `here `__ and looks like: + +.. container:: toggle + + .. container:: header + + **Show/Hide Dummy Example** + + .. code-block:: go + + func cmdDummy(cmd *cobra.Command, args []string) error { + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Create the application - in memory or persisted to disk + var app types.Application + if flagPersist == "" { + app = dummy.NewDummyApplication() + } else { + app = dummy.NewPersistentDummyApplication(flagPersist) + app.(*dummy.PersistentDummyApplication).SetLogger(logger.With("module", "dummy")) + } + + // Start the listener + srv, err := server.NewServer(flagAddrD, flagAbci, app) + if err != nil { + return err + } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil + } + +Start by running: :: abci-cli dummy -In another terminal, run +And in another terminal, run :: @@ -187,6 +229,41 @@ Counter - Another Example Now that we've got the hang of it, let's try another application, the "counter" app. +Like the dummy app, its code can be found `here `__ and looks like: + +.. container:: toggle + + .. container:: header + + **Show/Hide Counter Example** + + .. 
code-block:: go + + func cmdCounter(cmd *cobra.Command, args []string) error { + + app := counter.NewCounterApplication(flagSerial) + + logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) + + // Start the listener + srv, err := server.NewServer(flagAddrC, flagAbci, app) + if err != nil { + return err + } + srv.SetLogger(logger.With("module", "abci-server")) + if err := srv.Start(); err != nil { + return err + } + + // Wait forever + cmn.TrapSignal(func() { + // Cleanup + srv.Stop() + }) + return nil + } + + The counter app doesn't use a Merkle tree, it just counts how many times we've sent a transaction, asked for a hash, or committed the state. The result of ``commit`` is just the number of transactions sent. @@ -261,7 +338,7 @@ But the ultimate flexibility comes from being able to write the application easily in any language. We have implemented the counter in a number of languages (see the -example directory). +`example directory Date: Wed, 3 Jan 2018 10:49:47 +0100 Subject: [PATCH 29/33] Move P2P docs into docs folder --- docs/specification/new-spec/README.md | 1 + {p2p/docs => docs/specification/new-spec/p2p}/config.md | 0 {p2p/docs => docs/specification/new-spec/p2p}/connection.md | 0 {p2p/docs => docs/specification/new-spec/p2p}/node.md | 0 {p2p/docs => docs/specification/new-spec/p2p}/peer.md | 0 {p2p/docs => docs/specification/new-spec/p2p}/pex.md | 0 {p2p/docs => docs/specification/new-spec/p2p}/trustmetric.md | 0 7 files changed, 1 insertion(+) rename {p2p/docs => docs/specification/new-spec/p2p}/config.md (100%) rename {p2p/docs => docs/specification/new-spec/p2p}/connection.md (100%) rename {p2p/docs => docs/specification/new-spec/p2p}/node.md (100%) rename {p2p/docs => docs/specification/new-spec/p2p}/peer.md (100%) rename {p2p/docs => docs/specification/new-spec/p2p}/pex.md (100%) rename {p2p/docs => docs/specification/new-spec/p2p}/trustmetric.md (100%) diff --git a/docs/specification/new-spec/README.md b/docs/specification/new-spec/README.md 
index a5061e62..8a07d922 100644 --- a/docs/specification/new-spec/README.md +++ b/docs/specification/new-spec/README.md @@ -9,6 +9,7 @@ It contains the following components: - [Encoding and Digests](encoding.md) - [Blockchain](blockchain.md) - [State](state.md) +- [P2P](p2p/node.md) ## Overview diff --git a/p2p/docs/config.md b/docs/specification/new-spec/p2p/config.md similarity index 100% rename from p2p/docs/config.md rename to docs/specification/new-spec/p2p/config.md diff --git a/p2p/docs/connection.md b/docs/specification/new-spec/p2p/connection.md similarity index 100% rename from p2p/docs/connection.md rename to docs/specification/new-spec/p2p/connection.md diff --git a/p2p/docs/node.md b/docs/specification/new-spec/p2p/node.md similarity index 100% rename from p2p/docs/node.md rename to docs/specification/new-spec/p2p/node.md diff --git a/p2p/docs/peer.md b/docs/specification/new-spec/p2p/peer.md similarity index 100% rename from p2p/docs/peer.md rename to docs/specification/new-spec/p2p/peer.md diff --git a/p2p/docs/pex.md b/docs/specification/new-spec/p2p/pex.md similarity index 100% rename from p2p/docs/pex.md rename to docs/specification/new-spec/p2p/pex.md diff --git a/p2p/docs/trustmetric.md b/docs/specification/new-spec/p2p/trustmetric.md similarity index 100% rename from p2p/docs/trustmetric.md rename to docs/specification/new-spec/p2p/trustmetric.md From 0430ebf95cc0ae40d61778d60e92bf9c8401a3ce Mon Sep 17 00:00:00 2001 From: Greg Szabo Date: Wed, 3 Jan 2018 14:58:23 -0500 Subject: [PATCH 30/33] Makefile changes for cross-building and standardized builds using gox --- Makefile | 59 ++++++++++++++++++++++++++++++++++--------- scripts/dist_build.sh | 28 ++------------------ 2 files changed, 49 insertions(+), 38 deletions(-) diff --git a/Makefile b/Makefile index 2aed1acf..bb1d72d5 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,29 @@ -GOTOOLS = \ +GOTOOLS := \ github.com/mitchellh/gox \ github.com/Masterminds/glide \ github.com/tcnksm/ghr \ 
gopkg.in/alecthomas/gometalinter.v2 -GOTOOLS_CHECK = gox glide ghr gometalinter.v2 -PACKAGES=$(shell go list ./... | grep -v '/vendor/') -BUILD_TAGS?=tendermint -TMHOME = $${TMHOME:-$$HOME/.tendermint} -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short HEAD`" +GO_MIN_VERSION := 1.9.2 +PACKAGES := $(shell go list ./... | grep -v '/vendor/') +BUILD_TAGS ?= tendermint +TMHOME ?= $(HOME)/.tendermint +GOPATH ?= $(shell go env GOPATH) +GOROOT ?= $(shell go env GOROOT) +GOGCCFLAGS ?= $(shell go env GOGCCFLAGS) +PROD_LDFLAGS ?= -w -s +XC_ARCH ?= 386 amd64 arm +XC_OS ?= solaris darwin freebsd linux windows +XC_OSARCH ?= !darwin/arm !solaris/amd64 !freebsd/amd64 +BUILD_OUTPUT ?= $(GOPATH)/bin/{{.OS}}_{{.Arch}}/tendermint + +GOX_FLAGS = -os="$(XC_OS)" -arch="$(XC_ARCH)" -osarch="$(XC_OSARCH)" -output="$(BUILD_OUTPUT)" +ifeq ($(BUILD_FLAGS_RACE),YES) +RACEFLAG=-race +else +RACEFLAG= +endif +BUILD_FLAGS = -asmflags "-trimpath $(GOPATH)" -gcflags "-trimpath $(GOPATH)" -tags "$(BUILD_TAGS)" -ldflags "$(PROD_LDFLAGS) -X github.com/tendermint/tendermint/version.GitCommit=$(shell git rev-parse --short=7 HEAD)" $(RACEFLAG) +GO_VERSION:=$(shell go version | grep -o '[[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+') all: check build test install metalinter @@ -17,27 +33,46 @@ check: check_tools get_vendor_deps ######################################## ### Build +build_cc: + $(shell which gox) $(BUILD_FLAGS) $(GOX_FLAGS) ./cmd/tendermint/ + build: - go build $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint/ + make build_cc PROD_LDFLAGS="" XC_ARCH=amd64 XC_OS="$(shell uname -s)" BUILD_OUTPUT=$(GOPATH)/bin/tendermint build_race: - go build -race $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint + $(shell which go) build $(BUILD_FLAGS) -race -o "$(BUILD_OUTPUT)" ./cmd/tendermint/ +#For the future when this is merged: https://github.com/mitchellh/gox/pull/105 +# make build_cc PROD_LDFLAGS="" XC_ARCH=amd64 XC_OS=$(shell uname -s) 
BUILD_FLAGS_RACE=YES BUILD_OUTPUT=build/tendermint # dist builds binaries for all platforms and packages them for distribution dist: @BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'" install: - go install $(BUILD_FLAGS) ./cmd/tendermint + make build ######################################## ### Tools & dependencies check_tools: - @# https://stackoverflow.com/a/25668869 - @echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\ - $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" +ifeq ($(GO_VERSION),) + $(error go not found) +endif +ifneq ($(GO_VERSION),$(GO_MIN_VERSION)) + $(warning WARNING: build will not be deterministic. go version should be $(GO_MIN_VERSION)) +endif +ifneq ($(findstring -fdebug-prefix-map,$(GOGCCFLAGS)),-fdebug-prefix-map) + $(warning WARNING: build will not be deterministic. The compiler does not support the '-fdebug-prefix-map' flag.) +endif +ifneq ($(GOROOT),/usr/local/go) + $(warning WARNING: build will not be deterministic. GOPATH should be set to /usr/local/go) +endif +ifneq ($(findstring $(GOPATH)/bin,$(PATH)),$(GOPATH)/bin) + $(warning WARNING: PATH does not contain GOPATH/bin. Some external dependencies might be unavailable.) +endif +# https://stackoverflow.com/a/25668869 + @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH. Add GOPATH/bin to PATH and run 'make get_tools'")))" get_tools: @echo "--> Installing tools" diff --git a/scripts/dist_build.sh b/scripts/dist_build.sh index 587199e0..e7471c4d 100755 --- a/scripts/dist_build.sh +++ b/scripts/dist_build.sh @@ -9,32 +9,8 @@ DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" # Change into that dir because we expect that. 
cd "$DIR" -# Get the git commit -GIT_COMMIT="$(git rev-parse --short HEAD)" -GIT_IMPORT="github.com/tendermint/tendermint/version" - -# Determine the arch/os combos we're building for -XC_ARCH=${XC_ARCH:-"386 amd64 arm"} -XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} - -# Make sure build tools are available. -make tools - -# Get VENDORED dependencies -make get_vendor_deps - -# Build! -# ldflags: -s Omit the symbol table and debug information. -# -w Omit the DWARF symbol table. -echo "==> Building..." -"$(which gox)" \ - -os="${XC_OS}" \ - -arch="${XC_ARCH}" \ - -osarch="!darwin/arm !solaris/amd64 !freebsd/amd64" \ - -ldflags "-s -w -X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}" \ - -output "build/pkg/{{.OS}}_{{.Arch}}/tendermint" \ - -tags="${BUILD_TAGS}" \ - github.com/tendermint/tendermint/cmd/tendermint +# Make sure build tools are available, get VENDORED dependencies and build +make get_tools get_vendor_deps build_cc # Zip all the files. echo "==> Packaging..." From f67f99c227e66c9bdbda2f3e34b5c7fcbc6f67c2 Mon Sep 17 00:00:00 2001 From: Greg Szabo Date: Wed, 3 Jan 2018 17:24:11 -0500 Subject: [PATCH 31/33] Extended install document with docker option. Added extra checks to developer's build target. 
--- Makefile | 16 ++++++++++------ docs/install.rst | 13 +++++++++++++ 2 files changed, 23 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index bb1d72d5..3aa5e051 100644 --- a/Makefile +++ b/Makefile @@ -10,7 +10,7 @@ TMHOME ?= $(HOME)/.tendermint GOPATH ?= $(shell go env GOPATH) GOROOT ?= $(shell go env GOROOT) GOGCCFLAGS ?= $(shell go env GOGCCFLAGS) -PROD_LDFLAGS ?= -w -s +#LDFLAGS_EXTRA ?= -w -s XC_ARCH ?= 386 amd64 arm XC_OS ?= solaris darwin freebsd linux windows XC_OSARCH ?= !darwin/arm !solaris/amd64 !freebsd/amd64 @@ -22,7 +22,7 @@ RACEFLAG=-race else RACEFLAG= endif -BUILD_FLAGS = -asmflags "-trimpath $(GOPATH)" -gcflags "-trimpath $(GOPATH)" -tags "$(BUILD_TAGS)" -ldflags "$(PROD_LDFLAGS) -X github.com/tendermint/tendermint/version.GitCommit=$(shell git rev-parse --short=7 HEAD)" $(RACEFLAG) +BUILD_FLAGS = -asmflags "-trimpath $(GOPATH)" -gcflags "-trimpath $(GOPATH)" -tags "$(BUILD_TAGS)" -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=$(shell git rev-parse --short=7 HEAD) $(LDFLAGS_EXTRA)" $(RACEFLAG) GO_VERSION:=$(shell go version | grep -o '[[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+') all: check build test install metalinter @@ -33,16 +33,20 @@ check: check_tools get_vendor_deps ######################################## ### Build -build_cc: +build_cc: check_tools $(shell which gox) $(BUILD_FLAGS) $(GOX_FLAGS) ./cmd/tendermint/ build: - make build_cc PROD_LDFLAGS="" XC_ARCH=amd64 XC_OS="$(shell uname -s)" BUILD_OUTPUT=$(GOPATH)/bin/tendermint +ifeq ($(OS),Windows_NT) + make build_cc XC_ARCH=amd64 XC_OS=windows BUILD_OUTPUT=$(GOPATH)/bin/tendermint +else + make build_cc XC_ARCH=amd64 XC_OS="$(shell uname -s)" BUILD_OUTPUT=$(GOPATH)/bin/tendermint +endif build_race: +#TODO: Wait for this to be merged: https://github.com/mitchellh/gox/pull/105 Then switch over to make build and remove the go build line. 
+# make build BUILD_FLAGS_RACE=YES $(shell which go) build $(BUILD_FLAGS) -race -o "$(BUILD_OUTPUT)" ./cmd/tendermint/ -#For the future when this is merged: https://github.com/mitchellh/gox/pull/105 -# make build_cc PROD_LDFLAGS="" XC_ARCH=amd64 XC_OS=$(shell uname -s) BUILD_FLAGS_RACE=YES BUILD_OUTPUT=build/tendermint # dist builds binaries for all platforms and packages them for distribution dist: diff --git a/docs/install.rst b/docs/install.rst index 64fae4cd..9edc051a 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -6,6 +6,19 @@ From Binary To download pre-built binaries, see the `Download page `__. +From Source using Docker +------------------------ + +If you have docker running, all you need is the ``golang`` image to build tendermint. +If you don't, you can get help setting it up `here `__. + +:: + mkdir $HOME/tendermintbin + docker run --rm -it -v $HOME/tendermintbin:/go/bin:Z golang:1.9.2 /bin/bash -c "go-wrapper download github.com/tendermint/tendermint/cmd/tendermint ; make -C /go/src/github.com/tendermint/tendermint get_tools get_vendor_deps build_cc" + +You will find the ``tendermint`` binaries for different architectures and operating systems in your ``$HOME/tendermintbin`` folder. 
+ + From Source ----------- From bb3dc10f24917601ce1a1b25882eecf6ef0a79b0 Mon Sep 17 00:00:00 2001 From: Greg Szabo Date: Fri, 5 Jan 2018 14:22:13 -0500 Subject: [PATCH 32/33] Makefile improvements for deterministic builds based on Bucky's feedback --- Makefile | 28 ++++++++++++++++++++++------ scripts/dist_build.sh | 2 +- 2 files changed, 23 insertions(+), 7 deletions(-) diff --git a/Makefile b/Makefile index 3aa5e051..2a4b14e3 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ GOGCCFLAGS ?= $(shell go env GOGCCFLAGS) XC_ARCH ?= 386 amd64 arm XC_OS ?= solaris darwin freebsd linux windows XC_OSARCH ?= !darwin/arm !solaris/amd64 !freebsd/amd64 -BUILD_OUTPUT ?= $(GOPATH)/bin/{{.OS}}_{{.Arch}}/tendermint +BUILD_OUTPUT ?= ./build/{{.OS}}_{{.Arch}}/tendermint GOX_FLAGS = -os="$(XC_OS)" -arch="$(XC_ARCH)" -osarch="$(XC_OSARCH)" -output="$(BUILD_OUTPUT)" ifeq ($(BUILD_FLAGS_RACE),YES) @@ -22,8 +22,13 @@ RACEFLAG=-race else RACEFLAG= endif -BUILD_FLAGS = -asmflags "-trimpath $(GOPATH)" -gcflags "-trimpath $(GOPATH)" -tags "$(BUILD_TAGS)" -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=$(shell git rev-parse --short=7 HEAD) $(LDFLAGS_EXTRA)" $(RACEFLAG) +BUILD_FLAGS = -asmflags "-trimpath $(GOPATH)" -gcflags "-trimpath $(GOPATH)" -tags "$(BUILD_TAGS)" -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=$(shell git rev-parse --short=8 HEAD) $(LDFLAGS_EXTRA)" $(RACEFLAG) GO_VERSION:=$(shell go version | grep -o '[[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+') +#Check that the minor version of GO meets the minimum required +GO_MINOR_VERSION := $(shell grep -o \.[[:digit:]][[:digit:]]*\. <<< $(GO_VERSION) | grep -o [[:digit:]]* ) +GO_MIN_MINOR_VERSION := $(shell grep -o \.[[:digit:]][[:digit:]]*\. 
<<< $(GO_MIN_VERSION) | grep -o [[:digit:]]* ) +GO_MINOR_VERSION_CHECK := $(shell test $(GO_MINOR_VERSION) -ge $(GO_MIN_MINOR_VERSION) && echo YES) + all: check build test install metalinter @@ -33,14 +38,14 @@ check: check_tools get_vendor_deps ######################################## ### Build -build_cc: check_tools +build_xc: check_tools $(shell which gox) $(BUILD_FLAGS) $(GOX_FLAGS) ./cmd/tendermint/ build: ifeq ($(OS),Windows_NT) - make build_cc XC_ARCH=amd64 XC_OS=windows BUILD_OUTPUT=$(GOPATH)/bin/tendermint + make build_xc XC_ARCH=amd64 XC_OS=windows BUILD_OUTPUT=$(GOPATH)/bin/tendermint else - make build_cc XC_ARCH=amd64 XC_OS="$(shell uname -s)" BUILD_OUTPUT=$(GOPATH)/bin/tendermint + make build_xc XC_ARCH=amd64 XC_OS="$(shell uname -s)" BUILD_OUTPUT=$(GOPATH)/bin/tendermint endif build_race: @@ -63,15 +68,26 @@ check_tools: ifeq ($(GO_VERSION),) $(error go not found) endif +#Check minimum required go version ifneq ($(GO_VERSION),$(GO_MIN_VERSION)) $(warning WARNING: build will not be deterministic. go version should be $(GO_MIN_VERSION)) +ifneq ($(GO_MINOR_VERSION_CHECK),YES) + $(error ERROR: The minor version of Go ($(GO_VERSION)) is lower than the minimum required ($(GO_MIN_VERSION))) endif +endif +#-fdebug-prefix-map switches the temporary, randomized workdir name in the binary to a static text ifneq ($(findstring -fdebug-prefix-map,$(GOGCCFLAGS)),-fdebug-prefix-map) $(warning WARNING: build will not be deterministic. The compiler does not support the '-fdebug-prefix-map' flag.) endif +#GOROOT string is copied into the binary. For deterministic builds, we agree to keep it at /usr/local/go. (Default for golang:1.9.2 docker image, linux and osx.) ifneq ($(GOROOT),/usr/local/go) - $(warning WARNING: build will not be deterministic. GOPATH should be set to /usr/local/go) + $(warning WARNING: build will not be deterministic. GOROOT should be set to /usr/local/go) endif +#GOPATH string is copied into the binary. 
Although the -trimpath flag tries to eliminate it, it doesn't do it everywhere in Go 1.9.2. For deterministic builds we agree to keep it at /go. (Default for golang:1.9.2 docker image.) +ifneq ($(GOPATH),/go) + $(warning WARNING: build will not be deterministic. GOPATH should be set to /go) +endif +#External dependencies defined in GOTOOLS are built with get_tools. If they are already available on the system (for example using a package manager), then get_tools might not be necessary. ifneq ($(findstring $(GOPATH)/bin,$(PATH)),$(GOPATH)/bin) $(warning WARNING: PATH does not contain GOPATH/bin. Some external dependencies might be unavailable.) endif diff --git a/scripts/dist_build.sh b/scripts/dist_build.sh index e7471c4d..f1d8779f 100755 --- a/scripts/dist_build.sh +++ b/scripts/dist_build.sh @@ -10,7 +10,7 @@ DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" cd "$DIR" # Make sure build tools are available, get VENDORED dependencies and build -make get_tools get_vendor_deps build_cc +make get_tools get_vendor_deps build_xc # Zip all the files. echo "==> Packaging..." From 92f5ae5a84591434311b92148b14edcf6551b7e5 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 5 Jan 2018 22:19:12 -0500 Subject: [PATCH 33/33] fix vagrant [ci skip] --- Vagrantfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Vagrantfile b/Vagrantfile index 80d44f9c..12cfce47 100644 --- a/Vagrantfile +++ b/Vagrantfile @@ -44,6 +44,6 @@ EOF chown ubuntu:ubuntu /home/ubuntu/.bash_profile # get all deps and tools, ready to install/test - su - ubuntu -c 'cd /home/ubuntu/go/src/github.com/tendermint/tendermint && make get_vendor_deps && make tools' + su - ubuntu -c 'cd /home/ubuntu/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps' SHELL end