diff --git a/.gitignore b/.gitignore index 23ae616e..22a6be0b 100644 --- a/.gitignore +++ b/.gitignore @@ -17,3 +17,6 @@ test/logs coverage.txt docs/_build docs/tools + +scripts/wal2json/wal2json +scripts/cutWALUntil/cutWALUntil diff --git a/CHANGELOG.md b/CHANGELOG.md index c48d0850..1832ff81 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,6 @@ FEATURES: - Use the chain as its own CA for nodes and validators - Tooling to run multiple blockchains/apps, possibly in a single process - State syncing (without transaction replay) -- Improved support for querying history and state - Add authentication and rate-limitting to the RPC IMPROVEMENTS: @@ -28,6 +27,32 @@ BUG FIXES: - Graceful handling/recovery for apps that have non-determinism or fail to halt - Graceful handling/recovery for violations of safety, or liveness +## 0.12.0 (October 27, 2017) + +BREAKING CHANGES: + - rpc/client: websocket ResultsCh and ErrorsCh unified in ResponsesCh. + - rpc/client: ABCIQuery no longer takes `prove` + - state: remove GenesisDoc from state. + - consensus: new binary WAL format provides efficiency and uses checksums to detect corruption + - use scripts/wal2json to convert to json for debugging + +FEATURES: + - new `certifiers` pkg contains the tendermint light-client library (name subject to change)! + - rpc: `/genesis` includes the `app_options` . + - rpc: `/abci_query` takes an additional `height` parameter to support historical queries. + - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height. + +IMPROVEMENTS: + - rpc: `/genesis` result includes `app_options` + - rpc/lib/client: add jitter to reconnects. + - rpc/lib/types: `RPCError` satisfies the `error` interface. 
+ +BUG FIXES: + - rpc/client: fix ws deadlock after stopping + - blockchain: fix panic on AddBlock when peer is nil + - mempool: fix sending on TxsAvailable when a tx has been invalidated + - consensus: dont run WAL catchup if we fast synced + ## 0.11.1 (October 10, 2017) IMPROVEMENTS: diff --git a/Makefile b/Makefile index dfb0dc3a..2271abeb 100644 --- a/Makefile +++ b/Makefile @@ -35,6 +35,9 @@ test_race: test_integrations: @bash ./test/test.sh +release: + @go test -tags release $(PACKAGES) + test100: @for i in {1..100}; do make test; done diff --git a/benchmarks/blockchain/.gitignore b/benchmarks/blockchain/.gitignore new file mode 100644 index 00000000..9e67bd47 --- /dev/null +++ b/benchmarks/blockchain/.gitignore @@ -0,0 +1,2 @@ +data + diff --git a/benchmarks/blockchain/localsync.sh b/benchmarks/blockchain/localsync.sh new file mode 100755 index 00000000..e181c565 --- /dev/null +++ b/benchmarks/blockchain/localsync.sh @@ -0,0 +1,80 @@ +#!/bin/bash + +DATA=$GOPATH/src/github.com/tendermint/tendermint/benchmarks/blockchain/data +if [ ! -d $DATA ]; then + echo "no data found, generating a chain... (this only has to happen once)" + + tendermint init --home $DATA + cp $DATA/config.toml $DATA/config2.toml + echo " + [consensus] + timeout_commit = 0 + " >> $DATA/config.toml + + echo "starting node" + tendermint node \ + --home $DATA \ + --proxy_app dummy \ + --p2p.laddr tcp://127.0.0.1:56656 \ + --rpc.laddr tcp://127.0.0.1:56657 \ + --log_level error & + + echo "making blocks for 60s" + sleep 60 + + mv $DATA/config2.toml $DATA/config.toml + + kill %1 + + echo "done generating chain." 
+fi + +# validator node +HOME1=$TMPDIR$RANDOM$RANDOM +cp -R $DATA $HOME1 +echo "starting validator node" +tendermint node \ + --home $HOME1 \ + --proxy_app dummy \ + --p2p.laddr tcp://127.0.0.1:56656 \ + --rpc.laddr tcp://127.0.0.1:56657 \ + --log_level error & +sleep 1 + +# downloader node +HOME2=$TMPDIR$RANDOM$RANDOM +tendermint init --home $HOME2 +cp $HOME1/genesis.json $HOME2 +printf "starting downloader node" +tendermint node \ + --home $HOME2 \ + --proxy_app dummy \ + --p2p.laddr tcp://127.0.0.1:56666 \ + --rpc.laddr tcp://127.0.0.1:56667 \ + --p2p.seeds 127.0.0.1:56656 \ + --log_level error & + +# wait for node to start up so we only count time where we are actually syncing +sleep 0.5 +while curl localhost:56667/status 2> /dev/null | grep "\"latest_block_height\": 0," > /dev/null +do + printf '.' + sleep 0.2 +done +echo + +echo "syncing blockchain for 10s" +for i in {1..10} +do + sleep 1 + HEIGHT="$(curl localhost:56667/status 2> /dev/null \ + | grep 'latest_block_height' \ + | grep -o ' [0-9]*' \ + | xargs)" + let 'RATE = HEIGHT / i' + echo "height: $HEIGHT, blocks/sec: $RATE" +done + +kill %1 +kill %2 +rm -rf $HOME1 $HOME2 diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go index ff5b14c0..c6b4c161 100644 --- a/benchmarks/simu/counter.go +++ b/benchmarks/simu/counter.go @@ -21,7 +21,7 @@ func main() { // Read a bunch of responses go func() { for { - _, ok := <-wsc.ResultsCh + _, ok := <-wsc.ResponsesCh if !ok { break } diff --git a/blockchain/pool.go b/blockchain/pool.go index bd52e280..47e59711 100644 --- a/blockchain/pool.go +++ b/blockchain/pool.go @@ -11,11 +11,25 @@ import ( "github.com/tendermint/tmlibs/log" ) +/* + +eg, L = latency = 0.1s + P = num peers = 10 + FN = num full nodes + BS = 1kB block size + CB = 1 Mbit/s = 128 kB/s + CB/P = 12.8 kB + B/S = CB/P/BS = 12.8 blocks/s + + 12.8 * 0.1 = 1.28 blocks on conn + +*/ + const ( - requestIntervalMS = 250 - maxTotalRequesters = 300 + requestIntervalMS = 100 + maxTotalRequesters = 
1000 maxPendingRequests = maxTotalRequesters - maxPendingRequestsPerPeer = 75 + maxPendingRequestsPerPeer = 50 minRecvRate = 10240 // 10Kb/s ) @@ -42,7 +56,8 @@ type BlockPool struct { height int // the lowest key in requesters. numPending int32 // number of requests pending assignment or block response // peers - peers map[string]*bpPeer + peers map[string]*bpPeer + maxPeerHeight int requestsCh chan<- BlockRequest timeoutsCh chan<- string @@ -69,16 +84,16 @@ func (pool *BlockPool) OnStart() error { return nil } -func (pool *BlockPool) OnStop() { - pool.BaseService.OnStop() -} +func (pool *BlockPool) OnStop() {} // Run spawns requesters as needed. func (pool *BlockPool) makeRequestersRoutine() { + for { if !pool.IsRunning() { break } + _, numPending, lenRequesters := pool.GetStatus() if numPending >= maxPendingRequests { // sleep for a bit. @@ -135,16 +150,10 @@ func (pool *BlockPool) IsCaughtUp() bool { return false } - maxPeerHeight := 0 - for _, peer := range pool.peers { - maxPeerHeight = cmn.MaxInt(maxPeerHeight, peer.height) - } - // some conditions to determine if we're caught up receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second) - ourChainIsLongestAmongPeers := maxPeerHeight == 0 || pool.height >= maxPeerHeight + ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers - pool.Logger.Info(cmn.Fmt("IsCaughtUp: %v", isCaughtUp), "height", pool.height, "maxPeerHeight", maxPeerHeight) return isCaughtUp } @@ -188,15 +197,16 @@ func (pool *BlockPool) PopRequest() { // Remove the peer and redo request from others. func (pool *BlockPool) RedoRequest(height int) { pool.mtx.Lock() + defer pool.mtx.Unlock() + request := pool.requesters[height] - pool.mtx.Unlock() if request.block == nil { cmn.PanicSanity("Expected block to be non-nil") } // RemovePeer will redo all requesters associated with this peer. 
// TODO: record this malfeasance - pool.RemovePeer(request.peerID) + pool.removePeer(request.peerID) } // TODO: ensure that blocks come in order for each peer. @@ -206,18 +216,29 @@ func (pool *BlockPool) AddBlock(peerID string, block *types.Block, blockSize int requester := pool.requesters[block.Height] if requester == nil { + // a block we didn't expect. + // TODO:if height is too far ahead, punish peer return } if requester.setBlock(block, peerID) { pool.numPending-- peer := pool.peers[peerID] - peer.decrPending(blockSize) + if peer != nil { + peer.decrPending(blockSize) + } } else { // Bad peer? } } +// MaxPeerHeight returns the heighest height reported by a peer +func (pool *BlockPool) MaxPeerHeight() int { + pool.mtx.Lock() + defer pool.mtx.Unlock() + return pool.maxPeerHeight +} + // Sets the peer's alleged blockchain height. func (pool *BlockPool) SetPeerHeight(peerID string, height int) { pool.mtx.Lock() @@ -231,6 +252,10 @@ func (pool *BlockPool) SetPeerHeight(peerID string, height int) { peer.setLogger(pool.Logger.With("peer", peerID)) pool.peers[peerID] = peer } + + if height > pool.maxPeerHeight { + pool.maxPeerHeight = height + } } func (pool *BlockPool) RemovePeer(peerID string) { @@ -281,7 +306,7 @@ func (pool *BlockPool) makeNextRequester() { nextHeight := pool.height + len(pool.requesters) request := newBPRequester(pool, nextHeight) - request.SetLogger(pool.Logger.With("height", nextHeight)) + // request.SetLogger(pool.Logger.With("height", nextHeight)) pool.requesters[nextHeight] = request pool.numPending++ diff --git a/blockchain/reactor.go b/blockchain/reactor.go index 91f0aded..9ac58031 100644 --- a/blockchain/reactor.go +++ b/blockchain/reactor.go @@ -12,14 +12,15 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" cmn "github.com/tendermint/tmlibs/common" + "github.com/tendermint/tmlibs/log" ) const ( // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) 
BlockchainChannel = byte(0x40) - defaultChannelCapacity = 100 - trySyncIntervalMS = 100 + defaultChannelCapacity = 1000 + trySyncIntervalMS = 50 // stop syncing when last block's time is // within this much of the system time. // stopSyncingDurationMinutes = 10 @@ -33,7 +34,7 @@ const ( type consensusReactor interface { // for when we switch from blockchain reactor and fast sync to // the consensus machine - SwitchToConsensus(*sm.State) + SwitchToConsensus(*sm.State, int) } // BlockchainReactor handles long-term catchup syncing. @@ -79,7 +80,13 @@ func NewBlockchainReactor(state *sm.State, proxyAppConn proxy.AppConnConsensus, return bcR } -// OnStart implements BaseService +// SetLogger implements cmn.Service by setting the logger on reactor and pool. +func (bcR *BlockchainReactor) SetLogger(l log.Logger) { + bcR.BaseService.Logger = l + bcR.pool.Logger = l +} + +// OnStart implements cmn.Service. func (bcR *BlockchainReactor) OnStart() error { bcR.BaseReactor.OnStart() if bcR.fastSync { @@ -92,7 +99,7 @@ func (bcR *BlockchainReactor) OnStart() error { return nil } -// OnStop implements BaseService +// OnStop implements cmn.Service. func (bcR *BlockchainReactor) OnStop() { bcR.BaseReactor.OnStop() bcR.pool.Stop() @@ -103,8 +110,8 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { return []*p2p.ChannelDescriptor{ &p2p.ChannelDescriptor{ ID: BlockchainChannel, - Priority: 5, - SendQueueCapacity: 100, + Priority: 10, + SendQueueCapacity: 1000, }, } } @@ -175,7 +182,7 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) // maxMsgSize returns the maximum allowable size of a // message on the blockchain reactor. func (bcR *BlockchainReactor) maxMsgSize() int { - return bcR.state.Params().BlockSizeParams.MaxBytes + 2 + return bcR.state.Params.BlockSizeParams.MaxBytes + 2 } // Handle messages from the poolReactor telling the reactor what to do. 
@@ -187,6 +194,13 @@ func (bcR *BlockchainReactor) poolRoutine() { statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second) + blocksSynced := 0 + + chainID := bcR.state.ChainID + + lastHundred := time.Now() + lastRate := 0.0 + FOR_LOOP: for { select { @@ -212,16 +226,16 @@ FOR_LOOP: // ask for status updates go bcR.BroadcastStatusRequest() case <-switchToConsensusTicker.C: - height, numPending, _ := bcR.pool.GetStatus() + height, numPending, lenRequesters := bcR.pool.GetStatus() outbound, inbound, _ := bcR.Switch.NumPeers() - bcR.Logger.Info("Consensus ticker", "numPending", numPending, "total", len(bcR.pool.requesters), + bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters, "outbound", outbound, "inbound", inbound) if bcR.pool.IsCaughtUp() { bcR.Logger.Info("Time to switch to consensus reactor!", "height", height) bcR.pool.Stop() conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) - conR.SwitchToConsensus(bcR.state) + conR.SwitchToConsensus(bcR.state, blocksSynced) break FOR_LOOP } @@ -236,14 +250,14 @@ FOR_LOOP: // We need both to sync the first block. break SYNC_LOOP } - firstParts := first.MakePartSet(bcR.state.Params().BlockPartSizeBytes) + firstParts := first.MakePartSet(bcR.state.Params.BlockPartSizeBytes) firstPartsHeader := firstParts.Header() // Finally, verify the first block using the second's commit // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. 
err := bcR.state.Validators.VerifyCommit( - bcR.state.ChainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit) + chainID, types.BlockID{first.Hash(), firstPartsHeader}, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("Error in validation", "err", err) bcR.pool.RedoRequest(first.Height) @@ -262,6 +276,14 @@ FOR_LOOP: // TODO This is bad, are we zombie? cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) } + blocksSynced += 1 + + if blocksSynced%100 == 0 { + lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) + bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, + "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate) + lastHundred = time.Now() + } } } continue FOR_LOOP diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go index 492ea7a8..584aadf3 100644 --- a/blockchain/reactor_test.go +++ b/blockchain/reactor_test.go @@ -1,12 +1,11 @@ package blockchain import ( - "bytes" "testing" wire "github.com/tendermint/go-wire" cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/db" + dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" cfg "github.com/tendermint/tendermint/config" @@ -15,34 +14,30 @@ import ( "github.com/tendermint/tendermint/types" ) -func newBlockchainReactor(logger log.Logger, maxBlockHeight int) *BlockchainReactor { - config := cfg.ResetTestRoot("node_node_test") +func newBlockchainReactor(maxBlockHeight int) *BlockchainReactor { + logger := log.TestingLogger() + config := cfg.ResetTestRoot("blockchain_reactor_test") - blockStoreDB := db.NewDB("blockstore", config.DBBackend, config.DBDir()) - blockStore := NewBlockStore(blockStoreDB) - - stateLogger := logger.With("module", "state") + blockStore := NewBlockStore(dbm.NewMemDB()) // Get State - stateDB := db.NewDB("state", config.DBBackend, config.DBDir()) - state, _ := sm.GetState(stateDB, config.GenesisFile()) 
- - state.SetLogger(stateLogger) + state, _ := sm.GetState(dbm.NewMemDB(), config.GenesisFile()) + state.SetLogger(logger.With("module", "state")) state.Save() // Make the blockchainReactor itself fastSync := true bcReactor := NewBlockchainReactor(state.Copy(), nil, blockStore, fastSync) + bcReactor.SetLogger(logger.With("module", "blockchain")) // Next: we need to set a switch in order for peers to be added in bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig()) - bcReactor.SetLogger(logger.With("module", "blockchain")) // Lastly: let's add some blocks in for blockHeight := 1; blockHeight <= maxBlockHeight; blockHeight++ { firstBlock := makeBlock(blockHeight, state) secondBlock := makeBlock(blockHeight+1, state) - firstParts := firstBlock.MakePartSet(state.Params().BlockGossipParams.BlockPartSizeBytes) + firstParts := firstBlock.MakePartSet(state.Params.BlockGossipParams.BlockPartSizeBytes) blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit) } @@ -50,12 +45,10 @@ func newBlockchainReactor(logger log.Logger, maxBlockHeight int) *BlockchainReac } func TestNoBlockMessageResponse(t *testing.T) { - logBuf := new(bytes.Buffer) - logger := log.NewTMLogger(logBuf) maxBlockHeight := 20 - bcr := newBlockchainReactor(logger, maxBlockHeight) - go bcr.OnStart() + bcr := newBlockchainReactor(maxBlockHeight) + bcr.Start() defer bcr.Stop() // Add some peers in @@ -113,7 +106,7 @@ func makeBlock(blockNumber int, state *sm.State) *types.Block { valHash := state.Validators.Hash() prevBlockID := types.BlockID{prevHash, prevParts} block, _ := types.MakeBlock(blockNumber, "test_chain", makeTxs(blockNumber), - new(types.Commit), prevBlockID, valHash, state.AppHash, state.Params().BlockGossipParams.BlockPartSizeBytes) + new(types.Commit), prevBlockID, valHash, state.AppHash, state.Params.BlockGossipParams.BlockPartSizeBytes) return block } diff --git a/blockchain/store.go b/blockchain/store.go index a96aa0fb..5bf85477 100644 --- a/blockchain/store.go +++ 
b/blockchain/store.go @@ -7,10 +7,10 @@ import ( "io" "sync" + wire "github.com/tendermint/go-wire" + "github.com/tendermint/tendermint/types" . "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/go-wire" - "github.com/tendermint/tendermint/types" ) /* @@ -25,7 +25,8 @@ Currently the precommit signatures are duplicated in the Block parts as well as the Commit. In the future this may change, perhaps by moving the Commit data outside the Block. -Panics indicate probable corruption in the data +// NOTE: BlockStore methods will panic if they encounter errors +// deserializing loaded data, indicating probable corruption on disk. */ type BlockStore struct { db dbm.DB diff --git a/certifiers/client/main_test.go b/certifiers/client/main_test.go new file mode 100644 index 00000000..ab986768 --- /dev/null +++ b/certifiers/client/main_test.go @@ -0,0 +1,25 @@ +package client_test + +import ( + "os" + "testing" + + "github.com/tendermint/abci/example/dummy" + + nm "github.com/tendermint/tendermint/node" + rpctest "github.com/tendermint/tendermint/rpc/test" +) + +var node *nm.Node + +func TestMain(m *testing.M) { + // start a tendermint node (and merkleeyes) in the background to test against + app := dummy.NewDummyApplication() + node = rpctest.StartTendermint(app) + code := m.Run() + + // and shut down proper at the end + node.Stop() + node.Wait() + os.Exit(code) +} diff --git a/certifiers/client/provider.go b/certifiers/client/provider.go new file mode 100644 index 00000000..6240da11 --- /dev/null +++ b/certifiers/client/provider.go @@ -0,0 +1,133 @@ +/* +Package client defines a provider that uses a rpcclient +to get information, which is used to get new headers +and validators directly from a node. 
+*/ +package client + +import ( + "bytes" + + rpcclient "github.com/tendermint/tendermint/rpc/client" + ctypes "github.com/tendermint/tendermint/rpc/core/types" + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +type SignStatusClient interface { + rpcclient.SignClient + rpcclient.StatusClient +} + +type provider struct { + node SignStatusClient + lastHeight int +} + +// NewProvider can wrap any rpcclient to expose it as +// a read-only provider. +func NewProvider(node SignStatusClient) certifiers.Provider { + return &provider{node: node} +} + +// NewProvider can connects to a tendermint json-rpc endpoint +// at the given url, and uses that as a read-only provider. +func NewHTTPProvider(remote string) certifiers.Provider { + return &provider{ + node: rpcclient.NewHTTP(remote, "/websocket"), + } +} + +// StoreCommit is a noop, as clients can only read from the chain... +func (p *provider) StoreCommit(_ certifiers.FullCommit) error { return nil } + +// GetHash gets the most recent validator and sees if it matches +// +// TODO: improve when the rpc interface supports more functionality +func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { + var fc certifiers.FullCommit + vals, err := p.node.Validators(nil) + // if we get no validators, or a different height, return an error + if err != nil { + return fc, err + } + p.updateHeight(vals.BlockHeight) + vhash := types.NewValidatorSet(vals.Validators).Hash() + if !bytes.Equal(hash, vhash) { + return fc, certerr.ErrCommitNotFound() + } + return p.seedFromVals(vals) +} + +// GetByHeight gets the validator set by height +func (p *provider) GetByHeight(h int) (fc certifiers.FullCommit, err error) { + commit, err := p.node.Commit(&h) + if err != nil { + return fc, err + } + return p.seedFromCommit(commit) +} + +func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) { + commit, err 
:= p.GetLatestCommit() + if err != nil { + return fc, err + } + return p.seedFromCommit(commit) +} + +// GetLatestCommit should return the most recent commit there is, +// which handles queries for future heights as per the semantics +// of GetByHeight. +func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { + status, err := p.node.Status() + if err != nil { + return nil, err + } + return p.node.Commit(&status.LatestBlockHeight) +} + +func CommitFromResult(result *ctypes.ResultCommit) certifiers.Commit { + return (certifiers.Commit)(result.SignedHeader) +} + +func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (certifiers.FullCommit, error) { + // now get the commits and build a full commit + commit, err := p.node.Commit(&vals.BlockHeight) + if err != nil { + return certifiers.FullCommit{}, err + } + fc := certifiers.NewFullCommit( + CommitFromResult(commit), + types.NewValidatorSet(vals.Validators), + ) + return fc, nil +} + +func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc certifiers.FullCommit, err error) { + fc.Commit = CommitFromResult(commit) + + // now get the proper validators + vals, err := p.node.Validators(&commit.Header.Height) + if err != nil { + return fc, err + } + + // make sure they match the commit (as we cannot enforce height) + vset := types.NewValidatorSet(vals.Validators) + if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { + return fc, certerr.ErrValidatorsChanged() + } + + p.updateHeight(commit.Header.Height) + fc.Validators = vset + return fc, nil +} + +func (p *provider) updateHeight(h int) { + if h > p.lastHeight { + p.lastHeight = h + } +} diff --git a/certifiers/client/provider_test.go b/certifiers/client/provider_test.go new file mode 100644 index 00000000..c63cd6a1 --- /dev/null +++ b/certifiers/client/provider_test.go @@ -0,0 +1,62 @@ +package client_test + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + rpctest 
"github.com/tendermint/tendermint/rpc/test" + + "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/certifiers/client" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +func TestProvider(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + cfg := rpctest.GetConfig() + rpcAddr := cfg.RPC.ListenAddress + chainID := cfg.ChainID + p := client.NewHTTPProvider(rpcAddr) + require.NotNil(t, p) + + // let it produce some blocks + time.Sleep(500 * time.Millisecond) + + // let's get the highest block + seed, err := p.LatestCommit() + + require.Nil(err, "%+v", err) + sh := seed.Height() + vhash := seed.Header.ValidatorsHash + assert.True(sh < 5000) + + // let's check this is valid somehow + assert.Nil(seed.ValidateBasic(chainID)) + cert := certifiers.NewStatic(chainID, seed.Validators) + + // historical queries now work :) + lower := sh - 5 + seed, err = p.GetByHeight(lower) + assert.Nil(err, "%+v", err) + assert.Equal(lower, seed.Height()) + + // also get by hash (given the match) + seed, err = p.GetByHash(vhash) + require.Nil(err, "%+v", err) + require.Equal(vhash, seed.Header.ValidatorsHash) + err = cert.Certify(seed.Commit) + assert.Nil(err, "%+v", err) + + // get by hash fails without match + seed, err = p.GetByHash([]byte("foobar")) + assert.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) + + // storing the seed silently ignored + err = p.StoreCommit(seed) + assert.Nil(err, "%+v", err) +} diff --git a/certifiers/commit.go b/certifiers/commit.go new file mode 100644 index 00000000..464a48ba --- /dev/null +++ b/certifiers/commit.go @@ -0,0 +1,96 @@ +package certifiers + +import ( + "bytes" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +// Certifier checks the votes to make sure the block really is signed properly. +// Certifier must know the current set of validitors by some other means. 
+type Certifier interface { + Certify(check Commit) error + ChainID() string +} + +// Commit is basically the rpc /commit response, but extended +// +// This is the basepoint for proving anything on the blockchain. It contains +// a signed header. If the signatures are valid and > 2/3 of the known set, +// we can store this checkpoint and use it to prove any number of aspects of +// the system: such as txs, abci state, validator sets, etc... +type Commit types.SignedHeader + +// FullCommit is a commit and the actual validator set, +// the base info you need to update to a given point, +// assuming knowledge of some previous validator set +type FullCommit struct { + Commit `json:"commit"` + Validators *types.ValidatorSet `json:"validator_set"` +} + +func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { + return FullCommit{ + Commit: commit, + Validators: vals, + } +} + +func (c Commit) Height() int { + if c.Header == nil { + return 0 + } + return c.Header.Height +} + +func (c Commit) ValidatorsHash() []byte { + if c.Header == nil { + return nil + } + return c.Header.ValidatorsHash +} + +// ValidateBasic does basic consistency checks and makes sure the headers +// and commits are all consistent and refer to our chain. +// +// Make sure to use a Verifier to validate the signatures actually provide +// a significantly strong proof for this header's validity. 
+func (c Commit) ValidateBasic(chainID string) error { + // make sure the header is reasonable + if c.Header == nil { + return errors.New("Commit missing header") + } + if c.Header.ChainID != chainID { + return errors.Errorf("Header belongs to another chain '%s' not '%s'", + c.Header.ChainID, chainID) + } + + if c.Commit == nil { + return errors.New("Commit missing signatures") + } + + // make sure the header and commit match (height and hash) + if c.Commit.Height() != c.Header.Height { + return certerr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) + } + hhash := c.Header.Hash() + chash := c.Commit.BlockID.Hash + if !bytes.Equal(hhash, chash) { + return errors.Errorf("Commits sign block %X header is block %X", + chash, hhash) + } + + // make sure the commit is reasonable + err := c.Commit.ValidateBasic() + if err != nil { + return errors.WithStack(err) + } + + // looks good, we just need to make sure the signatures are really from + // empowered validators + return nil +} diff --git a/certifiers/doc.go b/certifiers/doc.go new file mode 100644 index 00000000..7566405b --- /dev/null +++ b/certifiers/doc.go @@ -0,0 +1,133 @@ +/* +Package certifiers allows you to securely validate headers +without a full node. + +This library pulls together all the crypto and algorithms, +so given a relatively recent (< unbonding period) known +validator set, one can get indisputable proof that data is in +the chain (current state) or detect if the node is lying to +the client. + +Tendermint RPC exposes a lot of info, but a malicious node +could return any data it wants to queries, or even to block +headers, even making up fake signatures from non-existent +validators to justify it. This is a lot of logic to get +right, to be contained in a small, easy to use library, +that does this for you, so you can just build nice UI. + +We design for clients who have no strong trust relationship +with any tendermint node, just the validator set as a whole. 
+Beyond building nice mobile or desktop applications, the +cosmos hub is another important example of a client, +that needs undeniable proof without syncing the full chain, +in order to efficiently implement IBC. + +Commits + +There are two main data structures that we pass around - Commit +and FullCommit. Both of them mirror what information is +exposed in tendermint rpc. + +Commit is a block header along with enough validator signatures +to prove its validity (> 2/3 of the voting power). A FullCommit +is a Commit along with the full validator set. When the +validator set doesn't change, the Commit is enough, but since +the block header only has a hash, we need the FullCommit to +follow any changes to the validator set. + +Certifiers + +A Certifier validates a new Commit given the currently known +state. There are three different types of Certifiers exposed, +each one building on the last one, with additional complexity. + +Static - given the validator set upon initialization. Verifies +all signatures against that set and if the validator set +changes, it will reject all headers. + +Dynamic - This wraps Static and has the same Certify +method. However, it adds an Update method, which can be called +with a FullCommit when the validator set changes. If it can +prove this is a valid transition, it will update the validator +set. + +Inquiring - this wraps Dynamic and implements an auto-update +strategy on top of the Dynamic update. If a call to +Certify fails as the validator set has changed, then it +attempts to find a FullCommit and Update to that header. +To get these FullCommits, it makes use of a Provider. + +Providers + +A Provider allows us to store and retrieve the FullCommits, +to provide memory to the Inquiring Certifier. + +NewMemStoreProvider - in-memory cache. + +files.NewProvider - disk backed storage. + +client.NewHTTPProvider - query tendermint rpc. + +NewCacheProvider - combine multiple providers. 
+ +The suggested use for local light clients is +client.NewHTTPProvider for getting new data (Source), +and NewCacheProvider(NewMemStoreProvider(), +files.NewProvider()) to store confirmed headers (Trusted) + +How We Track Validators + +Unless you want to blindly trust the node you talk with, you +need to trace every response back to a hash in a block header +and validate the commit signatures of that block header match +the proper validator set. If there is a contant validator +set, you store it locally upon initialization of the client, +and check against that every time. + +Once there is a dynamic validator set, the issue of +verifying a block becomes a bit more tricky. There is +background information in a +github issue (https://github.com/tendermint/tendermint/issues/377). + +In short, if there is a block at height H with a known +(trusted) validator set V, and another block at height H' +(H' > H) with validator set V' != V, then we want a way to +safely update it. + +First, get the new (unconfirmed) validator set V' and +verify H' is internally consistent and properly signed by +this V'. Assuming it is a valid block, we check that at +least 2/3 of the validators in V also signed it, meaning +it would also be valid under our old assumptions. +That should be enough, but we can also check that the +V counts for at least 2/3 of the total votes in H' +for extra safety (we can have a discussion if this is +strictly required). If we can verify all this, +then we can accept H' and V' as valid and use that to +validate all blocks X > H'. + +If we cannot update directly from H -> H' because there was +too much change to the validator set, then we can look for +some Hm (H < Hm < H') with a validator set Vm. Then we try +to update H -> Hm and Hm -> H' in two separate steps. +If one of these steps doesn't work, then we continue +bisecting, until we eventually have to externally +validate the valdiator set changes at every block. 
+ +Since we never trust any server in this protocol, only the +signatures themselves, it doesn't matter if the seed comes +from a (possibly malicious) node or a (possibly malicious) user. +We can accept it or reject it based only on our trusted +validator set and cryptographic proofs. This makes it +extremely important to verify that you have the proper +validator set when initializing the client, as that is the +root of all trust. + +Or course, this assumes that the known block is within the +unbonding period to avoid the "nothing at stake" problem. +If you haven't seen the state in a few months, you will need +to manually verify the new validator set hash using off-chain +means (the same as getting the initial hash). + +*/ +package certifiers diff --git a/certifiers/dynamic.go b/certifiers/dynamic.go new file mode 100644 index 00000000..b4017794 --- /dev/null +++ b/certifiers/dynamic.go @@ -0,0 +1,89 @@ +package certifiers + +import ( + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +var _ Certifier = &Dynamic{} + +// Dynamic uses a Static for Certify, but adds an +// Update method to allow for a change of validators. +// +// You can pass in a FullCommit with another validator set, +// and if this is a provably secure transition (< 1/3 change, +// sufficient signatures), then it will update the +// validator set for the next Certify call. +// For security, it will only follow validator set changes +// going forward. 
+type Dynamic struct { + cert *Static + lastHeight int +} + +func NewDynamic(chainID string, vals *types.ValidatorSet, height int) *Dynamic { + return &Dynamic{ + cert: NewStatic(chainID, vals), + lastHeight: height, + } +} + +func (c *Dynamic) ChainID() string { + return c.cert.ChainID() +} + +func (c *Dynamic) Validators() *types.ValidatorSet { + return c.cert.vSet +} + +func (c *Dynamic) Hash() []byte { + return c.cert.Hash() +} + +func (c *Dynamic) LastHeight() int { + return c.lastHeight +} + +// Certify handles this with +func (c *Dynamic) Certify(check Commit) error { + err := c.cert.Certify(check) + if err == nil { + // update last seen height if input is valid + c.lastHeight = check.Height() + } + return err +} + +// Update will verify if this is a valid change and update +// the certifying validator set if safe to do so. +// +// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) +func (c *Dynamic) Update(fc FullCommit) error { + // ignore all checkpoints in the past -> only to the future + h := fc.Height() + if h <= c.lastHeight { + return certerr.ErrPastTime() + } + + // first, verify if the input is self-consistent.... + err := fc.ValidateBasic(c.ChainID()) + if err != nil { + return err + } + + // now, make sure not too much change... 
meaning this commit + // would be approved by the currently known validator set + // as well as the new set + commit := fc.Commit.Commit + err = c.Validators().VerifyCommitAny(fc.Validators, c.ChainID(), + commit.BlockID, h, commit) + if err != nil { + return certerr.ErrTooMuchChange() + } + + // looks good, we can update + c.cert = NewStatic(c.ChainID(), fc.Validators) + c.lastHeight = h + return nil +} diff --git a/certifiers/dynamic_test.go b/certifiers/dynamic_test.go new file mode 100644 index 00000000..2c921099 --- /dev/null +++ b/certifiers/dynamic_test.go @@ -0,0 +1,130 @@ +package certifiers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/certifiers/errors" +) + +// TestDynamicCert just makes sure it still works like StaticCert +func TestDynamicCert(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + assert := assert.New(t) + // require := require.New(t) + + keys := certifiers.GenValKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
+ vals := keys.ToValidators(20, 10) + // and a certifier based on our known set + chainID := "test-dyno" + cert := certifiers.NewDynamic(chainID, vals, 0) + + cases := []struct { + keys certifiers.ValKeys + vals *types.ValidatorSet + height int + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // perfect, signed by everyone + {keys, vals, 1, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 2, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 3, 0, len(keys) - 1, false, false}, + // even changing the power a little bit breaks the static validator + // the sigs are enough, but the validator hash is unknown + {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + } + + for _, tc := range cases { + check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + []byte("bar"), tc.first, tc.last) + err := cert.Certify(check) + if tc.proper { + assert.Nil(err, "%+v", err) + assert.Equal(cert.LastHeight(), tc.height) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) + } + } + } +} + +// TestDynamicUpdate makes sure we update safely and sanely +func TestDynamicUpdate(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + chainID := "test-dyno-up" + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(20, 0) + cert := certifiers.NewDynamic(chainID, vals, 40) + + // one valid block to give us a sense of time + h := 100 + good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), 0, len(keys)) + err := cert.Certify(good) + require.Nil(err, "%+v", err) + + // some new sets to try later + keys2 := keys.Extend(2) + keys3 := keys2.Extend(4) + + // we try to update with some blocks + cases := []struct { + keys certifiers.ValKeys + vals *types.ValidatorSet + height int + first, last int // who actually signs + proper bool // true -> expect no error + 
changed bool // true -> expect too much change error + }{ + // same validator set, well signed, of course it is okay + {keys, vals, h + 10, 0, len(keys), true, false}, + // same validator set, poorly signed, fails + {keys, vals, h + 20, 2, len(keys), false, false}, + + // shift the power a little, works if properly signed + {keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, + // but not on a poor signature + {keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, + // and not if it was in the past + {keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, + + // let's try to adjust to a whole new validator set (we have 5/7 of the votes) + {keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, + + // properly signed but too much change, not allowed (only 7/11 validators known) + {keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, + } + + for _, tc := range cases { + fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, + []byte("bar"), tc.first, tc.last) + err := cert.Update(fc) + if tc.proper { + assert.Nil(err, "%d: %+v", tc.height, err) + // we update last seen height + assert.Equal(cert.LastHeight(), tc.height) + // and we update the proper validators + assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) + } else { + assert.NotNil(err, "%d", tc.height) + // we don't update the height + assert.NotEqual(cert.LastHeight(), tc.height) + if tc.changed { + assert.True(errors.IsTooMuchChangeErr(err), + "%d: %+v", tc.height, err) + } + } + } +} diff --git a/certifiers/errors/errors.go b/certifiers/errors/errors.go new file mode 100644 index 00000000..c716c8fc --- /dev/null +++ b/certifiers/errors/errors.go @@ -0,0 +1,86 @@ +package errors + +import ( + "fmt" + + "github.com/pkg/errors" +) + +var ( + errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier") + errCommitNotFound = fmt.Errorf("Commit not found by provider") + errTooMuchChange = 
fmt.Errorf("Validators change too much to safely update") + errPastTime = fmt.Errorf("Update older than certifier height") + errNoPathFound = fmt.Errorf("Cannot find a path of validators") +) + +// IsCommitNotFoundErr checks whether an error is due to missing data +func IsCommitNotFoundErr(err error) bool { + return err != nil && (errors.Cause(err) == errCommitNotFound) +} + +func ErrCommitNotFound() error { + return errors.WithStack(errCommitNotFound) +} + +// IsValidatorsChangedErr checks whether an error is due +// to a differing validator set +func IsValidatorsChangedErr(err error) bool { + return err != nil && (errors.Cause(err) == errValidatorsChanged) +} + +func ErrValidatorsChanged() error { + return errors.WithStack(errValidatorsChanged) +} + +// IsTooMuchChangeErr checks whether an error is due to too much change +// between these validators sets +func IsTooMuchChangeErr(err error) bool { + return err != nil && (errors.Cause(err) == errTooMuchChange) +} + +func ErrTooMuchChange() error { + return errors.WithStack(errTooMuchChange) +} + +func IsPastTimeErr(err error) bool { + return err != nil && (errors.Cause(err) == errPastTime) +} + +func ErrPastTime() error { + return errors.WithStack(errPastTime) +} + +// IsNoPathFoundErr checks whether an error is due to no path of +// validators in provider from where we are to where we want to be +func IsNoPathFoundErr(err error) bool { + return err != nil && (errors.Cause(err) == errNoPathFound) +} + +func ErrNoPathFound() error { + return errors.WithStack(errNoPathFound) +} + +//-------------------------------------------- + +type errHeightMismatch struct { + h1, h2 int +} + +func (e errHeightMismatch) Error() string { + return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2) +} + +// IsHeightMismatchErr checks whether an error is due to data from different blocks +func IsHeightMismatchErr(err error) bool { + if err == nil { + return false + } + _, ok := errors.Cause(err).(errHeightMismatch) + return ok 
+} + +// ErrHeightMismatch returns an mismatch error with stack-trace +func ErrHeightMismatch(h1, h2 int) error { + return errors.WithStack(errHeightMismatch{h1, h2}) +} diff --git a/certifiers/errors/errors_test.go b/certifiers/errors/errors_test.go new file mode 100644 index 00000000..479215e4 --- /dev/null +++ b/certifiers/errors/errors_test.go @@ -0,0 +1,18 @@ +package errors + +import ( + "errors" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestErrorHeight(t *testing.T) { + e1 := ErrHeightMismatch(2, 3) + e1.Error() + assert.True(t, IsHeightMismatchErr(e1)) + + e2 := errors.New("foobar") + assert.False(t, IsHeightMismatchErr(e2)) + assert.False(t, IsHeightMismatchErr(nil)) +} diff --git a/certifiers/files/commit.go b/certifiers/files/commit.go new file mode 100644 index 00000000..18994f0f --- /dev/null +++ b/certifiers/files/commit.go @@ -0,0 +1,77 @@ +package files + +import ( + "encoding/json" + "os" + + "github.com/pkg/errors" + + wire "github.com/tendermint/go-wire" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +const ( + // MaxFullCommitSize is the maximum number of bytes we will + // read in for a full commit to avoid excessive allocations + // in the deserializer + MaxFullCommitSize = 1024 * 1024 +) + +// SaveFullCommit exports the seed in binary / go-wire style +func SaveFullCommit(fc certifiers.FullCommit, path string) error { + f, err := os.Create(path) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + + var n int + wire.WriteBinary(fc, f, &n, &err) + return errors.WithStack(err) +} + +// SaveFullCommitJSON exports the seed in a json format +func SaveFullCommitJSON(fc certifiers.FullCommit, path string) error { + f, err := os.Create(path) + if err != nil { + return errors.WithStack(err) + } + defer f.Close() + stream := json.NewEncoder(f) + err = stream.Encode(fc) + return errors.WithStack(err) +} + +func LoadFullCommit(path string) 
(certifiers.FullCommit, error) { + var fc certifiers.FullCommit + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return fc, certerr.ErrCommitNotFound() + } + return fc, errors.WithStack(err) + } + defer f.Close() + + var n int + wire.ReadBinaryPtr(&fc, f, MaxFullCommitSize, &n, &err) + return fc, errors.WithStack(err) +} + +func LoadFullCommitJSON(path string) (certifiers.FullCommit, error) { + var fc certifiers.FullCommit + f, err := os.Open(path) + if err != nil { + if os.IsNotExist(err) { + return fc, certerr.ErrCommitNotFound() + } + return fc, errors.WithStack(err) + } + defer f.Close() + + stream := json.NewDecoder(f) + err = stream.Decode(&fc) + return fc, errors.WithStack(err) +} diff --git a/certifiers/files/commit_test.go b/certifiers/files/commit_test.go new file mode 100644 index 00000000..934ab7b6 --- /dev/null +++ b/certifiers/files/commit_test.go @@ -0,0 +1,66 @@ +package files + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + cmn "github.com/tendermint/tmlibs/common" + + "github.com/tendermint/tendermint/certifiers" +) + +func tmpFile() string { + suffix := cmn.RandStr(16) + return filepath.Join(os.TempDir(), "fc-test-"+suffix) +} + +func TestSerializeFullCommits(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + // some constants + appHash := []byte("some crazy thing") + chainID := "ser-ial" + h := 25 + + // build a fc + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(10, 0) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) + + require.Equal(h, fc.Height()) + require.Equal(vals.Hash(), fc.ValidatorsHash()) + + // try read/write with json + jfile := tmpFile() + defer os.Remove(jfile) + jseed, err := LoadFullCommitJSON(jfile) + assert.NotNil(err) + err = SaveFullCommitJSON(fc, jfile) + require.Nil(err) + jseed, err = LoadFullCommitJSON(jfile) + assert.Nil(err, "%+v", err) + assert.Equal(h, 
jseed.Height()) + assert.Equal(vals.Hash(), jseed.ValidatorsHash()) + + // try read/write with binary + bfile := tmpFile() + defer os.Remove(bfile) + bseed, err := LoadFullCommit(bfile) + assert.NotNil(err) + err = SaveFullCommit(fc, bfile) + require.Nil(err) + bseed, err = LoadFullCommit(bfile) + assert.Nil(err, "%+v", err) + assert.Equal(h, bseed.Height()) + assert.Equal(vals.Hash(), bseed.ValidatorsHash()) + + // make sure they don't read the other format (different) + _, err = LoadFullCommit(jfile) + assert.NotNil(err) + _, err = LoadFullCommitJSON(bfile) + assert.NotNil(err) +} diff --git a/certifiers/files/provider.go b/certifiers/files/provider.go new file mode 100644 index 00000000..9dcfb169 --- /dev/null +++ b/certifiers/files/provider.go @@ -0,0 +1,134 @@ +/* +Package files defines a Provider that stores all data in the filesystem + +We assume the same validator hash may be reused by many different +headers/Commits, and thus store it separately. This leaves us +with three issues: + + 1. Given a validator hash, retrieve the validator set if previously stored + 2. Given a block height, find the Commit with the highest height <= h + 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2 + +Note that we do not worry about caching, as that can be achieved by +pairing this with a MemStoreProvider and CacheProvider from certifiers +*/ +package files + +import ( + "encoding/hex" + "fmt" + "math" + "os" + "path/filepath" + "sort" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +const ( + Ext = ".tsd" + ValDir = "validators" + CheckDir = "checkpoints" + dirPerm = os.FileMode(0755) + filePerm = os.FileMode(0644) +) + +type provider struct { + valDir string + checkDir string +} + +// NewProvider creates the parent dir and subdirs +// for validators and checkpoints as needed +func NewProvider(dir string) certifiers.Provider { + valDir := filepath.Join(dir, ValDir) + checkDir := filepath.Join(dir, CheckDir) + for _, d := range []string{valDir, checkDir} { + err := os.MkdirAll(d, dirPerm) + if err != nil { + panic(err) + } + } + return &provider{valDir: valDir, checkDir: checkDir} +} + +func (p *provider) encodeHash(hash []byte) string { + return hex.EncodeToString(hash) + Ext +} + +func (p *provider) encodeHeight(h int) string { + // pad up to 10^12 for height... + return fmt.Sprintf("%012d%s", h, Ext) +} + +func (p *provider) StoreCommit(fc certifiers.FullCommit) error { + // make sure the fc is self-consistent before saving + err := fc.ValidateBasic(fc.Commit.Header.ChainID) + if err != nil { + return err + } + + paths := []string{ + filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), + filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), + } + for _, path := range paths { + err := SaveFullCommit(fc, path) + // unknown error in creating or writing immediately breaks + if err != nil { + return err + } + } + return nil +} + +func (p *provider) GetByHeight(h int) (certifiers.FullCommit, error) { + // first we look for exact match, then search... 
+ path := filepath.Join(p.checkDir, p.encodeHeight(h)) + fc, err := LoadFullCommit(path) + if certerr.IsCommitNotFoundErr(err) { + path, err = p.searchForHeight(h) + if err == nil { + fc, err = LoadFullCommit(path) + } + } + return fc, err +} + +func (p *provider) LatestCommit() (fc certifiers.FullCommit, err error) { + // Note to future: please update by 2077 to avoid rollover + return p.GetByHeight(math.MaxInt32 - 1) +} + +// search for height, looks for a file with highest height < h +// return certifiers.ErrCommitNotFound() if not there... +func (p *provider) searchForHeight(h int) (string, error) { + d, err := os.Open(p.checkDir) + if err != nil { + return "", errors.WithStack(err) + } + files, err := d.Readdirnames(0) + + d.Close() + if err != nil { + return "", errors.WithStack(err) + } + + desired := p.encodeHeight(h) + sort.Strings(files) + i := sort.SearchStrings(files, desired) + if i == 0 { + return "", certerr.ErrCommitNotFound() + } + found := files[i-1] + path := filepath.Join(p.checkDir, found) + return path, errors.WithStack(err) +} + +func (p *provider) GetByHash(hash []byte) (certifiers.FullCommit, error) { + path := filepath.Join(p.valDir, p.encodeHash(hash)) + return LoadFullCommit(path) +} diff --git a/certifiers/files/provider_test.go b/certifiers/files/provider_test.go new file mode 100644 index 00000000..05e8f59d --- /dev/null +++ b/certifiers/files/provider_test.go @@ -0,0 +1,96 @@ +package files_test + +import ( + "bytes" + "errors" + "io/ioutil" + "os" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/certifiers" + certerr "github.com/tendermint/tendermint/certifiers/errors" + "github.com/tendermint/tendermint/certifiers/files" +) + +func checkEqual(stored, loaded certifiers.FullCommit, chainID string) error { + err := loaded.ValidateBasic(chainID) + if err != nil { + return err + } + if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { + 
return errors.New("Different block hashes") + } + return nil +} + +func TestFileProvider(t *testing.T) { + assert, require := assert.New(t), require.New(t) + + dir, err := ioutil.TempDir("", "fileprovider-test") + assert.Nil(err) + defer os.RemoveAll(dir) + p := files.NewProvider(dir) + + chainID := "test-files" + appHash := []byte("some-data") + keys := certifiers.GenValKeys(5) + count := 10 + + // make a bunch of seeds... + seeds := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // two seeds for each validator, to check how we handle dups + // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... + vals := keys.ToValidators(10, int64(count/2)) + h := 20 + 10*i + check := keys.GenCommit(chainID, h, nil, vals, appHash, 0, 5) + seeds[i] = certifiers.NewFullCommit(check, vals) + } + + // check provider is empty + seed, err := p.GetByHeight(20) + require.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) + + seed, err = p.GetByHash(seeds[3].ValidatorsHash()) + require.NotNil(err) + assert.True(certerr.IsCommitNotFoundErr(err)) + + // now add them all to the provider + for _, s := range seeds { + err = p.StoreCommit(s) + require.Nil(err) + // and make sure we can get it back + s2, err := p.GetByHash(s.ValidatorsHash()) + assert.Nil(err) + err = checkEqual(s, s2, chainID) + assert.Nil(err) + // by height as well + s2, err = p.GetByHeight(s.Height()) + err = checkEqual(s, s2, chainID) + assert.Nil(err) + } + + // make sure we get the last hash if we overstep + seed, err = p.GetByHeight(5000) + if assert.Nil(err, "%+v", err) { + assert.Equal(seeds[count-1].Height(), seed.Height()) + err = checkEqual(seeds[count-1], seed, chainID) + assert.Nil(err) + } + + // and middle ones as well + seed, err = p.GetByHeight(47) + if assert.Nil(err, "%+v", err) { + // we only step by 10, so 40 must be the one below this + assert.Equal(40, seed.Height()) + } + + // and proper error for too low + _, err = p.GetByHeight(5) + assert.NotNil(err) + 
assert.True(certerr.IsCommitNotFoundErr(err)) +} diff --git a/certifiers/helper.go b/certifiers/helper.go new file mode 100644 index 00000000..6f2daa63 --- /dev/null +++ b/certifiers/helper.go @@ -0,0 +1,147 @@ +package certifiers + +import ( + "time" + + crypto "github.com/tendermint/go-crypto" + + "github.com/tendermint/tendermint/types" +) + +// ValKeys is a helper for testing. +// +// It lets us simulate signing with many keys, either ed25519 or secp256k1. +// The main use case is to create a set, and call GenCommit +// to get propely signed header for testing. +// +// You can set different weights of validators each time you call +// ToValidators, and can optionally extend the validator set later +// with Extend or ExtendSecp +type ValKeys []crypto.PrivKey + +// GenValKeys produces an array of private keys to generate commits +func GenValKeys(n int) ValKeys { + res := make(ValKeys, n) + for i := range res { + res[i] = crypto.GenPrivKeyEd25519().Wrap() + } + return res +} + +// Change replaces the key at index i +func (v ValKeys) Change(i int) ValKeys { + res := make(ValKeys, len(v)) + copy(res, v) + res[i] = crypto.GenPrivKeyEd25519().Wrap() + return res +} + +// Extend adds n more keys (to remove, just take a slice) +func (v ValKeys) Extend(n int) ValKeys { + extra := GenValKeys(n) + return append(v, extra...) +} + +// GenSecpValKeys produces an array of secp256k1 private keys to generate commits +func GenSecpValKeys(n int) ValKeys { + res := make(ValKeys, n) + for i := range res { + res[i] = crypto.GenPrivKeySecp256k1().Wrap() + } + return res +} + +// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice) +func (v ValKeys) ExtendSecp(n int) ValKeys { + extra := GenSecpValKeys(n) + return append(v, extra...) 
+} + +// ToValidators produces a list of validators from the set of keys +// The first key has weight `init` and it increases by `inc` every step +// so we can have all the same weight, or a simple linear distribution +// (should be enough for testing) +func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { + res := make([]*types.Validator, len(v)) + for i, k := range v { + res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) + } + return types.NewValidatorSet(res) +} + +// signHeader properly signs the header with all keys from first to last exclusive +func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { + votes := make([]*types.Vote, len(v)) + + // we need this list to keep the ordering... + vset := v.ToValidators(1, 0) + + // fill in the votes we want + for i := first; i < last; i++ { + vote := makeVote(header, vset, v[i]) + votes[vote.ValidatorIndex] = vote + } + + res := &types.Commit{ + BlockID: types.BlockID{Hash: header.Hash()}, + Precommits: votes, + } + return res +} + +func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { + addr := key.PubKey().Address() + idx, _ := vals.GetByAddress(addr) + vote := &types.Vote{ + ValidatorAddress: addr, + ValidatorIndex: idx, + Height: header.Height, + Round: 1, + Type: types.VoteTypePrecommit, + BlockID: types.BlockID{Hash: header.Hash()}, + } + // Sign it + signBytes := types.SignBytes(header.ChainID, vote) + vote.Signature = key.Sign(signBytes) + return vote +} + +func genHeader(chainID string, height int, txs types.Txs, + vals *types.ValidatorSet, appHash []byte) *types.Header { + + return &types.Header{ + ChainID: chainID, + Height: height, + Time: time.Now(), + NumTxs: len(txs), + // LastBlockID + // LastCommitHash + ValidatorsHash: vals.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + } +} + +// GenCommit calls genHeader and signHeader and combines them into a Commit +func (v ValKeys) GenCommit(chainID string, height 
int, txs types.Txs, + vals *types.ValidatorSet, appHash []byte, first, last int) Commit { + + header := genHeader(chainID, height, txs, vals, appHash) + check := Commit{ + Header: header, + Commit: v.signHeader(header, first, last), + } + return check +} + +// GenFullCommit calls genHeader and signHeader and combines them into a Commit +func (v ValKeys) GenFullCommit(chainID string, height int, txs types.Txs, + vals *types.ValidatorSet, appHash []byte, first, last int) FullCommit { + + header := genHeader(chainID, height, txs, vals, appHash) + commit := Commit{ + Header: header, + Commit: v.signHeader(header, first, last), + } + return NewFullCommit(commit, vals) +} diff --git a/certifiers/inquirer.go b/certifiers/inquirer.go new file mode 100644 index 00000000..460b622a --- /dev/null +++ b/certifiers/inquirer.go @@ -0,0 +1,142 @@ +package certifiers + +import ( + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +type Inquiring struct { + cert *Dynamic + // These are only properly validated data, from local system + trusted Provider + // This is a source of new info, like a node rpc, or other import method + Source Provider +} + +func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provider) *Inquiring { + // store the data in trusted + trusted.StoreCommit(fc) + + return &Inquiring{ + cert: NewDynamic(chainID, fc.Validators, fc.Height()), + trusted: trusted, + Source: source, + } +} + +func (c *Inquiring) ChainID() string { + return c.cert.ChainID() +} + +func (c *Inquiring) Validators() *types.ValidatorSet { + return c.cert.cert.vSet +} + +func (c *Inquiring) LastHeight() int { + return c.cert.lastHeight +} + +// Certify makes sure this is checkpoint is valid. +// +// If the validators have changed since the last know time, it looks +// for a path to prove the new validators. 
+// +// On success, it will store the checkpoint in the store for later viewing +func (c *Inquiring) Certify(commit Commit) error { + err := c.useClosestTrust(commit.Height()) + if err != nil { + return err + } + + err = c.cert.Certify(commit) + if !certerr.IsValidatorsChangedErr(err) { + return err + } + err = c.updateToHash(commit.Header.ValidatorsHash) + if err != nil { + return err + } + + err = c.cert.Certify(commit) + if err != nil { + return err + } + + // store the new checkpoint + c.trusted.StoreCommit( + NewFullCommit(commit, c.Validators())) + return nil +} + +func (c *Inquiring) Update(fc FullCommit) error { + err := c.useClosestTrust(fc.Height()) + if err != nil { + return err + } + + err = c.cert.Update(fc) + if err == nil { + c.trusted.StoreCommit(fc) + } + return err +} + +func (c *Inquiring) useClosestTrust(h int) error { + closest, err := c.trusted.GetByHeight(h) + if err != nil { + return err + } + + // if the best seed is not the one we currently use, + // let's just reset the dynamic validator + if closest.Height() != c.LastHeight() { + c.cert = NewDynamic(c.ChainID(), closest.Validators, closest.Height()) + } + return nil +} + +// updateToHash gets the validator hash we want to update to +// if IsTooMuchChangeErr, we try to find a path by binary search over height +func (c *Inquiring) updateToHash(vhash []byte) error { + // try to get the match, and update + fc, err := c.Source.GetByHash(vhash) + if err != nil { + return err + } + err = c.cert.Update(fc) + // handle IsTooMuchChangeErr by using divide and conquer + if certerr.IsTooMuchChangeErr(err) { + err = c.updateToHeight(fc.Height()) + } + return err +} + +// updateToHeight will use divide-and-conquer to find a path to h +func (c *Inquiring) updateToHeight(h int) error { + // try to update to this height (with checks) + fc, err := c.Source.GetByHeight(h) + if err != nil { + return err + } + start, end := c.LastHeight(), fc.Height() + if end <= start { + return certerr.ErrNoPathFound() + } 
+ err = c.Update(fc) + + // we can handle IsTooMuchChangeErr specially + if !certerr.IsTooMuchChangeErr(err) { + return err + } + + // try to update to mid + mid := (start + end) / 2 + err = c.updateToHeight(mid) + if err != nil { + return err + } + + // if we made it to mid, we recurse + return c.updateToHeight(h) +} diff --git a/certifiers/inquirer_test.go b/certifiers/inquirer_test.go new file mode 100644 index 00000000..2a0ee555 --- /dev/null +++ b/certifiers/inquirer_test.go @@ -0,0 +1,165 @@ +package certifiers_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/certifiers" +) + +func TestInquirerValidPath(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := certifiers.NewMemStoreProvider() + source := certifiers.NewMemStoreProvider() + + // set up the validators to generate test blocks + var vote int64 = 10 + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(vote, 0) + + // construct a bunch of commits, each with one more height than the last + chainID := "inquiry-test" + count := 50 + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // extend the keys by 1 each time + keys = keys.Extend(1) + vals = keys.ToValidators(vote, 0) + h := 20 + 10*i + appHash := []byte(fmt.Sprintf("h=%d", h)) + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } + + // initialize a certifier with the initial state + cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + + // this should fail validation.... 
+ commit := commits[count-1].Commit + err := cert.Certify(commit) + require.NotNil(err) + + // add a few seed in the middle should be insufficient + for i := 10; i < 13; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.NotNil(err) + + // with more info, we succeed + for i := 0; i < count; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.Nil(err, "%+v", err) +} + +func TestInquirerMinimalPath(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := certifiers.NewMemStoreProvider() + source := certifiers.NewMemStoreProvider() + + // set up the validators to generate test blocks + var vote int64 = 10 + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(vote, 0) + + // construct a bunch of commits, each with one more height than the last + chainID := "minimal-path" + count := 12 + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // extend the validators, so we are just below 2/3 + keys = keys.Extend(len(keys)/2 - 1) + vals = keys.ToValidators(vote, 0) + h := 5 + 10*i + appHash := []byte(fmt.Sprintf("h=%d", h)) + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } + + // initialize a certifier with the initial state + cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + + // this should fail validation.... 
+ commit := commits[count-1].Commit + err := cert.Certify(commit) + require.NotNil(err) + + // add a few seed in the middle should be insufficient + for i := 5; i < 8; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.NotNil(err) + + // with more info, we succeed + for i := 0; i < count; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + err = cert.Certify(commit) + assert.Nil(err, "%+v", err) +} + +func TestInquirerVerifyHistorical(t *testing.T) { + assert, require := assert.New(t), require.New(t) + trust := certifiers.NewMemStoreProvider() + source := certifiers.NewMemStoreProvider() + + // set up the validators to generate test blocks + var vote int64 = 10 + keys := certifiers.GenValKeys(5) + vals := keys.ToValidators(vote, 0) + + // construct a bunch of commits, each with one more height than the last + chainID := "inquiry-test" + count := 10 + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // extend the keys by 1 each time + keys = keys.Extend(1) + vals = keys.ToValidators(vote, 0) + h := 20 + 10*i + appHash := []byte(fmt.Sprintf("h=%d", h)) + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } + + // initialize a certifier with the initial state + cert := certifiers.NewInquiring(chainID, commits[0], trust, source) + + // store a few commits as trust + for _, i := range []int{2, 5} { + trust.StoreCommit(commits[i]) + } + + // let's see if we can jump forward using trusted commits + err := source.StoreCommit(commits[7]) + require.Nil(err, "%+v", err) + check := commits[7].Commit + err = cert.Certify(check) + require.Nil(err, "%+v", err) + assert.Equal(check.Height(), cert.LastHeight()) + + // add access to all commits via untrusted source + for i := 0; i < count; i++ { + err := source.StoreCommit(commits[i]) + require.Nil(err) + } + + // try to check an unknown seed in the past + mid := commits[3].Commit + err = 
cert.Certify(mid) + require.Nil(err, "%+v", err) + assert.Equal(mid.Height(), cert.LastHeight()) + + // and jump all the way forward again + end := commits[count-1].Commit + err = cert.Certify(end) + require.Nil(err, "%+v", err) + assert.Equal(end.Height(), cert.LastHeight()) +} diff --git a/certifiers/memprovider.go b/certifiers/memprovider.go new file mode 100644 index 00000000..cdad75e4 --- /dev/null +++ b/certifiers/memprovider.go @@ -0,0 +1,78 @@ +package certifiers + +import ( + "encoding/hex" + "sort" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +type memStoreProvider struct { + // byHeight is always sorted by Height... need to support range search (nil, h] + // btree would be more efficient for larger sets + byHeight fullCommits + byHash map[string]FullCommit +} + +// fullCommits just exists to allow easy sorting +type fullCommits []FullCommit + +func (s fullCommits) Len() int { return len(s) } +func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +func (s fullCommits) Less(i, j int) bool { + return s[i].Height() < s[j].Height() +} + +func NewMemStoreProvider() Provider { + return &memStoreProvider{ + byHeight: fullCommits{}, + byHash: map[string]FullCommit{}, + } +} + +func (m *memStoreProvider) encodeHash(hash []byte) string { + return hex.EncodeToString(hash) +} + +func (m *memStoreProvider) StoreCommit(fc FullCommit) error { + // make sure the fc is self-consistent before saving + err := fc.ValidateBasic(fc.Commit.Header.ChainID) + if err != nil { + return err + } + + // store the valid fc + key := m.encodeHash(fc.ValidatorsHash()) + m.byHash[key] = fc + m.byHeight = append(m.byHeight, fc) + sort.Sort(m.byHeight) + return nil +} + +func (m *memStoreProvider) GetByHeight(h int) (FullCommit, error) { + // search from highest to lowest + for i := len(m.byHeight) - 1; i >= 0; i-- { + fc := m.byHeight[i] + if fc.Height() <= h { + return fc, nil + } + } + return FullCommit{}, certerr.ErrCommitNotFound() +} + +func (m 
*memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { + var err error + fc, ok := m.byHash[m.encodeHash(hash)] + if !ok { + err = certerr.ErrCommitNotFound() + } + return fc, err +} + +func (m *memStoreProvider) LatestCommit() (FullCommit, error) { + l := len(m.byHeight) + if l == 0 { + return FullCommit{}, certerr.ErrCommitNotFound() + } + return m.byHeight[l-1], nil +} diff --git a/certifiers/performance_test.go b/certifiers/performance_test.go new file mode 100644 index 00000000..2a6c6ced --- /dev/null +++ b/certifiers/performance_test.go @@ -0,0 +1,116 @@ +package certifiers_test + +import ( + "fmt" + "testing" + + "github.com/tendermint/tendermint/certifiers" +) + +func BenchmarkGenCommit20(b *testing.B) { + keys := certifiers.GenValKeys(20) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommit100(b *testing.B) { + keys := certifiers.GenValKeys(100) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommitSec20(b *testing.B) { + keys := certifiers.GenSecpValKeys(20) + benchmarkGenCommit(b, keys) +} + +func BenchmarkGenCommitSec100(b *testing.B) { + keys := certifiers.GenSecpValKeys(100) + benchmarkGenCommit(b, keys) +} + +func benchmarkGenCommit(b *testing.B, keys certifiers.ValKeys) { + chainID := fmt.Sprintf("bench-%d", len(keys)) + vals := keys.ToValidators(20, 10) + for i := 0; i < b.N; i++ { + h := 1 + i + appHash := []byte(fmt.Sprintf("h=%d", h)) + keys.GenCommit(chainID, h, nil, vals, appHash, 0, len(keys)) + } +} + +// this benchmarks generating one key +func BenchmarkGenValKeys(b *testing.B) { + keys := certifiers.GenValKeys(20) + for i := 0; i < b.N; i++ { + keys = keys.Extend(1) + } +} + +// this benchmarks generating one key +func BenchmarkGenSecpValKeys(b *testing.B) { + keys := certifiers.GenSecpValKeys(20) + for i := 0; i < b.N; i++ { + keys = keys.Extend(1) + } +} + +func BenchmarkToValidators20(b *testing.B) { + benchmarkToValidators(b, 20) +} + +func BenchmarkToValidators100(b *testing.B) { + benchmarkToValidators(b, 100) +} 
+ +// this benchmarks constructing the validator set (.PubKey() * nodes) +func benchmarkToValidators(b *testing.B, nodes int) { + keys := certifiers.GenValKeys(nodes) + for i := 1; i <= b.N; i++ { + keys.ToValidators(int64(2*i), int64(i)) + } +} + +func BenchmarkToValidatorsSec100(b *testing.B) { + benchmarkToValidatorsSec(b, 100) +} + +// this benchmarks constructing the validator set (.PubKey() * nodes) +func benchmarkToValidatorsSec(b *testing.B, nodes int) { + keys := certifiers.GenSecpValKeys(nodes) + for i := 1; i <= b.N; i++ { + keys.ToValidators(int64(2*i), int64(i)) + } +} + +func BenchmarkCertifyCommit20(b *testing.B) { + keys := certifiers.GenValKeys(20) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommit100(b *testing.B) { + keys := certifiers.GenValKeys(100) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommitSec20(b *testing.B) { + keys := certifiers.GenSecpValKeys(20) + benchmarkCertifyCommit(b, keys) +} + +func BenchmarkCertifyCommitSec100(b *testing.B) { + keys := certifiers.GenSecpValKeys(100) + benchmarkCertifyCommit(b, keys) +} + +func benchmarkCertifyCommit(b *testing.B, keys certifiers.ValKeys) { + chainID := "bench-certify" + vals := keys.ToValidators(20, 10) + cert := certifiers.NewStatic(chainID, vals) + check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), 0, len(keys)) + for i := 0; i < b.N; i++ { + err := cert.Certify(check) + if err != nil { + panic(err) + } + } + +} diff --git a/certifiers/provider.go b/certifiers/provider.go new file mode 100644 index 00000000..64b4212d --- /dev/null +++ b/certifiers/provider.go @@ -0,0 +1,125 @@ +package certifiers + +import ( + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +// Provider is used to get more validators by other means +// +// Examples: MemProvider, files.Provider, client.Provider.... +type Provider interface { + // StoreCommit saves a FullCommit after we have verified it, + // so we can query for it later. 
Important for updating our + // store of trusted commits + StoreCommit(fc FullCommit) error + // GetByHeight returns the closest commit with height <= h + GetByHeight(h int) (FullCommit, error) + // GetByHash returns a commit exactly matching this validator hash + GetByHash(hash []byte) (FullCommit, error) + // LatestCommit returns the newest commit stored + LatestCommit() (FullCommit, error) +} + +// cacheProvider allows you to place one or more caches in front of a source +// Provider. It runs through them in order until a match is found. +// So you can keep a local cache, and check with the network if +// no data is there. +type cacheProvider struct { + Providers []Provider +} + +func NewCacheProvider(providers ...Provider) Provider { + return cacheProvider{ + Providers: providers, + } +} + +// StoreCommit tries to add the seed to all providers. +// +// Aborts on first error it encounters (closest provider) +func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { + for _, p := range c.Providers { + err = p.StoreCommit(fc) + if err != nil { + break + } + } + return err +} + +/* +GetByHeight should return the closest possible match from all providers. + +The Cache is usually organized in order from cheapest call (memory) +to most expensive calls (disk/network). However, since GetByHeight returns +a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would +give us the exact match, a naive "stop at first non-error" would hide +the actual desired results. + +Thus, we query each provider in order until we find an exact match +or we finished querying them all. If at least one returned a non-error, +then this returns the best match (minimum h-h'). 
+*/ +func (c cacheProvider) GetByHeight(h int) (fc FullCommit, err error) { + for _, p := range c.Providers { + var tfc FullCommit + tfc, err = p.GetByHeight(h) + if err == nil { + if tfc.Height() > fc.Height() { + fc = tfc + } + if tfc.Height() == h { + break + } + } + } + // even if the last one had an error, if any was a match, this is good + if fc.Height() > 0 { + err = nil + } + return fc, err +} + +func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { + for _, p := range c.Providers { + fc, err = p.GetByHash(hash) + if err == nil { + break + } + } + return fc, err +} + +func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { + for _, p := range c.Providers { + var tfc FullCommit + tfc, err = p.LatestCommit() + if err == nil && tfc.Height() > fc.Height() { + fc = tfc + } + } + // even if the last one had an error, if any was a match, this is good + if fc.Height() > 0 { + err = nil + } + return fc, err +} + +// missingProvider doens't store anything, always a miss +// Designed as a mock for testing +type missingProvider struct{} + +func NewMissingProvider() Provider { + return missingProvider{} +} + +func (missingProvider) StoreCommit(_ FullCommit) error { return nil } +func (missingProvider) GetByHeight(_ int) (FullCommit, error) { + return FullCommit{}, certerr.ErrCommitNotFound() +} +func (missingProvider) GetByHash(_ []byte) (FullCommit, error) { + return FullCommit{}, certerr.ErrCommitNotFound() +} +func (missingProvider) LatestCommit() (FullCommit, error) { + return FullCommit{}, certerr.ErrCommitNotFound() +} diff --git a/certifiers/provider_test.go b/certifiers/provider_test.go new file mode 100644 index 00000000..c1e9ae51 --- /dev/null +++ b/certifiers/provider_test.go @@ -0,0 +1,128 @@ +package certifiers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/tendermint/tendermint/certifiers" + "github.com/tendermint/tendermint/certifiers/errors" +) 
+ +func TestMemProvider(t *testing.T) { + p := certifiers.NewMemStoreProvider() + checkProvider(t, p, "test-mem", "empty") +} + +func TestCacheProvider(t *testing.T) { + p := certifiers.NewCacheProvider( + certifiers.NewMissingProvider(), + certifiers.NewMemStoreProvider(), + certifiers.NewMissingProvider(), + ) + checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") +} + +func checkProvider(t *testing.T, p certifiers.Provider, chainID, app string) { + assert, require := assert.New(t), require.New(t) + appHash := []byte(app) + keys := certifiers.GenValKeys(5) + count := 10 + + // make a bunch of commits... + commits := make([]certifiers.FullCommit, count) + for i := 0; i < count; i++ { + // two commits for each validator, to check how we handle dups + // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... + vals := keys.ToValidators(10, int64(count/2)) + h := 20 + 10*i + commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) + } + + // check provider is empty + fc, err := p.GetByHeight(20) + require.NotNil(err) + assert.True(errors.IsCommitNotFoundErr(err)) + + fc, err = p.GetByHash(commits[3].ValidatorsHash()) + require.NotNil(err) + assert.True(errors.IsCommitNotFoundErr(err)) + + // now add them all to the provider + for _, s := range commits { + err = p.StoreCommit(s) + require.Nil(err) + // and make sure we can get it back + s2, err := p.GetByHash(s.ValidatorsHash()) + assert.Nil(err) + assert.Equal(s, s2) + // by height as well + s2, err = p.GetByHeight(s.Height()) + assert.Nil(err) + assert.Equal(s, s2) + } + + // make sure we get the last hash if we overstep + fc, err = p.GetByHeight(5000) + if assert.Nil(err) { + assert.Equal(commits[count-1].Height(), fc.Height()) + assert.Equal(commits[count-1], fc) + } + + // and middle ones as well + fc, err = p.GetByHeight(47) + if assert.Nil(err) { + // we only step by 10, so 40 must be the one below this + assert.Equal(40, fc.Height()) + } + +} + +// this will make a get height, and if it is good, set the 
data as well +func checkGetHeight(t *testing.T, p certifiers.Provider, ask, expect int) { + fc, err := p.GetByHeight(ask) + require.Nil(t, err, "%+v", err) + if assert.Equal(t, expect, fc.Height()) { + err = p.StoreCommit(fc) + require.Nil(t, err, "%+v", err) + } +} + +func TestCacheGetsBestHeight(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + require := require.New(t) + + // we will write data to the second level of the cache (p2), + // and see what gets cached, stored in + p := certifiers.NewMemStoreProvider() + p2 := certifiers.NewMemStoreProvider() + cp := certifiers.NewCacheProvider(p, p2) + + chainID := "cache-best-height" + appHash := []byte("01234567") + keys := certifiers.GenValKeys(5) + count := 10 + + // set a bunch of commits + for i := 0; i < count; i++ { + vals := keys.ToValidators(10, int64(count/2)) + h := 10 * (i + 1) + fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, 0, 5) + err := p2.StoreCommit(fc) + require.NoError(err) + } + + // let's get a few heights from the cache and set them proper + checkGetHeight(t, cp, 57, 50) + checkGetHeight(t, cp, 33, 30) + + // make sure they are set in p as well (but nothing else) + checkGetHeight(t, p, 44, 30) + checkGetHeight(t, p, 50, 50) + checkGetHeight(t, p, 99, 50) + + // now, query the cache for a higher value + checkGetHeight(t, p2, 99, 90) + checkGetHeight(t, cp, 99, 90) +} diff --git a/certifiers/static.go b/certifiers/static.go new file mode 100644 index 00000000..787aecb3 --- /dev/null +++ b/certifiers/static.go @@ -0,0 +1,66 @@ +package certifiers + +import ( + "bytes" + + "github.com/pkg/errors" + + "github.com/tendermint/tendermint/types" + + certerr "github.com/tendermint/tendermint/certifiers/errors" +) + +var _ Certifier = &Static{} + +// Static assumes a static set of validators, set on +// initilization and checks against them. 
+// The signatures on every header is checked for > 2/3 votes +// against the known validator set upon Certify +// +// Good for testing or really simple chains. Building block +// to support real-world functionality. +type Static struct { + chainID string + vSet *types.ValidatorSet + vhash []byte +} + +func NewStatic(chainID string, vals *types.ValidatorSet) *Static { + return &Static{ + chainID: chainID, + vSet: vals, + } +} + +func (c *Static) ChainID() string { + return c.chainID +} + +func (c *Static) Validators() *types.ValidatorSet { + return c.vSet +} + +func (c *Static) Hash() []byte { + if len(c.vhash) == 0 { + c.vhash = c.vSet.Hash() + } + return c.vhash +} + +func (c *Static) Certify(commit Commit) error { + // do basic sanity checks + err := commit.ValidateBasic(c.chainID) + if err != nil { + return err + } + + // make sure it has the same validator set we have (static means static) + if !bytes.Equal(c.Hash(), commit.Header.ValidatorsHash) { + return certerr.ErrValidatorsChanged() + } + + // then make sure we have the proper signatures for this + err = c.vSet.VerifyCommit(c.chainID, commit.Commit.BlockID, + commit.Header.Height, commit.Commit) + return errors.WithStack(err) +} diff --git a/certifiers/static_test.go b/certifiers/static_test.go new file mode 100644 index 00000000..f1f40c6c --- /dev/null +++ b/certifiers/static_test.go @@ -0,0 +1,59 @@ +package certifiers_test + +import ( + "testing" + + "github.com/stretchr/testify/assert" + + "github.com/tendermint/tendermint/types" + + "github.com/tendermint/tendermint/certifiers" + errors "github.com/tendermint/tendermint/certifiers/errors" +) + +func TestStaticCert(t *testing.T) { + // assert, require := assert.New(t), require.New(t) + assert := assert.New(t) + // require := require.New(t) + + keys := certifiers.GenValKeys(4) + // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
+ vals := keys.ToValidators(20, 10) + // and a certifier based on our known set + chainID := "test-static" + cert := certifiers.NewStatic(chainID, vals) + + cases := []struct { + keys certifiers.ValKeys + vals *types.ValidatorSet + height int + first, last int // who actually signs + proper bool // true -> expect no error + changed bool // true -> expect validator change error + }{ + // perfect, signed by everyone + {keys, vals, 1, 0, len(keys), true, false}, + // skip little guy is okay + {keys, vals, 2, 1, len(keys), true, false}, + // but not the big guy + {keys, vals, 3, 0, len(keys) - 1, false, false}, + // even changing the power a little bit breaks the static validator + // the sigs are enough, but the validator hash is unknown + {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, + } + + for _, tc := range cases { + check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, + []byte("foo"), tc.first, tc.last) + err := cert.Certify(check) + if tc.proper { + assert.Nil(err, "%+v", err) + } else { + assert.NotNil(err) + if tc.changed { + assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) + } + } + } + +} diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index 38dff406..c96ccf97 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -101,10 +101,10 @@ func TestByzantine(t *testing.T) { // start the state machines byzR := reactors[0].(*ByzantineReactor) s := byzR.reactor.conS.GetState() - byzR.reactor.SwitchToConsensus(s) + byzR.reactor.SwitchToConsensus(s, 0) for i := 1; i < N; i++ { cr := reactors[i].(*ConsensusReactor) - cr.SwitchToConsensus(cr.conS.GetState()) + cr.SwitchToConsensus(cr.conS.GetState(), 0) } // byz proposer sends one block to peers[0] diff --git a/consensus/common_test.go b/consensus/common_test.go index 33b613a0..9810024d 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -347,7 +347,11 @@ func randConsensusNet(nValidators int, testName string, 
tickerFunc func() Timeou opt(thisConfig) } ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], appFunc()) + app := appFunc() + vals := types.TM2PB.Validators(state.Validators) + app.InitChain(abci.RequestInitChain{Validators: vals}) + + css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app) css[i].SetLogger(logger.With("validator", i)) css[i].SetTimeoutTicker(tickerFunc()) } @@ -373,7 +377,11 @@ func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerF privVal = types.GenPrivValidatorFS(tempFilePath) } - css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, appFunc()) + app := appFunc() + vals := types.TM2PB.Validators(state.Validators) + app.InitChain(abci.RequestInitChain{Validators: vals}) + + css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app) css[i].SetLogger(log.TestingLogger()) css[i].SetTimeoutTicker(tickerFunc()) } diff --git a/consensus/reactor.go b/consensus/reactor.go index 48041e2f..e6849992 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -76,7 +76,7 @@ func (conR *ConsensusReactor) OnStop() { // SwitchToConsensus switches from fast_sync mode to consensus mode. 
// It resets the state, turns off fast_sync, and starts the consensus state-machine -func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State) { +func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State, blocksSynced int) { conR.Logger.Info("SwitchToConsensus") conR.conS.reconstructLastCommit(state) // NOTE: The line below causes broadcastNewRoundStepRoutine() to @@ -87,6 +87,10 @@ func (conR *ConsensusReactor) SwitchToConsensus(state *sm.State) { conR.fastSync = false conR.mtx.Unlock() + if blocksSynced > 0 { + // dont bother with the WAL if we fast synced + conR.conS.doWALCatchup = false + } conR.conS.Start() } diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 623a6541..ed8fa87b 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -54,7 +54,7 @@ func startConsensusNet(t *testing.T, css []*ConsensusState, N int, subscribeEven // we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors for i := 0; i < N; i++ { s := reactors[i].conS.GetState() - reactors[i].SwitchToConsensus(s) + reactors[i].SwitchToConsensus(s, 0) } return reactors, eventChans } diff --git a/consensus/replay.go b/consensus/replay.go index c5cb4219..d3c5cd5d 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -4,6 +4,7 @@ import ( "bytes" "errors" "fmt" + "hash/crc32" "io" "reflect" "strconv" @@ -11,7 +12,6 @@ import ( "time" abci "github.com/tendermint/abci/types" - wire "github.com/tendermint/go-wire" auto "github.com/tendermint/tmlibs/autofile" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/log" @@ -22,6 +22,8 @@ import ( "github.com/tendermint/tendermint/version" ) +var crc32c = crc32.MakeTable(crc32.Castagnoli) + // Functionality to replay blocks and messages on recovery from a crash. // There are two general failure scenarios: failure during consensus, and failure while applying the block. 
// The former is handled by the WAL, the latter by the proxyApp Handshake on restart, @@ -35,18 +37,11 @@ import ( // as if it were received in receiveRoutine // Lines that start with "#" are ignored. // NOTE: receiveRoutine should not be running -func (cs *ConsensusState) readReplayMessage(msgBytes []byte, newStepCh chan interface{}) error { - // Skip over empty and meta lines - if len(msgBytes) == 0 || msgBytes[0] == '#' { +func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error { + // skip meta messages + if _, ok := msg.Msg.(EndHeightMessage); ok { return nil } - var err error - var msg TimedWALMessage - wire.ReadJSON(&msg, msgBytes, &err) - if err != nil { - fmt.Println("MsgBytes:", msgBytes, string(msgBytes)) - return fmt.Errorf("Error reading json data: %v", err) - } // for logging switch m := msg.Msg.(type) { @@ -104,7 +99,7 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error { // Ensure that ENDHEIGHT for this height doesn't exist // NOTE: This is just a sanity check. As far as we know things work fine without it, // and Handshake could reuse ConsensusState if it weren't for this check (since we can crash after writing ENDHEIGHT). - gr, found, err := cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight)) + gr, found, err := cs.wal.SearchForEndHeight(uint64(csHeight)) if gr != nil { gr.Close() } @@ -113,33 +108,33 @@ func (cs *ConsensusState) catchupReplay(csHeight int) error { } // Search for last height marker - gr, found, err = cs.wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(csHeight-1)) + gr, found, err = cs.wal.SearchForEndHeight(uint64(csHeight - 1)) if err == io.EOF { cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1) } else if err != nil { return err - } else { - defer gr.Close() } if !found { return errors.New(cmn.Fmt("Cannot replay height %d. 
WAL does not contain #ENDHEIGHT for %d.", csHeight, csHeight-1)) } + defer gr.Close() cs.Logger.Info("Catchup by replaying consensus messages", "height", csHeight) + var msg *TimedWALMessage + dec := WALDecoder{gr} + for { - line, err := gr.ReadLine() - if err != nil { - if err == io.EOF { - break - } else { - return err - } + msg, err = dec.Decode() + if err == io.EOF { + break + } else if err != nil { + return err } // NOTE: since the priv key is set when the msgs are received // it will attempt to eg double sign but we can just ignore it // since the votes will be replayed and we'll get to the next step - if err := cs.readReplayMessage([]byte(line), nil); err != nil { + if err := cs.readReplayMessage(msg, nil); err != nil { return err } } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index 1182aaf0..24df20fb 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -4,6 +4,7 @@ import ( "bufio" "errors" "fmt" + "io" "os" "strconv" "strings" @@ -53,12 +54,20 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error { defer pb.fp.Close() var nextN int // apply N msgs in a row - for pb.scanner.Scan() { + var msg *TimedWALMessage + for { if nextN == 0 && console { nextN = pb.replayConsoleLoop() } - if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil { + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + + if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { return err } @@ -76,9 +85,9 @@ func (cs *ConsensusState) ReplayFile(file string, console bool) error { type playback struct { cs *ConsensusState - fp *os.File - scanner *bufio.Scanner - count int // how many lines/msgs into the file are we + fp *os.File + dec *WALDecoder + count int // how many lines/msgs into the file are we // replays can be reset to beginning fileName string // so we can close/reopen the file @@ -91,7 +100,7 @@ func newPlayback(fileName string, fp *os.File, 
cs *ConsensusState, genState *sm. fp: fp, fileName: fileName, genesisState: genState, - scanner: bufio.NewScanner(fp), + dec: NewWALDecoder(fp), } } @@ -111,13 +120,20 @@ func (pb *playback) replayReset(count int, newStepCh chan interface{}) error { return err } pb.fp = fp - pb.scanner = bufio.NewScanner(fp) + pb.dec = NewWALDecoder(fp) count = pb.count - count fmt.Printf("Reseting from %d to %d\n", pb.count, count) pb.count = 0 pb.cs = newCS - for i := 0; pb.scanner.Scan() && i < count; i++ { - if err := pb.cs.readReplayMessage(pb.scanner.Bytes(), newStepCh); err != nil { + var msg *TimedWALMessage + for i := 0; i < count; i++ { + msg, err = pb.dec.Decode() + if err == io.EOF { + return nil + } else if err != nil { + return err + } + if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { return err } pb.count += 1 diff --git a/consensus/replay_test.go b/consensus/replay_test.go index c478a095..7d882dc1 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -8,7 +8,6 @@ import ( "io/ioutil" "os" "path" - "strings" "testing" "time" @@ -44,7 +43,7 @@ func init() { // after running it (eg. sometimes small_block2 will have 5 block parts, sometimes 6). // It should only have to be re-run if there is some breaking change to the consensus data structures (eg. blocks, votes) // or to the behaviour of the app (eg. 
computes app hash differently) -var data_dir = path.Join(cmn.GoPath, "src/github.com/tendermint/tendermint/consensus", "test_data") +var data_dir = path.Join(cmn.GoPath(), "src/github.com/tendermint/tendermint/consensus", "test_data") //------------------------------------------------------------------------------------------ // WAL Tests @@ -60,12 +59,12 @@ var baseStepChanges = []int{3, 6, 8} var testCases = []*testCase{ newTestCase("empty_block", baseStepChanges), // empty block (has 1 block part) newTestCase("small_block1", baseStepChanges), // small block with txs in 1 block part - newTestCase("small_block2", []int{3, 11, 13}), // small block with txs across 6 smaller block parts + newTestCase("small_block2", []int{3, 12, 14}), // small block with txs across 6 smaller block parts } type testCase struct { name string - log string //full cs wal + log []byte //full cs wal stepMap map[int]int8 // map lines of log to privval step proposeLine int @@ -100,29 +99,27 @@ func newMapFromChanges(changes []int) map[int]int8 { return m } -func readWAL(p string) string { +func readWAL(p string) []byte { b, err := ioutil.ReadFile(p) if err != nil { panic(err) } - return string(b) + return b } -func writeWAL(walMsgs string) string { - tempDir := os.TempDir() - walDir := path.Join(tempDir, "/wal"+cmn.RandStr(12)) - walFile := path.Join(walDir, "wal") - // Create WAL directory - err := cmn.EnsureDir(walDir, 0700) +func writeWAL(walMsgs []byte) string { + walFile, err := ioutil.TempFile("", "wal") if err != nil { - panic(err) + panic(fmt.Errorf("failed to create temp WAL file: %v", err)) } - // Write the needed WAL to file - err = cmn.WriteFile(walFile, []byte(walMsgs), 0600) + _, err = walFile.Write(walMsgs) if err != nil { - panic(err) + panic(fmt.Errorf("failed to write to temp WAL file: %v", err)) } - return walFile + if err := walFile.Close(); err != nil { + panic(fmt.Errorf("failed to close temp WAL file: %v", err)) + } + return walFile.Name() } func waitForBlock(newBlockCh 
chan interface{}, thisCase *testCase, i int) { @@ -167,7 +164,7 @@ func toPV(pv types.PrivValidator) *types.PrivValidatorFS { return pv.(*types.PrivValidatorFS) } -func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, string, string) { +func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bool) (*ConsensusState, chan interface{}, []byte, string) { t.Log("-------------------------------------") t.Logf("Starting replay test %v (of %d lines of WAL). Crash after = %v", thisCase.name, nLines, crashAfter) @@ -176,11 +173,13 @@ func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bo lineStep -= 1 } - split := strings.Split(thisCase.log, "\n") + split := bytes.Split(thisCase.log, walSeparator) lastMsg := split[nLines] // we write those lines up to (not including) one with the signature - walFile := writeWAL(strings.Join(split[:nLines], "\n") + "\n") + b := bytes.Join(split[:nLines], walSeparator) + b = append(b, walSeparator...) 
+ walFile := writeWAL(b) cs := fixedConsensusStateDummy() @@ -195,14 +194,19 @@ func setupReplayTest(t *testing.T, thisCase *testCase, nLines int, crashAfter bo return cs, newBlockCh, lastMsg, walFile } -func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage { - var err error - var msg TimedWALMessage - wire.ReadJSON(&msg, []byte(walMsg), &err) +func readTimedWALMessage(t *testing.T, rawMsg []byte) TimedWALMessage { + b := bytes.NewBuffer(rawMsg) + // because rawMsg does not contain a separator and WALDecoder#Decode expects it + _, err := b.Write(walSeparator) + if err != nil { + t.Fatal(err) + } + dec := NewWALDecoder(b) + msg, err := dec.Decode() if err != nil { t.Fatalf("Error reading json data: %v", err) } - return msg + return *msg } //----------------------------------------------- @@ -211,10 +215,15 @@ func readTimedWALMessage(t *testing.T, walMsg string) TimedWALMessage { func TestWALCrashAfterWrite(t *testing.T) { for _, thisCase := range testCases { - split := strings.Split(thisCase.log, "\n") - for i := 0; i < len(split)-1; i++ { - cs, newBlockCh, _, walFile := setupReplayTest(t, thisCase, i+1, true) - runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1) + splitSize := bytes.Count(thisCase.log, walSeparator) + for i := 0; i < splitSize-1; i++ { + t.Run(fmt.Sprintf("%s:%d", thisCase.name, i), func(t *testing.T) { + cs, newBlockCh, _, walFile := setupReplayTest(t, thisCase, i+1, true) + cs.config.TimeoutPropose = 100 + runReplayTest(t, cs, walFile, newBlockCh, thisCase, i+1) + // cleanup + os.Remove(walFile) + }) } } } @@ -226,14 +235,19 @@ func TestWALCrashAfterWrite(t *testing.T) { func TestWALCrashBeforeWritePropose(t *testing.T) { for _, thisCase := range testCases { lineNum := thisCase.proposeLine - // setup replay test where last message is a proposal - cs, newBlockCh, proposalMsg, walFile := setupReplayTest(t, thisCase, lineNum, false) - msg := readTimedWALMessage(t, proposalMsg) - proposal := 
msg.Msg.(msgInfo).Msg.(*ProposalMessage) - // Set LastSig - toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal) - toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature - runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum) + t.Run(fmt.Sprintf("%s:%d", thisCase.name, lineNum), func(t *testing.T) { + // setup replay test where last message is a proposal + cs, newBlockCh, proposalMsg, walFile := setupReplayTest(t, thisCase, lineNum, false) + cs.config.TimeoutPropose = 100 + msg := readTimedWALMessage(t, proposalMsg) + proposal := msg.Msg.(msgInfo).Msg.(*ProposalMessage) + // Set LastSig + toPV(cs.privValidator).LastSignBytes = types.SignBytes(cs.state.ChainID, proposal.Proposal) + toPV(cs.privValidator).LastSignature = proposal.Proposal.Signature + runReplayTest(t, cs, walFile, newBlockCh, thisCase, lineNum) + // cleanup + os.Remove(walFile) + }) } } @@ -315,7 +329,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { if err != nil { t.Fatal(err) } - walFile := writeWAL(string(walBody)) + walFile := writeWAL(walBody) config.Consensus.SetWalFile(walFile) privVal := types.LoadPrivValidatorFS(config.PrivValidatorFile()) @@ -382,7 +396,7 @@ func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { } func applyBlock(st *sm.State, blk *types.Block, proxyApp proxy.AppConns) { - testPartSize := st.Params().BlockPartSizeBytes + testPartSize := st.Params.BlockPartSizeBytes err := st.ApplyBlock(nil, proxyApp.Consensus(), blk, blk.MakePartSet(testPartSize).Header(), mempool) if err != nil { panic(err) @@ -465,7 +479,7 @@ func buildTMStateFromChain(config *cfg.Config, state *sm.State, chain []*types.B func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) { // Search for height marker - gr, found, err := wal.group.Search("#ENDHEIGHT: ", makeHeightSearchFunc(0)) + gr, found, err := wal.SearchForEndHeight(0) if err != nil { return nil, nil, err } @@ -479,20 +493,17 @@ func 
makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) { var blockParts *types.PartSet var blocks []*types.Block var commits []*types.Commit - for { - line, err := gr.ReadLine() - if err != nil { - if err == io.EOF { - break - } else { - return nil, nil, err - } - } - piece, err := readPieceFromWAL([]byte(line)) - if err != nil { + dec := NewWALDecoder(gr) + for { + msg, err := dec.Decode() + if err == io.EOF { + break + } else if err != nil { return nil, nil, err } + + piece := readPieceFromWAL(msg) if piece == nil { continue } @@ -528,17 +539,10 @@ func makeBlockchainFromWAL(wal *WAL) ([]*types.Block, []*types.Commit, error) { return blocks, commits, nil } -func readPieceFromWAL(msgBytes []byte) (interface{}, error) { - // Skip over empty and meta lines - if len(msgBytes) == 0 || msgBytes[0] == '#' { - return nil, nil - } - var err error - var msg TimedWALMessage - wire.ReadJSON(&msg, msgBytes, &err) - if err != nil { - fmt.Println("MsgBytes:", msgBytes, string(msgBytes)) - return nil, fmt.Errorf("Error reading json data: %v", err) +func readPieceFromWAL(msg *TimedWALMessage) interface{} { + // skip meta messages + if _, ok := msg.Msg.(EndHeightMessage); ok { + return nil } // for logging @@ -546,14 +550,15 @@ func readPieceFromWAL(msgBytes []byte) (interface{}, error) { case msgInfo: switch msg := m.Msg.(type) { case *ProposalMessage: - return &msg.Proposal.BlockPartsHeader, nil + return &msg.Proposal.BlockPartsHeader case *BlockPartMessage: - return msg.Part, nil + return msg.Part case *VoteMessage: - return msg.Vote, nil + return msg.Vote } } - return nil, nil + + return nil } // fresh state and mock store @@ -562,7 +567,7 @@ func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (*sm.State, *mockBl state, _ := sm.MakeGenesisStateFromFile(stateDB, config.GenesisFile()) state.SetLogger(log.TestingLogger().With("module", "state")) - store := NewMockBlockStore(config, state.Params()) + store := NewMockBlockStore(config, state.Params) 
return state, store } diff --git a/consensus/state.go b/consensus/state.go index f0fbad81..e5b7641f 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -97,8 +97,9 @@ type ConsensusState struct { // a Write-Ahead Log ensures we can recover from any kind of crash // and helps us avoid signing conflicting votes - wal *WAL - replayMode bool // so we don't log signing errors during replay + wal *WAL + replayMode bool // so we don't log signing errors during replay + doWALCatchup bool // determines if we even try to do the catchup // for tests where we want to limit the number of transitions the state makes nSteps int @@ -123,6 +124,7 @@ func NewConsensusState(config *cfg.ConsensusConfig, state *sm.State, proxyAppCon internalMsgQueue: make(chan msgInfo, msgQueueSize), timeoutTicker: NewTimeoutTicker(), done: make(chan struct{}), + doWALCatchup: true, } // set function defaults (may be overwritten before calling Start) cs.decideProposal = cs.defaultDecideProposal @@ -226,10 +228,12 @@ func (cs *ConsensusState) OnStart() error { // we may have lost some votes if the process crashed // reload from consensus log to catchup - if err := cs.catchupReplay(cs.Height); err != nil { - cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error()) - // NOTE: if we ever do return an error here, - // make sure to stop the timeoutTicker + if cs.doWALCatchup { + if err := cs.catchupReplay(cs.Height); err != nil { + cs.Logger.Error("Error on catchup replay. 
Proceeding to start ConsensusState anyway", "err", err.Error()) + // NOTE: if we ever do return an error here, + // make sure to stop the timeoutTicker + } } // now start the receiveRoutine @@ -390,7 +394,7 @@ func (cs *ConsensusState) reconstructLastCommit(state *sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.NewVoteSet(cs.state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators) + lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators) for _, precommit := range seenCommit.Precommits { if precommit == nil { continue @@ -707,6 +711,7 @@ func (cs *ConsensusState) proposalHeartbeat(height, round int) { // not a validator valIndex = -1 } + chainID := cs.state.ChainID for { rs := cs.GetRoundState() // if we've already moved on, no need to send more heartbeats @@ -720,7 +725,7 @@ func (cs *ConsensusState) proposalHeartbeat(height, round int) { ValidatorAddress: addr, ValidatorIndex: valIndex, } - cs.privValidator.SignHeartbeat(cs.state.ChainID, heartbeat) + cs.privValidator.SignHeartbeat(chainID, heartbeat) heartbeatEvent := types.EventDataProposalHeartbeat{heartbeat} types.FireEventProposalHeartbeat(cs.evsw, heartbeatEvent) counter += 1 @@ -797,8 +802,7 @@ func (cs *ConsensusState) defaultDecideProposal(height, round int) { // Make proposal polRound, polBlockID := cs.Votes.POLInfo() proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) - err := cs.privValidator.SignProposal(cs.state.ChainID, proposal) - if err == nil { + if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { // Set fields /* fields set by setProposal and addBlockPart cs.Proposal = proposal @@ -857,10 +861,9 @@ func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts // Mempool validated transactions txs := 
cs.mempool.Reap(cs.config.MaxBlockSizeTxs) - return types.MakeBlock(cs.Height, cs.state.ChainID, txs, commit, cs.state.LastBlockID, cs.state.Validators.Hash(), - cs.state.AppHash, cs.state.Params().BlockPartSizeBytes) + cs.state.AppHash, cs.state.Params.BlockPartSizeBytes) } // Enter: `timeoutPropose` after entering Propose. @@ -1130,7 +1133,7 @@ func (cs *ConsensusState) tryFinalizeCommit(height int) { if !cs.ProposalBlock.HashesTo(blockID.Hash) { // TODO: this happens every time if we're not a validator (ugly logs) // TODO: ^^ wait, why does it matter that we're a validator? - cs.Logger.Error("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash) + cs.Logger.Info("Attempt to finalize failed. We don't have the commit block.", "height", height, "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash) return } @@ -1189,7 +1192,7 @@ func (cs *ConsensusState) finalizeCommit(height int) { // As is, ConsensusState should not be started again // until we successfully call ApplyBlock (ie. 
here or in Handshake after restart) if cs.wal != nil { - cs.wal.writeEndHeight(height) + cs.wal.Save(EndHeightMessage{uint64(height)}) } fail.Fail() // XXX @@ -1295,7 +1298,7 @@ func (cs *ConsensusState) addProposalBlockPart(height int, part *types.Part, ver var n int var err error cs.ProposalBlock = wire.ReadBinary(&types.Block{}, cs.ProposalBlockParts.GetReader(), - cs.state.Params().BlockSizeParams.MaxBytes, &n, &err).(*types.Block) + cs.state.Params.BlockSizeParams.MaxBytes, &n, &err).(*types.Block) // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() { diff --git a/consensus/state_test.go b/consensus/state_test.go index 69b6d53c..060e37d4 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -181,7 +181,7 @@ func TestBadProposal(t *testing.T) { height, round := cs1.Height, cs1.Round vs2 := vss[1] - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) voteCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringVote(), 1) @@ -328,7 +328,7 @@ func TestLockNoPOL(t *testing.T) { vs2 := vss[1] height := cs1.Height - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) @@ -494,7 +494,7 @@ func TestLockPOLRelock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes timeoutProposeCh := subscribeToEvent(cs1.evsw, 
"tester", types.EventStringTimeoutPropose(), 1) timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) @@ -607,7 +607,7 @@ func TestLockPOLUnlock(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) @@ -702,7 +702,7 @@ func TestLockPOLSafety1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) @@ -823,7 +823,7 @@ func TestLockPOLSafety2(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) timeoutProposeCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutPropose(), 1) @@ -998,7 +998,7 @@ func TestHalt1(t *testing.T) { cs1, vss := randConsensusState(4) vs2, vs3, vs4 := vss[1], vss[2], vss[3] - partSize := cs1.state.Params().BlockPartSizeBytes + partSize := cs1.state.Params.BlockPartSizeBytes proposalCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringCompleteProposal(), 1) timeoutWaitCh := subscribeToEvent(cs1.evsw, "tester", types.EventStringTimeoutWait(), 1) diff --git a/consensus/test_data/build.sh b/consensus/test_data/build.sh index d50c2629..dcec6f2a 100755 --- a/consensus/test_data/build.sh +++ b/consensus/test_data/build.sh @@ -1,88 +1,125 
@@ #!/usr/bin/env bash -# XXX: removes tendermint dir +# Requires: killall command and jq JSON processor. -cd "$GOPATH/src/github.com/tendermint/tendermint" || exit 1 +# Get the parent directory of where this script is. +SOURCE="${BASH_SOURCE[0]}" +while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done +DIR="$( cd -P "$( dirname "$SOURCE" )/../.." && pwd )" + +# Change into that dir because we expect that. +cd "$DIR" || exit 1 # Make sure we have a tendermint command. if ! hash tendermint 2>/dev/null; then make install fi -# specify a dir to copy +# Make sure we have a cutWALUntil binary. +cutWALUntil=./scripts/cutWALUntil/cutWALUntil +cutWALUntilDir=$(dirname $cutWALUntil) +if ! hash $cutWALUntil 2>/dev/null; then + cd "$cutWALUntilDir" && go build && cd - || exit 1 +fi + +TMHOME=$(mktemp -d) +export TMHOME="$TMHOME" + +if [[ ! -d "$TMHOME" ]]; then + echo "Could not create temp directory" + exit 1 +else + echo "TMHOME: ${TMHOME}" +fi + # TODO: eventually we should replace with `tendermint init --test` DIR_TO_COPY=$HOME/.tendermint_test/consensus_state_test +if [ ! -d "$DIR_TO_COPY" ]; then + echo "$DIR_TO_COPY does not exist. Please run: go test ./consensus" + exit 1 +fi +echo "==> Copying ${DIR_TO_COPY} to ${TMHOME} directory..." +cp -r "$DIR_TO_COPY"/* "$TMHOME" -TMHOME="$HOME/.tendermint" -rm -rf "$TMHOME" -cp -r "$DIR_TO_COPY" "$TMHOME" -cp $TMHOME/config.toml $TMHOME/config.toml.bak +# preserve original genesis file because later it will be modified (see small_block2) +cp "$TMHOME/genesis.json" "$TMHOME/genesis.json.bak" function reset(){ + echo "==> Resetting tendermint..." tendermint unsafe_reset_all - cp $TMHOME/config.toml.bak $TMHOME/config.toml + cp "$TMHOME/genesis.json.bak" "$TMHOME/genesis.json" } reset -# empty block function empty_block(){ -tendermint node --proxy_app=persistent_dummy &> /dev/null & -sleep 5 -killall tendermint + echo "==> Starting tendermint..." 
+ tendermint node --proxy_app=persistent_dummy &> /dev/null & + sleep 5 + echo "==> Killing tendermint..." + killall tendermint -# /q would print up to and including the match, then quit. -# /Q doesn't include the match. -# http://unix.stackexchange.com/questions/11305/grep-show-all-the-file-up-to-the-match -sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/empty_block.cswal + echo "==> Copying WAL log..." + $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_empty_block.cswal + mv consensus/test_data/new_empty_block.cswal consensus/test_data/empty_block.cswal -reset + reset } -# many blocks function many_blocks(){ -bash scripts/txs/random.sh 1000 36657 &> /dev/null & -PID=$! -tendermint node --proxy_app=persistent_dummy &> /dev/null & -sleep 7 -killall tendermint -kill -9 $PID + bash scripts/txs/random.sh 1000 36657 &> /dev/null & + PID=$! + echo "==> Starting tendermint..." + tendermint node --proxy_app=persistent_dummy &> /dev/null & + sleep 10 + echo "==> Killing tendermint..." + kill -9 $PID + killall tendermint -sed '/ENDHEIGHT: 6/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/many_blocks.cswal + echo "==> Copying WAL log..." + $cutWALUntil "$TMHOME/data/cs.wal/wal" 6 consensus/test_data/new_many_blocks.cswal + mv consensus/test_data/new_many_blocks.cswal consensus/test_data/many_blocks.cswal -reset + reset } -# small block 1 function small_block1(){ -bash scripts/txs/random.sh 1000 36657 &> /dev/null & -PID=$! -tendermint node --proxy_app=persistent_dummy &> /dev/null & -sleep 10 -killall tendermint -kill -9 $PID + bash scripts/txs/random.sh 1000 36657 &> /dev/null & + PID=$! + echo "==> Starting tendermint..." + tendermint node --proxy_app=persistent_dummy &> /dev/null & + sleep 10 + echo "==> Killing tendermint..." + kill -9 $PID + killall tendermint -sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block1.cswal + echo "==> Copying WAL log..." 
+ $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block1.cswal + mv consensus/test_data/new_small_block1.cswal consensus/test_data/small_block1.cswal -reset + reset } -# small block 2 (part size = 512) +# block part size = 512 function small_block2(){ -echo "" >> ~/.tendermint/config.toml -echo "block_part_size = 512" >> ~/.tendermint/config.toml -bash scripts/txs/random.sh 1000 36657 &> /dev/null & -PID=$! -tendermint node --proxy_app=persistent_dummy &> /dev/null & -sleep 5 -killall tendermint -kill -9 $PID + cat "$TMHOME/genesis.json" | jq '. + {consensus_params: {block_size_params: {max_bytes: 22020096}, block_gossip_params: {block_part_size_bytes: 512}}}' > "$TMHOME/new_genesis.json" + mv "$TMHOME/new_genesis.json" "$TMHOME/genesis.json" + bash scripts/txs/random.sh 1000 36657 &> /dev/null & + PID=$! + echo "==> Starting tendermint..." + tendermint node --proxy_app=persistent_dummy &> /dev/null & + sleep 5 + echo "==> Killing tendermint..." + kill -9 $PID + killall tendermint -sed '/ENDHEIGHT: 1/Q' ~/.tendermint/data/cs.wal/wal > consensus/test_data/small_block2.cswal + echo "==> Copying WAL log..." + $cutWALUntil "$TMHOME/data/cs.wal/wal" 1 consensus/test_data/new_small_block2.cswal + mv consensus/test_data/new_small_block2.cswal consensus/test_data/small_block2.cswal -reset + reset } @@ -107,4 +144,5 @@ case "$1" in many_blocks esac - +echo "==> Cleaning up..." 
+rm -rf "$TMHOME" diff --git a/consensus/test_data/empty_block.cswal b/consensus/test_data/empty_block.cswal index a7e5e79e..609f4ddf 100644 Binary files a/consensus/test_data/empty_block.cswal and b/consensus/test_data/empty_block.cswal differ diff --git a/consensus/test_data/many_blocks.cswal b/consensus/test_data/many_blocks.cswal index 9ceee2cf..ab486b5a 100644 Binary files a/consensus/test_data/many_blocks.cswal and b/consensus/test_data/many_blocks.cswal differ diff --git a/consensus/test_data/small_block1.cswal b/consensus/test_data/small_block1.cswal index 94ccf712..b7c7e777 100644 Binary files a/consensus/test_data/small_block1.cswal and b/consensus/test_data/small_block1.cswal differ diff --git a/consensus/test_data/small_block2.cswal b/consensus/test_data/small_block2.cswal index e3cfaad3..2ef077dc 100644 Binary files a/consensus/test_data/small_block2.cswal and b/consensus/test_data/small_block2.cswal differ diff --git a/consensus/wal.go b/consensus/wal.go index f9a2a801..80f4b809 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -1,6 +1,11 @@ package consensus import ( + "bytes" + "encoding/binary" + "fmt" + "hash/crc32" + "io" "time" wire "github.com/tendermint/go-wire" @@ -12,11 +17,21 @@ import ( //-------------------------------------------------------- // types and functions for savings consensus messages +var ( + walSeparator = []byte{55, 127, 6, 130} // 0x377f0682 - magic number +) + type TimedWALMessage struct { - Time time.Time `json:"time"` + Time time.Time `json:"time"` // for debugging purposes Msg WALMessage `json:"msg"` } +// EndHeightMessage marks the end of the given height inside WAL. +// @internal used by scripts/cutWALUntil util. 
+type EndHeightMessage struct { + Height uint64 `json:"height"` +} + type WALMessage interface{} var _ = wire.RegisterInterface( @@ -24,6 +39,7 @@ var _ = wire.RegisterInterface( wire.ConcreteType{types.EventDataRoundState{}, 0x01}, wire.ConcreteType{msgInfo{}, 0x02}, wire.ConcreteType{timeoutInfo{}, 0x03}, + wire.ConcreteType{EndHeightMessage{}, 0x04}, ) //-------------------------------------------------------- @@ -38,6 +54,8 @@ type WAL struct { group *auto.Group light bool // ignore block parts + + enc *WALEncoder } func NewWAL(walFile string, light bool) (*WAL, error) { @@ -48,6 +66,7 @@ func NewWAL(walFile string, light bool) (*WAL, error) { wal := &WAL{ group: group, light: light, + enc: NewWALEncoder(group), } wal.BaseService = *cmn.NewBaseService(nil, "WAL", wal) return wal, nil @@ -58,7 +77,7 @@ func (wal *WAL) OnStart() error { if err != nil { return err } else if size == 0 { - wal.writeEndHeight(0) + wal.Save(EndHeightMessage{0}) } _, err = wal.group.Start() return err @@ -70,35 +89,191 @@ func (wal *WAL) OnStop() { } // called in newStep and for each pass in receiveRoutine -func (wal *WAL) Save(wmsg WALMessage) { +func (wal *WAL) Save(msg WALMessage) { if wal == nil { return } + if wal.light { // in light mode we only write new steps, timeouts, and our own votes (no proposals, block parts) - if mi, ok := wmsg.(msgInfo); ok { + if mi, ok := msg.(msgInfo); ok { if mi.PeerKey != "" { return } } } + // Write the wal message - var wmsgBytes = wire.JSONBytes(TimedWALMessage{time.Now(), wmsg}) - err := wal.group.WriteLine(string(wmsgBytes)) + if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil { + cmn.PanicQ(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg)) + } + + // TODO: only flush when necessary + if err := wal.group.Flush(); err != nil { + cmn.PanicQ(cmn.Fmt("Error flushing consensus wal buf to file. 
Error: %v \n", err)) + } +} + +// SearchForEndHeight searches for the EndHeightMessage with the height and +// returns an auto.GroupReader, whenever it was found or not and an error. +// Group reader will be nil if found equals false. +// +// CONTRACT: caller must close group reader. +func (wal *WAL) SearchForEndHeight(height uint64) (gr *auto.GroupReader, found bool, err error) { + var msg *TimedWALMessage + + // NOTE: starting from the last file in the group because we're usually + // searching for the last height. See replay.go + min, max := wal.group.MinIndex(), wal.group.MaxIndex() + wal.Logger.Debug("Searching for height", "height", height, "min", min, "max", max) + for index := max; index >= min; index-- { + gr, err = wal.group.NewReader(index) + if err != nil { + return nil, false, err + } + + dec := NewWALDecoder(gr) + for { + msg, err = dec.Decode() + if err == io.EOF { + // check next file + break + } + if err != nil { + gr.Close() + return nil, false, err + } + + if m, ok := msg.Msg.(EndHeightMessage); ok { + if m.Height == height { // found + wal.Logger.Debug("Found", "height", height, "index", index) + return gr, true, nil + } + } + } + + gr.Close() + } + + return nil, false, nil +} + +/////////////////////////////////////////////////////////////////////////////// + +// A WALEncoder writes custom-encoded WAL messages to an output stream. +// +// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-wire encoded) +type WALEncoder struct { + wr io.Writer +} + +// NewWALEncoder returns a new encoder that writes to wr. +func NewWALEncoder(wr io.Writer) *WALEncoder { + return &WALEncoder{wr} +} + +// Encode writes the custom encoding of v to the stream. 
+func (enc *WALEncoder) Encode(v interface{}) error { + data := wire.BinaryBytes(v) + + crc := crc32.Checksum(data, crc32c) + length := uint32(len(data)) + totalLength := 8 + int(length) + + msg := make([]byte, totalLength) + binary.BigEndian.PutUint32(msg[0:4], crc) + binary.BigEndian.PutUint32(msg[4:8], length) + copy(msg[8:], data) + + _, err := enc.wr.Write(msg) + + if err == nil { + // TODO [Anton Kaliaev 23 Oct 2017]: remove separator + _, err = enc.wr.Write(walSeparator) + } + + return err +} + +/////////////////////////////////////////////////////////////////////////////// + +// A WALDecoder reads and decodes custom-encoded WAL messages from an input +// stream. See WALEncoder for the format used. +// +// It will also compare the checksums and make sure data size is equal to the +// length from the header. If that is not the case, error will be returned. +type WALDecoder struct { + rd io.Reader +} + +// NewWALDecoder returns a new decoder that reads from rd. +func NewWALDecoder(rd io.Reader) *WALDecoder { + return &WALDecoder{rd} +} + +// Decode reads the next custom-encoded value from its reader and returns it. +func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { + b := make([]byte, 4) + + n, err := dec.rd.Read(b) + if err == io.EOF { + return nil, err + } if err != nil { - cmn.PanicQ(cmn.Fmt("Error writing msg to consensus wal. Error: %v \n\nMessage: %v", err, wmsg)) + return nil, fmt.Errorf("failed to read checksum: %v", err) } - // TODO: only flush when necessary - if err := wal.group.Flush(); err != nil { - cmn.PanicQ(cmn.Fmt("Error flushing consensus wal buf to file. 
Error: %v \n", err)) + crc := binary.BigEndian.Uint32(b) + + b = make([]byte, 4) + n, err = dec.rd.Read(b) + if err == io.EOF { + return nil, err } + if err != nil { + return nil, fmt.Errorf("failed to read length: %v", err) + } + length := binary.BigEndian.Uint32(b) + + data := make([]byte, length) + n, err = dec.rd.Read(data) + if err == io.EOF { + return nil, err + } + if err != nil { + return nil, fmt.Errorf("not enough bytes for data: %v (want: %d, read: %v)", err, length, n) + } + + // check checksum before decoding data + actualCRC := crc32.Checksum(data, crc32c) + if actualCRC != crc { + return nil, fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC) + } + + var nn int + var res *TimedWALMessage + res = wire.ReadBinary(&TimedWALMessage{}, bytes.NewBuffer(data), int(length), &nn, &err).(*TimedWALMessage) + if err != nil { + return nil, fmt.Errorf("failed to decode data: %v", err) + } + + // TODO [Anton Kaliaev 23 Oct 2017]: remove separator + if err = readSeparator(dec.rd); err != nil { + return nil, err + } + + return res, err } -func (wal *WAL) writeEndHeight(height int) { - wal.group.WriteLine(cmn.Fmt("#ENDHEIGHT: %v", height)) - - // TODO: only flush when necessary - if err := wal.group.Flush(); err != nil { - cmn.PanicQ(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err)) +// readSeparator reads a separator from r. It returns any error from underlying +// reader or if it's not a separator. 
+func readSeparator(r io.Reader) error { + b := make([]byte, len(walSeparator)) + _, err := r.Read(b) + if err != nil { + return fmt.Errorf("failed to read separator: %v", err) } + if !bytes.Equal(b, walSeparator) { + return fmt.Errorf("not a separator: %v", b) + } + return nil } diff --git a/consensus/wal_test.go b/consensus/wal_test.go new file mode 100644 index 00000000..0235afab --- /dev/null +++ b/consensus/wal_test.go @@ -0,0 +1,62 @@ +package consensus + +import ( + "bytes" + "path" + "testing" + "time" + + "github.com/tendermint/tendermint/consensus/types" + tmtypes "github.com/tendermint/tendermint/types" + cmn "github.com/tendermint/tmlibs/common" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWALEncoderDecoder(t *testing.T) { + now := time.Now() + msgs := []TimedWALMessage{ + TimedWALMessage{Time: now, Msg: EndHeightMessage{0}}, + TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, + } + + b := new(bytes.Buffer) + + for _, msg := range msgs { + b.Reset() + + enc := NewWALEncoder(b) + err := enc.Encode(&msg) + require.NoError(t, err) + + dec := NewWALDecoder(b) + decoded, err := dec.Decode() + require.NoError(t, err) + + assert.Equal(t, msg.Time.Truncate(time.Millisecond), decoded.Time) + assert.Equal(t, msg.Msg, decoded.Msg) + } +} + +func TestSearchForEndHeight(t *testing.T) { + wal, err := NewWAL(path.Join(data_dir, "many_blocks.cswal"), false) + if err != nil { + t.Fatal(err) + } + + h := 3 + gr, found, err := wal.SearchForEndHeight(uint64(h)) + assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h)) + assert.True(t, found, cmn.Fmt("expected to find end height for %d", h)) + assert.NotNil(t, gr, "expected group not to be nil") + defer gr.Close() + + dec := NewWALDecoder(gr) + msg, err := dec.Decode() + assert.NoError(t, err, "expected to decode a message") + rs, ok := msg.Msg.(tmtypes.EventDataRoundState) + assert.True(t, 
ok, "expected message of type EventDataRoundState") + assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) + +} diff --git a/docs/app-development.rst b/docs/app-development.rst index 770572e1..1d2db8fd 100644 --- a/docs/app-development.rst +++ b/docs/app-development.rst @@ -367,7 +367,7 @@ When the app or tendermint restarts, they need to sync to a common height. When an ABCI connection is first established, Tendermint will call ``Info`` on the Query connection. The response should contain the LastBlockHeight and LastBlockAppHash - the former is the last block for -the which the app ran Commit successfully, the latter is the response +which the app ran Commit successfully, the latter is the response from that Commit. Using this information, Tendermint will determine what needs to be diff --git a/docs/ecosystem.rst b/docs/ecosystem.rst index c0c38630..2bc390c9 100644 --- a/docs/ecosystem.rst +++ b/docs/ecosystem.rst @@ -80,32 +80,34 @@ P2P chat using Tendermint, written in Java, `authored by wolfposd `__ | Tendermint | Go | -+-------------------------------------------------------------+--------------------+--------------+ -| `js abci `__ | Tendermint | Javascript | -+-------------------------------------------------------------+--------------------+--------------+ -| `cpp-tmsp `__ | Martin Dyring | C++ | -+-------------------------------------------------------------+--------------------+--------------+ -| `c-abci `__ | ChainX | C | -+-------------------------------------------------------------+--------------------+--------------+ -| `jabci `__ | jTendermint | Java | -+-------------------------------------------------------------+--------------------+--------------+ -| `Spearmint `__ | Dennis Mckinnon | Javascript | -+-------------------------------------------------------------+--------------------+--------------+ -| `ocaml-tmsp `__ | Zach Balder | Ocaml | -+-------------------------------------------------------------+--------------------+--------------+ -| 
`abci_server `__ | Krzysztof Jurewicz | Erlang | -+-------------------------------------------------------------+--------------------+--------------+ -| `rust-tsp `__   | Adrian Brink | Rust       | -+-------------------------------------------------------------+--------------------+--------------+ -| `hs-abci `__ | Alberto Gonzalez | Haskell | -+-------------------------------------------------------------+--------------------+--------------+ -| `haskell-abci `__ | Christoper Goes | Haskell | -+-------------------------------------------------------------+--------------------+--------------+ ++------------------------------------------------------------------+--------------------+--------------+ +| **Name** | **Author** | **Language** | +| | | | ++------------------------------------------------------------------+--------------------+--------------+ +| `abci `__ | Tendermint | Go | ++------------------------------------------------------------------+--------------------+--------------+ +| `js abci `__ | Tendermint | Javascript | ++------------------------------------------------------------------+--------------------+--------------+ +| `cpp-tmsp `__ | Martin Dyring | C++ | ++------------------------------------------------------------------+--------------------+--------------+ +| `c-abci `__ | ChainX | C | ++------------------------------------------------------------------+--------------------+--------------+ +| `jabci `__ | jTendermint | Java | ++------------------------------------------------------------------+--------------------+--------------+ +| `ocaml-tmsp `__ | Zach Balder | Ocaml | ++------------------------------------------------------------------+--------------------+--------------+ +| `abci_server `__ | Krzysztof Jurewicz | Erlang | ++------------------------------------------------------------------+--------------------+--------------+ +| `rust-tsp `__   | Adrian Brink | Rust       | 
++------------------------------------------------------------------+--------------------+--------------+ +| `hs-abci `__ | Alberto Gonzalez | Haskell | ++------------------------------------------------------------------+--------------------+--------------+ +| `haskell-abci `__ | Christoper Goes | Haskell | ++------------------------------------------------------------------+--------------------+--------------+ +| `Spearmint `__ | Dennis Mckinnon | Javascript | ++------------------------------------------------------------------+--------------------+--------------+ +| `py-tendermint `__ | Dave Bryson | Python | ++------------------------------------------------------------------+--------------------+--------------+ Deployment Tools ---------------- diff --git a/docs/install.rst b/docs/install.rst index 1746daa9..1d01b505 100644 --- a/docs/install.rst +++ b/docs/install.rst @@ -29,7 +29,7 @@ You should be able to install the latest with a simple Run ``tendermint --help`` and ``tendermint version`` to ensure your installation worked. -If the installation failed, a dependency may been updated and become +If the installation failed, a dependency may have been updated and become incompatible with the latest Tendermint master branch. We solve this using the ``glide`` tool for dependency management. 
diff --git a/glide.lock b/glide.lock index a7a2bb5a..cd105b3c 100644 --- a/glide.lock +++ b/glide.lock @@ -1,18 +1,16 @@ -hash: 816d84782ab66637e02bd0a3c7f652a9a31f9b88e3ae11438c5bf641cf585f19 -updated: 2017-10-02T23:32:49.162422718-04:00 +hash: 0f9ba99fd411afaaf90993037b0067c5f9f873554f407a6ae9afa0e2548343c5 +updated: 2017-10-27T22:34:38.187149434-04:00 imports: - name: github.com/btcsuite/btcd - version: b8df516b4b267acf2de46be593a9d948d1d2c420 + version: 8cea3866d0f7fb12d567a20744942c0d078c7d15 subpackages: - btcec -- name: github.com/btcsuite/fastsha256 - version: 637e656429416087660c84436a2a035d69d54e2e - name: github.com/ebuchman/fail-test version: 95f809107225be108efcf10a3509e4ea6ceef3c4 - name: github.com/fsnotify/fsnotify version: 4da3e2cfbabc9f751898f250b49f2439785783a1 - name: github.com/go-kit/kit - version: d67bb4c202e3b91377d1079b110a6c9ce23ab2f8 + version: e2b298466b32c7cd5579a9b9b07e968fc9d9452c subpackages: - log - log/level @@ -20,28 +18,31 @@ imports: - name: github.com/go-logfmt/logfmt version: 390ab7935ee28ec6b286364bba9b4dd6410cb3d5 - name: github.com/go-playground/locales - version: 1e5f1161c6416a5ff48840eb8724a394e48cc534 + version: e4cbcb5d0652150d40ad0646651076b6bd2be4f6 subpackages: - currency - name: github.com/go-playground/universal-translator version: 71201497bace774495daed26a3874fd339e0b538 - name: github.com/go-stack/stack - version: 100eb0c0a9c5b306ca2fb4f165df21d80ada4b82 + version: 817915b46b97fd7bb80e8ab6b69f01a53ac3eebf - name: github.com/gogo/protobuf - version: f7f1376d9d231a646d4e62fe1075623ced6db327 + version: 342cbe0a04158f6dcb03ca0079991a51a4248c02 subpackages: - proto - name: github.com/golang/protobuf - version: 18c9bb3261723cd5401db4d0c9fbc5c3b6c70fe8 + version: 1643683e1b54a9e88ad26d98f81400c8c9d9f4f9 subpackages: - proto + - ptypes - ptypes/any + - ptypes/duration + - ptypes/timestamp - name: github.com/golang/snappy version: 553a641470496b2327abcac10b36396bd98e45c9 - name: github.com/gorilla/websocket - version: 
a91eba7f97777409bc2c443f5534d41dd20c5720 + version: ea4d1f681babbce9545c9c5f3d5194a789c89f5b - name: github.com/hashicorp/hcl - version: 392dba7d905ed5d04a5794ba89f558b27e2ba1ca + version: 23c074d0eceb2b8a5bfdbb271ab780cde70f05a8 subpackages: - hcl/ast - hcl/parser @@ -58,33 +59,31 @@ imports: - name: github.com/kr/logfmt version: b84e30acd515aadc4b783ad4ff83aff3299bdfe0 - name: github.com/magiconair/properties - version: 51463bfca2576e06c62a8504b5c0f06d61312647 + version: 8d7837e64d3c1ee4e54a880c5a920ab4316fc90a - name: github.com/mitchellh/mapstructure - version: cc8532a8e9a55ea36402aa21efdf403a60d34096 -- name: github.com/pelletier/go-buffruneio - version: c37440a7cf42ac63b919c752ca73a85067e05992 + version: 06020f85339e21b2478f756a78e295255ffa4d6a - name: github.com/pelletier/go-toml - version: 13d49d4606eb801b8f01ae542b4afc4c6ee3d84a + version: 4e9e0ee19b60b13eb79915933f44d8ed5f268bdd - name: github.com/pkg/errors version: 645ef00459ed84a119197bfb8d8205042c6df63d - name: github.com/rcrowley/go-metrics version: 1f30fe9094a513ce4c700b9a54458bbb0c96996c - name: github.com/spf13/afero - version: 9be650865eab0c12963d8753212f4f9c66cdcf12 + version: 5660eeed305fe5f69c8fc6cf899132a459a97064 subpackages: - mem - name: github.com/spf13/cast version: acbeb36b902d72a7a4c18e8f3241075e7ab763e4 - name: github.com/spf13/cobra - version: 4cdb38c072b86bf795d2c81de50784d9fdd6eb77 + version: 7b2c5ac9fc04fc5efafb60700713d4fa609b777b - name: github.com/spf13/jwalterweatherman - version: 8f07c835e5cc1450c082fe3a439cf87b0cbb2d99 + version: 12bd96e66386c1960ab0f74ced1362f66f552f7b - name: github.com/spf13/pflag - version: e57e3eeb33f795204c1ca35f56c44f83227c6e66 + version: 97afa5e7ca8a08a383cb259e06636b5e2cc7897f - name: github.com/spf13/viper - version: 0967fc9aceab2ce9da34061253ac10fb99bba5b2 + version: 25b30aa063fc18e48662b86996252eabdcf2f0c7 - name: github.com/syndtr/goleveldb - version: 8c81ea47d4c41a385645e133e15510fc6a2a74b4 + version: b89cc31ef7977104127d34c1bd31ebd1a9db2199 
subpackages: - leveldb - leveldb/cache @@ -99,7 +98,7 @@ imports: - leveldb/table - leveldb/util - name: github.com/tendermint/abci - version: 191c4b6d176169ffc7f9972d490fa362a3b7d940 + version: dc33aad9b4e514a2322725ef68f27f72d955c537 subpackages: - client - example/counter @@ -107,23 +106,23 @@ imports: - server - types - name: github.com/tendermint/ed25519 - version: 1f52c6f8b8a5c7908aff4497c186af344b428925 + version: d8387025d2b9d158cf4efb07e7ebf814bcce2057 subpackages: - edwards25519 - extra25519 - name: github.com/tendermint/go-crypto - version: 311e8c1bf00fa5868daad4f8ea56dcad539182c0 + version: dd20358a264c772b4a83e477b0cfce4c88a7001d - name: github.com/tendermint/go-wire - version: 5f88da3dbc1a72844e6dfaf274ce87f851d488eb + version: 2baffcb6b690057568bc90ef1d457efb150b979a subpackages: - data - data/base58 -- name: github.com/tendermint/merkleeyes - version: 2a93256d2c6fbcc3b55673c0d2b96a7e32c6238b +- name: github.com/tendermint/iavl + version: 594cc0c062a7174475f0ab654384038d77067917 subpackages: - iavl - name: github.com/tendermint/tmlibs - version: 096dcb90e60aa00b748b3fe49a4b95e48ebf1e13 + version: d9525c0fb671204450b160807480e1263053fb20 subpackages: - autofile - cli @@ -137,7 +136,7 @@ imports: - merkle - test - name: golang.org/x/crypto - version: c7af5bf2638a1164f2eb5467c39c6cffbd13a02e + version: 2509b142fb2b797aa7587dad548f113b2c0f20ce subpackages: - curve25519 - nacl/box @@ -148,7 +147,7 @@ imports: - ripemd160 - salsa20/salsa - name: golang.org/x/net - version: feeb485667d1fdabe727840fe00adc22431bc86e + version: c73622c77280266305273cb545f54516ced95b93 subpackages: - context - http2 @@ -158,43 +157,46 @@ imports: - lex/httplex - trace - name: golang.org/x/sys - version: e62c3de784db939836898e5c19ffd41bece347da + version: b98136db334ff9cb24f28a68e3be3cb6608f7630 subpackages: - unix - name: golang.org/x/text - version: 470f45bf29f4147d6fbd7dfd0a02a848e49f5bf4 + version: 6eab0e8f74e86c598ec3b6fad4888e0c11482d48 subpackages: - secure/bidirule - 
transform - unicode/bidi - unicode/norm - name: google.golang.org/genproto - version: 411e09b969b1170a9f0c467558eb4c4c110d9c77 + version: f676e0f3ac6395ff1a529ae59a6670878a8371a6 subpackages: - googleapis/rpc/status - name: google.golang.org/grpc - version: 844f573616520565fdc6fb4db242321b5456fd6d + version: f7bf885db0b7479a537ec317c6e48ce53145f3db subpackages: + - balancer - codes + - connectivity - credentials - - grpclb/grpc_lb_v1 + - grpclb/grpc_lb_v1/messages - grpclog - internal - keepalive - metadata - naming - peer + - resolver - stats - status - tap - transport - name: gopkg.in/go-playground/validator.v9 - version: 6d8c18553ea1ac493d049edd6f102f52e618f085 + version: 1304298bf10d085adec514b076772a79c9cadb6b - name: gopkg.in/yaml.v2 - version: cd8b52f8269e0feb286dfeef29f8fe4d5b397e0b + version: eb3733d160e74a9c7e442f435eb3bea458e1d19f testImports: - name: github.com/davecgh/go-spew - version: 6d212800a42e8ab5c146b8ace3490ee17e5225f9 + version: 04cdfd42973bb9c8589fd6a731800cf222fde1a9 subpackages: - spew - name: github.com/pmezard/go-difflib @@ -202,7 +204,7 @@ testImports: subpackages: - difflib - name: github.com/stretchr/testify - version: 69483b4bd14f5845b5a1e55bca19e954e827f1d0 + version: 2aa2c176b9dab406a6970f6a55f513e8a8c8b18f subpackages: - assert - require diff --git a/glide.yaml b/glide.yaml index 27b6cc39..4c1f7e21 100644 --- a/glide.yaml +++ b/glide.yaml @@ -2,35 +2,39 @@ package: github.com/tendermint/tendermint import: - package: github.com/ebuchman/fail-test - package: github.com/gogo/protobuf + version: v0.5 subpackages: - proto - package: github.com/golang/protobuf subpackages: - proto - package: github.com/gorilla/websocket + version: v1.2.0 - package: github.com/pkg/errors version: ~0.8.0 - package: github.com/rcrowley/go-metrics - package: github.com/spf13/cobra + version: v0.0.1 - package: github.com/spf13/viper + version: v1.0.0 - package: github.com/tendermint/abci - version: ~0.6.0 + version: ~0.7.0 subpackages: - client - 
example/dummy - types - package: github.com/tendermint/go-crypto - version: ~0.3.0 + version: ~0.4.1 - package: github.com/tendermint/go-wire - version: ~0.6.2 + version: ~0.7.1 subpackages: - data -- package: github.com/tendermint/merkleeyes - version: master +- package: github.com/tendermint/iavl + version: ~0.2.0 subpackages: - iavl - package: github.com/tendermint/tmlibs - version: ~0.3.2 + version: ~0.4.0 subpackages: - autofile - cli @@ -50,6 +54,7 @@ import: subpackages: - context - package: google.golang.org/grpc + version: v1.7.0 testImport: - package: github.com/go-kit/kit subpackages: diff --git a/mempool/mempool.go b/mempool/mempool.go index 07b267c4..caaa034e 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -50,9 +50,10 @@ TODO: Better handle abci client errors. (make it automatically handle connection const cacheSize = 100000 -// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus round. -// Transaction validity is checked using the CheckTx abci message before the transaction is added to the pool. -// The Mempool uses a concurrent list structure for storing transactions that can be efficiently accessed by multiple concurrent readers. +// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus +// round. Transaction validity is checked using the CheckTx abci message before the transaction is +// added to the pool. The Mempool uses a concurrent list structure for storing transactions that +// can be efficiently accessed by multiple concurrent readers. type Mempool struct { config *cfg.MempoolConfig @@ -78,6 +79,7 @@ type Mempool struct { } // NewMempool returns a new Mempool with the given configuration and connection to an application. +// TODO: Extract logger into arguments. 
func NewMempool(config *cfg.MempoolConfig, proxyAppConn proxy.AppConnMempool, height int) *Mempool { mempool := &Mempool{ config: config, @@ -269,7 +271,10 @@ func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { atomic.StoreInt32(&mem.rechecking, 0) mem.logger.Info("Done rechecking txs") - mem.notifyTxsAvailable() + // in case the recheck removed all txs + if mem.Size() > 0 { + mem.notifyTxsAvailable() + } } default: // ignore other messages @@ -287,9 +292,7 @@ func (mem *Mempool) notifyTxsAvailable() { if mem.Size() == 0 { panic("notified txs available but mempool is empty!") } - if mem.txsAvailable != nil && - !mem.notifiedTxsAvailable { - + if mem.txsAvailable != nil && !mem.notifiedTxsAvailable { mem.notifiedTxsAvailable = true mem.txsAvailable <- mem.height + 1 } diff --git a/node/node.go b/node/node.go index 824a0926..7bb71449 100644 --- a/node/node.go +++ b/node/node.go @@ -2,6 +2,7 @@ package node import ( "bytes" + "encoding/json" "errors" "fmt" "net" @@ -132,19 +133,27 @@ func NewNode(config *cfg.Config, if err != nil { return nil, err } - state := sm.LoadState(stateDB) - if state == nil { - genDoc, err := genesisDocProvider() + + // Get genesis doc + genDoc, err := loadGenesisDoc(stateDB) + if err != nil { + genDoc, err = genesisDocProvider() if err != nil { return nil, err } + // save genesis doc to prevent a certain class of user errors (e.g. when it + // was changed, accidentally or not). Also good for audit trail. 
+ saveGenesisDoc(stateDB, genDoc) + } + + state := sm.LoadState(stateDB) + if state == nil { state, err = sm.MakeGenesisState(stateDB, genDoc) if err != nil { return nil, err } state.Save() } - state.SetLogger(stateLogger) // Create the proxyApp, which manages connections (consensus, mempool, query) @@ -286,7 +295,7 @@ func NewNode(config *cfg.Config, node := &Node{ config: config, - genesisDoc: state.GenesisDoc, + genesisDoc: genDoc, privValidator: privValidator, privKey: privKey, @@ -308,6 +317,16 @@ func NewNode(config *cfg.Config, // OnStart starts the Node. It implements cmn.Service. func (n *Node) OnStart() error { + // Run the RPC server first + // so we can eg. receive txs for the first block + if n.config.RPC.ListenAddress != "" { + listeners, err := n.startRPC() + if err != nil { + return err + } + n.rpcListeners = listeners + } + // Create & add listener protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress) l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p")) @@ -330,15 +349,6 @@ func (n *Node) OnStart() error { } } - // Run the RPC server - if n.config.RPC.ListenAddress != "" { - listeners, err := n.startRPC() - if err != nil { - return err - } - n.rpcListeners = listeners - } - return nil } @@ -485,11 +495,10 @@ func (n *Node) makeNodeInfo() *p2p.NodeInfo { if _, ok := n.txIndexer.(*null.TxIndex); ok { txIndexerStatus = "off" } - nodeInfo := &p2p.NodeInfo{ PubKey: n.privKey.PubKey().Unwrap().(crypto.PubKeyEd25519), Moniker: n.config.Moniker, - Network: n.consensusState.GetState().ChainID, + Network: n.genesisDoc.ChainID, Version: version.Version, Other: []string{ cmn.Fmt("wire_version=%v", wire.Version), @@ -536,3 +545,31 @@ func (n *Node) DialSeeds(seeds []string) error { } //------------------------------------------------------------------------------ + +var ( + genesisDocKey = []byte("genesisDoc") +) + +// panics if failed to unmarshal bytes +func loadGenesisDoc(db dbm.DB) 
(*types.GenesisDoc, error) { + bytes := db.Get(genesisDocKey) + if len(bytes) == 0 { + return nil, errors.New("Genesis doc not found") + } else { + var genDoc *types.GenesisDoc + err := json.Unmarshal(bytes, &genDoc) + if err != nil { + cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes)) + } + return genDoc, nil + } +} + +// panics if failed to marshal the given genesis document +func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { + bytes, err := json.Marshal(genDoc) + if err != nil { + cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) + } + db.SetSync(genesisDocKey, bytes) +} diff --git a/p2p/peer.go b/p2p/peer.go index 1bdb8210..3652c465 100644 --- a/p2p/peer.go +++ b/p2p/peer.go @@ -268,11 +268,11 @@ func (p *peer) CanSend(chID byte) bool { } // WriteTo writes the peer's public key to w. -func (p *peer) WriteTo(w io.Writer) (n int64, err error) { - var n_ int - wire.WriteString(p.key, w, &n_, &err) - n += int64(n_) - return +func (p *peer) WriteTo(w io.Writer) (int64, error) { + var n int + var err error + wire.WriteString(p.key, w, &n, &err) + return int64(n), err } // String representation. diff --git a/p2p/pex_reactor.go b/p2p/pex_reactor.go index 69ab55cc..54c2d06b 100644 --- a/p2p/pex_reactor.go +++ b/p2p/pex_reactor.go @@ -139,6 +139,7 @@ func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { switch msg := msg.(type) { case *pexRequestMessage: // src requested some peers. + // NOTE: we might send an empty selection r.SendAddrs(src, r.book.GetSelection()) case *pexAddrsMessage: // We received some peer addresses from src. 
diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index b00c97d2..e63fcd4b 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -39,17 +39,12 @@ func NewHTTP(remote, wsEndpoint string) *HTTP { } } -func (c *HTTP) _assertIsClient() Client { - return c -} - -func (c *HTTP) _assertIsNetworkClient() NetworkClient { - return c -} - -func (c *HTTP) _assertIsEventSwitch() types.EventSwitch { - return c -} +var ( + _ Client = (*HTTP)(nil) + _ NetworkClient = (*HTTP)(nil) + _ types.EventSwitch = (*HTTP)(nil) + _ types.EventSwitch = (*WSEvents)(nil) +) func (c *HTTP) Status() (*ctypes.ResultStatus, error) { result := new(ctypes.ResultStatus) @@ -69,10 +64,14 @@ func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return result, nil } -func (c *HTTP) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { +func (c *HTTP) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +} + +func (c *HTTP) ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.rpc.Call("abci_query", - map[string]interface{}{"path": path, "data": data, "prove": prove}, + map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, result) if err != nil { return nil, errors.Wrap(err, "ABCIQuery") @@ -216,10 +215,6 @@ func newWSEvents(remote, endpoint string) *WSEvents { } } -func (w *WSEvents) _assertIsEventSwitch() types.EventSwitch { - return w -} - // Start is the only way I could think the extend OnStart from // events.eventSwitch. If only it wasn't private... 
// BaseService.Start -> eventSwitch.OnStart -> WSEvents.Start @@ -323,16 +318,18 @@ func (w *WSEvents) redoSubscriptions() { func (w *WSEvents) eventListener() { for { select { - case res := <-w.ws.ResultsCh: + case resp := <-w.ws.ResponsesCh: // res is json.RawMessage - err := w.parseEvent(res) + if resp.Error != nil { + // FIXME: better logging/handling of errors?? + fmt.Printf("ws err: %+v\n", resp.Error.Error()) + continue + } + err := w.parseEvent(*resp.Result) if err != nil { // FIXME: better logging/handling of errors?? fmt.Printf("ws result: %+v\n", err) } - case err := <-w.ws.ErrorsCh: - // FIXME: better logging/handling of errors?? - fmt.Printf("ws err: %+v\n", err) case <-w.quit: // send a message so we can wait for the routine to exit // before cleaning up the w.ws stuff diff --git a/rpc/client/interface.go b/rpc/client/interface.go index ed7ccaba..10689a56 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -31,7 +31,8 @@ import ( type ABCIClient interface { // reading from abci app ABCIInfo() (*ctypes.ResultABCIInfo, error) - ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) + ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) + ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) // writing to abci app BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index 134f935c..c6adfc5f 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -41,13 +41,10 @@ func NewLocal(node *nm.Node) Local { } } -func (c Local) _assertIsClient() Client { - return c -} - -func (c Local) _assertIsNetworkClient() NetworkClient { - return c -} +var ( + _ Client = Local{} + _ NetworkClient = Local{} +) func (c Local) Status() (*ctypes.ResultStatus, error) { return core.Status() @@ -57,8 +54,12 @@ func (c Local) ABCIInfo() (*ctypes.ResultABCIInfo, 
error) { return core.ABCIInfo() } -func (c Local) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, prove) +func (c Local) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) +} + +func (c Local) ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + return core.ABCIQuery(path, data, opts.Height, opts.Trusted) } func (c Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index db3fa4f1..2ed012e4 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -16,16 +16,22 @@ type ABCIApp struct { App abci.Application } -func (a ABCIApp) _assertABCIClient() client.ABCIClient { - return a -} +var ( + _ client.ABCIClient = ABCIApp{} + _ client.ABCIClient = ABCIMock{} + _ client.ABCIClient = (*ABCIRecorder)(nil) +) func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{a.App.Info(abci.RequestInfo{version.Version})}, nil } -func (a ABCIApp) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{data, path, 0, prove}) +func (a ABCIApp) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) { + return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (a ABCIApp) ABCIQueryWithOptions(path string, data data.Bytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + q := a.App.Query(abci.RequestQuery{data, path, opts.Height, opts.Trusted}) return &ctypes.ResultABCIQuery{q.Result()}, nil } @@ -67,10 +73,6 @@ type ABCIMock struct { Broadcast Call } -func (m ABCIMock) _assertABCIClient() client.ABCIClient { - return m -} - func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) { res, err := 
m.Info.GetResponse(nil) if err != nil { @@ -79,8 +81,12 @@ func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil } -func (m ABCIMock) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { - res, err := m.Query.GetResponse(QueryArgs{path, data, prove}) +func (m ABCIMock) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) { + return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (m ABCIMock) ABCIQueryWithOptions(path string, data data.Bytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted}) if err != nil { return nil, err } @@ -126,14 +132,11 @@ func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { } } -func (r *ABCIRecorder) _assertABCIClient() client.ABCIClient { - return r -} - type QueryArgs struct { - Path string - Data data.Bytes - Prove bool + Path string + Data data.Bytes + Height uint64 + Trusted bool } func (r *ABCIRecorder) addCall(call Call) { @@ -150,11 +153,15 @@ func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return res, err } -func (r *ABCIRecorder) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { - res, err := r.Client.ABCIQuery(path, data, prove) +func (r *ABCIRecorder) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) { + return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data data.Bytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + res, err := r.Client.ABCIQueryWithOptions(path, data, opts) r.addCall(Call{ Name: "abci_query", - Args: QueryArgs{path, data, prove}, + Args: QueryArgs{path, data, opts.Height, opts.Trusted}, Response: res, Error: err, }) diff --git a/rpc/client/mock/abci_test.go 
b/rpc/client/mock/abci_test.go index 245db6c6..a7afa089 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -12,6 +12,7 @@ import ( "github.com/tendermint/abci/example/dummy" abci "github.com/tendermint/abci/types" data "github.com/tendermint/go-wire/data" + "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" @@ -50,7 +51,7 @@ func TestABCIMock(t *testing.T) { assert.Equal("foobar", err.Error()) // query always returns the response - query, err := m.ABCIQuery("/", nil, false) + query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) require.Nil(err) require.NotNil(query) assert.EqualValues(key, query.Key) @@ -92,7 +93,7 @@ func TestABCIRecorder(t *testing.T) { require.Equal(0, len(r.Calls)) r.ABCIInfo() - r.ABCIQuery("path", data.Bytes("data"), true) + r.ABCIQueryWithOptions("path", data.Bytes("data"), client.ABCIQueryOptions{Trusted: false}) require.Equal(2, len(r.Calls)) info := r.Calls[0] @@ -115,7 +116,7 @@ func TestABCIRecorder(t *testing.T) { require.True(ok) assert.Equal("path", qa.Path) assert.EqualValues("data", qa.Data) - assert.True(qa.Prove) + assert.False(qa.Trusted) // now add some broadcasts txs := []types.Tx{{1}, {2}, {3}} @@ -164,7 +165,7 @@ func TestABCIApp(t *testing.T) { assert.True(res.DeliverTx.Code.IsOK()) // check the key - qres, err := m.ABCIQuery("/key", data.Bytes(key), false) + qres, err := m.ABCIQueryWithOptions("/key", data.Bytes(key), client.ABCIQueryOptions{Trusted: true}) require.Nil(err) assert.EqualValues(value, qres.Value) } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index f32694ed..b5973474 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -37,9 +37,7 @@ type Client struct { types.EventSwitch } -func (c Client) _assertIsClient() client.Client { - return c -} +var _ client.Client = 
Client{} // Call is used by recorders to save a call and response. // It can also be used to configure mock responses. @@ -84,8 +82,12 @@ func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) { return core.ABCIInfo() } -func (c Client) ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, prove) +func (c Client) ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error) { + return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) +} + +func (c Client) ABCIQueryWithOptions(path string, data data.Bytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + return core.ABCIQuery(path, data, opts.Height, opts.Trusted) } func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go index af0f5335..58b29d57 100644 --- a/rpc/client/mock/status.go +++ b/rpc/client/mock/status.go @@ -10,9 +10,10 @@ type StatusMock struct { Call } -func (m *StatusMock) _assertStatusClient() client.StatusClient { - return m -} +var ( + _ client.StatusClient = (*StatusMock)(nil) + _ client.StatusClient = (*StatusRecorder)(nil) +) func (m *StatusMock) Status() (*ctypes.ResultStatus, error) { res, err := m.GetResponse(nil) @@ -36,10 +37,6 @@ func NewStatusRecorder(client client.StatusClient) *StatusRecorder { } } -func (r *StatusRecorder) _assertStatusClient() client.StatusClient { - return r -} - func (r *StatusRecorder) addCall(call Call) { r.Calls = append(r.Calls, call) } diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 9bcd3de4..d329a120 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -7,7 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/tendermint/merkleeyes/iavl" //TODO use tendermint/iavl ? 
+ "github.com/tendermint/iavl" "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" @@ -98,6 +98,23 @@ func TestGenesisAndValidators(t *testing.T) { } } +func TestABCIQuery(t *testing.T) { + for i, c := range GetClients() { + // write something + k, v, tx := MakeTxKV() + bres, err := c.BroadcastTxCommit(tx) + require.Nil(t, err, "%d: %+v", i, err) + apph := bres.Height + 1 // this is where the tx will be applied to the state + + // wait before querying + client.WaitForHeight(c, apph, nil) + qres, err := c.ABCIQuery("/key", k) + if assert.Nil(t, err) && assert.True(t, qres.Code.IsOK()) { + assert.EqualValues(t, v, qres.Value) + } + } +} + // Make some app checks func TestAppCalls(t *testing.T) { assert, require := assert.New(t), require.New(t) @@ -124,7 +141,7 @@ func TestAppCalls(t *testing.T) { // wait before querying client.WaitForHeight(c, apph, nil) - qres, err := c.ABCIQuery("/key", k, false) + qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) if assert.Nil(err) && assert.True(qres.Code.IsOK()) { // assert.Equal(k, data.GetKey()) // only returned for proofs assert.EqualValues(v, qres.Value) @@ -172,15 +189,15 @@ func TestAppCalls(t *testing.T) { assert.Equal(block.Block.LastCommit, commit2.Commit) // and we got a proof that works! 
- pres, err := c.ABCIQuery("/key", k, true) + pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) if assert.Nil(err) && assert.True(pres.Code.IsOK()) { - proof, err := iavl.ReadProof(pres.Proof) + proof, err := iavl.ReadKeyExistsProof(pres.Proof) if assert.Nil(err) { key := pres.Key value := pres.Value assert.EqualValues(appHash, proof.RootHash) valid := proof.Verify(key, value, appHash) - assert.True(valid) + assert.Nil(valid) } } } diff --git a/rpc/client/types.go b/rpc/client/types.go new file mode 100644 index 00000000..dc573edd --- /dev/null +++ b/rpc/client/types.go @@ -0,0 +1,12 @@ +package client + +// ABCIQueryOptions can be used to provide options for ABCIQuery call other +// than the DefaultABCIQueryOptions. +type ABCIQueryOptions struct { + Height uint64 + Trusted bool +} + +// DefaultABCIQueryOptions are latest height (0) and trusted equal to false +// (which will result in a proof being returned). +var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 06275a9e..564c0bc6 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -41,16 +41,18 @@ import ( // // ### Query Parameters // -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+---------------------------------------| -// | path | string | false | false | Path to the data ("/a/b/c") | -// | data | []byte | false | true | Data | -// | prove | bool | false | false | Include a proof of the data inclusion | -func ABCIQuery(path string, data data.Bytes, prove bool) (*ctypes.ResultABCIQuery, error) { +// | Parameter | Type | Default | Required | Description | +// |-----------+--------+---------+----------+------------------------------------------------| +// | path | string | false | false | Path to the data ("/a/b/c") | +// | data | []byte | false | true | Data | +// | height | uint64 | 0 | false | Height (0 means latest) | +// | trusted | 
bool | false | false | Does not include a proof of the data inclusion | +func ABCIQuery(path string, data data.Bytes, height uint64, trusted bool) (*ctypes.ResultABCIQuery, error) { resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ - Path: path, - Data: data, - Prove: prove, + Path: path, + Data: data, + Height: height, + Prove: !trusted, }) if err != nil { return nil, err diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index ad00060f..6b5e2166 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -280,7 +280,7 @@ func Commit(heightPtr *int) (*ctypes.ResultCommit, error) { height := blockStore.Height() header := blockStore.LoadBlockMeta(height).Header commit := blockStore.LoadSeenCommit(height) - return &ctypes.ResultCommit{header, commit, false}, nil + return ctypes.NewResultCommit(header, commit, false), nil } height := *heightPtr @@ -298,10 +298,10 @@ func Commit(heightPtr *int) (*ctypes.ResultCommit, error) { // use a non-canonical commit if height == storeHeight { commit := blockStore.LoadSeenCommit(height) - return &ctypes.ResultCommit{header, commit, false}, nil + return ctypes.NewResultCommit(header, commit, false), nil } // Return the canonical commit (comes from the block at height+1) commit := blockStore.LoadBlockCommit(height) - return &ctypes.ResultCommit{header, commit, true}, nil + return ctypes.NewResultCommit(header, commit, true), nil } diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 485f7a00..b1dbd378 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -29,7 +29,7 @@ var Routes = map[string]*rpc.RPCFunc{ "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,prove"), + "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), } diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index a5ed6f5a..874e351d 100644 --- a/rpc/core/types/responses.go +++ 
b/rpc/core/types/responses.go @@ -26,9 +26,24 @@ type ResultBlock struct { } type ResultCommit struct { - Header *types.Header `json:"header"` - Commit *types.Commit `json:"commit"` - CanonicalCommit bool `json:"canonical"` + // SignedHeader is header and commit, embedded so we only have + // one level in the json output + types.SignedHeader + CanonicalCommit bool `json:"canonical"` +} + +// NewResultCommit is a helper to initialize the ResultCommit with +// the embedded struct +func NewResultCommit(header *types.Header, commit *types.Commit, + canonical bool) *ResultCommit { + + return &ResultCommit{ + SignedHeader: types.SignedHeader{ + Header: header, + Commit: commit, + }, + CanonicalCommit: canonical, + } } type ResultStatus struct { diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go index 1fbaedfa..1f06112d 100644 --- a/rpc/lib/client/http_client.go +++ b/rpc/lib/client/http_client.go @@ -12,6 +12,7 @@ import ( "strings" "github.com/pkg/errors" + types "github.com/tendermint/tendermint/rpc/lib/types" ) @@ -41,7 +42,8 @@ func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, protocol = "tcp" } - trimmedAddress := strings.Replace(address, "/", ".", -1) // replace / with . for http requests (dummy domain) + // replace / with . for http requests (dummy domain) + trimmedAddress := strings.Replace(address, "/", ".", -1) return trimmedAddress, func(proto, addr string) (net.Conn, error) { return net.Dial(protocol, address) } @@ -60,12 +62,13 @@ func makeHTTPClient(remoteAddr string) (string, *http.Client) { //------------------------------------------------------------------------------------ -// JSON rpc takes params as a slice +// JSONRPCClient takes params as a slice type JSONRPCClient struct { address string client *http.Client } +// NewJSONRPCClient returns a JSONRPCClient pointed at the given address. 
func NewJSONRPCClient(remote string) *JSONRPCClient { address, client := makeHTTPClient(remote) return &JSONRPCClient{ @@ -147,7 +150,7 @@ func unmarshalResponseBytes(responseBytes []byte, result interface{}) (interface return nil, errors.Errorf("Error unmarshalling rpc response: %v", err) } if response.Error != nil { - return nil, errors.Errorf("Response error: %v", response.Error.Message) + return nil, errors.Errorf("Response error: %v", response.Error) } // unmarshal the RawMessage into the result err = json.Unmarshal(*response.Result, result) diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go new file mode 100644 index 00000000..d3d99337 --- /dev/null +++ b/rpc/lib/client/integration_test.go @@ -0,0 +1,66 @@ +// +build release + +// The code in here is comprehensive as an integration +// test and is long, hence is only run before releases. + +package rpcclient + +import ( + "bytes" + "errors" + "net" + "regexp" + "testing" + "time" + + "github.com/stretchr/testify/require" + "github.com/tendermint/tmlibs/log" +) + +func TestWSClientReconnectWithJitter(t *testing.T) { + n := 8 + maxReconnectAttempts := 3 + // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... 
+ maxSleepTime := time.Second * time.Duration(((1< height-to-stop +*/ +package main + +import ( + "fmt" + "io" + "os" + "strconv" + + cs "github.com/tendermint/tendermint/consensus" +) + +func main() { + if len(os.Args) < 4 { + fmt.Println("3 arguments required: ") + os.Exit(1) + } + + var heightToStop uint64 + var err error + if heightToStop, err = strconv.ParseUint(os.Args[2], 10, 64); err != nil { + panic(fmt.Errorf("failed to parse height: %v", err)) + } + + in, err := os.Open(os.Args[1]) + if err != nil { + panic(fmt.Errorf("failed to open input WAL file: %v", err)) + } + defer in.Close() + + out, err := os.Create(os.Args[3]) + if err != nil { + panic(fmt.Errorf("failed to open output WAL file: %v", err)) + } + defer out.Close() + + enc := cs.NewWALEncoder(out) + dec := cs.NewWALDecoder(in) + + for { + msg, err := dec.Decode() + if err == io.EOF { + break + } else if err != nil { + panic(fmt.Errorf("failed to decode msg: %v", err)) + } + + if m, ok := msg.Msg.(cs.EndHeightMessage); ok { + if m.Height == heightToStop { + break + } + } + + err = enc.Encode(msg) + if err != nil { + panic(fmt.Errorf("failed to encode msg: %v", err)) + } + } +} diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go new file mode 100644 index 00000000..2cf40c57 --- /dev/null +++ b/scripts/wal2json/main.go @@ -0,0 +1,50 @@ +/* + wal2json converts binary WAL file to JSON. 
+ + Usage: + wal2json +*/ +package main + +import ( + "encoding/json" + "fmt" + "io" + "os" + + cs "github.com/tendermint/tendermint/consensus" +) + +func main() { + if len(os.Args) < 2 { + fmt.Println("missing one argument: ") + os.Exit(1) + } + + f, err := os.Open(os.Args[1]) + if err != nil { + panic(fmt.Errorf("failed to open WAL file: %v", err)) + } + defer f.Close() + + dec := cs.NewWALDecoder(f) + for { + msg, err := dec.Decode() + if err == io.EOF { + break + } else if err != nil { + panic(fmt.Errorf("failed to decode msg: %v", err)) + } + + json, err := json.Marshal(msg) + if err != nil { + panic(fmt.Errorf("failed to marshal msg: %v", err)) + } + + os.Stdout.Write(json) + os.Stdout.Write([]byte("\n")) + if end, ok := msg.Msg.(cs.EndHeightMessage); ok { + os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height))) + } + } +} diff --git a/state/state.go b/state/state.go index 53ec8cc0..4241f9de 100644 --- a/state/state.go +++ b/state/state.go @@ -8,11 +8,13 @@ import ( "time" abci "github.com/tendermint/abci/types" + cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" wire "github.com/tendermint/go-wire" + "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/null" "github.com/tendermint/tendermint/types" @@ -38,9 +40,9 @@ type State struct { mtx sync.Mutex db dbm.DB - // should not change - GenesisDoc *types.GenesisDoc - ChainID string + ChainID string + // Consensus parameters used for validating blocks + Params types.ConsensusParams // These fields are updated by SetBlockAndValidators. // LastBlockHeight=0 at genesis (ie. 
block(H=0) does not exist) @@ -50,18 +52,18 @@ type State struct { LastBlockTime time.Time Validators *types.ValidatorSet LastValidators *types.ValidatorSet - - // AppHash is updated after Commit - AppHash []byte - - TxIndexer txindex.TxIndexer `json:"-"` // Transaction indexer - // When a block returns a validator set change via EndBlock, // the change only applies to the next block. // So, if s.LastBlockHeight causes a valset change, // we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1 LastHeightValidatorsChanged int + // AppHash is updated after Commit + AppHash []byte + + // TxIndexer indexes transactions + TxIndexer txindex.TxIndexer `json:"-"` + logger log.Logger } @@ -69,15 +71,16 @@ type State struct { // or creates a new one from the given genesisFile and persists the result // to the database. func GetState(stateDB dbm.DB, genesisFile string) (*State, error) { - var err error state := LoadState(stateDB) if state == nil { + var err error state, err = MakeGenesisStateFromFile(stateDB, genesisFile) if err != nil { return nil, err } state.Save() } + return state, nil } @@ -87,20 +90,21 @@ func LoadState(db dbm.DB) *State { } func loadState(db dbm.DB, key []byte) *State { - s := &State{db: db, TxIndexer: &null.TxIndex{}} buf := db.Get(key) if len(buf) == 0 { return nil - } else { - r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(&s, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt("LoadState: Data has been corrupted or its spec has changed: %v\n", *err)) - } - // TODO: ensure that buf is completely read. } + s := &State{db: db, TxIndexer: &null.TxIndex{}} + r, n, err := bytes.NewReader(buf), new(int), new(error) + wire.ReadBinaryPtr(&s, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: + %v\n`, *err)) + } + // TODO: ensure that buf is completely read. 
+ return s } @@ -110,20 +114,22 @@ func (s *State) SetLogger(l log.Logger) { } // Copy makes a copy of the State for mutating. +// NOTE: Does not create a copy of TxIndexer. It creates a new pointer that points to the same +// underlying TxIndexer. func (s *State) Copy() *State { return &State{ db: s.db, - GenesisDoc: s.GenesisDoc, - ChainID: s.ChainID, LastBlockHeight: s.LastBlockHeight, LastBlockID: s.LastBlockID, LastBlockTime: s.LastBlockTime, Validators: s.Validators.Copy(), LastValidators: s.LastValidators.Copy(), AppHash: s.AppHash, - TxIndexer: s.TxIndexer, // pointer here, not value + TxIndexer: s.TxIndexer, LastHeightValidatorsChanged: s.LastHeightValidatorsChanged, - logger: s.logger, + logger: s.logger, + ChainID: s.ChainID, + Params: s.Params, } } @@ -131,6 +137,7 @@ func (s *State) Copy() *State { func (s *State) Save() { s.mtx.Lock() defer s.mtx.Unlock() + s.saveValidatorsInfo() s.db.SetSync(stateKey, s.Bytes()) } @@ -142,38 +149,43 @@ func (s *State) SaveABCIResponses(abciResponses *ABCIResponses) { } // LoadABCIResponses loads the ABCIResponses from the database. +// This is useful for recovering from crashes where we called app.Commit and before we called +// s.Save() func (s *State) LoadABCIResponses() *ABCIResponses { - abciResponses := new(ABCIResponses) - buf := s.db.Get(abciResponsesKey) - if len(buf) != 0 { - r, n, err := bytes.NewReader(buf), new(int), new(error) - wire.ReadBinaryPtr(abciResponses, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt("LoadABCIResponses: Data has been corrupted or its spec has changed: %v\n", *err)) - } - // TODO: ensure that buf is completely read. 
+ if len(buf) == 0 { + return nil } + + abciResponses := new(ABCIResponses) + r, n, err := bytes.NewReader(buf), new(int), new(error) + wire.ReadBinaryPtr(abciResponses, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has + changed: %v\n`, *err)) + } + // TODO: ensure that buf is completely read. + return abciResponses } // LoadValidators loads the ValidatorSet for a given height. func (s *State) LoadValidators(height int) (*types.ValidatorSet, error) { - v := s.loadValidators(height) - if v == nil { + valInfo := s.loadValidators(height) + if valInfo == nil { return nil, ErrNoValSetForHeight{height} } - if v.ValidatorSet == nil { - v = s.loadValidators(v.LastHeightChanged) - if v == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at - height %d as last changed from height %d`, v.LastHeightChanged, height)) + if valInfo.ValidatorSet == nil { + valInfo = s.loadValidators(valInfo.LastHeightChanged) + if valInfo == nil { + cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as + last changed from height %d`, valInfo.LastHeightChanged, height)) } } - return v.ValidatorSet, nil + return valInfo.ValidatorSet, nil } func (s *State) loadValidators(height int) *ValidatorsInfo { @@ -187,9 +199,11 @@ func (s *State) loadValidators(height int) *ValidatorsInfo { wire.ReadBinaryPtr(v, r, 0, n, err) if *err != nil { // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt("LoadValidators: Data has been corrupted or its spec has changed: %v\n", *err)) + cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: + %v\n`, *err)) } // TODO: ensure that buf is completely read. 
+ return v } @@ -200,13 +214,13 @@ func (s *State) loadValidators(height int) *ValidatorsInfo { func (s *State) saveValidatorsInfo() { changeHeight := s.LastHeightValidatorsChanged nextHeight := s.LastBlockHeight + 1 - vi := &ValidatorsInfo{ + valInfo := &ValidatorsInfo{ LastHeightChanged: changeHeight, } if changeHeight == nextHeight { - vi.ValidatorSet = s.Validators + valInfo.ValidatorSet = s.Validators } - s.db.SetSync(calcValidatorsKey(nextHeight), vi.Bytes()) + s.db.SetSync(calcValidatorsKey(nextHeight), valInfo.Bytes()) } // Equals returns true if the States are identical. @@ -219,8 +233,10 @@ func (s *State) Bytes() []byte { return wire.BinaryBytes(s) } -// SetBlockAndValidators mutates State variables to update block and validators after running EndBlock. -func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, abciResponses *ABCIResponses) { +// SetBlockAndValidators mutates State variables +// to update block and validators after running EndBlock. +func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader types.PartSetHeader, + abciResponses *ABCIResponses) { // copy the valset so we can apply changes from EndBlock // and update s.LastValidators and s.Validators @@ -248,8 +264,7 @@ func (s *State) SetBlockAndValidators(header *types.Header, blockPartsHeader typ } -func (s *State) setBlockAndValidators( - height int, blockID types.BlockID, blockTime time.Time, +func (s *State) setBlockAndValidators(height int, blockID types.BlockID, blockTime time.Time, prevValSet, nextValSet *types.ValidatorSet) { s.LastBlockHeight = height @@ -260,18 +275,10 @@ func (s *State) setBlockAndValidators( } // GetValidators returns the last and current validator sets. 
-func (s *State) GetValidators() (*types.ValidatorSet, *types.ValidatorSet) { +func (s *State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { return s.LastValidators, s.Validators } -// Params returns the consensus parameters used for -// validating blocks -func (s *State) Params() types.ConsensusParams { - // TODO: this should move into the State proper - // when we allow the app to change it - return *s.GenesisDoc.ConsensusParams -} - //------------------------------------------------------------------------ // ABCIResponses retains the responses of the various ABCI calls during block processing. @@ -301,15 +308,15 @@ func (a *ABCIResponses) Bytes() []byte { //----------------------------------------------------------------------------- -// ValidatorsInfo represents the latest validator set, or the last time it changed +// ValidatorsInfo represents the latest validator set, or the last height it changed type ValidatorsInfo struct { ValidatorSet *types.ValidatorSet LastHeightChanged int } // Bytes serializes the ValidatorsInfo using go-wire -func (vi *ValidatorsInfo) Bytes() []byte { - return wire.BinaryBytes(*vi) +func (valInfo *ValidatorsInfo) Bytes() []byte { + return wire.BinaryBytes(*valInfo) } //------------------------------------------------------------------------ @@ -341,8 +348,6 @@ func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { } // MakeGenesisState creates state from types.GenesisDoc. -// -// Used in tests. 
func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) (*State, error) { err := genDoc.ValidateAndComplete() if err != nil { @@ -363,17 +368,20 @@ func MakeGenesisState(db dbm.DB, genDoc *types.GenesisDoc) (*State, error) { } } + // we do not need indexer during replay and in tests return &State{ - db: db, - GenesisDoc: genDoc, - ChainID: genDoc.ChainID, + db: db, + + ChainID: genDoc.ChainID, + Params: *genDoc.ConsensusParams, + LastBlockHeight: 0, LastBlockID: types.BlockID{}, LastBlockTime: genDoc.GenesisTime, Validators: types.NewValidatorSet(validators), LastValidators: types.NewValidatorSet(nil), AppHash: genDoc.AppHash, - TxIndexer: &null.TxIndex{}, // we do not need indexer during replay and in tests + TxIndexer: &null.TxIndex{}, LastHeightValidatorsChanged: 1, }, nil } diff --git a/state/state_test.go b/state/state_test.go index 2ab2e934..7bb43afa 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -7,14 +7,16 @@ import ( "github.com/stretchr/testify/assert" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/types" - abci "github.com/tendermint/abci/types" + crypto "github.com/tendermint/go-crypto" + cmn "github.com/tendermint/tmlibs/common" dbm "github.com/tendermint/tmlibs/db" "github.com/tendermint/tmlibs/log" + + cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/types" ) // setupTestCase does setup common to all test cases @@ -30,22 +32,29 @@ func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, *State) { return tearDown, stateDB, state } +// TestStateCopy tests the correct copying behaviour of State. func TestStateCopy(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) + // nolint: vetshadow assert := assert.New(t) stateCopy := state.Copy() assert.True(state.Equals(stateCopy), - cmn.Fmt("expected state and its copy to be identical. got %v\n expected %v\n", stateCopy, state)) + cmn.Fmt(`expected state and its copy to be identical. 
got %v\n expected %v\n`, + stateCopy, state)) + stateCopy.LastBlockHeight++ - assert.False(state.Equals(stateCopy), cmn.Fmt("expected states to be different. got same %v", state)) + assert.False(state.Equals(stateCopy), cmn.Fmt(`expected states to be different. got same + %v`, state)) } +// TestStateSaveLoad tests saving and loading State from a db. func TestStateSaveLoad(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) + // nolint: vetshadow assert := assert.New(t) state.LastBlockHeight++ @@ -53,12 +62,15 @@ func TestStateSaveLoad(t *testing.T) { loadedState := LoadState(stateDB) assert.True(state.Equals(loadedState), - cmn.Fmt("expected state and its copy to be identical. got %v\n expected %v\n", loadedState, state)) + cmn.Fmt(`expected state and its copy to be identical. got %v\n expected %v\n`, + loadedState, state)) } +// TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. func TestABCIResponsesSaveLoad(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) + // nolint: vetshadow assert := assert.New(t) state.LastBlockHeight++ @@ -77,17 +89,20 @@ func TestABCIResponsesSaveLoad(t *testing.T) { abciResponses.txs = nil state.SaveABCIResponses(abciResponses) - abciResponses2 := state.LoadABCIResponses() - assert.Equal(abciResponses, abciResponses2, - cmn.Fmt("ABCIResponses don't match: Got %v, Expected %v", abciResponses2, abciResponses)) + loadedAbciResponses := state.LoadABCIResponses() + assert.Equal(abciResponses, loadedAbciResponses, + cmn.Fmt(`ABCIResponses don't match: Got %v, Expected %v`, loadedAbciResponses, + abciResponses)) } +// TestValidatorSimpleSaveLoad tests saving and loading validators. 
func TestValidatorSimpleSaveLoad(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) + // nolint: vetshadow assert := assert.New(t) - // cant load anything for height 0 + // can't load anything for height 0 v, err := state.LoadValidators(0) assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") @@ -115,9 +130,11 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height") } +// TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. func TestValidatorChangesSaveLoad(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) + // nolint: vetshadow assert := assert.New(t) // change vals at these heights @@ -127,7 +144,8 @@ func TestValidatorChangesSaveLoad(t *testing.T) { // each valset is just one validator. // create list of them pubkeys := make([]crypto.PubKey, N+1) - pubkeys[0] = state.GenesisDoc.Validators[0].PubKey + _, val := state.Validators.GetByIndex(0) + pubkeys[0] = val.PubKey for i := 1; i < N+1; i++ { pubkeys[i] = crypto.GenPrivKeyEd25519().PubKey() } @@ -169,7 +187,8 @@ func TestValidatorChangesSaveLoad(t *testing.T) { assert.Equal(v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) addr, _ := v.GetByIndex(0) - assert.Equal(addr, testCase.vals.Address(), fmt.Sprintf("unexpected pubkey at height %d", testCase.height)) + assert.Equal(addr, testCase.vals.Address(), fmt.Sprintf(`unexpected pubkey at + height %d`, testCase.height)) } } diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go index 1c311830..66897905 100644 --- a/state/txindex/indexer.go +++ b/state/txindex/indexer.go @@ -6,17 +6,16 @@ import ( "github.com/tendermint/tendermint/types" ) -// Indexer interface defines methods to index and search transactions. +// TxIndexer interface defines methods to index and search transactions. type TxIndexer interface { - // Batch analyzes, indexes or stores a batch of transactions. 
- // - // NOTE We do not specify Index method for analyzing a single transaction + // AddBatch analyzes, indexes or stores a batch of transactions. + // NOTE: We do not specify Index method for analyzing a single transaction // here because it bears heavy perfomance loses. Almost all advanced indexers // support batching. AddBatch(b *Batch) error - // Tx returns specified transaction or nil if the transaction is not indexed + // Get returns the transaction specified by hash or nil if the transaction is not indexed // or stored. Get(hash []byte) (*types.TxResult, error) } @@ -24,10 +23,8 @@ type TxIndexer interface { //---------------------------------------------------- // Txs are written as a batch -// A Batch groups together multiple Index operations you would like performed -// at the same time. The Batch structure is NOT thread-safe. You should only -// perform operations on a batch from a single thread at a time. Once batch -// execution has started, you may not modify it. +// Batch groups together multiple Index operations to be performed at the same time. +// NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. type Batch struct { Ops []types.TxResult } @@ -39,7 +36,7 @@ func NewBatch(n int) *Batch { } } -// Index adds or updates entry for the given result.Index. +// Add or update an entry for the given result.Index. func (b *Batch) Add(result types.TxResult) error { b.Ops[result.Index] = result return nil diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go index 8f684c4a..db075e54 100644 --- a/state/txindex/kv/kv.go +++ b/state/txindex/kv/kv.go @@ -4,14 +4,16 @@ import ( "bytes" "fmt" - db "github.com/tendermint/tmlibs/db" "github.com/tendermint/go-wire" + + db "github.com/tendermint/tmlibs/db" + "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" ) // TxIndex is the simplest possible indexer, backed by Key-Value storage (levelDB). 
-// It could only index transaction by its identifier. +// It can only index transaction by its identifier. type TxIndex struct { store db.DB } @@ -44,7 +46,7 @@ func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { return txResult, nil } -// Batch writes a batch of transactions into the TxIndex storage. +// AddBatch writes a batch of transactions into the TxIndex storage. func (txi *TxIndex) AddBatch(b *txindex.Batch) error { storeBatch := txi.store.NewBatch() for _, result := range b.Ops { diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go index 4999bbde..4939d6d8 100644 --- a/state/txindex/null/null.go +++ b/state/txindex/null/null.go @@ -10,12 +10,12 @@ import ( // TxIndex acts as a /dev/null. type TxIndex struct{} -// Tx panics. +// Get on a TxIndex is disabled and panics when invoked. func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) { return nil, errors.New(`Indexing is disabled (set 'tx_index = "kv"' in config)`) } -// Batch returns nil. +// AddBatch is a noop and always returns nil. func (txi *TxIndex) AddBatch(batch *txindex.Batch) error { return nil } diff --git a/test/app/clean.sh b/test/app/clean.sh index 8c556223..22814f01 100644 --- a/test/app/clean.sh +++ b/test/app/clean.sh @@ -1,4 +1,3 @@ killall tendermint -killall dummy -killall counter +killall abci-cli rm -rf ~/.tendermint_app diff --git a/test/app/dummy_test.sh b/test/app/dummy_test.sh index 1a117c63..b3db0b86 100644 --- a/test/app/dummy_test.sh +++ b/test/app/dummy_test.sh @@ -2,7 +2,7 @@ set -e function toHex() { - echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}' + echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}' } @@ -32,9 +32,9 @@ RESPONSE=`abci-cli query \"$KEY\"` set +e A=`echo $RESPONSE | grep "$VALUE"` if [[ $? != 0 ]]; then - echo "Failed to find $VALUE for $KEY. Response:" - echo "$RESPONSE" - exit 1 + echo "Failed to find $VALUE for $KEY. 
Response:" + echo "$RESPONSE" + exit 1 fi set -e @@ -43,9 +43,9 @@ RESPONSE=`abci-cli query \"$VALUE\"` set +e A=`echo $RESPONSE | grep $VALUE` if [[ $? == 0 ]]; then - echo "Found '$VALUE' for $VALUE when we should not have. Response:" - echo "$RESPONSE" - exit 1 + echo "Found '$VALUE' for $VALUE when we should not have. Response:" + echo "$RESPONSE" + exit 1 fi set -e @@ -62,9 +62,9 @@ RESPONSE=`echo $RESPONSE | jq .result.response.log` set +e A=`echo $RESPONSE | grep 'exists'` if [[ $? != 0 ]]; then - echo "Failed to find 'exists' for $KEY. Response:" - echo "$RESPONSE" - exit 1 + echo "Failed to find 'exists' for $KEY. Response:" + echo "$RESPONSE" + exit 1 fi set -e @@ -74,9 +74,9 @@ RESPONSE=`echo $RESPONSE | jq .result.response.log` set +e A=`echo $RESPONSE | grep 'exists'` if [[ $? == 0 ]]; then - echo "Found 'exists' for $VALUE when we should not have. Response:" - echo "$RESPONSE" - exit 1 + echo "Found 'exists' for $VALUE when we should not have. Response:" + echo "$RESPONSE" + exit 1 fi set -e diff --git a/test/app/test.sh b/test/app/test.sh index cd312b68..355fab45 100644 --- a/test/app/test.sh +++ b/test/app/test.sh @@ -11,118 +11,118 @@ set -e export TMHOME=$HOME/.tendermint_app function dummy_over_socket(){ - rm -rf $TMHOME - tendermint init - echo "Starting dummy_over_socket" - dummy > /dev/null & - pid_dummy=$! - tendermint node > tendermint.log & - pid_tendermint=$! - sleep 5 + rm -rf $TMHOME + tendermint init + echo "Starting dummy_over_socket" + abci-cli dummy > /dev/null & + pid_dummy=$! + tendermint node > tendermint.log & + pid_tendermint=$! + sleep 5 - echo "running test" - bash dummy_test.sh "Dummy over Socket" + echo "running test" + bash dummy_test.sh "Dummy over Socket" - kill -9 $pid_dummy $pid_tendermint + kill -9 $pid_dummy $pid_tendermint } # start tendermint first function dummy_over_socket_reorder(){ - rm -rf $TMHOME - tendermint init - echo "Starting dummy_over_socket_reorder (ie. 
start tendermint first)" - tendermint node > tendermint.log & - pid_tendermint=$! - sleep 2 - dummy > /dev/null & - pid_dummy=$! - sleep 5 + rm -rf $TMHOME + tendermint init + echo "Starting dummy_over_socket_reorder (ie. start tendermint first)" + tendermint node > tendermint.log & + pid_tendermint=$! + sleep 2 + abci-cli dummy > /dev/null & + pid_dummy=$! + sleep 5 - echo "running test" - bash dummy_test.sh "Dummy over Socket" + echo "running test" + bash dummy_test.sh "Dummy over Socket" - kill -9 $pid_dummy $pid_tendermint + kill -9 $pid_dummy $pid_tendermint } function counter_over_socket() { - rm -rf $TMHOME - tendermint init - echo "Starting counter_over_socket" - counter --serial > /dev/null & - pid_counter=$! - tendermint node > tendermint.log & - pid_tendermint=$! - sleep 5 + rm -rf $TMHOME + tendermint init + echo "Starting counter_over_socket" + abci-cli counter --serial > /dev/null & + pid_counter=$! + tendermint node > tendermint.log & + pid_tendermint=$! + sleep 5 - echo "running test" - bash counter_test.sh "Counter over Socket" + echo "running test" + bash counter_test.sh "Counter over Socket" - kill -9 $pid_counter $pid_tendermint + kill -9 $pid_counter $pid_tendermint } function counter_over_grpc() { - rm -rf $TMHOME - tendermint init - echo "Starting counter_over_grpc" - counter --serial --abci grpc > /dev/null & - pid_counter=$! - tendermint node --abci grpc > tendermint.log & - pid_tendermint=$! - sleep 5 + rm -rf $TMHOME + tendermint init + echo "Starting counter_over_grpc" + abci-cli counter --serial --abci grpc > /dev/null & + pid_counter=$! + tendermint node --abci grpc > tendermint.log & + pid_tendermint=$! + sleep 5 - echo "running test" - bash counter_test.sh "Counter over GRPC" + echo "running test" + bash counter_test.sh "Counter over GRPC" - kill -9 $pid_counter $pid_tendermint + kill -9 $pid_counter $pid_tendermint } function counter_over_grpc_grpc() { - rm -rf $TMHOME - tendermint init - echo "Starting counter_over_grpc_grpc (ie. 
with grpc broadcast_tx)" - counter --serial --abci grpc > /dev/null & - pid_counter=$! - sleep 1 - GRPC_PORT=36656 - tendermint node --abci grpc --rpc.grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log & - pid_tendermint=$! - sleep 5 + rm -rf $TMHOME + tendermint init + echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)" + abci-cli counter --serial --abci grpc > /dev/null & + pid_counter=$! + sleep 1 + GRPC_PORT=36656 + tendermint node --abci grpc --rpc.grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log & + pid_tendermint=$! + sleep 5 - echo "running test" - GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx" + echo "running test" + GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx" - kill -9 $pid_counter $pid_tendermint + kill -9 $pid_counter $pid_tendermint } cd $GOPATH/src/github.com/tendermint/tendermint/test/app case "$1" in - "dummy_over_socket") - dummy_over_socket - ;; - "dummy_over_socket_reorder") - dummy_over_socket_reorder - ;; - "counter_over_socket") - counter_over_socket - ;; - "counter_over_grpc") - counter_over_grpc - ;; - "counter_over_grpc_grpc") - counter_over_grpc_grpc - ;; - *) - echo "Running all" - dummy_over_socket - echo "" - dummy_over_socket_reorder - echo "" - counter_over_socket - echo "" - counter_over_grpc - echo "" - counter_over_grpc_grpc + "dummy_over_socket") + dummy_over_socket + ;; +"dummy_over_socket_reorder") + dummy_over_socket_reorder + ;; + "counter_over_socket") + counter_over_socket + ;; +"counter_over_grpc") + counter_over_grpc + ;; + "counter_over_grpc_grpc") + counter_over_grpc_grpc + ;; +*) + echo "Running all" + dummy_over_socket + echo "" + dummy_over_socket_reorder + echo "" + counter_over_socket + echo "" + counter_over_grpc + echo "" + counter_over_grpc_grpc esac diff --git a/test/persist/test_failure_indices.sh b/test/persist/test_failure_indices.sh index 41c17a09..6c40786d 100644 --- 
a/test/persist/test_failure_indices.sh +++ b/test/persist/test_failure_indices.sh @@ -9,66 +9,66 @@ tendermint init RPC_ADDR="$(pwd)/rpc.sock" TM_CMD="tendermint node --log_level=debug --rpc.laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log" -DUMMY_CMD="dummy --persist $TMHOME/dummy" # &> dummy_${name}.log" +DUMMY_CMD="abci-cli dummy --persist $TMHOME/dummy" # &> dummy_${name}.log" function start_procs(){ - name=$1 - indexToFail=$2 - echo "Starting persistent dummy and tendermint" - if [[ "$CIRCLECI" == true ]]; then - $DUMMY_CMD & - else - $DUMMY_CMD &> "dummy_${name}.log" & - fi - PID_DUMMY=$! + name=$1 + indexToFail=$2 + echo "Starting persistent dummy and tendermint" + if [[ "$CIRCLECI" == true ]]; then + $DUMMY_CMD & + else + $DUMMY_CMD &> "dummy_${name}.log" & + fi + PID_DUMMY=$! - # before starting tendermint, remove the rpc socket - rm $RPC_ADDR - if [[ "$indexToFail" == "" ]]; then - # run in background, dont fail - if [[ "$CIRCLECI" == true ]]; then - $TM_CMD & - else - $TM_CMD &> "tendermint_${name}.log" & - fi - PID_TENDERMINT=$! - else - # run in foreground, fail - if [[ "$CIRCLECI" == true ]]; then - FAIL_TEST_INDEX=$indexToFail $TM_CMD - else - FAIL_TEST_INDEX=$indexToFail $TM_CMD &> "tendermint_${name}.log" - fi - PID_TENDERMINT=$! - fi + # before starting tendermint, remove the rpc socket + rm $RPC_ADDR + if [[ "$indexToFail" == "" ]]; then + # run in background, dont fail + if [[ "$CIRCLECI" == true ]]; then + $TM_CMD & + else + $TM_CMD &> "tendermint_${name}.log" & + fi + PID_TENDERMINT=$! + else + # run in foreground, fail + if [[ "$CIRCLECI" == true ]]; then + FAIL_TEST_INDEX=$indexToFail $TM_CMD + else + FAIL_TEST_INDEX=$indexToFail $TM_CMD &> "tendermint_${name}.log" + fi + PID_TENDERMINT=$! 
+ fi } function kill_procs(){ - kill -9 "$PID_DUMMY" "$PID_TENDERMINT" - wait "$PID_DUMMY" - wait "$PID_TENDERMINT" + kill -9 "$PID_DUMMY" "$PID_TENDERMINT" + wait "$PID_DUMMY" + wait "$PID_TENDERMINT" } # wait for port to be available function wait_for_port() { - port=$1 - # this will succeed while port is bound - nc -z 127.0.0.1 $port - ERR=$? - i=0 - while [ "$ERR" == 0 ]; do - echo "... port $port is still bound. waiting ..." - sleep 1 - nc -z 127.0.0.1 $port - ERR=$? - i=$((i + 1)) - if [[ $i == 10 ]]; then - echo "Timed out waiting for port to be released" - exit 1 - fi - done - echo "... port $port is free!" + port=$1 + # this will succeed while port is bound + nc -z 127.0.0.1 $port + ERR=$? + i=0 + while [ "$ERR" == 0 ]; do + echo "... port $port is still bound. waiting ..." + sleep 1 + nc -z 127.0.0.1 $port + ERR=$? + i=$((i + 1)) + if [[ $i == 10 ]]; then + echo "Timed out waiting for port to be released" + exit 1 + fi + done + echo "... port $port is free!" } @@ -77,47 +77,47 @@ fails=$(grep -r "fail.Fail" --include \*.go . | wc -l) failsEnd=$((fails-1)) for failIndex in $(seq $failsStart $failsEnd); do - echo "" - echo "* Test FailIndex $failIndex" - # test failure at failIndex + echo "" + echo "* Test FailIndex $failIndex" + # test failure at failIndex - bash ./test/utils/txs.sh "localhost:46657" & - start_procs 1 "$failIndex" + bash ./test/utils/txs.sh "localhost:46657" & + start_procs 1 "$failIndex" - # tendermint should already have exited when it hits the fail index - # but kill -9 for good measure - kill_procs + # tendermint should already have exited when it hits the fail index + # but kill -9 for good measure + kill_procs - start_procs 2 + start_procs 2 - # wait for node to handshake and make a new block - # NOTE: --unix-socket is only available in curl v7.40+ - curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null - ERR=$? 
- i=0 - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null - ERR=$? - i=$((i + 1)) - if [[ $i == 20 ]]; then - echo "Timed out waiting for tendermint to start" - exit 1 - fi - done + # wait for node to handshake and make a new block + # NOTE: --unix-socket is only available in curl v7.40+ + curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null + ERR=$? + i=0 + while [ "$ERR" != 0 ]; do + sleep 1 + curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null + ERR=$? + i=$((i + 1)) + if [[ $i == 20 ]]; then + echo "Timed out waiting for tendermint to start" + exit 1 + fi + done - # wait for a new block - h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.latest_block_height) - h2=$h1 - while [ "$h2" == "$h1" ]; do - sleep 1 - h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.latest_block_height) - done + # wait for a new block + h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.latest_block_height) + h2=$h1 + while [ "$h2" == "$h1" ]; do + sleep 1 + h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.latest_block_height) + done - kill_procs + kill_procs - echo "* Passed Test for FailIndex $failIndex" - echo "" + echo "* Passed Test for FailIndex $failIndex" + echo "" done echo "Passed Test: Persistence" diff --git a/test/persist/test_simple.sh b/test/persist/test_simple.sh index 273c714c..964862c4 100644 --- a/test/persist/test_simple.sh +++ b/test/persist/test_simple.sh @@ -7,30 +7,30 @@ rm -rf $TMHOME tendermint init function start_procs(){ - name=$1 - echo "Starting persistent dummy and tendermint" - dummy --persist $TMHOME/dummy &> "dummy_${name}.log" & - PID_DUMMY=$! - tendermint node &> tendermint_${name}.log & - PID_TENDERMINT=$! - sleep 5 + name=$1 + echo "Starting persistent dummy and tendermint" + abci-cli dummy --persist $TMHOME/dummy &> "dummy_${name}.log" & + PID_DUMMY=$! 
+ tendermint node &> tendermint_${name}.log & + PID_TENDERMINT=$! + sleep 5 } function kill_procs(){ - kill -9 $PID_DUMMY $PID_TENDERMINT + kill -9 $PID_DUMMY $PID_TENDERMINT } function send_txs(){ - # send a bunch of txs over a few blocks - echo "Sending txs" - for i in `seq 1 5`; do - for j in `seq 1 100`; do - tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` - curl -s 127.0.0.1:46657/broadcast_tx_async?tx=0x$tx &> /dev/null - done - sleep 1 - done + # send a bunch of txs over a few blocks + echo "Sending txs" + for i in `seq 1 5`; do + for j in `seq 1 100`; do + tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` + curl -s 127.0.0.1:46657/broadcast_tx_async?tx=0x$tx &> /dev/null + done + sleep 1 + done } @@ -46,22 +46,22 @@ curl -s $addr/status > /dev/null ERR=$? i=0 while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $addr/status > /dev/null - ERR=$? - i=$(($i + 1)) - if [[ $i == 10 ]]; then - echo "Timed out waiting for tendermint to start" - exit 1 - fi + sleep 1 + curl -s $addr/status > /dev/null + ERR=$? 
+ i=$(($i + 1)) + if [[ $i == 10 ]]; then + echo "Timed out waiting for tendermint to start" + exit 1 + fi done # wait for a new block h1=`curl -s $addr/status | jq .result.latest_block_height` h2=$h1 while [ "$h2" == "$h1" ]; do - sleep 1 - h2=`curl -s $addr/status | jq .result.latest_block_height` + sleep 1 + h2=`curl -s $addr/status | jq .result.latest_block_height` done kill_procs diff --git a/test/run_test.sh b/test/run_test.sh index b505126e..6e4823f1 100644 --- a/test/run_test.sh +++ b/test/run_test.sh @@ -16,8 +16,8 @@ bash test/app/test.sh bash test/persist/test.sh if [[ "$BRANCH" == "master" || $(echo "$BRANCH" | grep "release-") != "" ]]; then - echo "" - echo "* branch $BRANCH; testing libs" - # checkout every github.com/tendermint dir and run its tests - bash test/test_libs.sh + echo "" + echo "* branch $BRANCH; testing libs" + # checkout every github.com/tendermint dir and run its tests + bash test/test_libs.sh fi diff --git a/types/block.go b/types/block.go index c8cdf81a..2291de31 100644 --- a/types/block.go +++ b/types/block.go @@ -14,15 +14,15 @@ import ( "github.com/tendermint/tmlibs/merkle" ) -// Block defines the atomic unit of a Tendermint blockchain +// Block defines the atomic unit of a Tendermint blockchain. type Block struct { *Header `json:"header"` *Data `json:"data"` LastCommit *Commit `json:"last_commit"` } -// MakeBlock returns a new block and corresponding part set from the given information -// TODO: version +// MakeBlock returns a new block and corresponding partset from the given information. +// TODO: Add version information to the Block struct. 
func MakeBlock(height int, chainID string, txs []Tx, commit *Commit, prevBlockID BlockID, valHash, appHash []byte, partSize int) (*Block, *PartSet) { block := &Block{ @@ -368,6 +368,14 @@ func (commit *Commit) StringIndented(indent string) string { //----------------------------------------------------------------------------- +// SignedHeader is a header along with the commits that prove it +type SignedHeader struct { + Header *Header `json:"header"` + Commit *Commit `json:"commit"` +} + +//----------------------------------------------------------------------------- + // Data contains the set of transactions included in the block type Data struct { diff --git a/types/genesis.go b/types/genesis.go index f1b2736f..e33f6025 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -7,7 +7,7 @@ import ( "github.com/pkg/errors" - "github.com/tendermint/go-crypto" + crypto "github.com/tendermint/go-crypto" "github.com/tendermint/go-wire/data" cmn "github.com/tendermint/tmlibs/common" ) @@ -29,6 +29,7 @@ type GenesisDoc struct { ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators"` AppHash data.Bytes `json:"app_hash"` + AppOptions interface{} `json:"app_options,omitempty"` } // SaveAs is a utility method for saving GenensisDoc as a JSON file. 
diff --git a/types/genesis_test.go b/types/genesis_test.go index 0ffce4b5..214bae40 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -30,7 +30,7 @@ func TestGenesis(t *testing.T) { } // test a good one by raw json - genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},"power":10,"name":""}],"app_hash":""}`) + genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"ed25519","data":"961EAB8752E51A03618502F55C2B6E09C38C65635C64CCF3173ED452CF86C957"},"power":10,"name":""}],"app_hash":"","app_options":{"account_owner": "Bob"}}`) _, err := GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for good genDoc json") @@ -60,31 +60,3 @@ func TestGenesis(t *testing.T) { genDoc, err = GenesisDocFromJSON(genDocBytes) assert.Error(t, err, "expected error for genDoc json with block size of 0") } - -func newConsensusParams(blockSize, partSize int) ConsensusParams { - return ConsensusParams{ - BlockSizeParams: BlockSizeParams{MaxBytes: blockSize}, - BlockGossipParams: BlockGossipParams{BlockPartSizeBytes: partSize}, - } - -} - -func TestConsensusParams(t *testing.T) { - - testCases := []struct { - params ConsensusParams - valid bool - }{ - {newConsensusParams(1, 1), true}, - {newConsensusParams(1, 0), false}, - {newConsensusParams(0, 1), false}, - {newConsensusParams(0, 0), false}, - } - for _, testCase := range testCases { - if testCase.valid { - assert.NoError(t, testCase.params.Validate(), "expected no error for valid params") - } else { - assert.Error(t, testCase.params.Validate(), "expected error for non valid params") - } - } -} diff --git a/types/heartbeat.go b/types/heartbeat.go index 40a7b01b..64676ea6 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ 
-10,8 +10,11 @@ import ( cmn "github.com/tendermint/tmlibs/common" ) -// Heartbeat is a simple vote-like structure so validators can alert others that -// they are alive and waiting for transactions. +// Heartbeat is a simple vote-like structure so validators can +// alert others that they are alive and waiting for transactions. +// Note: We aren't adding ",omitempty" to Heartbeat's +// json field tags because we always want the JSON +// representation to be in its canonical form. type Heartbeat struct { ValidatorAddress data.Bytes `json:"validator_address"` ValidatorIndex int `json:"validator_index"` @@ -22,6 +25,7 @@ type Heartbeat struct { } // WriteSignBytes writes the Heartbeat for signing. +// It panics if the Heartbeat is nil. func (heartbeat *Heartbeat) WriteSignBytes(chainID string, w io.Writer, n *int, err *error) { wire.WriteJSON(CanonicalJSONOnceHeartbeat{ chainID, @@ -31,6 +35,9 @@ func (heartbeat *Heartbeat) WriteSignBytes(chainID string, w io.Writer, n *int, // Copy makes a copy of the Heartbeat. 
func (heartbeat *Heartbeat) Copy() *Heartbeat { + if heartbeat == nil { + return nil + } heartbeatCopy := *heartbeat return &heartbeatCopy } diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go new file mode 100644 index 00000000..8a096712 --- /dev/null +++ b/types/heartbeat_test.go @@ -0,0 +1,56 @@ +package types + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/require" + + "github.com/tendermint/go-crypto" +) + +func TestHeartbeatCopy(t *testing.T) { + hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + hbCopy := hb.Copy() + require.Equal(t, hbCopy, hb, "heartbeat copy should be the same") + hbCopy.Round = hb.Round + 10 + require.NotEqual(t, hbCopy, hb, "heartbeat copy mutation should not change original") + + var nilHb *Heartbeat + nilHbCopy := nilHb.Copy() + require.Nil(t, nilHbCopy, "copy of nil should also return nil") +} + +func TestHeartbeatString(t *testing.T) { + var nilHb *Heartbeat + require.Contains(t, nilHb.String(), "nil", "expecting a string and no panic") + + hb := &Heartbeat{ValidatorIndex: 1, Height: 11, Round: 2} + require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) {}}") + + var key crypto.PrivKeyEd25519 + hb.Signature = key.Sign([]byte("Tendermint")) + require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) {/FF41E371B9BF.../}}") +} + +func TestHeartbeatWriteSignBytes(t *testing.T) { + var n int + var err error + buf := new(bytes.Buffer) + + hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + hb.WriteSignBytes("0xdeadbeef", buf, &n, &err) + require.Equal(t, string(buf.Bytes()), `{"chain_id":"0xdeadbeef","heartbeat":{"height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}}`) + + buf.Reset() + plainHb := &Heartbeat{} + plainHb.WriteSignBytes("0xdeadbeef", buf, &n, &err) + require.Equal(t, string(buf.Bytes()), `{"chain_id":"0xdeadbeef","heartbeat":{"height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}}`) + + require.Panics(t, func() 
{ + buf.Reset() + var nilHb *Heartbeat + nilHb.WriteSignBytes("0xdeadbeef", buf, &n, &err) + require.Equal(t, string(buf.Bytes()), "null") + }) +} diff --git a/types/params.go b/types/params.go index 495d1fd4..322cba61 100644 --- a/types/params.go +++ b/types/params.go @@ -18,7 +18,7 @@ type ConsensusParams struct { // BlockSizeParams contain limits on the block size. type BlockSizeParams struct { - MaxBytes int `json:"max_bytes"` // NOTE: must not be 0 + MaxBytes int `json:"max_bytes"` // NOTE: must not be 0 nor greater than 100MB MaxTxs int `json:"max_txs"` MaxGas int `json:"max_gas"` } diff --git a/types/params_test.go b/types/params_test.go new file mode 100644 index 00000000..507c8513 --- /dev/null +++ b/types/params_test.go @@ -0,0 +1,40 @@ +package types + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func newConsensusParams(blockSize, partSize int) ConsensusParams { + return ConsensusParams{ + BlockSizeParams: BlockSizeParams{MaxBytes: blockSize}, + BlockGossipParams: BlockGossipParams{BlockPartSizeBytes: partSize}, + } +} + +func TestConsensusParamsValidation(t *testing.T) { + testCases := []struct { + params ConsensusParams + valid bool + }{ + {newConsensusParams(1, 1), true}, + {newConsensusParams(1, 0), false}, + {newConsensusParams(0, 1), false}, + {newConsensusParams(0, 0), false}, + {newConsensusParams(0, 10), false}, + {newConsensusParams(10, -1), false}, + {newConsensusParams(47*1024*1024, 400), true}, + {newConsensusParams(10, 400), true}, + {newConsensusParams(100*1024*1024, 400), true}, + {newConsensusParams(101*1024*1024, 400), false}, + {newConsensusParams(1024*1024*1024, 400), false}, + } + for _, testCase := range testCases { + if testCase.valid { + assert.NoError(t, testCase.params.Validate(), "expected no error for valid params") + } else { + assert.Error(t, testCase.params.Validate(), "expected error for non valid params") + } + } +} diff --git a/types/validator_set.go b/types/validator_set.go index 
0e20417a..132957c1 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -6,7 +6,7 @@ import ( "sort" "strings" - "github.com/tendermint/go-wire" + "github.com/pkg/errors" cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/tmlibs/merkle" ) @@ -268,48 +268,84 @@ func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height } } -// Verify that +2/3 of this set had signed the given signBytes. -// Unlike VerifyCommit(), this function can verify commits with differeent sets. -func (valSet *ValidatorSet) VerifyCommitAny(chainID string, blockID BlockID, height int, commit *Commit) error { - panic("Not yet implemented") - /* - Start like: +// VerifyCommitAny will check to see if the set would +// be valid with a different validator set. +// +// valSet is the validator set that we know +// * over 2/3 of the power in old signed this block +// +// newSet is the validator set that signed this block +// * only votes from old are sufficient for 2/3 majority +// in the new set as well +// +// That means that: +// * 10% of the valset can't just declare themselves kings +// * If the validator set is 3x old size, we need more proof to trust +func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string, + blockID BlockID, height int, commit *Commit) error { - FOR_LOOP: - for _, val := range vals { - if len(precommits) == 0 { - break FOR_LOOP - } - next := precommits[0] - switch bytes.Compare(val.Address(), next.ValidatorAddress) { - case -1: - continue FOR_LOOP - case 0: - signBytes := tm.SignBytes(next) - ... - case 1: - ... // error? 
- } - } - */ -} - -func (valSet *ValidatorSet) ToBytes() []byte { - buf, n, err := new(bytes.Buffer), new(int), new(error) - wire.WriteBinary(valSet, buf, n, err) - if *err != nil { - cmn.PanicCrisis(*err) + if newSet.Size() != len(commit.Precommits) { + return errors.Errorf("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits)) } - return buf.Bytes() -} - -func (valSet *ValidatorSet) FromBytes(b []byte) { - r, n, err := bytes.NewReader(b), new(int), new(error) - wire.ReadBinary(valSet, r, 0, n, err) - if *err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.PanicCrisis(*err) + if height != commit.Height() { + return errors.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height()) } + + oldVotingPower := int64(0) + newVotingPower := int64(0) + seen := map[int]bool{} + round := commit.Round() + + for idx, precommit := range commit.Precommits { + // first check as in VerifyCommit + if precommit == nil { + continue + } + if precommit.Height != height { + // return certerr.ErrHeightMismatch(height, precommit.Height) + return errors.Errorf("Blocks don't match - %d vs %d", round, precommit.Round) + } + if precommit.Round != round { + return errors.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round) + } + if precommit.Type != VoteTypePrecommit { + return errors.Errorf("Invalid commit -- not precommit @ index %v", idx) + } + if !blockID.Equals(precommit.BlockID) { + continue // Not an error, but doesn't count + } + + // we only grab by address, ignoring unknown validators + vi, ov := valSet.GetByAddress(precommit.ValidatorAddress) + if ov == nil || seen[vi] { + continue // missing or double vote... + } + seen[vi] = true + + // Validate signature old school + precommitSignBytes := SignBytes(chainID, precommit) + if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) { + return errors.Errorf("Invalid commit -- invalid signature: %v", precommit) + } + // Good precommit! 
+ oldVotingPower += ov.VotingPower + + // check new school + _, cv := newSet.GetByIndex(idx) + if cv.PubKey.Equals(ov.PubKey) { + // make sure this is properly set in the current block as well + newVotingPower += cv.VotingPower + } + } + + if oldVotingPower <= valSet.TotalVotingPower()*2/3 { + return errors.Errorf("Invalid commit -- insufficient old voting power: got %v, needed %v", + oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1)) + } else if newVotingPower <= newSet.TotalVotingPower()*2/3 { + return errors.Errorf("Invalid commit -- insufficient cur voting power: got %v, needed %v", + newVotingPower, (newSet.TotalVotingPower()*2/3 + 1)) + } + return nil } func (valSet *ValidatorSet) String() string { diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 71a1993e..a285adee 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -5,8 +5,9 @@ import ( "strings" "testing" - cmn "github.com/tendermint/tmlibs/common" "github.com/tendermint/go-crypto" + wire "github.com/tendermint/go-wire" + cmn "github.com/tendermint/tmlibs/common" ) func randPubKey() crypto.PubKey { @@ -166,8 +167,8 @@ func TestProposerSelection3(t *testing.T) { } // serialize, deserialize, check proposer - b := vset.ToBytes() - vset.FromBytes(b) + b := vset.toBytes() + vset.fromBytes(b) computed := vset.GetProposer() // findGetProposer() if i != 0 { @@ -206,3 +207,21 @@ func BenchmarkValidatorSetCopy(b *testing.B) { vset.Copy() } } + +func (valSet *ValidatorSet) toBytes() []byte { + buf, n, err := new(bytes.Buffer), new(int), new(error) + wire.WriteBinary(valSet, buf, n, err) + if *err != nil { + cmn.PanicCrisis(*err) + } + return buf.Bytes() +} + +func (valSet *ValidatorSet) fromBytes(b []byte) { + r, n, err := bytes.NewReader(b), new(int), new(error) + wire.ReadBinary(valSet, r, 0, n, err) + if *err != nil { + // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED + cmn.PanicCrisis(*err) + } +} diff --git a/version/version.go b/version/version.go index 
b874accc..6995caa4 100644 --- a/version/version.go +++ b/version/version.go @@ -1,12 +1,12 @@ package version const Maj = "0" -const Min = "11" -const Fix = "1" +const Min = "12" +const Fix = "0" var ( // The full version string - Version = "0.11.1" + Version = "0.12.0" // GitCommit is set with --ldflags "-X main.gitCommit=$(git rev-parse HEAD)" GitCommit string