diff --git a/CHANGELOG.md b/CHANGELOG.md deleted file mode 100644 index f69127d5..00000000 --- a/CHANGELOG.md +++ /dev/null @@ -1,868 +0,0 @@ -# Changelog - -## TBD - -FEATURES: -- [node] added metrics (served under /metrics using a Prometheus client; disabled by default) - -## 0.20.1 - -BUG FIXES: - -- [rpc] fix memory leak in Websocket (when using `/subscribe` method) - -## 0.20.0 - -*June 6th, 2018* - -This is the first in a series of breaking releases coming to Tendermint after -soliciting developer feedback and conducting security audits. - -This release does not break any blockchain data structures or -protocols other than the ABCI messages between Tendermint and the application. - -Applications that upgrade for ABCI v0.11.0 should be able to continue running Tendermint -v0.20.0 on blockchains created with v0.19.X - -BREAKING CHANGES - -- [abci] Upgrade to - [v0.11.0](https://github.com/tendermint/abci/blob/master/CHANGELOG.md#0110) -- [abci] Change Query path for filtering peers by node ID from - `p2p/filter/pubkey/` to `p2p/filter/id/` - -## 0.19.9 - -*June 5th, 2018* - -BREAKING CHANGES - -- [types/priv_validator] Moved to top level `privval` package - -FEATURES - -- [config] Collapse PeerConfig into P2PConfig -- [docs] Add quick-install script -- [docs/spec] Add table of Amino prefixes - -BUG FIXES - -- [rpc] Return 404 for unknown endpoints -- [consensus] Flush WAL on stop -- [evidence] Don't send evidence to peers that are behind -- [p2p] Fix memory leak on peer disconnects -- [rpc] Fix panic when `per_page=0` - -## 0.19.8 - -*June 4th, 2018* - -BREAKING: - -- [p2p] Remove `auth_enc` config option, peer connections are always auth - encrypted. Technically a breaking change but seems no one was using it and - arguably a bug fix :) - -BUG FIXES - -- [mempool] Fix deadlock under high load when `skip_timeout_commit=true` and - `create_empty_blocks=false` - -## 0.19.7 - -*May 31st, 2018* - -BREAKING: - -- [libs/pubsub] TagMap#Get returns a string value -- [libs/pubsub] NewTagMap accepts a map of strings - -FEATURES - -- [rpc] the RPC documentation is now published to https://tendermint.github.io/slate -- [p2p] AllowDuplicateIP config option to refuse connections from same IP. - - true by default for now, false by default in next breaking release -- [docs] Add docs for query, tx indexing, events, pubsub -- [docs] Add some notes about running Tendermint in production - -IMPROVEMENTS: - -- [consensus] Consensus reactor now receives events from a separate synchronous event bus, - which is not dependent on external RPC load -- [consensus/wal] do not look for height in older files if we've seen height - 1 -- [docs] Various cleanup and link fixes - -## 0.19.6 - -*May 29th, 2018* - -BUG FIXES - -- [blockchain] Fix fast-sync deadlock during high peer turnover -- [evidence] Don't send peers evidence from heights they haven't synced to yet -- [p2p] Refuse connections to more than one peer with the same IP -- [docs] Various fixes - -## 0.19.5 - -*May 20th, 2018* - -BREAKING CHANGES - -- [rpc/client] TxSearch and UnconfirmedTxs have new arguments (see below) -- [rpc/client] TxSearch returns ResultTxSearch -- [version] Breaking changes to Go APIs will not be reflected in a breaking - version change, but will be included in the changelog.
- -FEATURES - -- [rpc] `/tx_search` takes `page` (starts at 1) and `per_page` (max 100, default 30) args to paginate results -- [rpc] `/unconfirmed_txs` takes `limit` (max 100, default 30) arg to limit the output -- [config] `mempool.size` and `mempool.cache_size` options - -IMPROVEMENTS - -- [docs] Lots of updates -- [consensus] Only Fsync() the WAL before executing msgs from ourselves - -BUG FIXES - -- [mempool] Enforce upper bound on number of transactions - -## 0.19.4 (May 17th, 2018) - -IMPROVEMENTS - -- [state] Improve tx indexing by using batches -- [consensus, state] Improve logging (more consensus logs, fewer tx logs) -- [spec] Moved to `docs/spec` (TODO cleanup the rest of the docs ...) - -BUG FIXES - -- [consensus] Fix issue #1575 where a late proposer can get stuck - -## 0.19.3 (May 14th, 2018) - -FEATURES - -- [rpc] New `/consensus_state` returns just the votes seen at the current height - -IMPROVEMENTS - -- [rpc] Add stringified votes and fraction of power voted to `/dump_consensus_state` -- [rpc] Add PeerStateStats to `/dump_consensus_state` - -BUG FIXES - -- [cmd] Set GenesisTime during `tendermint init` -- [consensus] fix ValidBlock rules - -## 0.19.2 (April 30th, 2018) - -FEATURES: - -- [p2p] Allow peers with different minor versions to connect -- [rpc] `/net_info` includes `n_peers` - -IMPROVEMENTS: - -- [p2p] Various code comments, cleanup, error types -- [p2p] Change some Error logs to Debug - -BUG FIXES: - -- [p2p] Fix reconnect to persistent peer when first dial fails -- [p2p] Validate NodeInfo.ListenAddr -- [p2p] Only allow (MaxNumPeers - MaxNumOutboundPeers) inbound peers -- [p2p/pex] Limit max msg size to 64kB -- [p2p] Fix panic when pex=false -- [p2p] Allow multiple IPs per ID in AddrBook -- [p2p] Fix before/after bugs in addrbook isBad() - -## 0.19.1 (April 27th, 2018) - -Note this release includes some small breaking changes in the RPC and one in the -config that are really bug fixes. v0.19.1 will work with existing chains, and make Tendermint -easier to use and debug. With <3 - -BREAKING (MINOR) - -- [config] Removed `wal_light` setting. If you really needed this, let us know - -FEATURES: - -- [networks] moved in tooling from devops repo: terraform and ansible scripts for deploying testnets! -- [cmd] Added `gen_node_key` command - -BUG FIXES - -Some of these are breaking in the RPC response, but they're really bugs! - -- [spec] Document address format and pubkey encoding pre and post Amino -- [rpc] Lower case JSON field names -- [rpc] Fix missing entries, improve, and lower case the fields in `/dump_consensus_state` -- [rpc] Fix NodeInfo.Channels format to hex -- [rpc] Add Validator address to `/status` -- [rpc] Fix `prove` in ABCIQuery -- [cmd] MarshalJSONIndent on init - -## 0.19.0 (April 13th, 2018) - -BREAKING: -- [cmd] improved `testnet` command; now it can fill in `persistent_peers` for you in the config file and much more (see `tendermint testnet --help` for details) -- [cmd] `show_node_id` now returns an error if there is no node key -- [rpc] changed the output format for the `/status` endpoint (see https://godoc.org/github.com/tendermint/tendermint/rpc/core#Status) - -Upgrade from go-wire to go-amino. This is a sweeping change that breaks everything that is -serialized to disk or over the network. - -See github.com/tendermint/go-amino for details on the new format. - -See `scripts/wire2amino.go` for a tool to upgrade -genesis/priv_validator/node_key JSON files. - -FEATURES - -- [test] docker-compose for local testnet setup (thanks Greg!)
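As a quick sketch of driving that compose-based local testnet (the Makefile target and the compose file location are assumptions here, not taken from this changelog):

```
make build-linux      # assumed target that builds the Linux binary the containers run
docker-compose up     # assumes a docker-compose.yml at the repository root
docker-compose down   # tear the testnet down again
```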
- -## 0.18.0 (April 6th, 2018) - -BREAKING: - -- [types] Merkle tree uses different encoding for varints (see tmlibs v0.8.0) -- [types] ValidatorSet.GetByAddress returns -1 if no validator found -- [p2p] require all addresses come with an ID no matter what -- [rpc] Listening address must contain tcp:// or unix:// prefix - -FEATURES: - -- [rpc] StartHTTPAndTLSServer (not used yet) -- [rpc] Include validator's voting power in `/status` -- [rpc] `/tx` and `/tx_search` responses now include the transaction hash -- [rpc] Include peer NodeIDs in `/net_info` - -IMPROVEMENTS: -- [config] trim whitespace from elements of lists (like `persistent_peers`) -- [rpc] `/tx_search` results are sorted by height -- [p2p] do not try to connect to ourselves (ok, maybe only once) -- [p2p] seeds respond with a bias towards good peers - -BUG FIXES: -- [rpc] fix subscribing using an abci.ResponseDeliverTx tag -- [rpc] fix tx_indexers matchRange -- [rpc] fix unsubscribing (see tmlibs v0.8.0) - -## 0.17.1 (March 27th, 2018) - -BUG FIXES: -- [types] Actually support `app_state` in genesis as `AppStateJSON` - -## 0.17.0 (March 27th, 2018) - -BREAKING: -- [types] WriteSignBytes -> SignBytes - -IMPROVEMENTS: -- [all] renamed `dummy` (`persistent_dummy`) to `kvstore` (`persistent_kvstore`) (name "dummy" is deprecated and will not work in the next breaking release) -- [docs] note on determinism (docs/determinism.rst) -- [genesis] `app_options` field is deprecated. Please rename it to `app_state` in your genesis file(s). `app_options` will not work in the next breaking release -- [p2p] dial seeds directly without potential peers -- [p2p] exponential backoff for addrs in the address book -- [p2p] mark peer as good if it contributed enough votes or block parts -- [p2p] stop peer if it sends incorrect data, msg to unknown channel, msg we did not expect -- [p2p] when `auth_enc` is true, all dialed peers must have a node ID in their address -- [spec] various improvements -- switched from glide to dep internally for package management -- [wire] prep work for upgrading to new go-wire (which is now called go-amino) - -FEATURES: -- [config] exposed `auth_enc` flag to enable/disable encryption -- [config] added the `--p2p.private_peer_ids` flag and `PrivatePeerIDs` config variable (see config for description) -- [rpc] added `/health` endpoint, which returns empty result for now -- [types/priv_validator] new format and socket client, allowing for remote signing - -BUG FIXES: -- [consensus] fix liveness bug by introducing ValidBlock mechanism - -## 0.16.0 (February 20th, 2018) - -BREAKING CHANGES: -- [config] use $TMHOME/config for all config and json files -- [p2p] old `--p2p.seeds` is now `--p2p.persistent_peers` (persistent peers to which TM will always connect) -- [p2p] now `--p2p.seeds` is only used for getting addresses (if addrbook is empty; not persistent) -- [p2p] NodeInfo: remove RemoteAddr and add Channels - - we must have at least one overlapping channel with peer - - we only send msgs for channels the peer advertised -- [p2p/conn] pong timeout -- [lite] comment out IAVL related code - -FEATURES: -- [p2p] added new `/dial_peers&persistent=_` **unsafe** endpoint -- [p2p] persistent node key in `$TMHOME/config/node_key.json` -- [p2p] introduce peer ID and authenticate peers by ID using addresses like `ID@IP:PORT` -- [p2p/pex] new seed mode crawls the network and serves as a seed.
-- [config] MempoolConfig.CacheSize -- [config] P2P.SeedMode (`--p2p.seed_mode`) - -IMPROVEMENTS: -- [p2p/pex] stricter rules in the PEX reactor for better handling of abuse -- [p2p] various improvements to code structure including subpackages for `pex` and `conn` -- [docs] new spec! -- [all] speed up the tests! - -BUG FIXES: -- [blockchain] StopPeerForError on timeout -- [consensus] StopPeerForError on a bad Maj23 message -- [state] flush mempool conn before calling commit -- [types] fix priv val signing things that only differ by timestamp -- [mempool] fix memory leak causing zombie peers -- [p2p/conn] fix potential deadlock - -## 0.15.0 (December 29, 2017) - -BREAKING CHANGES: -- [p2p] enable the Peer Exchange reactor by default -- [types] add Timestamp field to Proposal/Vote -- [types] add new fields to Header: TotalTxs, ConsensusParamsHash, LastResultsHash, EvidenceHash -- [types] add Evidence to Block -- [types] simplify ValidateBasic -- [state] updates to support changes to the header -- [state] Enforce <1/3 of validator set can change at a time - -FEATURES: -- [state] Send indices of absent validators and addresses of byzantine validators in BeginBlock -- [state] Historical ConsensusParams and ABCIResponses -- [docs] Specification for the base Tendermint data structures. -- [evidence] New evidence reactor for gossiping and managing evidence -- [rpc] `/block_results?height=X` returns the DeliverTx results for a given height. - -IMPROVEMENTS: -- [consensus] Better handling of corrupt WAL file - -BUG FIXES: -- [lite] fix race -- [state] validate block.Header.ValidatorsHash -- [p2p] allow seed addresses to be prefixed with e.g. `tcp://` -- [p2p] use consistent key to refer to peers so we don't try to connect to existing peers -- [cmd] fix `tendermint init` to ignore files that are there and generate files that aren't. - -## 0.14.0 (December 11, 2017) - -BREAKING CHANGES: -- consensus/wal: removed separator -- rpc/client: changed Subscribe/Unsubscribe/UnsubscribeAll function signatures to be identical to event bus. - -FEATURES: -- new `tendermint lite` command (and `lite/proxy` pkg) for running a light-client RPC proxy. - NOTE it is currently insecure and its APIs are not yet covered by semver - -IMPROVEMENTS: -- rpc/client: can act as event bus subscriber (See https://github.com/tendermint/tendermint/issues/945). -- p2p: use exponential backoff from seconds to hours when attempting to reconnect to a persistent peer -- config: moniker defaults to the machine's hostname instead of "anonymous" - -BUG FIXES: -- p2p: no longer exit if one of the seed addresses is incorrect - -## 0.13.0 (December 6, 2017) - -BREAKING CHANGES: -- abci: update to v0.8 using gogo/protobuf; includes tx tags, vote info in RequestBeginBlock, data.Bytes everywhere, use int64, etc.
-- types: block heights are now `int64` everywhere -- types & node: EventSwitch and EventCache have been replaced by EventBus and EventBuffer; event types have been overhauled -- node: EventSwitch methods now refer to EventBus -- rpc/lib/types: RPCResponse is no longer a pointer; WSRPCConnection interface has been modified -- rpc/client: WaitForOneEvent takes an EventsClient instead of types.EventSwitch -- rpc/client: Add/RemoveListenerForEvent are now Subscribe/Unsubscribe -- rpc/core/types: ResultABCIQuery wraps an abci.ResponseQuery -- rpc: `/subscribe` and `/unsubscribe` take `query` arg instead of `event` -- rpc: `/status` returns the LatestBlockTime in human-readable form instead of in nanoseconds -- mempool: cached transactions return an error instead of an ABCI response with BadNonce - -FEATURES: -- rpc: new `/unsubscribe_all` WebSocket RPC endpoint -- rpc: new `/tx_search` endpoint for filtering transactions by more complex queries -- p2p/trust: new trust metric for tracking peers. See ADR-006 -- config: TxIndexConfig allows setting which DeliverTx tags to index - -IMPROVEMENTS: -- New asynchronous events system using `tmlibs/pubsub` -- logging: Various small improvements -- consensus: Graceful shutdown when app crashes -- tests: Fix various non-deterministic errors -- p2p: more defensive programming - -BUG FIXES: -- consensus: fix panic where prs.ProposalBlockParts is not initialized -- p2p: fix panic on bad channel - -## 0.12.1 (November 27, 2017) - -BUG FIXES: -- upgrade tmlibs dependency to enable Windows builds for Tendermint - -## 0.12.0 (October 27, 2017) - -BREAKING CHANGES: - - rpc/client: websocket ResultsCh and ErrorsCh unified in ResponsesCh. - - rpc/client: ABCIQuery no longer takes `prove` - - state: remove GenesisDoc from state. - - consensus: new binary WAL format provides efficiency and uses checksums to detect corruption - - use scripts/wal2json to convert to json for debugging - -FEATURES: - - new `certifiers` pkg contains the tendermint light-client library (name subject to change)! - - rpc: `/genesis` includes the `app_options`. - - rpc: `/abci_query` takes an additional `height` parameter to support historical queries. - - rpc/client: new ABCIQueryWithOptions supports options like `trusted` (set false to get a proof) and `height` to query a historical height. - -IMPROVEMENTS: - - rpc: `/genesis` result includes `app_options` - - rpc/lib/client: add jitter to reconnects. - - rpc/lib/types: `RPCError` satisfies the `error` interface.
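A usage sketch for the `/abci_query` `height` parameter noted above (the port is this era's default RPC port, and the quoting/encoding of the `path` and `data` arguments is illustrative, not taken from this changelog):

```
# ask the application for its state as of height 100 instead of the latest height
curl -s 'localhost:46657/abci_query?path="/store"&data="abcd"&height=100'
```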
- -BUG FIXES: - - rpc/client: fix ws deadlock after stopping - - blockchain: fix panic on AddBlock when peer is nil - - mempool: fix sending on TxsAvailable when a tx has been invalidated - - consensus: don't run WAL catchup if we fast-synced - -## 0.11.1 (October 10, 2017) - -IMPROVEMENTS: - - blockchain/reactor: respondWithNoResponseMessage for missing height - -BUG FIXES: - - rpc: fixed client WebSocket timeout - - rpc: client now resubscribes on reconnection - - rpc: fix panics on missing params - - rpc: fix `/dump_consensus_state` to have normal json output (NOTE: technically breaking, but worth a bug fix label) - - types: fixed out of range error in VoteSet.addVote - - consensus: fix wal autofile via https://github.com/tendermint/tmlibs/blob/master/CHANGELOG.md#032-october-2-2017 - -## 0.11.0 (September 22, 2017) - -BREAKING: - - genesis file: validator `amount` is now `power` - - abci: Info, BeginBlock, InitChain all take structs - - rpc: various changes to match JSONRPC spec (http://www.jsonrpc.org/specification), including breaking ones: - - requests that previously returned HTTP code 4XX now return 200 with an error code in the JSONRPC. - - `rpctypes.RPCResponse` uses new `RPCError` type instead of `string`. - - - cmd: if there is no genesis, exit immediately instead of waiting around for one to show. - - types: `Signer.Sign` returns an error. - - state: every validator set change is persisted to disk, which required some changes to the `State` structure. - - p2p: new `p2p.Peer` interface used for all reactor methods (instead of `*p2p.Peer` struct). - -FEATURES: - - rpc: `/validators?height=X` allows querying of validators at previous heights. - - rpc: Leaving the `height` param empty for `/block`, `/validators`, and `/commit` will return the value for the latest height. - -IMPROVEMENTS: - - docs: Moved all docs from the website and tools repo in, converted to `.rst`, and cleaned up for presentation on `tendermint.readthedocs.io` - -BUG FIXES: - - fix WAL opening issue on Windows - -## 0.10.4 (September 5, 2017) - -IMPROVEMENTS: -- docs: Added Slate docs to each rpc function (see rpc/core) -- docs: Ported all website docs to Read The Docs -- config: expose some p2p params to tweak performance: RecvRate, SendRate, and MaxMsgPacketPayloadSize -- rpc: Upgrade the websocket client and server, including improved auto reconnect, and proper ping/pong - -BUG FIXES: -- consensus: fix panic on getVoteBitArray -- consensus: hang instead of panicking on byzantine consensus failures -- cmd: don't load config for version command - -## 0.10.3 (August 10, 2017) - -FEATURES: -- control over empty block production: - - new flag, `--consensus.create_empty_blocks`; when set to false, blocks are only created when there are txs or when the AppHash changes. - - new config option, `consensus.create_empty_blocks_interval`; an empty block is created after this many seconds. - - in normal operation, `create_empty_blocks = true` and `create_empty_blocks_interval = 0`, so blocks are being created all the time (as in all previous versions of tendermint). The number of empty blocks can be reduced by increasing `create_empty_blocks_interval` or by setting `create_empty_blocks = false`. - - new `TxsAvailable()` method added to Mempool that returns a channel which fires when txs are available. - - new heartbeat message added to consensus reactor to notify peers that a node is waiting for txs before entering propose step. -- rpc: Add `syncing` field to response returned by `/status`. Is `true` while in fast-sync mode.
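A quick sketch of checking that field (the address is this era's default RPC port, an assumption here; the exact response shape varied between releases):

```
# the result includes a `syncing` field: `true` while fast-syncing, `false` once caught up
curl -s localhost:46657/status
```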
- -IMPROVEMENTS: -- various improvements to documentation and code comments - -BUG FIXES: -- mempool: pass height into constructor so it doesn't always start at 0 - -## 0.10.2 (July 10, 2017) - -FEATURES: -- Enable lower latency block commits by adding consensus reactor sleep durations and p2p flush throttle timeout to the config - -IMPROVEMENTS: -- More detailed logging in the consensus reactor and state machine -- More in-code documentation for many exposed functions, especially in consensus/reactor.go and p2p/switch.go -- Improved readability for some function definitions and code blocks with long lines - -## 0.10.1 (June 28, 2017) - -FEATURES: -- Use `--trace` to get stack traces for logged errors -- types: GenesisDoc.ValidatorHash returns the hash of the genesis validator set -- types: GenesisDocFromFile parses a GenesisDoc from a JSON file - -IMPROVEMENTS: -- Add a Code of Conduct -- Variety of improvements as suggested by `megacheck` tool -- rpc: deduplicate tests between rpc/client and rpc/tests -- rpc: addresses without a protocol prefix default to `tcp://`. `http://` is also accepted as an alias for `tcp://` -- cmd: commands are more easily reusable from other tools -- DOCKER: automate build/push - -BUG FIXES: -- Fix log statements using keys with spaces (logger does not currently support spaces) -- rpc: set logger on websocket connection -- rpc: fix ws connection stability by setting write deadline on pings - -## 0.10.0 (June 2, 2017) - -Includes major updates to configuration, logging, and json serialization. -Also includes the Grand Repo-Merge of 2017. - -BREAKING CHANGES: - -- Config and Flags: - - The `config` map is replaced with a [`Config` struct](https://github.com/tendermint/tendermint/blob/master/config/config.go#L11), -containing substructs: `BaseConfig`, `P2PConfig`, `MempoolConfig`, `ConsensusConfig`, `RPCConfig` - - This affects the following flags: - - `--seeds` is now `--p2p.seeds` - - `--node_laddr` is now `--p2p.laddr` - - `--pex` is now `--p2p.pex` - - `--skip_upnp` is now `--p2p.skip_upnp` - - `--rpc_laddr` is now `--rpc.laddr` - - `--grpc_laddr` is now `--rpc.grpc_laddr` - - Any configuration option now within a substruct must come under that heading in the `config.toml`, for instance: - ``` - [p2p] - laddr="tcp://1.2.3.4:46656" - - [consensus] - timeout_propose=1000 - ``` - - Use viper and `DefaultConfig() / TestConfig()` functions to handle defaults, and remove `config/tendermint` and `config/tendermint_test` - - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accommodate the new config - -- Logger - - Replace static `log15` logger with a simple interface, and provide a new implementation using `go-kit`. -See our new [logging library](https://github.com/tendermint/tmlibs/log) and [blog post](https://tendermint.com/blog/abstracting-the-logger-interface-in-go) for more details - - Levels `warn` and `notice` are removed (you may need to change them in your `config.toml`!) - - Change some [function and method signatures](https://gist.github.com/ebuchman/640d5fc6c2605f73497992fe107ebe0b) to accept a logger - -- JSON serialization: - - Replace `[TypeByte, Xxx]` with `{"type": "some-type", "data": Xxx}` in RPC and all `.json` files by using `go-wire/data`.
For instance, a public key is now: - ``` - "pub_key": { - "type": "ed25519", - "data": "83DDF8775937A4A12A2704269E2729FCFCD491B933C4B0A7FFE37FE41D7760D0" - } - ``` - - Remove type information about RPC responses, so `[TypeByte, {"jsonrpc": "2.0", ... }]` is now just `{"jsonrpc": "2.0", ... }` - - Change `[]byte` to `data.Bytes` in all serialized types (for hex encoding) - - Lowercase the JSON tags in `ValidatorSet` fields - - Introduce `EventDataInner` for serializing events - -- Other: - - Send InitChain message in handshake if `appBlockHeight == 0` - - Do not include the `Accum` field when computing the validator hash. This makes the ValidatorSetHash unique for a given validator set, rather than changing with every block (as the Accum changes) - - Unsafe RPC calls are not enabled by default. This includes `/dial_seeds`, and all calls prefixed with `unsafe`. Use the `--rpc.unsafe` flag to enable. - - -FEATURES: - -- Per-module log levels. For instance, the new default is `state:info,*:error`, which means the `state` package logs at `info` level, and everything else logs at `error` level -- Log if a node is a validator or not in every consensus round -- Use ldflags to set git hash as part of the version -- Ignore `address` and `pub_key` fields in `priv_validator.json` and overwrite them with the values derived from the `priv_key` - -IMPROVEMENTS: - -- Merge `tendermint/go-p2p -> tendermint/tendermint/p2p` and `tendermint/go-rpc -> tendermint/tendermint/rpc/lib` -- Update paths for grand repo merge: - - `go-common -> tmlibs/common` - - `go-data -> go-wire/data` - - All other `go-` libs, except `go-crypto` and `go-wire`, are merged under `tmlibs` -- No global loggers (loggers are passed into constructors, or preferably set with a SetLogger method) -- Return HTTP status codes with errors for RPC responses -- Limit `/blockchain_info` call to return a maximum of 20 blocks -- Use `.Wrap()` and `.Unwrap()` instead of e.g. `PubKeyS` for `go-crypto` types -- RPC JSON responses use pretty printing (via `json.MarshalIndent`) -- Color code different instances of the consensus for tests -- Isolate viper to `cmd/tendermint/commands` and do not read config from file for tests - - -## 0.9.2 (April 26, 2017) - -BUG FIXES: - -- Fix bug in `ResetPrivValidator` where we were using the global config and log (causing external consumers, e.g. basecoin, to fail). - -## 0.9.1 (April 21, 2017) - -FEATURES: - -- Transaction indexing - txs are indexed by their hash using a simple key-value store; easily extended to more advanced indexers -- New `/tx?hash=X` endpoint to query for transactions and their DeliverTx result by hash. Optionally returns a proof of the tx's inclusion in the block.
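As a usage sketch (the hash value is illustrative, the port is this era's default, and the `prove` parameter name is taken from later documented versions of the endpoint, so treat it as an assumption):

```
# fetch a transaction and its DeliverTx result by hash, asking for an inclusion proof
curl -s 'localhost:46657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF&prove=true'
```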
-- `tendermint testnet` command initializes files for a testnet - -IMPROVEMENTS: - -- CLI now uses Cobra framework -- TMROOT is now TMHOME (TMROOT will stop working in 0.10.0) -- `/broadcast_tx_XXX` also returns the Hash (can be used to query for the tx) -- `/broadcast_tx_commit` also returns the height the block was committed in -- ABCIResponses struct persisted to disk before calling Commit; makes handshake replay much cleaner -- WAL uses #ENDHEIGHT instead of #HEIGHT (#HEIGHT will stop working in 0.10.0) -- Peers included via `--seeds`, under `seeds` in the config, or in `/dial_seeds` are now persistent, and will be reconnected to if the connection breaks - -BUG FIXES: - -- Fix bug in fast-sync where we stop syncing after a peer is removed, even if they're re-added later -- Fix handshake replay to handle validator set changes and results of DeliverTx when we crash after app.Commit but before state.Save() - -## 0.9.0 (March 6, 2017) - -BREAKING CHANGES: - -- Update ABCI to v0.4.0, where Query is now `Query(RequestQuery) ResponseQuery`, enabling precise proofs at particular heights: - -``` -message RequestQuery{ - bytes data = 1; - string path = 2; - uint64 height = 3; - bool prove = 4; -} - -message ResponseQuery{ - CodeType code = 1; - int64 index = 2; - bytes key = 3; - bytes value = 4; - bytes proof = 5; - uint64 height = 6; - string log = 7; -} -``` - - -- `BlockMeta` data type unifies its Hash and PartSetHash under a `BlockID`: - -``` -type BlockMeta struct { - BlockID BlockID `json:"block_id"` // the block hash and partsethash - Header *Header `json:"header"` // The block's Header -} -``` - -- `ValidatorSet.Proposer` is exposed as a field and persisted with the `State`. Use `GetProposer()` to initialize or update after validator-set changes.
- -- `tendermint gen_validator` command output is now pure JSON - -FEATURES: - -- New RPC endpoint `/commit?height=X` returns header and commit for block at height `X` -- Client API for each endpoint, including mocks for testing - -IMPROVEMENTS: - -- `Node` is now a `BaseService` -- Simplified starting Tendermint in-process from another application -- Better organized Makefile -- Scripts for auto-building binaries across platforms -- Docker image improved, slimmed down (using Alpine), and changed from tendermint/tmbase to tendermint/tendermint -- New repo files: `CONTRIBUTING.md`, GitHub `ISSUE_TEMPLATE`, `CHANGELOG.md` -- Improvements on CircleCI for managing build/test artifacts -- Handshake replay is done through the consensus package, possibly using a mockApp -- Graceful shutdown of RPC listeners -- Tests for the PEX reactor and DialSeeds - -BUG FIXES: - -- Check peer.Send for failure before updating PeerState in consensus -- Fix panic in `/dial_seeds` with invalid addresses -- Fix proposer selection logic in ValidatorSet by taking the address into account in the `accumComparable` -- Fix inconsistencies with `ValidatorSet.Proposer` across restarts by persisting it in the `State` - - -## 0.8.0 (January 13, 2017) - -BREAKING CHANGES: - -- New data type `BlockID` to represent blocks: - -``` -type BlockID struct { - Hash []byte `json:"hash"` - PartsHeader PartSetHeader `json:"parts"` -} -``` - -- `Vote` data type now includes validator address and index: - -``` -type Vote struct { - ValidatorAddress []byte `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int `json:"height"` - Round int `json:"round"` - Type byte `json:"type"` - BlockID BlockID `json:"block_id"` // zero if vote is nil. - Signature crypto.Signature `json:"signature"` -} -``` - -- Update TMSP to v0.3.0, where it is now called ABCI and AppendTx is DeliverTx -- Hex strings in the RPC are now "0x" prefixed - - -FEATURES: - -- New message type on the ConsensusReactor, `Maj23Msg`, for peers to alert others they've seen a Maj23, -in order to track and handle conflicting votes intelligently to prevent Byzantine faults from causing halts: - -``` -type VoteSetMaj23Message struct { - Height int - Round int - Type byte - BlockID types.BlockID -} -``` - -- Configurable block part set size -- Validator set changes -- Optionally skip TimeoutCommit if we have all the votes -- Handshake between Tendermint and App on startup to sync latest state and ensure consistent recovery from crashes -- GRPC server for BroadcastTx endpoint - -IMPROVEMENTS: - -- Less verbose logging -- Better test coverage (37% -> 49%) -- Canonical SignBytes for signable types -- Write-Ahead Log for Mempool and Consensus via tmlibs/autofile -- Better in-process testing for the consensus reactor and byzantine faults -- Better crash/restart testing for individual nodes at preset failure points, and of networks at arbitrary points -- Better abstraction over timeout mechanics - -BUG FIXES: - -- Fix memory leak in mempool peer -- Fix panic on POLRound=-1 -- Actually set the CommitTime -- Actually send BeginBlock message -- Fix a liveness issue caused by Byzantine proposals/votes. Uses the new `Maj23Msg`.
- - -## 0.7.4 (December 14, 2016) - -FEATURES: - -- Enable the Peer Exchange reactor with the `--pex` flag for more resilient gossip network (feature still in development, beware dragons) - -IMPROVEMENTS: - -- Remove restrictions on RPC endpoint `/dial_seeds` to enable manual network configuration - -## 0.7.3 (October 20, 2016) - -IMPROVEMENTS: - -- Type safe FireEvent -- More WAL/replay tests -- Cleanup some docs - -BUG FIXES: - -- Fix deadlock in mempool for synchronous apps -- Replay handles non-empty blocks -- Fix race condition in HeightVoteSet - -## 0.7.2 (September 11, 2016) - -BUG FIXES: - -- Set mustConnect=false so tendermint will retry connecting to the app - -## 0.7.1 (September 10, 2016) - -FEATURES: - -- New TMSP connection for Query/Info -- New RPC endpoints: - - `tmsp_query` - - `tmsp_info` -- Allow application to filter peers through Query (off by default) - -IMPROVEMENTS: - -- TMSP connection type enforced at compile time -- All listen/client URLs use a "tcp://" or "unix://" prefix - -BUG FIXES: - -- Save LastSignature/LastSignBytes to `priv_validator.json` for recovery -- Fix event unsubscribe -- Fix fastsync/blockchain reactor - -## 0.7.0 (August 7, 2016) - -BREAKING CHANGES: - -- Strict SemVer starting now! -- Update to ABCI v0.2.0 -- Validation types now called Commit -- NewBlock event only returns the block header - - -FEATURES: - -- TMSP and RPC support TCP and UNIX sockets -- Additional config options including block size and consensus parameters -- New WAL mode `cswal_light`; logs only the validator's own votes -- New RPC endpoints: - - for starting/stopping profilers, and for updating config - - `/broadcast_tx_commit`, returns when tx is included in a block, else an error - - `/unsafe_flush_mempool`, empties the mempool - - -IMPROVEMENTS: - -- Various optimizations -- Remove bad or invalidated transactions from the mempool cache (allows later duplicates) -- More elaborate testing using CircleCI including benchmarking throughput on 4 DigitalOcean droplets - -BUG FIXES: - -- Various fixes to WAL and replay logic -- Various race conditions - -## PreHistory - -Strict versioning only began with the release of v0.7.0, in late summer 2016. -The project itself began in early summer 2014 and was workable decentralized cryptocurrency software by the end of that year. -Through the course of 2015, in collaboration with Eris Industries (now Monax Industries), -many additional features were integrated, including an implementation from scratch of the Ethereum Virtual Machine. -That implementation now forms the heart of [Burrow](https://github.com/hyperledger/burrow). -In the latter half of 2015, the consensus algorithm was upgraded with a more asynchronous design and a more deterministic and robust implementation. - -By late 2015, frustration with the difficulty of forking a large monolithic stack to create alternative cryptocurrency designs led to the -invention of the Application Blockchain Interface (ABCI), then called the Tendermint Socket Protocol (TMSP). -The Ethereum Virtual Machine and various other transaction features were removed, and Tendermint was whittled down to a core consensus engine -driving an application running in another process. -The ABCI interface and implementation were iterated on and improved over the course of 2016, -until versioned history kicked in with v0.7.0.
diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md deleted file mode 100644 index d47c0f15..00000000 --- a/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,56 +0,0 @@ -# The Tendermint Code of Conduct -This code of conduct applies to all projects run by the Tendermint/COSMOS team and hence to tendermint. - - ----- - - -# Conduct -## Contact: adrian@tendermint.com - -* We are committed to providing a friendly, safe and welcoming environment for all, regardless of level of experience, gender, gender identity and expression, sexual orientation, disability, personal appearance, body size, race, ethnicity, age, religion, nationality, or other similar characteristic. - -* On Slack, please avoid using overtly sexual nicknames or other nicknames that might detract from a friendly, safe and welcoming environment for all. - -* Please be kind and courteous. There’s no need to be mean or rude. - -* Respect that people have differences of opinion and that every design or implementation choice carries a trade-off and numerous costs. There is seldom a right answer. - -* Please keep unstructured critique to a minimum. If you have solid ideas you want to experiment with, make a fork and see how it works. - -* We will exclude you from interaction if you insult, demean or harass anyone. That is not welcome behaviour. We interpret the term “harassment” as including the definition in the [Citizen Code of Conduct](http://citizencodeofconduct.org/); if you have any lack of clarity about what might be included in that concept, please read their definition. In particular, we don’t tolerate behavior that excludes people in socially marginalized groups. - -* Private harassment is also unacceptable. No matter who you are, if you feel you have been or are being harassed or made uncomfortable by a community member, please contact one of the channel admins or the person mentioned above immediately. Whether you’re a regular contributor or a newcomer, we care about making this community a safe place for you and we’ve got your back. - -* Likewise any spamming, trolling, flaming, baiting or other attention-stealing behaviour is not welcome. - - ----- - - -# Moderation -These are the policies for upholding our community’s standards of conduct. If you feel that a thread needs moderation, please contact the above mentioned person. - -1. Remarks that violate the Tendermint/COSMOS standards of conduct, including hateful, hurtful, oppressive, or exclusionary remarks, are not allowed. (Cursing is allowed, but never targeting another user, and never in a hateful manner.) - -2. Remarks that moderators find inappropriate, whether listed in the code of conduct or not, are also not allowed. - -3. Moderators will first respond to such remarks with a warning. - -4. If the warning is unheeded, the user will be “kicked,” i.e., kicked out of the communication channel to cool off. - -5. If the user comes back and continues to make trouble, they will be banned, i.e., indefinitely excluded. - -6. Moderators may choose at their discretion to un-ban the user if it was a first offense and they offer the offended party a genuine apology. - -7. If a moderator bans someone and you think it was unjustified, please take it up with that moderator, or with a different moderator, in private. Complaints about bans in-channel are not allowed. - -8. Moderators are held to a higher standard than other community members. If a moderator creates an inappropriate situation, they should expect less leeway than others. 
- -In the Tendermint/COSMOS community we strive to go the extra step to look out for each other. Don’t just aim to be technically unimpeachable, try to be your best self. In particular, avoid flirting with offensive or sensitive issues, particularly if they’re off-topic; this all too often leads to unnecessary fights, hurt feelings, and damaged trust; worse, it can drive people away from the community entirely. - -And if someone takes issue with something you said or did, resist the urge to be defensive. Just stop doing what it was they complained about and apologize. Even if you feel you were misinterpreted or unfairly accused, chances are good there was something you could’ve communicated better — remember that it’s your responsibility to make your fellow Cosmonauts comfortable. Everyone wants to get along and we are all here first and foremost because we want to talk about cool technology. You will find that people will be eager to assume good intent and forgive as long as you earn their trust. - -The enforcement policies listed above apply to all official Tendermint/COSMOS venues. For other projects adopting the Tendermint/COSMOS Code of Conduct, please contact the maintainers of those projects for enforcement. If you wish to use this code of conduct for your own project, consider explicitly mentioning your moderation policy or making a copy with your own moderation policy so as to avoid confusion. - -*Adapted from the [Node.js Policy on Trolling](http://blog.izs.me/post/30036893703/policy-on-trolling), the [Contributor Covenant v1.3.0](http://contributor-covenant.org/version/1/3/0/) and the [Rust Code of Conduct](https://www.rust-lang.org/en-US/conduct.html).* diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 5fd2d982..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,117 +0,0 @@ -# Contributing - -Thank you for considering making contributions to Tendermint and related repositories! Start by taking a look at the [coding repo](https://github.com/tendermint/coding) for overall information on repository workflow and standards. - -Please follow standard GitHub best practices: fork the repo, branch from the tip of develop, make some commits, and submit a pull request to develop. See the [open issues](https://github.com/tendermint/tendermint/issues) for things we need help with! - -Please make sure to use `gofmt` before every commit - the easiest way to do this is have your editor run it for you upon saving a file. - -## Forking - -Please note that Go requires code to live under absolute paths, which complicates forking. -While my fork lives at `https://github.com/ebuchman/tendermint`, -the code should never exist at `$GOPATH/src/github.com/ebuchman/tendermint`. -Instead, we use `git remote` to add the fork as a new remote for the original repo, -`$GOPATH/src/github.com/tendermint/tendermint`, and do all the work there. - -For instance, to create a fork and work on a branch of it, I would: - - * Create the fork on GitHub, using the fork button. - * Go to the original repo checked out locally (i.e. `$GOPATH/src/github.com/tendermint/tendermint`) - * `git remote rename origin upstream` - * `git remote add origin git@github.com:ebuchman/tendermint.git` - -Now `origin` refers to my fork and `upstream` refers to the tendermint version. -So I can `git push -u origin master` to update my fork, and make pull requests to tendermint from there. -Of course, replace `ebuchman` with your git handle.
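Put together, the one-time setup above looks like this (with `ebuchman` standing in for your own GitHub handle):

```
cd "$GOPATH/src/github.com/tendermint/tendermint"
git remote rename origin upstream
git remote add origin git@github.com:ebuchman/tendermint.git
git push -u origin master   # update your fork; open pull requests against tendermint from here
```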
- -To pull in updates from the upstream repo, run - - * `git fetch upstream` - * `git rebase upstream/master` (or whatever branch you want) - -Please don't make Pull Requests to `master`. - -## Dependencies - -We use [dep](https://github.com/golang/dep) to manage dependencies. - -That said, the master branch of every Tendermint repository should just build -with `go get`, which means they should be kept up-to-date with their -dependencies so we can get away with telling people they can just `go get` our -software. - -Since some dependencies are not under our control, a third party may break our -build, in which case we can fall back on `dep ensure` (or `make -get_vendor_deps`). Even for dependencies under our control, dep helps us to -keep multiple repos in sync as they evolve. Anything with an executable, such -as apps, tools, and the core, should use dep. - -Run `dep status` to get a list of vendored dependencies that may not be -up-to-date (a consolidated sketch of these commands appears just before the Development Procedure section below). - -## Vagrant - -If you are a [Vagrant](https://www.vagrantup.com/) user, you can get started -hacking Tendermint with the commands below. - -NOTE: In case you installed Vagrant in 2017, you might need to run -`vagrant box update` to upgrade to the latest `ubuntu/xenial64`. - -``` -vagrant up -vagrant ssh -make test -``` - -## Testing - -All repos should be hooked up to [CircleCI](https://circleci.com/). - -If they have `.go` files in the root directory, they will be automatically -tested by CircleCI using `go test -v -race ./...`. If not, they will need a -`circle.yml`. Ideally, every repo has a `Makefile` that defines `make test` and -includes its continuous integration status using a badge in the `README.md`. - -## Branching Model and Release - -User-facing repos should adhere to the branching model: http://nvie.com/posts/a-successful-git-branching-model/. -That is, these repos should be well versioned, and any merge to master requires a version bump and tagged release. - -Libraries need not follow the model strictly, but would be wise to, -especially `go-p2p` and `go-rpc`, as their versions are referenced in tendermint core.
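For reference, the dependency commands from the Dependencies section above, collected in one place (a sketch; the `dep` subcommands are the standard dep CLI, and `make get_vendor_deps` is the repo target mentioned earlier):

```
dep status            # list vendored dependencies that may be out of date
dep ensure            # sync vendor/ with Gopkg.toml and Gopkg.lock
make get_vendor_deps  # the Makefile wrapper for the same recovery path
```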
- -### Development Procedure: -- the latest state of development is on `develop` -- `develop` must never fail `make test` -- no `--force` pushes onto `develop` (except when reverting a broken commit, which should seldom happen) -- create a development branch either on github.com/tendermint/tendermint, or your fork (using `git remote add origin`) -- before submitting a pull request, begin `git rebase` on top of `develop` - -### Pull Merge Procedure: -- ensure pull branch is rebased on develop -- run `make test` to ensure that all tests pass -- merge pull request -- the `unstable` branch may be used to aggregate pull merges before testing them all at once -- the push master may request that pull requests be rebased on top of `unstable` - -### Release Procedure: -- start on `develop` -- run integration tests (see `test_integrations` in Makefile) -- prepare changelog/release issue -- bump versions -- push to release-vX.X.X to run the extended integration tests on the CI -- merge to master -- merge master back to develop - -### Hotfix Procedure: -- start on `master` -- checkout a new branch named hotfix-vX.X.X -- make the required changes - - these changes should be small and an absolute necessity - - add a note to CHANGELOG.md -- bump versions -- push to hotfix-vX.X.X to run the extended integration tests on the CI -- merge hotfix-vX.X.X to master -- merge hotfix-vX.X.X to develop -- delete the hotfix-vX.X.X branch diff --git a/DOCKER/.gitignore b/DOCKER/.gitignore deleted file mode 100644 index 9059c684..00000000 --- a/DOCKER/.gitignore +++ /dev/null @@ -1 +0,0 @@ -tendermint diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile deleted file mode 100644 index 4a855f42..00000000 --- a/DOCKER/Dockerfile +++ /dev/null @@ -1,39 +0,0 @@ -FROM alpine:3.7 -MAINTAINER Greg Szabo - -# Tendermint will be looking for the genesis file in /tendermint/config/genesis.json -# (unless you change `genesis_file` in config.toml). You can put your config.toml and -# private validator file into /tendermint/config. -# -# The /tendermint/data dir is used by tendermint to store state. -ENV TMHOME /tendermint - -# OS environment setup -# Set user right away for determinism, create directory for persistence and give our user ownership -# jq and curl used for extracting `pub_key` from private validator while -# deploying tendermint with Kubernetes. It is nice to have bash so the users -# could execute bash commands. -RUN apk update && \ - apk upgrade && \ - apk --no-cache add curl jq bash && \ - addgroup tmuser && \ - adduser -S -G tmuser tmuser -h "$TMHOME" - -# Run the container with tmuser by default.
(UID=100, GID=1000) -USER tmuser - -# Expose the data directory as a volume since there's mutable state in there -VOLUME [ $TMHOME ] - -WORKDIR $TMHOME - -# p2p and rpc port -EXPOSE 26656 26657 - -ENTRYPOINT ["/usr/bin/tendermint"] -CMD ["node", "--moniker=`hostname`"] -STOPSIGNAL SIGTERM - -ARG BINARY=tendermint -COPY $BINARY /usr/bin/tendermint - diff --git a/DOCKER/Dockerfile.develop b/DOCKER/Dockerfile.develop deleted file mode 100644 index 5759e765..00000000 --- a/DOCKER/Dockerfile.develop +++ /dev/null @@ -1,35 +0,0 @@ -FROM alpine:3.7 - -ENV DATA_ROOT /tendermint -ENV TMHOME $DATA_ROOT - -RUN addgroup tmuser && \ - adduser -S -G tmuser tmuser - -RUN mkdir -p $DATA_ROOT && \ - chown -R tmuser:tmuser $DATA_ROOT - -RUN apk add --no-cache bash curl jq - -ENV GOPATH /go -ENV PATH "$PATH:/go/bin" -RUN mkdir -p /go/src/github.com/tendermint/tendermint && \ - apk add --no-cache go build-base git && \ - cd /go/src/github.com/tendermint/tendermint && \ - git clone https://github.com/tendermint/tendermint . && \ - git checkout develop && \ - make get_tools && \ - make get_vendor_deps && \ - make install && \ - cd - && \ - rm -rf /go/src/github.com/tendermint/tendermint && \ - apk del go build-base git - -VOLUME $DATA_ROOT - -EXPOSE 26656 -EXPOSE 26657 - -ENTRYPOINT ["tendermint"] - -CMD ["node", "--moniker=`hostname`", "--proxy_app=kvstore"] diff --git a/DOCKER/Dockerfile.testing b/DOCKER/Dockerfile.testing deleted file mode 100644 index b82afe2a..00000000 --- a/DOCKER/Dockerfile.testing +++ /dev/null @@ -1,18 +0,0 @@ -FROM golang:1.10.1 - - -# Grab deps (jq, hexdump, xxd, killall) -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - jq bsdmainutils vim-common psmisc netcat - -# Add testing deps for curl -RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list && \ - apt-get update && \ - apt-get install -y --no-install-recommends curl - -VOLUME /go - -EXPOSE 26656 -EXPOSE 26657 - diff --git a/DOCKER/Makefile b/DOCKER/Makefile deleted file mode 100644 index 32510ebb..00000000 --- a/DOCKER/Makefile +++ /dev/null @@ -1,16 +0,0 @@ -build: - @sh -c "'$(CURDIR)/build.sh'" - -push: - @sh -c "'$(CURDIR)/push.sh'" - -build_develop: - docker build -t "tendermint/tendermint:develop" -f Dockerfile.develop . - -build_testing: - docker build --tag tendermint/testing -f ./Dockerfile.testing . 
- -push_develop: - docker push "tendermint/tendermint:develop" - -.PHONY: build build_develop build_testing push push_develop diff --git a/DOCKER/README.md b/DOCKER/README.md deleted file mode 100644 index 43edce0f..00000000 --- a/DOCKER/README.md +++ /dev/null @@ -1,67 +0,0 @@ -# Docker - -## Supported tags and respective `Dockerfile` links - -- `0.17.1`, `latest` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/208ac32fa266657bd6c304e84ec828aa252bb0b8/DOCKER/Dockerfile) -- `0.15.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/170777300ea92dc21a8aec1abc16cb51812513a4/DOCKER/Dockerfile) -- `0.13.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/a28b3fff49dce2fb31f90abb2fc693834e0029c2/DOCKER/Dockerfile) -- `0.12.1` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/457c688346b565e90735431619ca3ca597ef9007/DOCKER/Dockerfile) -- `0.12.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/70d8afa6e952e24c573ece345560a5971bf2cc0e/DOCKER/Dockerfile) -- `0.11.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/9177cc1f64ca88a4a0243c5d1773d10fba67e201/DOCKER/Dockerfile) -- `0.10.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/e5342f4054ab784b2cd6150e14f01053d7c8deb2/DOCKER/Dockerfile) -- `0.9.1`, `0.9` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/809e0e8c5933604ba8b2d096803ada7c5ec4dfd3/DOCKER/Dockerfile) -- `0.9.0` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/d474baeeea6c22b289e7402449572f7c89ee21da/DOCKER/Dockerfile) -- `0.8.0`, `0.8` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/bf64dd21fdb193e54d8addaaaa2ecf7ac371de8c/DOCKER/Dockerfile) -- `develop` [(Dockerfile)](https://github.com/tendermint/tendermint/blob/master/DOCKER/Dockerfile.develop) - -The `develop` tag points to the [develop](https://github.com/tendermint/tendermint/tree/develop) branch. - -## Quick reference - -* **Where to get help:** - https://cosmos.network/community - -* **Where to file issues:** - https://github.com/tendermint/tendermint/issues - -* **Supported Docker versions:** - [the latest release](https://github.com/moby/moby/releases) (down to 1.6 on a best-effort basis) - -## Tendermint - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine, written in any programming language, and securely replicates it on many machines. - -For more background, see the [introduction](https://tendermint.readthedocs.io/en/master/introduction.html). - -To get started developing applications, see the [application developers guide](https://tendermint.readthedocs.io/en/master/getting-started.html). - -## How to use this image - -### Start one instance of the Tendermint core with the `kvstore` app - -A quick example of a built-in app and Tendermint core in one container. - -``` -docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint init -docker run -it --rm -v "/tmp:/tendermint" tendermint/tendermint node --proxy_app=kvstore -``` - -## Local cluster - -To run a 4-node network, see the `Makefile` in the root of [the repo](https://github.com/tendermint/tendermint/blob/master/Makefile) and run: - -``` -make build-linux -make build-docker-localnode -make localnet-start -``` - -Note that this will build and use a different image than the ones provided here. - -## License - -- Tendermint's license is [Apache 2.0](https://github.com/tendermint/tendermint/blob/master/LICENSE). - -## Contributing - -Contributions are most welcome!
See the [contributing file](https://github.com/tendermint/tendermint/blob/master/CONTRIBUTING.md) for more information. diff --git a/DOCKER/build.sh b/DOCKER/build.sh deleted file mode 100755 index ee617cc6..00000000 --- a/DOCKER/build.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the tag from the version, or try to figure it out. -if [ -z "$TAG" ]; then - TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go) -fi -if [ -z "$TAG" ]; then - echo "Please specify a tag." - exit 1 -fi - -TAG_NO_PATCH=${TAG%.*} - -read -p "==> Build 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then - docker build -t "tendermint/tendermint" -t "tendermint/tendermint:$TAG" -t "tendermint/tendermint:$TAG_NO_PATCH" . -fi diff --git a/DOCKER/push.sh b/DOCKER/push.sh deleted file mode 100755 index 32741dce..00000000 --- a/DOCKER/push.sh +++ /dev/null @@ -1,22 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the tag from the version, or try to figure it out. -if [ -z "$TAG" ]; then - TAG=$(awk -F\" '/Version =/ { print $2; exit }' < ../version/version.go) -fi -if [ -z "$TAG" ]; then - echo "Please specify a tag." - exit 1 -fi - -TAG_NO_PATCH=${TAG%.*} - -read -p "==> Push 3 docker images with the following tags (latest, $TAG, $TAG_NO_PATCH)? y/n" -n 1 -r -echo -if [[ $REPLY =~ ^[Yy]$ ]] -then - docker push "tendermint/tendermint:latest" - docker push "tendermint/tendermint:$TAG" - docker push "tendermint/tendermint:$TAG_NO_PATCH" -fi diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index e45b84d1..00000000 --- a/Gopkg.lock +++ /dev/null @@ -1,431 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. - - -[[projects]] - branch = "master" - name = "github.com/beorn7/perks" - packages = ["quantile"] - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - branch = "master" - name = "github.com/btcsuite/btcd" - packages = ["btcec"] - revision = "86fed781132ac890ee03e906e4ecd5d6fa180c64" - -[[projects]] - name = "github.com/davecgh/go-spew" - packages = ["spew"] - revision = "346938d642f2ec3594ed81d874461961cd0faa76" - version = "v1.1.0" - -[[projects]] - branch = "master" - name = "github.com/ebuchman/fail-test" - packages = ["."] - revision = "95f809107225be108efcf10a3509e4ea6ceef3c4" - -[[projects]] - name = "github.com/fortytw2/leaktest" - packages = ["."] - revision = "a5ef70473c97b71626b9abeda80ee92ba2a7de9e" - version = "v1.2.0" - -[[projects]] - name = "github.com/fsnotify/fsnotify" - packages = ["."] - revision = "c2828203cd70a50dcccfb2761f8b1f8ceef9a8e9" - version = "v1.4.7" - -[[projects]] - name = "github.com/go-kit/kit" - packages = [ - "log", - "log/level", - "log/term", - "metrics", - "metrics/discard", - "metrics/internal/lv", - "metrics/prometheus" - ] - revision = "4dc7be5d2d12881735283bcab7352178e190fc71" - version = "v0.6.0" - -[[projects]] - name = "github.com/go-logfmt/logfmt" - packages = ["."] - revision = "390ab7935ee28ec6b286364bba9b4dd6410cb3d5" - version = "v0.3.0" - -[[projects]] - name = "github.com/go-stack/stack" - packages = ["."] - revision = "259ab82a6cad3992b4e21ff5cac294ccb06474bc" - version = "v1.7.0" - -[[projects]] - name = "github.com/gogo/protobuf" - packages = [ - "gogoproto", - "jsonpb", - "proto", - "protoc-gen-gogo/descriptor", - "sortkeys", - "types" - ] - revision = "1adfc126b41513cc696b209667c8656ea7aac67c" - version = "v1.0.0" - -[[projects]] - name = "github.com/golang/protobuf" - 
packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp" - ] - revision = "925541529c1fa6821df4e44ce2723319eb2be768" - version = "v1.0.0" - -[[projects]] - branch = "master" - name = "github.com/golang/snappy" - packages = ["."] - revision = "2e65f85255dbc3072edf28d6b5b8efc472979f5a" - -[[projects]] - name = "github.com/gorilla/websocket" - packages = ["."] - revision = "ea4d1f681babbce9545c9c5f3d5194a789c89f5b" - version = "v1.2.0" - -[[projects]] - branch = "master" - name = "github.com/hashicorp/hcl" - packages = [ - ".", - "hcl/ast", - "hcl/parser", - "hcl/printer", - "hcl/scanner", - "hcl/strconv", - "hcl/token", - "json/parser", - "json/scanner", - "json/token" - ] - revision = "ef8a98b0bbce4a65b5aa4c368430a80ddc533168" - -[[projects]] - name = "github.com/inconshreveable/mousetrap" - packages = ["."] - revision = "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75" - version = "v1.0" - -[[projects]] - branch = "master" - name = "github.com/jmhodges/levigo" - packages = ["."] - revision = "c42d9e0ca023e2198120196f842701bb4c55d7b9" - -[[projects]] - branch = "master" - name = "github.com/kr/logfmt" - packages = ["."] - revision = "b84e30acd515aadc4b783ad4ff83aff3299bdfe0" - -[[projects]] - name = "github.com/magiconair/properties" - packages = ["."] - revision = "c2353362d570a7bfa228149c62842019201cfb71" - version = "v1.8.0" - -[[projects]] - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - branch = "master" - name = "github.com/mitchellh/mapstructure" - packages = ["."] - revision = "bb74f1db0675b241733089d5a1faa5dd8b0ef57b" - -[[projects]] - name = "github.com/pelletier/go-toml" - packages = ["."] - revision = "c01d1270ff3e442a8a57cddc1c92dc1138598194" - version = "v1.2.0" - -[[projects]] - name = "github.com/pkg/errors" - packages = ["."] - revision = "645ef00459ed84a119197bfb8d8205042c6df63d" - version = "v0.8.0" - -[[projects]] - name = "github.com/pmezard/go-difflib" - packages = ["difflib"] - revision = "792786c7400a136282c1664665ae0a8db921c6c2" - version = "v1.0.0" - -[[projects]] - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp" - ] - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - name = "github.com/prometheus/client_model" - packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" - -[[projects]] - branch = "master" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model" - ] - revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" - -[[projects]] - branch = "master" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs" - ] - revision = "94663424ae5ae9856b40a9f170762b4197024661" - -[[projects]] - branch = "master" - name = "github.com/rcrowley/go-metrics" - packages = ["."] - revision = "e2704e165165ec55d062f5919b4b29494e9fa790" - -[[projects]] - name = "github.com/spf13/afero" - packages = [ - ".", - "mem" - ] - revision = "787d034dfe70e44075ccc060d346146ef53270ad" - version = "v1.1.1" - -[[projects]] - name = "github.com/spf13/cast" - packages = ["."] - revision = "8965335b8c7107321228e3e3702cab9832751bac" - version = "v1.2.0" - -[[projects]] - name = "github.com/spf13/cobra" - packages = ["."] - revision = "ef82de70bb3f60c65fb8eebacbb2d122ef517385" - version = "v0.0.3" - -[[projects]] 
- branch = "master" - name = "github.com/spf13/jwalterweatherman" - packages = ["."] - revision = "7c0cea34c8ece3fbeb2b27ab9b59511d360fb394" - -[[projects]] - name = "github.com/spf13/pflag" - packages = ["."] - revision = "583c0c0531f06d5278b7d917446061adc344b5cd" - version = "v1.0.1" - -[[projects]] - name = "github.com/spf13/viper" - packages = ["."] - revision = "b5e8006cbee93ec955a89ab31e0e3ce3204f3736" - version = "v1.0.2" - -[[projects]] - name = "github.com/stretchr/testify" - packages = [ - "assert", - "require" - ] - revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" - version = "v1.2.2" - -[[projects]] - branch = "master" - name = "github.com/syndtr/goleveldb" - packages = [ - "leveldb", - "leveldb/cache", - "leveldb/comparer", - "leveldb/errors", - "leveldb/filter", - "leveldb/iterator", - "leveldb/journal", - "leveldb/memdb", - "leveldb/opt", - "leveldb/storage", - "leveldb/table", - "leveldb/util" - ] - revision = "e2150783cd35f5b607daca48afd8c57ec54cc995" - -[[projects]] - name = "github.com/tendermint/abci" - packages = [ - "client", - "example/code", - "example/counter", - "example/kvstore", - "server", - "types" - ] - revision = "198dccf0ddfd1bb176f87657e3286a05a6ed9540" - version = "v0.12.0" - -[[projects]] - branch = "master" - name = "github.com/tendermint/ed25519" - packages = [ - ".", - "edwards25519", - "extra25519" - ] - revision = "d8387025d2b9d158cf4efb07e7ebf814bcce2057" - -[[projects]] - name = "github.com/tendermint/go-amino" - packages = ["."] - revision = "ed62928576cfcaf887209dc96142cd79cdfff389" - version = "0.9.9" - -[[projects]] - name = "github.com/tendermint/go-crypto" - packages = ["."] - revision = "915416979bf70efa4bcbf1c6cd5d64c5fff9fc19" - version = "v0.6.2" - -[[projects]] - name = "github.com/tendermint/tmlibs" - packages = [ - "autofile", - "cli", - "cli/flags", - "clist", - "common", - "db", - "flowrate", - "log", - "merkle", - "test" - ] - revision = "692f1d86a6e2c0efa698fd1e4541b68c74ffaf38" - version = "v0.8.4" - -[[projects]] - branch = "master" - name = "golang.org/x/crypto" - packages = [ - "curve25519", - "nacl/box", - "nacl/secretbox", - "openpgp/armor", - "openpgp/errors", - "poly1305", - "ripemd160", - "salsa20/salsa" - ] - revision = "8ac0e0d97ce45cd83d1d7243c060cb8461dda5e9" - -[[projects]] - branch = "master" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace" - ] - revision = "db08ff08e8622530d9ed3a0e8ac279f6d4c02196" - -[[projects]] - branch = "master" - name = "golang.org/x/sys" - packages = ["unix"] - revision = "a9e25c09b96b8870693763211309e213c6ef299d" - -[[projects]] - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable" - ] - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - -[[projects]] - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" - -[[projects]] - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "codes", - "connectivity", - "credentials", - "grpclb/grpc_lb_v1/messages", - "grpclog", - "internal", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "stats", - "status", - "tap", - "transport" - ] - revision = 
"5b3c4e850e90a4cf6a20ebd46c8b32a0a3afcb9e" - version = "v1.7.5" - -[[projects]] - name = "gopkg.in/yaml.v2" - packages = ["."] - revision = "5420a8b6744d3b0345ab293f6fcba19c978f1183" - version = "v2.2.1" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - inputs-digest = "3bd388e520a08cd0aa14df2d6f5ecb46449d7c36fd80cf52eb775798e6accbaa" - solver-name = "gps-cdcl" - solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index 13339e55..00000000 --- a/Gopkg.toml +++ /dev/null @@ -1,103 +0,0 @@ -# Gopkg.toml example -# -# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md -# for detailed Gopkg.toml documentation. -# -# required = ["github.com/user/thing/cmd/thing"] -# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] -# -# [[constraint]] -# name = "github.com/user/project" -# version = "1.0.0" -# -# [[constraint]] -# name = "github.com/user/project2" -# branch = "dev" -# source = "github.com/myfork/project2" -# -# [[override]] -# name = "github.com/x/y" -# version = "2.4.0" -# -# [prune] -# non-go = false -# go-tests = true -# unused-packages = true - - -[[constraint]] - name = "github.com/ebuchman/fail-test" - branch = "master" - -[[constraint]] - name = "github.com/fortytw2/leaktest" - branch = "master" - -[[constraint]] - name = "github.com/go-kit/kit" - version = "~0.6.0" - -[[constraint]] - name = "github.com/gogo/protobuf" - version = "~1.0.0" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "~1.0.0" - -[[constraint]] - name = "github.com/gorilla/websocket" - version = "~1.2.0" - -[[constraint]] - name = "github.com/pkg/errors" - version = "~0.8.0" - -[[constraint]] - name = "github.com/rcrowley/go-metrics" - branch = "master" - -[[constraint]] - name = "github.com/spf13/cobra" - version = "~0.0.1" - -[[constraint]] - name = "github.com/spf13/viper" - version = "~1.0.0" - -[[constraint]] - name = "github.com/stretchr/testify" - version = "~1.2.1" - -[[constraint]] - name = "github.com/tendermint/abci" - version = "~0.12.0" - -[[constraint]] - name = "github.com/tendermint/go-crypto" - version = "~0.6.2" - -[[constraint]] - name = "github.com/tendermint/go-amino" - version = "=0.9.9" - -[[override]] - name = "github.com/tendermint/tmlibs" - version = "~0.8.4" - -[[constraint]] - name = "google.golang.org/grpc" - version = "~1.7.3" - -# this got updated and broke, so locked to an old working commit ... -[[override]] - name = "google.golang.org/genproto" - revision = "7fd901a49ba6a7f87732eb344f6e3c5b19d1b200" - -[prune] - go-tests = true - unused-packages = true - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = "0.8.0" diff --git a/LICENSE b/LICENSE deleted file mode 100644 index bb66bb35..00000000 --- a/LICENSE +++ /dev/null @@ -1,204 +0,0 @@ -Tendermint Core -License: Apache2.0 - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. 
This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "{}" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright 2016 All in Bits, Inc - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
diff --git a/Makefile b/Makefile deleted file mode 100755 index 079c58f9..00000000 --- a/Makefile +++ /dev/null @@ -1,236 +0,0 @@ -GOTOOLS = \ - github.com/golang/dep/cmd/dep \ - gopkg.in/alecthomas/gometalinter.v2 -PACKAGES=$(shell go list ./... | grep -v '/vendor/') -BUILD_TAGS?=tendermint -BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD`" - -all: check build test install - -check: check_tools ensure_deps - - -######################################## -### Build - -build: - CGO_ENABLED=0 go build $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ - -build_race: - CGO_ENABLED=0 go build -race $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint - -install: - CGO_ENABLED=0 go install $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' ./cmd/tendermint - -######################################## -### Distribution - -# dist builds binaries for all platforms and packages them for distribution -dist: - @BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'" - -######################################## -### Tools & dependencies - -check_tools: - @# https://stackoverflow.com/a/25668869 - @echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),\ - $(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))" - -get_tools: - @echo "--> Installing tools" - go get -u -v $(GOTOOLS) - @gometalinter.v2 --install - -update_tools: - @echo "--> Updating tools" - @go get -u $(GOTOOLS) - -#Run this from CI -get_vendor_deps: - @rm -rf vendor/ - @echo "--> Running dep" - @dep ensure -vendor-only - - -#Run this locally. -ensure_deps: - @rm -rf vendor/ - @echo "--> Running dep" - @dep ensure - -draw_deps: - @# requires brew install graphviz or apt-get install graphviz - go get github.com/RobotsAndPencils/goviz - @goviz -i github.com/tendermint/tendermint/cmd/tendermint -d 3 | dot -Tpng -o dependency-graph.png - -get_deps_bin_size: - @# Copy of build recipe with additional flags to perform binary size analysis - $(eval $(shell go build -work -a $(BUILD_FLAGS) -tags '$(BUILD_TAGS)' -o build/tendermint ./cmd/tendermint/ 2>&1)) - @find $(WORK) -type f -name "*.a" | xargs -I{} du -hxs "{}" | sort -rh | sed -e s:${WORK}/::g > deps_bin_size.log - @echo "Results can be found here: $(CURDIR)/deps_bin_size.log" - -######################################## -### Testing - -## required to be run first by most tests -build_docker_test_image: - docker build -t tester -f ./test/docker/Dockerfile . - -### coverage, app, persistence, and libs tests -test_cover: - # run the go unit tests with coverage - bash test/test_cover.sh - -test_apps: - # run the app tests using bash - # requires `abci-cli` and `tendermint` binaries installed - bash test/app/test.sh - -test_persistence: - # run the persistence tests using bash - # requires `abci-cli` installed - docker run --name run_persistence -t tester bash test/persist/test_failure_indices.sh - - # TODO undockerize - # bash test/persist/test_failure_indices.sh - -test_p2p: - docker rm -f rsyslog || true - rm -rf test/logs || true - mkdir test/logs - cd test/ - docker run -d -v "logs:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - cd .. 
- # requires the 'tester' image built above - bash test/p2p/test.sh tester - -need_abci: - bash scripts/install_abci_apps.sh - -test_integrations: - make build_docker_test_image - make get_tools - make get_vendor_deps - make install - make need_abci - make test_cover - make test_apps - make test_persistence - make test_p2p - -test_release: - @go test -tags release $(PACKAGES) - -test100: - @for i in {1..100}; do make test; done - -vagrant_test: - vagrant up - vagrant ssh -c 'make test_integrations' - -### go tests -test: - @echo "--> Running go test" - @go test $(PACKAGES) - -test_race: - @echo "--> Running go test --race" - @go test -v -race $(PACKAGES) - - -######################################## -### Formatting, linting, and vetting - -fmt: - @go fmt ./... - -metalinter: - @echo "--> Running linter" - @gometalinter.v2 --vendor --deadline=600s --disable-all \ - --enable=deadcode \ - --enable=gosimple \ - --enable=misspell \ - --enable=safesql \ - ./... - #--enable=gas \ - #--enable=maligned \ - #--enable=dupl \ - #--enable=errcheck \ - #--enable=goconst \ - #--enable=gocyclo \ - #--enable=goimports \ - #--enable=golint \ <== comments on anything exported - #--enable=gotype \ - #--enable=ineffassign \ - #--enable=interfacer \ - #--enable=megacheck \ - #--enable=staticcheck \ - #--enable=structcheck \ - #--enable=unconvert \ - #--enable=unparam \ - #--enable=unused \ - #--enable=varcheck \ - #--enable=vet \ - #--enable=vetshadow \ - -metalinter_all: - @echo "--> Running linter (all)" - gometalinter.v2 --vendor --deadline=600s --enable-all --disable=lll ./... - -########################################################### -### Docker image - -build-docker: - cp build/tendermint DOCKER/tendermint - docker build --label=tendermint --tag="tendermint/tendermint" DOCKER - rm -rf DOCKER/tendermint - -########################################################### -### Local testnet using docker - -# Build linux binary on other platforms -build-linux: - GOOS=linux GOARCH=amd64 $(MAKE) build - -build-docker-localnode: - cd networks/local - make - -# Run a 4-node testnet locally -localnet-start: localnet-stop - @if ! [ -f build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 4 --o . --populate-persistent-peers --starting-ip-address 192.167.10.2 ; fi - docker-compose up - -# Stop testnet -localnet-stop: - docker-compose down - -########################################################### -### Remote full-nodes (sentry) using terraform and ansible - -# Server management -sentry-start: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - @if ! [ -f $(HOME)/.ssh/id_rsa.pub ]; then ssh-keygen ; fi - cd networks/remote/terraform && terraform init && terraform apply -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" - @if ! [ -f $(CURDIR)/build/node0/config/genesis.json ]; then docker run --rm -v $(CURDIR)/build:/tendermint:Z tendermint/localnode testnet --v 0 --n 4 --o . ; fi - cd networks/remote/ansible && ANSIBLE_HOST_KEY_CHECKING=False ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml - @echo "Next step: Add your validator setup in the genesis.json and config.toml files and run \"make sentry-config\". 
(Public key of validator, chain ID, peer IP and node ID.)" - -# Configuration management -sentry-config: - cd networks/remote/ansible && ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$(CURDIR)/build/tendermint -e CONFIGDIR=$(CURDIR)/build - -sentry-stop: - @if [ -z "$(DO_API_TOKEN)" ]; then echo "DO_API_TOKEN environment variable not set." ; false ; fi - cd networks/remote/terraform && terraform destroy -var DO_API_TOKEN="$(DO_API_TOKEN)" -var SSH_KEY_FILE="$(HOME)/.ssh/id_rsa.pub" - -# meant for the CI, inspect script & adapt accordingly -build-slate: - bash scripts/slate.sh - -# To avoid unintended conflicts with file names, always add to .PHONY -# unless there is a reason not to. -# https://www.gnu.org/software/make/manual/html_node/Phony-Targets.html -.PHONY: check build build_race dist install check_tools get_tools update_tools get_vendor_deps draw_deps test_cover test_apps test_persistence test_p2p test test_race test_integrations test_release test100 vagrant_test fmt build-linux localnet-start localnet-stop build-docker build-docker-localnode sentry-start sentry-config sentry-stop build-slate diff --git a/README.md b/README.md deleted file mode 100644 index 52e93a7c..00000000 --- a/README.md +++ /dev/null @@ -1,138 +0,0 @@ -# Tendermint - -[Byzantine-Fault Tolerant](https://en.wikipedia.org/wiki/Byzantine_fault_tolerance) -[State Machine Replication](https://en.wikipedia.org/wiki/State_machine_replication). -Or [Blockchain](https://en.wikipedia.org/wiki/Blockchain_(database)) for short. - -[![version](https://img.shields.io/github/tag/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/releases/latest) -[![API Reference]( -https://camo.githubusercontent.com/915b7be44ada53c290eb157634330494ebe3e30a/68747470733a2f2f676f646f632e6f72672f6769746875622e636f6d2f676f6c616e672f6764646f3f7374617475732e737667 -)](https://godoc.org/github.com/tendermint/tendermint) -[![Go version](https://img.shields.io/badge/go-1.9.2-blue.svg)](https://github.com/moovweb/gvm) -[![riot.im](https://img.shields.io/badge/riot.im-JOIN%20CHAT-green.svg)](https://riot.im/app/#/room/#tendermint:matrix.org) -[![license](https://img.shields.io/github/license/tendermint/tendermint.svg)](https://github.com/tendermint/tendermint/blob/master/LICENSE) -[![](https://tokei.rs/b1/github/tendermint/tendermint?category=lines)](https://github.com/tendermint/tendermint) - - -Branch | Tests | Coverage -----------|-------|---------- -master | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/master.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/master) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/master/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) -develop | [![CircleCI](https://circleci.com/gh/tendermint/tendermint/tree/develop.svg?style=shield)](https://circleci.com/gh/tendermint/tendermint/tree/develop) | [![codecov](https://codecov.io/gh/tendermint/tendermint/branch/develop/graph/badge.svg)](https://codecov.io/gh/tendermint/tendermint) - -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state transition machine - written in any programming language - -and securely replicates it on many machines. - -For protocol details, see [the specification](/docs/spec). 
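To make the "state transition machine" idea concrete, here is a minimal sketch of an ABCI application in Go. It assumes the `github.com/tendermint/abci` v0.12 API pinned in the Gopkg.lock above; the counter logic, listen address, and port are illustrative, not part of this repository.

```go
package main

import (
	"fmt"

	"github.com/tendermint/abci/server"
	"github.com/tendermint/abci/types"
)

// CounterApp is a toy deterministic state machine: its whole state is one
// integer, and every transaction increments it. Tendermint handles the
// networking and consensus; the app only defines state transitions.
type CounterApp struct {
	types.BaseApplication // provides no-op stubs for the ABCI methods we don't override
	count int64
}

// DeliverTx applies a committed transaction to the application state.
func (app *CounterApp) DeliverTx(tx []byte) types.ResponseDeliverTx {
	app.count++
	return types.ResponseDeliverTx{Code: 0} // code 0 means OK
}

// Info lets Tendermint query the application's current state.
func (app *CounterApp) Info(req types.RequestInfo) types.ResponseInfo {
	return types.ResponseInfo{Data: fmt.Sprintf("count=%d", app.count)}
}

func main() {
	// Serve the app over an ABCI socket; a node started with
	// `tendermint node --proxy_app tcp://127.0.0.1:46658` would connect here.
	srv, err := server.NewServer("tcp://127.0.0.1:46658", "socket", &CounterApp{})
	if err != nil {
		panic(err)
	}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	select {} // run until killed
}
```

Because the app never touches the network itself, the same binary can back a single dev node or a replicated testnet like the ones the Makefile targets above spin up.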
- -## A Note on Production Readiness - -While Tendermint is being used in production in private, permissioned -environments, we are still working actively to harden and audit it in preparation -for use in public blockchains, such as the [Cosmos Network](https://cosmos.network/). -We are also still making breaking changes to the protocol and the APIs. -Thus we tag the releases as *alpha software*. - -In any case, if you intend to run Tendermint in production, -please [contact us](https://riot.im/app/#/room/#tendermint:matrix.org) :) - -## Security - -To report a security vulnerability, see our [bug bounty -program](https://tendermint.com/security). - -For examples of the kinds of bugs we're looking for, see [SECURITY.md](SECURITY.md). - -## Minimum requirements - -Requirement|Notes ----|--- -Go version | Go1.9 or higher - -## Install - -See the [install instructions](/docs/install.rst). - -## Quick Start - -- [Single node](/docs/using-tendermint.rst) -- [Local cluster using docker-compose](/networks/local) -- [Remote cluster using terraform and ansible](/docs/terraform-and-ansible.rst) -- [Join the public testnet](https://cosmos.network/testnet) - -## Resources - -### Tendermint Core - -For details about the blockchain data structures and the p2p protocols, see -the [Tendermint specification](/docs/spec). - -For details on using the software, see [Read The Docs](https://tendermint.readthedocs.io/en/master/). -Additional information about some - and eventually all - of the sub-projects below can be found at Read The Docs. - - -### Sub-projects - -* [ABCI](http://github.com/tendermint/abci), the Application Blockchain Interface -* [Go-Wire](http://github.com/tendermint/go-wire), a deterministic serialization library -* [Go-Crypto](http://github.com/tendermint/go-crypto), an elliptic curve cryptography library -* [TmLibs](http://github.com/tendermint/tmlibs), an assortment of Go libraries used internally -* [IAVL](http://github.com/tendermint/iavl), Merkleized IAVL+ Tree implementation - -### Tools -* [Deployment, Benchmarking, and Monitoring](http://tendermint.readthedocs.io/projects/tools/en/develop/index.html#tendermint-tools) - -### Applications - -* [Cosmos SDK](http://github.com/cosmos/cosmos-sdk), a cryptocurrency application framework -* [Ethermint](http://github.com/tendermint/ethermint), Ethereum on Tendermint -* [Many more](https://tendermint.readthedocs.io/en/master/ecosystem.html#abci-applications) - -### More - -* [Master's Thesis on Tendermint](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769) -* [Original Whitepaper](https://tendermint.com/static/docs/tendermint.pdf) -* [Tendermint Blog](https://blog.cosmos.network/tendermint/home) -* [Cosmos Blog](https://blog.cosmos.network) - -## Contributing - -Yay open source! Please see our [contributing guidelines](CONTRIBUTING.md). - -## Versioning - -### SemVer - -Tendermint uses [SemVer](http://semver.org/) to determine when and how the version changes. -According to SemVer, anything in the public API can change at any time before version 1.0.0. - -To provide some stability to Tendermint users in these 0.X.X days, the MINOR version is used -to signal breaking changes across a subset of the total public API. This subset includes all -interfaces exposed to other processes (cli, rpc, p2p, etc.), but does not -include the in-process Go APIs.
- -That said, breaking changes in the following packages will be documented in the -CHANGELOG even if they don't lead to MINOR version bumps: - -- types -- rpc/client -- config -- node - -Exported objects in these packages that are not covered by the versioning scheme -are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any time. -Functions, types, and values in any other package may also change at any time. - -### Upgrades - -In an effort to avoid accumulating technical debt prior to 1.0.0, -we do not guarantee that breaking changes (i.e. bumps in the MINOR version) -will work with existing Tendermint blockchains. In these cases you will -have to start a new blockchain, or write something custom to get the old -data into the new chain. - -However, any bump in the PATCH version should be compatible with existing histories -(if not, please open an [issue](https://github.com/tendermint/tendermint/issues)). - -## Code of Conduct - -Please read, understand, and adhere to our [code of conduct](CODE_OF_CONDUCT.md). diff --git a/ROADMAP.md b/ROADMAP.md deleted file mode 100644 index 60c28433..00000000 --- a/ROADMAP.md +++ /dev/null @@ -1,23 +0,0 @@ -# Roadmap - -BREAKING CHANGES: -- Better support for injecting randomness -- Upgrade consensus for more real-time use of evidence - -FEATURES: -- Use the chain as its own CA for nodes and validators -- Tooling to run multiple blockchains/apps, possibly in a single process -- State syncing (without transaction replay) -- Add authentication and rate-limiting to the RPC - -IMPROVEMENTS: -- Improve subtleties around mempool caching and logic -- Consensus optimizations: - - cache block parts for faster agreement after round changes - - propagate block parts rarest first -- Better testing of the consensus state machine (i.e. use a DSL) -- Auto-compiled serialization/deserialization code instead of go-wire reflection - -BUG FIXES: -- Graceful handling/recovery for apps that have non-determinism or fail to halt -- Graceful handling/recovery for violations of safety or liveness diff --git a/SECURITY.md b/SECURITY.md deleted file mode 100644 index 8b979378..00000000 --- a/SECURITY.md +++ /dev/null @@ -1,71 +0,0 @@ -# Security - -As part of our [Coordinated Vulnerability Disclosure -Policy](https://tendermint.com/security), we operate a bug bounty. -See the policy for more details on submissions and rewards.
- -Here is a list of examples of the kinds of bugs we're most interested in: - -## Specification - -- Conceptual flaws -- Ambiguities, inconsistencies, or incorrect statements -- Mismatch between specification and implementation of any component - -## Consensus - -Assuming less than 1/3 of the voting power is Byzantine (malicious): - -- Validation of blockchain data structures, including blocks, block parts, - votes, and so on -- Execution of blocks -- Validator set changes -- Proposer round-robin -- Two nodes committing conflicting blocks for the same height (safety failure) -- A correct node signing conflicting votes -- A node halting (liveness failure) -- Syncing new and old nodes - -## Networking - -- Authenticated encryption (MITM, information leakage) -- Eclipse attacks -- Sybil attacks -- Long-range attacks -- Denial-of-Service - -## RPC - -- Write-access to anything besides sending transactions -- Denial-of-Service -- Leakage of secrets - -## Denial-of-Service - -Attacks may come through the P2P network or the RPC: - -- Amplification attacks -- Resource abuse -- Deadlocks and race conditions -- Panics and unhandled errors - -## Libraries - -- Serialization (Amino) -- Reading/Writing files and databases -- Logging and monitoring - -## Cryptography - -- Elliptic curves for validator signatures -- Hash algorithms and Merkle trees for block validation -- Authenticated encryption for P2P connections - -## Light Client - -- Validation of blockchain data structures -- Correctly validating an incorrect proof -- Incorrectly validating a correct proof -- Syncing validator set changes - - diff --git a/Vagrantfile b/Vagrantfile deleted file mode 100644 index 095a6b06..00000000 --- a/Vagrantfile +++ /dev/null @@ -1,58 +0,0 @@ -# -*- mode: ruby -*- -# vi: set ft=ruby : - -Vagrant.configure("2") do |config| - config.vm.box = "ubuntu/xenial64" - - config.vm.provider "virtualbox" do |v| - v.memory = 4096 - v.cpus = 2 - end - - config.vm.provision "shell", inline: <<-SHELL - apt-get update - - # install base requirements - apt-get install -y --no-install-recommends wget curl jq zip \ - make shellcheck bsdmainutils psmisc - apt-get install -y language-pack-en - - # install docker - apt-get install -y --no-install-recommends apt-transport-https \ - ca-certificates curl software-properties-common - curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add - - add-apt-repository \ - "deb [arch=amd64] https://download.docker.com/linux/ubuntu \ - $(lsb_release -cs) \ - stable" - apt-get install -y docker-ce - usermod -a -G docker vagrant - - # install go - wget -q https://dl.google.com/go/go1.10.1.linux-amd64.tar.gz - tar -xvf go1.10.1.linux-amd64.tar.gz - mv go /usr/local - rm -f go1.10.1.linux-amd64.tar.gz - - # cleanup - apt-get autoremove -y - - # set env variables - echo 'export GOROOT=/usr/local/go' >> /home/vagrant/.bash_profile - echo 'export GOPATH=/home/vagrant/go' >> /home/vagrant/.bash_profile - echo 'export PATH=$PATH:$GOROOT/bin:$GOPATH/bin' >> /home/vagrant/.bash_profile - echo 'export LC_ALL=en_US.UTF-8' >> /home/vagrant/.bash_profile - echo 'cd go/src/github.com/tendermint/tendermint' >> /home/vagrant/.bash_profile - - mkdir -p /home/vagrant/go/bin - mkdir -p /home/vagrant/go/src/github.com/tendermint - ln -s /vagrant /home/vagrant/go/src/github.com/tendermint/tendermint - - chown -R vagrant:vagrant /home/vagrant/go - chown vagrant:vagrant /home/vagrant/.bash_profile - - # get all deps and tools, ready to install/test - su - vagrant -c 'source /home/vagrant/.bash_profile' - 
su - vagrant -c 'cd /home/vagrant/go/src/github.com/tendermint/tendermint && make get_tools && make get_vendor_deps' - SHELL -end diff --git a/appveyor.yml b/appveyor.yml deleted file mode 100644 index 1ddf8fdd..00000000 --- a/appveyor.yml +++ /dev/null @@ -1,13 +0,0 @@ -version: 1.0.{build} -configuration: Release -platform: -- x64 -- x86 -clone_folder: c:\go\path\src\github.com\tendermint\tendermint -before_build: -- cmd: set GOPATH=%GOROOT%\path -- cmd: set PATH=%GOPATH%\bin;%PATH% -- cmd: make get_vendor_deps -build_script: -- cmd: make test -test: off diff --git a/benchmarks/atomic_test.go b/benchmarks/atomic_test.go deleted file mode 100644 index 5fe4832d..00000000 --- a/benchmarks/atomic_test.go +++ /dev/null @@ -1,29 +0,0 @@ -package benchmarks - -import ( - "sync/atomic" - "testing" - "unsafe" -) - -func BenchmarkAtomicUintPtr(b *testing.B) { - b.StopTimer() - pointers := make([]uintptr, 1000) - b.Log(unsafe.Sizeof(pointers[0])) - b.StartTimer() - - for j := 0; j < b.N; j++ { - atomic.StoreUintptr(&pointers[j%1000], uintptr(j)) - } -} - -func BenchmarkAtomicPointer(b *testing.B) { - b.StopTimer() - pointers := make([]unsafe.Pointer, 1000) - b.Log(unsafe.Sizeof(pointers[0])) - b.StartTimer() - - for j := 0; j < b.N; j++ { - atomic.StorePointer(&pointers[j%1000], unsafe.Pointer(uintptr(j))) - } -} diff --git a/benchmarks/blockchain/.gitignore b/benchmarks/blockchain/.gitignore deleted file mode 100644 index 9e67bd47..00000000 --- a/benchmarks/blockchain/.gitignore +++ /dev/null @@ -1,2 +0,0 @@ -data - diff --git a/benchmarks/blockchain/localsync.sh b/benchmarks/blockchain/localsync.sh deleted file mode 100755 index 2dc3e49c..00000000 --- a/benchmarks/blockchain/localsync.sh +++ /dev/null @@ -1,80 +0,0 @@ -#!/bin/bash - -DATA=$GOPATH/src/github.com/tendermint/tendermint/benchmarks/blockchain/data -if [ ! -d $DATA ]; then - echo "no data found, generating a chain... (this only has to happen once)" - - tendermint init --home $DATA - cp $DATA/config.toml $DATA/config2.toml - echo " - [consensus] - timeout_commit = 0 - " >> $DATA/config.toml - - echo "starting node" - tendermint node \ - --home $DATA \ - --proxy_app kvstore \ - --p2p.laddr tcp://127.0.0.1:56656 \ - --rpc.laddr tcp://127.0.0.1:56657 \ - --log_level error & - - echo "making blocks for 60s" - sleep 60 - - mv $DATA/config2.toml $DATA/config.toml - - kill %1 - - echo "done generating chain." -fi - -# validator node -HOME1=$TMPDIR$RANDOM$RANDOM -cp -R $DATA $HOME1 -echo "starting validator node" -tendermint node \ - --home $HOME1 \ - --proxy_app kvstore \ - --p2p.laddr tcp://127.0.0.1:56656 \ - --rpc.laddr tcp://127.0.0.1:56657 \ - --log_level error & -sleep 1 - -# downloader node -HOME2=$TMPDIR$RANDOM$RANDOM -tendermint init --home $HOME2 -cp $HOME1/genesis.json $HOME2 -printf "starting downloader node" -tendermint node \ - --home $HOME2 \ - --proxy_app kvstore \ - --p2p.laddr tcp://127.0.0.1:56666 \ - --rpc.laddr tcp://127.0.0.1:56667 \ - --p2p.persistent_peers 127.0.0.1:56656 \ - --log_level error & - -# wait for node to start up so we only count time where we are actually syncing -sleep 0.5 -while curl localhost:56667/status 2> /dev/null | grep "\"latest_block_height\": 0," > /dev/null -do - printf '.' 
- sleep 0.2 -done -echo - -echo "syncing blockchain for 10s" -for i in {1..10} -do - sleep 1 - HEIGHT="$(curl localhost:56667/status 2> /dev/null \ - | grep 'latest_block_height' \ - | grep -o ' [0-9]*' \ - | xargs)" - let 'RATE = HEIGHT / i' - echo "height: $HEIGHT, blocks/sec: $RATE" -done - -kill %1 -kill %2 -rm -rf $HOME1 $HOME2 diff --git a/benchmarks/chan_test.go b/benchmarks/chan_test.go deleted file mode 100644 index 78b70c9b..00000000 --- a/benchmarks/chan_test.go +++ /dev/null @@ -1,19 +0,0 @@ -package benchmarks - -import ( - "testing" -) - -func BenchmarkChanMakeClose(b *testing.B) { - b.StopTimer() - b.StartTimer() - - for j := 0; j < b.N; j++ { - foo := make(chan struct{}) - close(foo) - something, ok := <-foo - if ok { - b.Error(something, ok) - } - } -} diff --git a/benchmarks/codec_test.go b/benchmarks/codec_test.go deleted file mode 100644 index 9acafce7..00000000 --- a/benchmarks/codec_test.go +++ /dev/null @@ -1,129 +0,0 @@ -package benchmarks - -import ( - "testing" - "time" - - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" - - proto "github.com/tendermint/tendermint/benchmarks/proto" - "github.com/tendermint/tendermint/p2p" - ctypes "github.com/tendermint/tendermint/rpc/core/types" -) - -func BenchmarkEncodeStatusWire(b *testing.B) { - b.StopTimer() - cdc := amino.NewCodec() - ctypes.RegisterAmino(cdc) - nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} - status := &ctypes.ResultStatus{ - NodeInfo: p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: []string{"SOMESTRING", "OTHERSTRING"}, - }, - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: []byte("SOMEBYTES"), - LatestBlockHeight: 123, - LatestBlockTime: time.Unix(0, 1234), - }, - ValidatorInfo: ctypes.ValidatorInfo{ - PubKey: nodeKey.PubKey(), - }, - } - b.StartTimer() - - counter := 0 - for i := 0; i < b.N; i++ { - jsonBytes, err := cdc.MarshalJSON(status) - if err != nil { - panic(err) - } - counter += len(jsonBytes) - } - -} - -func BenchmarkEncodeNodeInfoWire(b *testing.B) { - b.StopTimer() - cdc := amino.NewCodec() - ctypes.RegisterAmino(cdc) - nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} - nodeInfo := p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: []string{"SOMESTRING", "OTHERSTRING"}, - } - b.StartTimer() - - counter := 0 - for i := 0; i < b.N; i++ { - jsonBytes, err := cdc.MarshalJSON(nodeInfo) - if err != nil { - panic(err) - } - counter += len(jsonBytes) - } -} - -func BenchmarkEncodeNodeInfoBinary(b *testing.B) { - b.StopTimer() - cdc := amino.NewCodec() - ctypes.RegisterAmino(cdc) - nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} - nodeInfo := p2p.NodeInfo{ - ID: nodeKey.ID(), - Moniker: "SOMENAME", - Network: "SOMENAME", - ListenAddr: "SOMEADDR", - Version: "SOMEVER", - Other: []string{"SOMESTRING", "OTHERSTRING"}, - } - b.StartTimer() - - counter := 0 - for i := 0; i < b.N; i++ { - jsonBytes := cdc.MustMarshalBinaryBare(nodeInfo) - counter += len(jsonBytes) - } - -} - -func BenchmarkEncodeNodeInfoProto(b *testing.B) { - b.StopTimer() - nodeKey := p2p.NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} - nodeID := string(nodeKey.ID()) - someName := "SOMENAME" - someAddr := "SOMEADDR" - someVer := "SOMEVER" - someString := "SOMESTRING" - otherString := "OTHERSTRING" - nodeInfo := proto.NodeInfo{ - Id: &proto.ID{Id: &nodeID}, - Moniker: &someName, - Network: &someName, - ListenAddr: 
&someAddr, - Version: &someVer, - Other: []string{someString, otherString}, - } - b.StartTimer() - - counter := 0 - for i := 0; i < b.N; i++ { - bytes, err := nodeInfo.Marshal() - if err != nil { - b.Fatal(err) - return - } - //jsonBytes := wire.JSONBytes(nodeInfo) - counter += len(bytes) - } - -} diff --git a/benchmarks/empty.go b/benchmarks/empty.go deleted file mode 100644 index 20f08f14..00000000 --- a/benchmarks/empty.go +++ /dev/null @@ -1 +0,0 @@ -package benchmarks diff --git a/benchmarks/map_test.go b/benchmarks/map_test.go deleted file mode 100644 index 2d978902..00000000 --- a/benchmarks/map_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package benchmarks - -import ( - "testing" - - cmn "github.com/tendermint/tmlibs/common" -) - -func BenchmarkSomething(b *testing.B) { - b.StopTimer() - numItems := 100000 - numChecks := 100000 - keys := make([]string, numItems) - for i := 0; i < numItems; i++ { - keys[i] = cmn.RandStr(100) - } - txs := make([]string, numChecks) - for i := 0; i < numChecks; i++ { - txs[i] = cmn.RandStr(100) - } - b.StartTimer() - - counter := 0 - for j := 0; j < b.N; j++ { - foo := make(map[string]string) - for _, key := range keys { - foo[key] = key - } - for _, tx := range txs { - if _, ok := foo[tx]; ok { - counter++ - } - } - } -} diff --git a/benchmarks/os_test.go b/benchmarks/os_test.go deleted file mode 100644 index dfadc312..00000000 --- a/benchmarks/os_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package benchmarks - -import ( - "os" - "testing" - - cmn "github.com/tendermint/tmlibs/common" -) - -func BenchmarkFileWrite(b *testing.B) { - b.StopTimer() - file, err := os.OpenFile("benchmark_file_write.out", - os.O_RDWR|os.O_CREATE|os.O_APPEND, 0600) - if err != nil { - b.Error(err) - } - testString := cmn.RandStr(200) + "\n" - b.StartTimer() - - for i := 0; i < b.N; i++ { - _, err := file.Write([]byte(testString)) - if err != nil { - b.Error(err) - } - } - - if err := file.Close(); err != nil { - b.Error(err) - } - if err := os.Remove("benchmark_file_write.out"); err != nil { - b.Error(err) - } -} diff --git a/benchmarks/proto/README b/benchmarks/proto/README deleted file mode 100644 index 87ece257..00000000 --- a/benchmarks/proto/README +++ /dev/null @@ -1,2 +0,0 @@ -Doing some protobuf tests here. -Using gogoprotobuf. diff --git a/benchmarks/proto/test.pb.go b/benchmarks/proto/test.pb.go deleted file mode 100644 index d430eeb0..00000000 --- a/benchmarks/proto/test.pb.go +++ /dev/null @@ -1,1456 +0,0 @@ -// Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: test.proto - -/* - Package test is a generated protocol buffer package. - - It is generated from these files: - test.proto - - It has these top-level messages: - ResultStatus - NodeInfo - ID - PubKey - PubKeyEd25519 -*/ -package test - -import proto "github.com/gogo/protobuf/proto" -import fmt "fmt" -import math "math" - -import io "io" - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package - -type ResultStatus struct { - NodeInfo *NodeInfo `protobuf:"bytes,1,opt,name=nodeInfo" json:"nodeInfo,omitempty"` - PubKey *PubKey `protobuf:"bytes,2,req,name=pubKey" json:"pubKey,omitempty"` - LatestBlockHash []byte `protobuf:"bytes,3,req,name=latestBlockHash" json:"latestBlockHash,omitempty"` - LatestBlockHeight *int64 `protobuf:"varint,4,req,name=latestBlockHeight" json:"latestBlockHeight,omitempty"` - LatestBlocktime *int64 `protobuf:"varint,5,req,name=latestBlocktime" json:"latestBlocktime,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ResultStatus) Reset() { *m = ResultStatus{} } -func (m *ResultStatus) String() string { return proto.CompactTextString(m) } -func (*ResultStatus) ProtoMessage() {} -func (*ResultStatus) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{0} } - -func (m *ResultStatus) GetNodeInfo() *NodeInfo { - if m != nil { - return m.NodeInfo - } - return nil -} - -func (m *ResultStatus) GetPubKey() *PubKey { - if m != nil { - return m.PubKey - } - return nil -} - -func (m *ResultStatus) GetLatestBlockHash() []byte { - if m != nil { - return m.LatestBlockHash - } - return nil -} - -func (m *ResultStatus) GetLatestBlockHeight() int64 { - if m != nil && m.LatestBlockHeight != nil { - return *m.LatestBlockHeight - } - return 0 -} - -func (m *ResultStatus) GetLatestBlocktime() int64 { - if m != nil && m.LatestBlocktime != nil { - return *m.LatestBlocktime - } - return 0 -} - -type NodeInfo struct { - Id *ID `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` - Moniker *string `protobuf:"bytes,2,req,name=moniker" json:"moniker,omitempty"` - Network *string `protobuf:"bytes,3,req,name=network" json:"network,omitempty"` - RemoteAddr *string `protobuf:"bytes,4,req,name=remoteAddr" json:"remoteAddr,omitempty"` - ListenAddr *string `protobuf:"bytes,5,req,name=listenAddr" json:"listenAddr,omitempty"` - Version *string `protobuf:"bytes,6,req,name=version" json:"version,omitempty"` - Other []string `protobuf:"bytes,7,rep,name=other" json:"other,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *NodeInfo) Reset() { *m = NodeInfo{} } -func (m *NodeInfo) String() string { return proto.CompactTextString(m) } -func (*NodeInfo) ProtoMessage() {} -func (*NodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{1} } - -func (m *NodeInfo) GetId() *ID { - if m != nil { - return m.Id - } - return nil -} - -func (m *NodeInfo) GetMoniker() string { - if m != nil && m.Moniker != nil { - return *m.Moniker - } - return "" -} - -func (m *NodeInfo) GetNetwork() string { - if m != nil && m.Network != nil { - return *m.Network - } - return "" -} - -func (m *NodeInfo) GetRemoteAddr() string { - if m != nil && m.RemoteAddr != nil { - return *m.RemoteAddr - } - return "" -} - -func (m *NodeInfo) GetListenAddr() string { - if m != nil && m.ListenAddr != nil { - return *m.ListenAddr - } - return "" -} - -func (m *NodeInfo) GetVersion() string { - if m != nil && m.Version != nil { - return *m.Version - } - return "" -} - -func (m *NodeInfo) GetOther() []string { - if m != nil { - return m.Other - } - return nil -} - -type ID struct { - Id *string `protobuf:"bytes,1,req,name=id" json:"id,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *ID) Reset() { *m = ID{} } -func (m *ID) String() string { return proto.CompactTextString(m) } -func (*ID) ProtoMessage() {} -func (*ID) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{2} } - -func (m *ID) 
GetId() string { - if m != nil && m.Id != nil { - return *m.Id - } - return "" -} - -type PubKey struct { - Ed25519 *PubKeyEd25519 `protobuf:"bytes,1,opt,name=ed25519" json:"ed25519,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PubKey) Reset() { *m = PubKey{} } -func (m *PubKey) String() string { return proto.CompactTextString(m) } -func (*PubKey) ProtoMessage() {} -func (*PubKey) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{3} } - -func (m *PubKey) GetEd25519() *PubKeyEd25519 { - if m != nil { - return m.Ed25519 - } - return nil -} - -type PubKeyEd25519 struct { - Bytes []byte `protobuf:"bytes,1,req,name=bytes" json:"bytes,omitempty"` - XXX_unrecognized []byte `json:"-"` -} - -func (m *PubKeyEd25519) Reset() { *m = PubKeyEd25519{} } -func (m *PubKeyEd25519) String() string { return proto.CompactTextString(m) } -func (*PubKeyEd25519) ProtoMessage() {} -func (*PubKeyEd25519) Descriptor() ([]byte, []int) { return fileDescriptorTest, []int{4} } - -func (m *PubKeyEd25519) GetBytes() []byte { - if m != nil { - return m.Bytes - } - return nil -} - -func init() { - proto.RegisterType((*ResultStatus)(nil), "ResultStatus") - proto.RegisterType((*NodeInfo)(nil), "NodeInfo") - proto.RegisterType((*ID)(nil), "ID") - proto.RegisterType((*PubKey)(nil), "PubKey") - proto.RegisterType((*PubKeyEd25519)(nil), "PubKeyEd25519") -} -func (m *ResultStatus) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ResultStatus) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.NodeInfo != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintTest(dAtA, i, uint64(m.NodeInfo.Size())) - n1, err := m.NodeInfo.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n1 - } - if m.PubKey == nil { - return 0, proto.NewRequiredNotSetError("pubKey") - } else { - dAtA[i] = 0x12 - i++ - i = encodeVarintTest(dAtA, i, uint64(m.PubKey.Size())) - n2, err := m.PubKey.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n2 - } - if m.LatestBlockHash == nil { - return 0, proto.NewRequiredNotSetError("latestBlockHash") - } else { - dAtA[i] = 0x1a - i++ - i = encodeVarintTest(dAtA, i, uint64(len(m.LatestBlockHash))) - i += copy(dAtA[i:], m.LatestBlockHash) - } - if m.LatestBlockHeight == nil { - return 0, proto.NewRequiredNotSetError("latestBlockHeight") - } else { - dAtA[i] = 0x20 - i++ - i = encodeVarintTest(dAtA, i, uint64(*m.LatestBlockHeight)) - } - if m.LatestBlocktime == nil { - return 0, proto.NewRequiredNotSetError("latestBlocktime") - } else { - dAtA[i] = 0x28 - i++ - i = encodeVarintTest(dAtA, i, uint64(*m.LatestBlocktime)) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *NodeInfo) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *NodeInfo) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Id == nil { - return 0, proto.NewRequiredNotSetError("id") - } else { - dAtA[i] = 0xa - i++ - i = encodeVarintTest(dAtA, i, uint64(m.Id.Size())) - n3, err := m.Id.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n3 - } - if m.Moniker == nil { - return 0, proto.NewRequiredNotSetError("moniker") - } else { - dAtA[i] = 0x12 - i++ - i = encodeVarintTest(dAtA, i, 
uint64(len(*m.Moniker))) - i += copy(dAtA[i:], *m.Moniker) - } - if m.Network == nil { - return 0, proto.NewRequiredNotSetError("network") - } else { - dAtA[i] = 0x1a - i++ - i = encodeVarintTest(dAtA, i, uint64(len(*m.Network))) - i += copy(dAtA[i:], *m.Network) - } - if m.RemoteAddr == nil { - return 0, proto.NewRequiredNotSetError("remoteAddr") - } else { - dAtA[i] = 0x22 - i++ - i = encodeVarintTest(dAtA, i, uint64(len(*m.RemoteAddr))) - i += copy(dAtA[i:], *m.RemoteAddr) - } - if m.ListenAddr == nil { - return 0, proto.NewRequiredNotSetError("listenAddr") - } else { - dAtA[i] = 0x2a - i++ - i = encodeVarintTest(dAtA, i, uint64(len(*m.ListenAddr))) - i += copy(dAtA[i:], *m.ListenAddr) - } - if m.Version == nil { - return 0, proto.NewRequiredNotSetError("version") - } else { - dAtA[i] = 0x32 - i++ - i = encodeVarintTest(dAtA, i, uint64(len(*m.Version))) - i += copy(dAtA[i:], *m.Version) - } - if len(m.Other) > 0 { - for _, s := range m.Other { - dAtA[i] = 0x3a - i++ - l = len(s) - for l >= 1<<7 { - dAtA[i] = uint8(uint64(l)&0x7f | 0x80) - l >>= 7 - i++ - } - dAtA[i] = uint8(l) - i++ - i += copy(dAtA[i:], s) - } - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *ID) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *ID) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Id == nil { - return 0, proto.NewRequiredNotSetError("id") - } else { - dAtA[i] = 0xa - i++ - i = encodeVarintTest(dAtA, i, uint64(len(*m.Id))) - i += copy(dAtA[i:], *m.Id) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PubKey) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PubKey) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Ed25519 != nil { - dAtA[i] = 0xa - i++ - i = encodeVarintTest(dAtA, i, uint64(m.Ed25519.Size())) - n4, err := m.Ed25519.MarshalTo(dAtA[i:]) - if err != nil { - return 0, err - } - i += n4 - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func (m *PubKeyEd25519) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalTo(dAtA) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *PubKeyEd25519) MarshalTo(dAtA []byte) (int, error) { - var i int - _ = i - var l int - _ = l - if m.Bytes == nil { - return 0, proto.NewRequiredNotSetError("bytes") - } else { - dAtA[i] = 0xa - i++ - i = encodeVarintTest(dAtA, i, uint64(len(m.Bytes))) - i += copy(dAtA[i:], m.Bytes) - } - if m.XXX_unrecognized != nil { - i += copy(dAtA[i:], m.XXX_unrecognized) - } - return i, nil -} - -func encodeVarintTest(dAtA []byte, offset int, v uint64) int { - for v >= 1<<7 { - dAtA[offset] = uint8(v&0x7f | 0x80) - v >>= 7 - offset++ - } - dAtA[offset] = uint8(v) - return offset + 1 -} -func (m *ResultStatus) Size() (n int) { - var l int - _ = l - if m.NodeInfo != nil { - l = m.NodeInfo.Size() - n += 1 + l + sovTest(uint64(l)) - } - if m.PubKey != nil { - l = m.PubKey.Size() - n += 1 + l + sovTest(uint64(l)) - } - if m.LatestBlockHash != nil { - l = len(m.LatestBlockHash) - n += 1 + l + sovTest(uint64(l)) - } - if 
m.LatestBlockHeight != nil { - n += 1 + sovTest(uint64(*m.LatestBlockHeight)) - } - if m.LatestBlocktime != nil { - n += 1 + sovTest(uint64(*m.LatestBlocktime)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *NodeInfo) Size() (n int) { - var l int - _ = l - if m.Id != nil { - l = m.Id.Size() - n += 1 + l + sovTest(uint64(l)) - } - if m.Moniker != nil { - l = len(*m.Moniker) - n += 1 + l + sovTest(uint64(l)) - } - if m.Network != nil { - l = len(*m.Network) - n += 1 + l + sovTest(uint64(l)) - } - if m.RemoteAddr != nil { - l = len(*m.RemoteAddr) - n += 1 + l + sovTest(uint64(l)) - } - if m.ListenAddr != nil { - l = len(*m.ListenAddr) - n += 1 + l + sovTest(uint64(l)) - } - if m.Version != nil { - l = len(*m.Version) - n += 1 + l + sovTest(uint64(l)) - } - if len(m.Other) > 0 { - for _, s := range m.Other { - l = len(s) - n += 1 + l + sovTest(uint64(l)) - } - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *ID) Size() (n int) { - var l int - _ = l - if m.Id != nil { - l = len(*m.Id) - n += 1 + l + sovTest(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PubKey) Size() (n int) { - var l int - _ = l - if m.Ed25519 != nil { - l = m.Ed25519.Size() - n += 1 + l + sovTest(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func (m *PubKeyEd25519) Size() (n int) { - var l int - _ = l - if m.Bytes != nil { - l = len(m.Bytes) - n += 1 + l + sovTest(uint64(l)) - } - if m.XXX_unrecognized != nil { - n += len(m.XXX_unrecognized) - } - return n -} - -func sovTest(x uint64) (n int) { - for { - n++ - x >>= 7 - if x == 0 { - break - } - } - return n -} - -func (m *ResultStatus) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ResultStatus: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ResultStatus: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field NodeInfo", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.NodeInfo == nil { - m.NodeInfo = &NodeInfo{} - } - if err := m.NodeInfo.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field PubKey", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return 
ErrInvalidLengthTest - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.PubKey == nil { - m.PubKey = &PubKey{} - } - if err := m.PubKey.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHash", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.LatestBlockHash = append(m.LatestBlockHash[:0], dAtA[iNdEx:postIndex]...) - if m.LatestBlockHash == nil { - m.LatestBlockHash = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - case 4: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestBlockHeight", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.LatestBlockHeight = &v - hasFields[0] |= uint64(0x00000004) - case 5: - if wireType != 0 { - return fmt.Errorf("proto: wrong wireType = %d for field LatestBlocktime", wireType) - } - var v int64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - v |= (int64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - m.LatestBlocktime = &v - hasFields[0] |= uint64(0x00000008) - default: - iNdEx = preIndex - skippy, err := skipTest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return proto.NewRequiredNotSetError("pubKey") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return proto.NewRequiredNotSetError("latestBlockHash") - } - if hasFields[0]&uint64(0x00000004) == 0 { - return proto.NewRequiredNotSetError("latestBlockHeight") - } - if hasFields[0]&uint64(0x00000008) == 0 { - return proto.NewRequiredNotSetError("latestBlocktime") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *NodeInfo) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: NodeInfo: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: NodeInfo: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Id == nil { - m.Id = &ID{} - } - if err := m.Id.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Moniker", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Moniker = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000002) - case 3: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Network", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Network = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000004) - case 4: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field RemoteAddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= 
(uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.RemoteAddr = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000008) - case 5: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field ListenAddr", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.ListenAddr = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000010) - case 6: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Version = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000020) - case 7: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Other", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Other = append(m.Other, string(dAtA[iNdEx:postIndex])) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return proto.NewRequiredNotSetError("id") - } - if hasFields[0]&uint64(0x00000002) == 0 { - return proto.NewRequiredNotSetError("moniker") - } - if hasFields[0]&uint64(0x00000004) == 0 { - return proto.NewRequiredNotSetError("network") - } - if hasFields[0]&uint64(0x00000008) == 0 { - return proto.NewRequiredNotSetError("remoteAddr") - } - if hasFields[0]&uint64(0x00000010) == 0 { - return proto.NewRequiredNotSetError("listenAddr") - } - if hasFields[0]&uint64(0x00000020) == 0 { - return proto.NewRequiredNotSetError("version") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *ID) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: ID: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: ID: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + intStringLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - s := string(dAtA[iNdEx:postIndex]) - m.Id = &s - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - default: - iNdEx = preIndex - skippy, err := skipTest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
- iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return proto.NewRequiredNotSetError("id") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PubKey) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PubKey: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PubKey: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Ed25519", wireType) - } - var msglen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - msglen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if msglen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + msglen - if postIndex > l { - return io.ErrUnexpectedEOF - } - if m.Ed25519 == nil { - m.Ed25519 = &PubKeyEd25519{} - } - if err := m.Ed25519.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { - return err - } - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipTest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func (m *PubKeyEd25519) Unmarshal(dAtA []byte) error { - var hasFields [1]uint64 - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: PubKeyEd25519: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: PubKeyEd25519: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Bytes", wireType) - } - var byteLen int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowTest - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - byteLen |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - if byteLen < 0 { - return ErrInvalidLengthTest - } - postIndex := iNdEx + byteLen - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Bytes = append(m.Bytes[:0], dAtA[iNdEx:postIndex]...) 
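//-------------------------------------
// Aside: a minimal, runnable sketch of the varint wire format all of the
// generated code above depends on, plus the hasFields bitmask used to
// enforce proto2 `required` fields. putUvarint/getUvarint are illustrative
// stand-ins for encodeVarintTest and the inline decode loops; they are not
// part of the generated file.
package main

import "fmt"

// putUvarint appends v in base-128 varint form: low 7 bits first, high bit
// set on every byte except the last (the same loop as encodeVarintTest).
func putUvarint(buf []byte, v uint64) []byte {
	for v >= 1<<7 {
		buf = append(buf, byte(v&0x7f|0x80))
		v >>= 7
	}
	return append(buf, byte(v))
}

// getUvarint reverses putUvarint (the same loop the Unmarshal methods inline).
func getUvarint(buf []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := buf[n]
		n++
		v |= uint64(b&0x7f) << shift
		if b < 0x80 {
			return v, n
		}
	}
}

func main() {
	buf := putUvarint(nil, 300) // 300 encodes as 0xAC 0x02
	v, n := getUvarint(buf)
	fmt.Printf("% X -> %d (%d bytes)\n", buf, v, n)

	// proto2 `required` tracking: one bit per required field, checked after
	// the decode loop finishes, exactly like hasFields[0] above.
	var hasFields uint64
	hasFields |= 0x1 // decoded field 1
	if hasFields&0x2 == 0 {
		fmt.Println("required field 2 missing") // -> NewRequiredNotSetError
	}
}
//-------------------------------------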
- if m.Bytes == nil { - m.Bytes = []byte{} - } - iNdEx = postIndex - hasFields[0] |= uint64(0x00000001) - default: - iNdEx = preIndex - skippy, err := skipTest(dAtA[iNdEx:]) - if err != nil { - return err - } - if skippy < 0 { - return ErrInvalidLengthTest - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) - iNdEx += skippy - } - } - if hasFields[0]&uint64(0x00000001) == 0 { - return proto.NewRequiredNotSetError("bytes") - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} -func skipTest(dAtA []byte) (n int, err error) { - l := len(dAtA) - iNdEx := 0 - for iNdEx < l { - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - wireType := int(wire & 0x7) - switch wireType { - case 0: - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - iNdEx++ - if dAtA[iNdEx-1] < 0x80 { - break - } - } - return iNdEx, nil - case 1: - iNdEx += 8 - return iNdEx, nil - case 2: - var length int - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - length |= (int(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - iNdEx += length - if length < 0 { - return 0, ErrInvalidLengthTest - } - return iNdEx, nil - case 3: - for { - var innerWire uint64 - var start int = iNdEx - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return 0, ErrIntOverflowTest - } - if iNdEx >= l { - return 0, io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - innerWire |= (uint64(b) & 0x7F) << shift - if b < 0x80 { - break - } - } - innerWireType := int(innerWire & 0x7) - if innerWireType == 4 { - break - } - next, err := skipTest(dAtA[start:]) - if err != nil { - return 0, err - } - iNdEx = start + next - } - return iNdEx, nil - case 4: - return iNdEx, nil - case 5: - iNdEx += 4 - return iNdEx, nil - default: - return 0, fmt.Errorf("proto: illegal wireType %d", wireType) - } - } - panic("unreachable") -} - -var ( - ErrInvalidLengthTest = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowTest = fmt.Errorf("proto: integer overflow") -) - -func init() { proto.RegisterFile("test.proto", fileDescriptorTest) } - -var fileDescriptorTest = []byte{ - // 342 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x64, 0x91, 0x41, 0x4b, 0xf3, 0x30, - 0x1c, 0xc6, 0x49, 0xfb, 0x6e, 0x5d, 0xff, 0xeb, 0x3b, 0x31, 0xee, 0x90, 0x53, 0x2d, 0x85, 0x41, - 0x0f, 0x52, 0xb0, 0xb0, 0x83, 0x47, 0xc7, 0x04, 0x87, 0x20, 0x23, 0x7e, 0x82, 0xcd, 0xfe, 0x75, - 0x65, 0x5d, 0x33, 0x92, 0x4c, 0xd9, 0xe7, 0xf3, 0xe2, 0xd1, 0xa3, 0x47, 0xd9, 0x27, 0x91, 0xa6, - 0xed, 0x9c, 0xf3, 0xf8, 0xfc, 0x7e, 0xe5, 0xc9, 0xd3, 0x04, 0x40, 0xa3, 0xd2, 0xf1, 0x5a, 0x0a, - 0x2d, 0xc2, 0x4f, 0x02, 0x1e, 0x47, 0xb5, 0xc9, 0xf5, 0x83, 0x9e, 0xe9, 0x8d, 0xa2, 0x03, 0xe8, - 0x14, 0x22, 0xc5, 0x49, 0xf1, 0x24, 0x18, 0x09, 0x48, 0xd4, 0x4d, 0xdc, 0xf8, 0xbe, 0x06, 0x7c, - 0xaf, 0xe8, 0x39, 0xb4, 0xd7, 0x9b, 0xf9, 0x1d, 0x6e, 0x99, 0x15, 0x58, 0x51, 0x37, 0x71, 0xe2, - 0xa9, 0x89, 0xbc, 0xc6, 0x34, 0x82, 0x93, 0x7c, 0x56, 0x1e, 0x34, 0xca, 0xc5, 0xe3, 0xf2, 0x76, - 0xa6, 0x16, 
0xcc, 0x0e, 0xac, 0xc8, 0xe3, 0xc7, 0x98, 0x5e, 0xc0, 0xe9, 0x21, 0xc2, 0xec, 0x79, - 0xa1, 0xd9, 0xbf, 0xc0, 0x8a, 0x6c, 0xfe, 0x57, 0x1c, 0xf5, 0xea, 0x6c, 0x85, 0xac, 0x65, 0xbe, - 0x3d, 0xc6, 0xe1, 0x1b, 0x81, 0x4e, 0xb3, 0x9c, 0x9e, 0x81, 0x95, 0xa5, 0x8c, 0x98, 0xad, 0x76, - 0x3c, 0x19, 0x73, 0x2b, 0x4b, 0x29, 0x03, 0x67, 0x25, 0x8a, 0x6c, 0x89, 0xd2, 0xfc, 0x85, 0xcb, - 0x9b, 0x58, 0x9a, 0x02, 0xf5, 0xab, 0x90, 0x4b, 0xb3, 0xda, 0xe5, 0x4d, 0xa4, 0x3e, 0x80, 0xc4, - 0x95, 0xd0, 0x78, 0x9d, 0xa6, 0xd2, 0xcc, 0x74, 0xf9, 0x01, 0x29, 0x7d, 0x9e, 0x29, 0x8d, 0x85, - 0xf1, 0xad, 0xca, 0xff, 0x90, 0xb2, 0xf9, 0x05, 0xa5, 0xca, 0x44, 0xc1, 0xda, 0x55, 0x73, 0x1d, - 0x69, 0x1f, 0x5a, 0x42, 0x2f, 0x50, 0x32, 0x27, 0xb0, 0x23, 0x97, 0x57, 0x21, 0xec, 0x83, 0x35, - 0x19, 0xd3, 0xde, 0x7e, 0xbe, 0x5b, 0x2e, 0x0f, 0x13, 0x68, 0x4f, 0x9b, 0x7b, 0x76, 0x30, 0x4d, - 0x86, 0xc3, 0xcb, 0xab, 0xfa, 0xb9, 0x7a, 0xf5, 0x4b, 0xdc, 0x54, 0x94, 0x37, 0x3a, 0x1c, 0xc0, - 0xff, 0x5f, 0xa6, 0x3c, 0x70, 0xbe, 0xd5, 0xa8, 0x4c, 0xaf, 0xc7, 0xab, 0x30, 0xf2, 0xde, 0x77, - 0x3e, 0xf9, 0xd8, 0xf9, 0xe4, 0x6b, 0xe7, 0x93, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0xb1, 0xee, - 0x6b, 0xdd, 0x2c, 0x02, 0x00, 0x00, -} diff --git a/benchmarks/proto/test.proto b/benchmarks/proto/test.proto deleted file mode 100644 index 6d770d98..00000000 --- a/benchmarks/proto/test.proto +++ /dev/null @@ -1,29 +0,0 @@ -message ResultStatus { - optional NodeInfo nodeInfo = 1; - required PubKey pubKey = 2; - required bytes latestBlockHash = 3; - required int64 latestBlockHeight = 4; - required int64 latestBlocktime = 5; -} - -message NodeInfo { - required ID id = 1; - required string moniker = 2; - required string network = 3; - required string remoteAddr = 4; - required string listenAddr = 5; - required string version = 6; - repeated string other = 7; -} - -message ID { - required string id = 1; -} - -message PubKey { - optional PubKeyEd25519 ed25519 = 1; -} - -message PubKeyEd25519 { - required bytes bytes = 1; -} diff --git a/benchmarks/simu/counter.go b/benchmarks/simu/counter.go deleted file mode 100644 index dd00408c..00000000 --- a/benchmarks/simu/counter.go +++ /dev/null @@ -1,47 +0,0 @@ -package main - -import ( - "context" - "encoding/binary" - "fmt" - "time" - - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" - cmn "github.com/tendermint/tmlibs/common" -) - -func main() { - wsc := rpcclient.NewWSClient("127.0.0.1:26657", "/websocket") - err := wsc.Start() - if err != nil { - cmn.Exit(err.Error()) - } - defer wsc.Stop() - - // Read a bunch of responses - go func() { - for { - _, ok := <-wsc.ResponsesCh - if !ok { - break - } - //fmt.Println("Received response", string(wire.JSONBytes(res))) - } - }() - - // Make a bunch of requests - buf := make([]byte, 32) - for i := 0; ; i++ { - binary.BigEndian.PutUint64(buf, uint64(i)) - //txBytes := hex.EncodeToString(buf[:n]) - fmt.Print(".") - err = wsc.Call(context.TODO(), "broadcast_tx", map[string]interface{}{"tx": buf[:8]}) - if err != nil { - cmn.Exit(err.Error()) - } - if i%1000 == 0 { - fmt.Println(i) - } - time.Sleep(time.Microsecond * 1000) - } -} diff --git a/blockchain/pool.go b/blockchain/pool.go deleted file mode 100644 index 8b964e81..00000000 --- a/blockchain/pool.go +++ /dev/null @@ -1,587 +0,0 @@ -package blockchain - -import ( - "errors" - "fmt" - "math" - "sync" - "sync/atomic" - "time" - - cmn "github.com/tendermint/tmlibs/common" - flow "github.com/tendermint/tmlibs/flowrate" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/p2p" - 
"github.com/tendermint/tendermint/types" -) - -/* -eg, L = latency = 0.1s - P = num peers = 10 - FN = num full nodes - BS = 1kB block size - CB = 1 Mbit/s = 128 kB/s - CB/P = 12.8 kB - B/S = CB/P/BS = 12.8 blocks/s - - 12.8 * 0.1 = 1.28 blocks on conn -*/ - -const ( - requestIntervalMS = 100 - maxTotalRequesters = 1000 - maxPendingRequests = maxTotalRequesters - maxPendingRequestsPerPeer = 50 - - // Minimum recv rate to ensure we're receiving blocks from a peer fast - // enough. If a peer is not sending us data at at least that rate, we - // consider them to have timedout and we disconnect. - // - // Assuming a DSL connection (not a good choice) 128 Kbps (upload) ~ 15 KB/s, - // sending data across atlantic ~ 7.5 KB/s. - minRecvRate = 7680 - - // Maximum difference between current and new block's height. - maxDiffBetweenCurrentAndReceivedBlockHeight = 100 -) - -var peerTimeout = 15 * time.Second // not const so we can override with tests - -/* - Peers self report their heights when we join the block pool. - Starting from our latest pool.height, we request blocks - in sequence from peers that reported higher heights than ours. - Every so often we ask peers what height they're on so we can keep going. - - Requests are continuously made for blocks of higher heights until - the limit is reached. If most of the requests have no available peers, and we - are not at peer limits, we can probably switch to consensus reactor -*/ - -type BlockPool struct { - cmn.BaseService - startTime time.Time - - mtx sync.Mutex - // block requests - requesters map[int64]*bpRequester - height int64 // the lowest key in requesters. - // peers - peers map[p2p.ID]*bpPeer - maxPeerHeight int64 - - // atomic - numPending int32 // number of requests pending assignment or block response - - requestsCh chan<- BlockRequest - errorsCh chan<- peerError -} - -func NewBlockPool(start int64, requestsCh chan<- BlockRequest, errorsCh chan<- peerError) *BlockPool { - bp := &BlockPool{ - peers: make(map[p2p.ID]*bpPeer), - - requesters: make(map[int64]*bpRequester), - height: start, - numPending: 0, - - requestsCh: requestsCh, - errorsCh: errorsCh, - } - bp.BaseService = *cmn.NewBaseService(nil, "BlockPool", bp) - return bp -} - -func (pool *BlockPool) OnStart() error { - go pool.makeRequestersRoutine() - pool.startTime = time.Now() - return nil -} - -func (pool *BlockPool) OnStop() {} - -// Run spawns requesters as needed. -func (pool *BlockPool) makeRequestersRoutine() { - for { - if !pool.IsRunning() { - break - } - - _, numPending, lenRequesters := pool.GetStatus() - if numPending >= maxPendingRequests { - // sleep for a bit. - time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers - pool.removeTimedoutPeers() - } else if lenRequesters >= maxTotalRequesters { - // sleep for a bit. - time.Sleep(requestIntervalMS * time.Millisecond) - // check for timed out peers - pool.removeTimedoutPeers() - } else { - // request for more blocks. 
- pool.makeNextRequester() - } - } -} - -func (pool *BlockPool) removeTimedoutPeers() { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - for _, peer := range pool.peers { - if !peer.didTimeout && peer.numPending > 0 { - curRate := peer.recvMonitor.Status().CurRate - // curRate can be 0 on start - if curRate != 0 && curRate < minRecvRate { - err := errors.New("peer is not sending us data fast enough") - pool.sendError(err, peer.id) - pool.Logger.Error("SendTimeout", "peer", peer.id, - "reason", err, - "curRate", fmt.Sprintf("%d KB/s", curRate/1024), - "minRate", fmt.Sprintf("%d KB/s", minRecvRate/1024)) - peer.didTimeout = true - } - } - if peer.didTimeout { - pool.removePeer(peer.id) - } - } -} - -func (pool *BlockPool) GetStatus() (height int64, numPending int32, lenRequesters int) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - return pool.height, atomic.LoadInt32(&pool.numPending), len(pool.requesters) -} - -// TODO: relax conditions, prevent abuse. -func (pool *BlockPool) IsCaughtUp() bool { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - // Need at least 1 peer to be considered caught up. - if len(pool.peers) == 0 { - pool.Logger.Debug("Blockpool has no peers") - return false - } - - // some conditions to determine if we're caught up - receivedBlockOrTimedOut := (pool.height > 0 || time.Since(pool.startTime) > 5*time.Second) - ourChainIsLongestAmongPeers := pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight - isCaughtUp := receivedBlockOrTimedOut && ourChainIsLongestAmongPeers - return isCaughtUp -} - -// We need to see the second block's Commit to validate the first block. -// So we peek two blocks at a time. -// The caller will verify the commit. -func (pool *BlockPool) PeekTwoBlocks() (first *types.Block, second *types.Block) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - if r := pool.requesters[pool.height]; r != nil { - first = r.getBlock() - } - if r := pool.requesters[pool.height+1]; r != nil { - second = r.getBlock() - } - return -} - -// Pop the first block at pool.height -// It must have been validated by 'second'.Commit from PeekTwoBlocks(). -func (pool *BlockPool) PopRequest() { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - if r := pool.requesters[pool.height]; r != nil { - /* The block can disappear at any time, due to removePeer(). - if r := pool.requesters[pool.height]; r == nil || r.block == nil { - PanicSanity("PopRequest() requires a valid block") - } - */ - r.Stop() - delete(pool.requesters, pool.height) - pool.height++ - } else { - panic(fmt.Sprintf("Expected requester to pop, got nothing at height %v", pool.height)) - } -} - -// Invalidates the block at pool.height, -// Remove the peer and redo request from others. -// Returns the ID of the removed peer. -func (pool *BlockPool) RedoRequest(height int64) p2p.ID { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - request := pool.requesters[height] - - if request.block == nil { - panic("Expected block to be non-nil") - } - - // RemovePeer will redo all requesters associated with this peer. - pool.removePeer(request.peerID) - return request.peerID -} - -// TODO: ensure that blocks come in order for each peer. 
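//-------------------------------------
// Aside: a runnable sketch (not part of the original file) of why
// PeekTwoBlocks returns a pair: block H can only be validated against the
// +2/3 precommits carried in block H+1's LastCommit, so the pool peeks two
// blocks and pops one. `block` and `verify` are simplified stand-ins.
package main

import "fmt"

type block struct{ height int64 }

// verify stands in for checking block H's ID against the precommits in
// block H+1's LastCommit.
func verify(first, second *block) bool { return second.height == first.height+1 }

func main() {
	pending := map[int64]*block{5: {5}, 6: {6}, 7: {7}}
	height := int64(5)
	for {
		first, second := pending[height], pending[height+1] // peek two
		if first == nil || second == nil {
			break // can't commit `first` until its successor arrives
		}
		if verify(first, second) {
			delete(pending, height) // pop one
			height++
			fmt.Println("applied block", first.height)
		}
	}
}
//-------------------------------------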
-func (pool *BlockPool) AddBlock(peerID p2p.ID, block *types.Block, blockSize int) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - requester := pool.requesters[block.Height] - if requester == nil { - pool.Logger.Info("peer sent us a block we didn't expect", "peer", peerID, "curHeight", pool.height, "blockHeight", block.Height) - diff := pool.height - block.Height - if diff < 0 { - diff *= -1 - } - if diff > maxDiffBetweenCurrentAndReceivedBlockHeight { - pool.sendError(errors.New("peer sent us a block we didn't expect with a height too far ahead/behind"), peerID) - } - return - } - - if requester.setBlock(block, peerID) { - atomic.AddInt32(&pool.numPending, -1) - peer := pool.peers[peerID] - if peer != nil { - peer.decrPending(blockSize) - } - } else { - // Bad peer? - } -} - -// MaxPeerHeight returns the highest height reported by a peer. -func (pool *BlockPool) MaxPeerHeight() int64 { - pool.mtx.Lock() - defer pool.mtx.Unlock() - return pool.maxPeerHeight -} - -// Sets the peer's alleged blockchain height. -func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - peer := pool.peers[peerID] - if peer != nil { - peer.height = height - } else { - peer = newBPPeer(pool, peerID, height) - peer.setLogger(pool.Logger.With("peer", peerID)) - pool.peers[peerID] = peer - } - - if height > pool.maxPeerHeight { - pool.maxPeerHeight = height - } -} - -func (pool *BlockPool) RemovePeer(peerID p2p.ID) { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - pool.removePeer(peerID) -} - -func (pool *BlockPool) removePeer(peerID p2p.ID) { - for _, requester := range pool.requesters { - if requester.getPeerID() == peerID { - requester.redo() - } - } - delete(pool.peers, peerID) -} - -// Pick an available peer with at least the given minHeight. -// If no peers are available, returns nil. 
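//-------------------------------------
// Aside: a stripped-down, runnable sketch (not part of the original file) of
// the selection policy pickIncrAvailablePeer implements below: skip peers
// that timed out, peers already at the per-peer pending cap, and peers whose
// reported height is below minHeight. Field and function names here are
// illustrative.
package main

import "fmt"

const maxPendingPerPeer = 50 // mirrors maxPendingRequestsPerPeer

type candidate struct {
	id         string
	height     int64
	numPending int32
	didTimeout bool
}

func pick(peers []*candidate, minHeight int64) *candidate {
	for _, p := range peers {
		if p.didTimeout || p.numPending >= maxPendingPerPeer || p.height < minHeight {
			continue
		}
		p.numPending++ // reserve a slot, as incrPending() does
		return p
	}
	return nil // caller sleeps and retries, as requestRoutine does
}

func main() {
	peers := []*candidate{
		{id: "a", height: 100, didTimeout: true},
		{id: "b", height: 100, numPending: maxPendingPerPeer},
		{id: "c", height: 100},
	}
	if p := pick(peers, 42); p != nil {
		fmt.Println("picked", p.id) // -> picked c
	}
}
//-------------------------------------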
-func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - for _, peer := range pool.peers { - if peer.didTimeout { - pool.removePeer(peer.id) - continue - } - if peer.numPending >= maxPendingRequestsPerPeer { - continue - } - if peer.height < minHeight { - continue - } - peer.incrPending() - return peer - } - return nil -} - -func (pool *BlockPool) makeNextRequester() { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - nextHeight := pool.height + pool.requestersLen() - request := newBPRequester(pool, nextHeight) - // request.SetLogger(pool.Logger.With("height", nextHeight)) - - pool.requesters[nextHeight] = request - atomic.AddInt32(&pool.numPending, 1) - - err := request.Start() - if err != nil { - request.Logger.Error("Error starting request", "err", err) - } -} - -func (pool *BlockPool) requestersLen() int64 { - return int64(len(pool.requesters)) -} - -func (pool *BlockPool) sendRequest(height int64, peerID p2p.ID) { - if !pool.IsRunning() { - return - } - pool.requestsCh <- BlockRequest{height, peerID} -} - -func (pool *BlockPool) sendError(err error, peerID p2p.ID) { - if !pool.IsRunning() { - return - } - pool.errorsCh <- peerError{err, peerID} -} - -// unused by tendermint; left for debugging purposes -func (pool *BlockPool) debug() string { - pool.mtx.Lock() - defer pool.mtx.Unlock() - - str := "" - nextHeight := pool.height + pool.requestersLen() - for h := pool.height; h < nextHeight; h++ { - if pool.requesters[h] == nil { - str += cmn.Fmt("H(%v):X ", h) - } else { - str += cmn.Fmt("H(%v):", h) - str += cmn.Fmt("B?(%v) ", pool.requesters[h].block != nil) - } - } - return str -} - -//------------------------------------- - -type bpPeer struct { - pool *BlockPool - id p2p.ID - recvMonitor *flow.Monitor - - height int64 - numPending int32 - timeout *time.Timer - didTimeout bool - - logger log.Logger -} - -func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer { - peer := &bpPeer{ - pool: pool, - id: peerID, - height: height, - numPending: 0, - logger: log.NewNopLogger(), - } - return peer -} - -func (peer *bpPeer) setLogger(l log.Logger) { - peer.logger = l -} - -func (peer *bpPeer) resetMonitor() { - peer.recvMonitor = flow.New(time.Second, time.Second*40) - initialValue := float64(minRecvRate) * math.E - peer.recvMonitor.SetREMA(initialValue) -} - -func (peer *bpPeer) resetTimeout() { - if peer.timeout == nil { - peer.timeout = time.AfterFunc(peerTimeout, peer.onTimeout) - } else { - peer.timeout.Reset(peerTimeout) - } -} - -func (peer *bpPeer) incrPending() { - if peer.numPending == 0 { - peer.resetMonitor() - peer.resetTimeout() - } - peer.numPending++ -} - -func (peer *bpPeer) decrPending(recvSize int) { - peer.numPending-- - if peer.numPending == 0 { - peer.timeout.Stop() - } else { - peer.recvMonitor.Update(recvSize) - peer.resetTimeout() - } -} - -func (peer *bpPeer) onTimeout() { - peer.pool.mtx.Lock() - defer peer.pool.mtx.Unlock() - - err := errors.New("peer did not send us anything") - peer.pool.sendError(err, peer.id) - peer.logger.Error("SendTimeout", "reason", err, "timeout", peerTimeout) - peer.didTimeout = true -} - -//------------------------------------- - -type bpRequester struct { - cmn.BaseService - pool *BlockPool - height int64 - gotBlockCh chan struct{} - redoCh chan struct{} - - mtx sync.Mutex - peerID p2p.ID - block *types.Block -} - -func newBPRequester(pool *BlockPool, height int64) *bpRequester { - bpr := &bpRequester{ - pool: pool, - height: height, - gotBlockCh: make(chan 
struct{}, 1), - redoCh: make(chan struct{}, 1), - - peerID: "", - block: nil, - } - bpr.BaseService = *cmn.NewBaseService(nil, "bpRequester", bpr) - return bpr -} - -func (bpr *bpRequester) OnStart() error { - go bpr.requestRoutine() - return nil -} - -// Returns true if the peer matches and block doesn't already exist. -func (bpr *bpRequester) setBlock(block *types.Block, peerID p2p.ID) bool { - bpr.mtx.Lock() - if bpr.block != nil || bpr.peerID != peerID { - bpr.mtx.Unlock() - return false - } - bpr.block = block - bpr.mtx.Unlock() - - select { - case bpr.gotBlockCh <- struct{}{}: - default: - } - return true -} - -func (bpr *bpRequester) getBlock() *types.Block { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - return bpr.block -} - -func (bpr *bpRequester) getPeerID() p2p.ID { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - return bpr.peerID -} - -// This is called from the requestRoutine, upon redo(). -func (bpr *bpRequester) reset() { - bpr.mtx.Lock() - defer bpr.mtx.Unlock() - - if bpr.block != nil { - atomic.AddInt32(&bpr.pool.numPending, 1) - } - - bpr.peerID = "" - bpr.block = nil -} - -// Tells bpRequester to pick another peer and try again. -// NOTE: Nonblocking, and does nothing if another redo -// was already requested. -func (bpr *bpRequester) redo() { - select { - case bpr.redoCh <- struct{}{}: - default: - } -} - -// Responsible for making more requests as necessary -// Returns only when a block is found (e.g. AddBlock() is called) -func (bpr *bpRequester) requestRoutine() { -OUTER_LOOP: - for { - // Pick a peer to send request to. - var peer *bpPeer - PICK_PEER_LOOP: - for { - if !bpr.IsRunning() || !bpr.pool.IsRunning() { - return - } - peer = bpr.pool.pickIncrAvailablePeer(bpr.height) - if peer == nil { - //log.Info("No peers available", "height", height) - time.Sleep(requestIntervalMS * time.Millisecond) - continue PICK_PEER_LOOP - } - break PICK_PEER_LOOP - } - bpr.mtx.Lock() - bpr.peerID = peer.id - bpr.mtx.Unlock() - - // Send request and wait. - bpr.pool.sendRequest(bpr.height, peer.id) - WAIT_LOOP: - for { - select { - case <-bpr.pool.Quit(): - bpr.Stop() - return - case <-bpr.Quit(): - return - case <-bpr.redoCh: - bpr.reset() - continue OUTER_LOOP - case <-bpr.gotBlockCh: - // We got a block! - // Continue the for-loop and wait til Quit. 
- continue WAIT_LOOP - } - } - } -} - -//------------------------------------- - -type BlockRequest struct { - Height int64 - PeerID p2p.ID -} diff --git a/blockchain/pool_test.go b/blockchain/pool_test.go deleted file mode 100644 index 82120eae..00000000 --- a/blockchain/pool_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package blockchain - -import ( - "math/rand" - "testing" - "time" - - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -func init() { - peerTimeout = 2 * time.Second -} - -type testPeer struct { - id p2p.ID - height int64 -} - -func makePeers(numPeers int, minHeight, maxHeight int64) map[p2p.ID]testPeer { - peers := make(map[p2p.ID]testPeer, numPeers) - for i := 0; i < numPeers; i++ { - peerID := p2p.ID(cmn.RandStr(12)) - height := minHeight + rand.Int63n(maxHeight-minHeight) - peers[peerID] = testPeer{peerID, height} - } - return peers -} - -func TestBasic(t *testing.T) { - start := int64(42) - peers := makePeers(10, start+1, 1000) - errorsCh := make(chan peerError, 1000) - requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - - err := pool.Start() - if err != nil { - t.Error(err) - } - - defer pool.Stop() - - // Introduce each peer. - go func() { - for _, peer := range peers { - pool.SetPeerHeight(peer.id, peer.height) - } - }() - - // Start a goroutine to pull blocks - go func() { - for { - if !pool.IsRunning() { - return - } - first, second := pool.PeekTwoBlocks() - if first != nil && second != nil { - pool.PopRequest() - } else { - time.Sleep(1 * time.Second) - } - } - }() - - // Pull from channels - for { - select { - case err := <-errorsCh: - t.Error(err) - case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %v", request) - if request.Height == 300 { - return // Done! - } - // Request desired, pretend like we got the block immediately. - go func() { - block := &types.Block{Header: &types.Header{Height: request.Height}} - pool.AddBlock(request.PeerID, block, 123) - t.Logf("Added block from peer %v (height: %v)", request.PeerID, request.Height) - }() - } - } -} - -func TestTimeout(t *testing.T) { - start := int64(42) - peers := makePeers(10, start+1, 1000) - errorsCh := make(chan peerError, 1000) - requestsCh := make(chan BlockRequest, 1000) - pool := NewBlockPool(start, requestsCh, errorsCh) - pool.SetLogger(log.TestingLogger()) - err := pool.Start() - if err != nil { - t.Error(err) - } - defer pool.Stop() - - for _, peer := range peers { - t.Logf("Peer %v", peer.id) - } - - // Introduce each peer. - go func() { - for _, peer := range peers { - pool.SetPeerHeight(peer.id, peer.height) - } - }() - - // Start a goroutine to pull blocks - go func() { - for { - if !pool.IsRunning() { - return - } - first, second := pool.PeekTwoBlocks() - if first != nil && second != nil { - pool.PopRequest() - } else { - time.Sleep(1 * time.Second) - } - } - }() - - // Pull from channels - counter := 0 - timedOut := map[p2p.ID]struct{}{} - for { - select { - case err := <-errorsCh: - t.Log(err) - // consider error to be always timeout here - if _, ok := timedOut[err.peerID]; !ok { - counter++ - if counter == len(peers) { - return // Done! 
- } - } - case request := <-requestsCh: - t.Logf("Pulled new BlockRequest %+v", request) - } - } -} diff --git a/blockchain/reactor.go b/blockchain/reactor.go deleted file mode 100644 index 33dfdd28..00000000 --- a/blockchain/reactor.go +++ /dev/null @@ -1,405 +0,0 @@ -package blockchain - -import ( - "fmt" - "reflect" - "time" - - "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/p2p" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -const ( - // BlockchainChannel is a channel for blocks and status updates (`BlockStore` height) - BlockchainChannel = byte(0x40) - - trySyncIntervalMS = 50 - // stop syncing when last block's time is - // within this much of the system time. - // stopSyncingDurationMinutes = 10 - - // ask for best height every 10s - statusUpdateIntervalSeconds = 10 - // check if we should switch to consensus reactor - switchToConsensusIntervalSeconds = 1 - - // NOTE: keep up to date with bcBlockResponseMessage - bcBlockResponseMessagePrefixSize = 4 - bcBlockResponseMessageFieldKeySize = 1 - maxMsgSize = types.MaxBlockSizeBytes + - bcBlockResponseMessagePrefixSize + - bcBlockResponseMessageFieldKeySize -) - -type consensusReactor interface { - // for when we switch from blockchain reactor and fast sync to - // the consensus machine - SwitchToConsensus(sm.State, int) -} - -type peerError struct { - err error - peerID p2p.ID -} - -func (e peerError) Error() string { - return fmt.Sprintf("error with peer %v: %s", e.peerID, e.err.Error()) -} - -// BlockchainReactor handles long-term catchup syncing. -type BlockchainReactor struct { - p2p.BaseReactor - - // immutable - initialState sm.State - - blockExec *sm.BlockExecutor - store *BlockStore - pool *BlockPool - fastSync bool - - requestsCh <-chan BlockRequest - errorsCh <-chan peerError -} - -// NewBlockchainReactor returns new reactor instance. -func NewBlockchainReactor(state sm.State, blockExec *sm.BlockExecutor, store *BlockStore, - fastSync bool) *BlockchainReactor { - - if state.LastBlockHeight != store.Height() { - panic(fmt.Sprintf("state (%v) and store (%v) height mismatch", state.LastBlockHeight, - store.Height())) - } - - const capacity = 1000 // must be bigger than peers count - requestsCh := make(chan BlockRequest, capacity) - errorsCh := make(chan peerError, capacity) // so we don't block in #Receive#pool.AddBlock - - pool := NewBlockPool( - store.Height()+1, - requestsCh, - errorsCh, - ) - - bcR := &BlockchainReactor{ - initialState: state, - blockExec: blockExec, - store: store, - pool: pool, - fastSync: fastSync, - requestsCh: requestsCh, - errorsCh: errorsCh, - } - bcR.BaseReactor = *p2p.NewBaseReactor("BlockchainReactor", bcR) - return bcR -} - -// SetLogger implements cmn.Service by setting the logger on reactor and pool. -func (bcR *BlockchainReactor) SetLogger(l log.Logger) { - bcR.BaseService.Logger = l - bcR.pool.Logger = l -} - -// OnStart implements cmn.Service. -func (bcR *BlockchainReactor) OnStart() error { - if err := bcR.BaseReactor.OnStart(); err != nil { - return err - } - if bcR.fastSync { - err := bcR.pool.Start() - if err != nil { - return err - } - go bcR.poolRoutine() - } - return nil -} - -// OnStop implements cmn.Service. 
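//-------------------------------------
// Aside: a toy, runnable sketch (not part of the original file) of the
// channel plumbing set up in NewBlockchainReactor above: the pool pushes
// BlockRequests into a buffered channel (capacity 1000, deliberately larger
// than the peer count) and the reactor drains it, so neither side blocks
// the other. The forwarding print stands in for peer.TrySend.
package main

import "fmt"

type BlockRequest struct {
	Height int64
	PeerID string
}

func main() {
	requestsCh := make(chan BlockRequest, 1000) // pool -> reactor

	// Pool side: sendRequest never blocks while the buffer has room.
	for h := int64(1); h <= 3; h++ {
		requestsCh <- BlockRequest{Height: h, PeerID: "peer-1"}
	}
	close(requestsCh) // for the sketch only; the real channel stays open

	// Reactor side: poolRoutine drains the channel and forwards each
	// request to the matching peer.
	for req := range requestsCh {
		fmt.Printf("send bcBlockRequestMessage{%d} to %s\n", req.Height, req.PeerID)
	}
}
//-------------------------------------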
-func (bcR *BlockchainReactor) OnStop() { - bcR.BaseReactor.OnStop() - bcR.pool.Stop() -} - -// GetChannels implements Reactor -func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: BlockchainChannel, - Priority: 10, - SendQueueCapacity: 1000, - RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: maxMsgSize, - }, - } -} - -// AddPeer implements Reactor by sending our state to peer. -func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) - if !peer.Send(BlockchainChannel, msgBytes) { - // doing nothing, will try later in `poolRoutine` - } - // peer is added to the pool once we receive the first - // bcStatusResponseMessage from the peer and call pool.SetPeerHeight -} - -// RemovePeer implements Reactor by removing peer from the pool. -func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - bcR.pool.RemovePeer(peer.ID()) -} - -// respondToPeer loads a block and sends it to the requesting peer, -// if we have it. Otherwise, we'll respond saying we don't have it. -// According to the Tendermint spec, if all nodes are honest, -// no node should be requesting for a block that's non-existent. -func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage, - src p2p.Peer) (queued bool) { - - block := bcR.store.LoadBlock(msg.Height) - if block != nil { - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockResponseMessage{Block: block}) - return src.TrySend(BlockchainChannel, msgBytes) - } - - bcR.Logger.Info("Peer asking for a block we don't have", "src", src, "height", msg.Height) - - msgBytes := cdc.MustMarshalBinaryBare(&bcNoBlockResponseMessage{Height: msg.Height}) - return src.TrySend(BlockchainChannel, msgBytes) -} - -// Receive implements Reactor by handling 4 types of messages (look below). -func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := DecodeMessage(msgBytes) - if err != nil { - bcR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) - bcR.Switch.StopPeerForError(src, err) - return - } - - bcR.Logger.Debug("Receive", "src", src, "chID", chID, "msg", msg) - - switch msg := msg.(type) { - case *bcBlockRequestMessage: - if queued := bcR.respondToPeer(msg, src); !queued { - // Unfortunately not queued since the queue is full. - } - case *bcBlockResponseMessage: - // Got a block. - bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) - case *bcStatusRequestMessage: - // Send peer our state. - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) - queued := src.TrySend(BlockchainChannel, msgBytes) - if !queued { - // sorry - } - case *bcStatusResponseMessage: - // Got a peer status. Unverified. - bcR.pool.SetPeerHeight(src.ID(), msg.Height) - default: - bcR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) - } -} - -// Handle messages from the poolReactor telling the reactor what to do. -// NOTE: Don't sleep in the FOR_LOOP or otherwise slow it down! -// (Except for the SYNC_LOOP, which is the primary purpose and must be synchronous.) 
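//-------------------------------------
// Aside: a runnable toy version (not part of the original file) of the
// four-message protocol that Receive above dispatches on: a status exchange
// establishes peer heights, then block requests are answered with either a
// block or an explicit "no block". The types here mirror the bc*Message
// structs in shape only.
package main

import "fmt"

type (
	statusRequest   struct{ Height int64 }
	statusResponse  struct{ Height int64 }
	blockRequest    struct{ Height int64 }
	blockResponse   struct{ Height int64 } // the real one carries *types.Block
	noBlockResponse struct{ Height int64 }
)

// handle mirrors the shape of BlockchainReactor.Receive's type switch.
func handle(storeHeight int64, msg interface{}) interface{} {
	switch m := msg.(type) {
	case *statusRequest:
		return &statusResponse{storeHeight} // tell the peer our height
	case *blockRequest:
		if m.Height <= storeHeight {
			return &blockResponse{m.Height}
		}
		return &noBlockResponse{m.Height} // asked for a block we don't have
	}
	return nil
}

func main() {
	fmt.Printf("%#v\n", handle(20, &statusRequest{}))
	fmt.Printf("%#v\n", handle(20, &blockRequest{Height: 10}))
	fmt.Printf("%#v\n", handle(20, &blockRequest{Height: 22}))
}
//-------------------------------------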
-func (bcR *BlockchainReactor) poolRoutine() { - - trySyncTicker := time.NewTicker(trySyncIntervalMS * time.Millisecond) - statusUpdateTicker := time.NewTicker(statusUpdateIntervalSeconds * time.Second) - switchToConsensusTicker := time.NewTicker(switchToConsensusIntervalSeconds * time.Second) - - blocksSynced := 0 - - chainID := bcR.initialState.ChainID - state := bcR.initialState - - lastHundred := time.Now() - lastRate := 0.0 - -FOR_LOOP: - for { - select { - case request := <-bcR.requestsCh: - peer := bcR.Switch.Peers().Get(request.PeerID) - if peer == nil { - continue FOR_LOOP // Peer has since been disconnected. - } - msgBytes := cdc.MustMarshalBinaryBare(&bcBlockRequestMessage{request.Height}) - queued := peer.TrySend(BlockchainChannel, msgBytes) - if !queued { - // We couldn't make the request, send-queue full. - // The pool handles timeouts, just let it go. - continue FOR_LOOP - } - case err := <-bcR.errorsCh: - peer := bcR.Switch.Peers().Get(err.peerID) - if peer != nil { - bcR.Switch.StopPeerForError(peer, err) - } - case <-statusUpdateTicker.C: - // ask for status updates - go bcR.BroadcastStatusRequest() // nolint: errcheck - case <-switchToConsensusTicker.C: - height, numPending, lenRequesters := bcR.pool.GetStatus() - outbound, inbound, _ := bcR.Switch.NumPeers() - bcR.Logger.Debug("Consensus ticker", "numPending", numPending, "total", lenRequesters, - "outbound", outbound, "inbound", inbound) - if bcR.pool.IsCaughtUp() { - bcR.Logger.Info("Time to switch to consensus reactor!", "height", height) - bcR.pool.Stop() - - conR := bcR.Switch.Reactor("CONSENSUS").(consensusReactor) - conR.SwitchToConsensus(state, blocksSynced) - - break FOR_LOOP - } - case <-trySyncTicker.C: // chan time - // This loop can be slow as long as it's doing syncing work. - SYNC_LOOP: - for i := 0; i < 10; i++ { - // See if there are any blocks to sync. - first, second := bcR.pool.PeekTwoBlocks() - //bcR.Logger.Info("TrySync peeked", "first", first, "second", second) - if first == nil || second == nil { - // We need both to sync the first block. - break SYNC_LOOP - } - firstParts := first.MakePartSet(state.ConsensusParams.BlockPartSizeBytes) - firstPartsHeader := firstParts.Header() - firstID := types.BlockID{first.Hash(), firstPartsHeader} - // Finally, verify the first block using the second's commit - // NOTE: we can probably make this more efficient, but note that calling - // first.Hash() doesn't verify the tx contents, so MakePartSet() is - // currently necessary. - err := state.Validators.VerifyCommit( - chainID, firstID, first.Height, second.LastCommit) - if err != nil { - bcR.Logger.Error("Error in validation", "err", err) - peerID := bcR.pool.RedoRequest(first.Height) - peer := bcR.Switch.Peers().Get(peerID) - if peer != nil { - bcR.Switch.StopPeerForError(peer, fmt.Errorf("BlockchainReactor validation error: %v", err)) - } - break SYNC_LOOP - } else { - bcR.pool.PopRequest() - - // TODO: batch saves so we dont persist to disk every block - bcR.store.SaveBlock(first, firstParts, second.LastCommit) - - // TODO: same thing for app - but we would need a way to - // get the hash without persisting the state - var err error - state, err = bcR.blockExec.ApplyBlock(state, firstID, first) - if err != nil { - // TODO This is bad, are we zombie? 
- cmn.PanicQ(cmn.Fmt("Failed to process committed block (%d:%X): %v", - first.Height, first.Hash(), err)) - } - blocksSynced++ - - if blocksSynced%100 == 0 { - lastRate = 0.9*lastRate + 0.1*(100/time.Since(lastHundred).Seconds()) - bcR.Logger.Info("Fast Sync Rate", "height", bcR.pool.height, - "max_peer_height", bcR.pool.MaxPeerHeight(), "blocks/s", lastRate) - lastHundred = time.Now() - } - } - } - continue FOR_LOOP - case <-bcR.Quit(): - break FOR_LOOP - } - } -} - -// BroadcastStatusRequest broadcasts `BlockStore` height. -func (bcR *BlockchainReactor) BroadcastStatusRequest() error { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()}) - bcR.Switch.Broadcast(BlockchainChannel, msgBytes) - return nil -} - -//----------------------------------------------------------------------------- -// Messages - -// BlockchainMessage is a generic message for this reactor. -type BlockchainMessage interface{} - -func RegisterBlockchainMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*BlockchainMessage)(nil), nil) - cdc.RegisterConcrete(&bcBlockRequestMessage{}, "tendermint/mempool/BlockRequest", nil) - cdc.RegisterConcrete(&bcBlockResponseMessage{}, "tendermint/mempool/BlockResponse", nil) - cdc.RegisterConcrete(&bcNoBlockResponseMessage{}, "tendermint/mempool/NoBlockResponse", nil) - cdc.RegisterConcrete(&bcStatusResponseMessage{}, "tendermint/mempool/StatusResponse", nil) - cdc.RegisterConcrete(&bcStatusRequestMessage{}, "tendermint/mempool/StatusRequest", nil) -} - -// DecodeMessage decodes BlockchainMessage. -// TODO: ensure that bz is completely read. -func DecodeMessage(bz []byte) (msg BlockchainMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinaryBare(bz, &msg) - if err != nil { - err = cmn.ErrorWrap(err, "DecodeMessage() had bytes left over") - } - return -} - -//------------------------------------- - -type bcBlockRequestMessage struct { - Height int64 -} - -func (m *bcBlockRequestMessage) String() string { - return cmn.Fmt("[bcBlockRequestMessage %v]", m.Height) -} - -type bcNoBlockResponseMessage struct { - Height int64 -} - -func (brm *bcNoBlockResponseMessage) String() string { - return cmn.Fmt("[bcNoBlockResponseMessage %d]", brm.Height) -} - -//------------------------------------- - -type bcBlockResponseMessage struct { - Block *types.Block -} - -func (m *bcBlockResponseMessage) String() string { - return cmn.Fmt("[bcBlockResponseMessage %v]", m.Block.Height) -} - -//------------------------------------- - -type bcStatusRequestMessage struct { - Height int64 -} - -func (m *bcStatusRequestMessage) String() string { - return cmn.Fmt("[bcStatusRequestMessage %v]", m.Height) -} - -//------------------------------------- - -type bcStatusResponseMessage struct { - Height int64 -} - -func (m *bcStatusResponseMessage) String() string { - return cmn.Fmt("[bcStatusResponseMessage %v]", m.Height) -} diff --git a/blockchain/reactor_test.go b/blockchain/reactor_test.go deleted file mode 100644 index c7f7e9af..00000000 --- a/blockchain/reactor_test.go +++ /dev/null @@ -1,208 +0,0 @@ -package blockchain - -import ( - "net" - "testing" - - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - 
"github.com/tendermint/tendermint/types" -) - -func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore) { - config := cfg.ResetTestRoot("blockchain_reactor_test") - // blockDB := dbm.NewDebugDB("blockDB", dbm.NewMemDB()) - // stateDB := dbm.NewDebugDB("stateDB", dbm.NewMemDB()) - blockDB := dbm.NewMemDB() - stateDB := dbm.NewMemDB() - blockStore := NewBlockStore(blockDB) - state, err := sm.LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) - if err != nil { - panic(cmn.ErrorWrap(err, "error constructing state from genesis file")) - } - return state, blockStore -} - -func newBlockchainReactor(logger log.Logger, maxBlockHeight int64) *BlockchainReactor { - state, blockStore := makeStateAndBlockStore(logger) - - // Make the blockchainReactor itself - fastSync := true - var nilApp proxy.AppConnConsensus - blockExec := sm.NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nilApp, - sm.MockMempool{}, sm.MockEvidencePool{}) - - bcReactor := NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) - bcReactor.SetLogger(logger.With("module", "blockchain")) - - // Next: we need to set a switch in order for peers to be added in - bcReactor.Switch = p2p.NewSwitch(cfg.DefaultP2PConfig()) - - // Lastly: let's add some blocks in - for blockHeight := int64(1); blockHeight <= maxBlockHeight; blockHeight++ { - firstBlock := makeBlock(blockHeight, state) - secondBlock := makeBlock(blockHeight+1, state) - firstParts := firstBlock.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) - blockStore.SaveBlock(firstBlock, firstParts, secondBlock.LastCommit) - } - - return bcReactor -} - -func TestNoBlockResponse(t *testing.T) { - maxBlockHeight := int64(20) - - bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight) - bcr.Start() - defer bcr.Stop() - - // Add some peers in - peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12))) - bcr.AddPeer(peer) - - chID := byte(0x01) - - tests := []struct { - height int64 - existent bool - }{ - {maxBlockHeight + 2, false}, - {10, true}, - {1, true}, - {100, false}, - } - - // receive a request message from peer, - // wait for our response to be received on the peer - for _, tt := range tests { - reqBlockMsg := &bcBlockRequestMessage{tt.height} - reqBlockBytes := cdc.MustMarshalBinaryBare(reqBlockMsg) - bcr.Receive(chID, peer, reqBlockBytes) - msg := peer.lastBlockchainMessage() - - if tt.existent { - if blockMsg, ok := msg.(*bcBlockResponseMessage); !ok { - t.Fatalf("Expected to receive a block response for height %d", tt.height) - } else if blockMsg.Block.Height != tt.height { - t.Fatalf("Expected response to be for height %d, got %d", tt.height, blockMsg.Block.Height) - } - } else { - if noBlockMsg, ok := msg.(*bcNoBlockResponseMessage); !ok { - t.Fatalf("Expected to receive a no block response for height %d", tt.height) - } else if noBlockMsg.Height != tt.height { - t.Fatalf("Expected response to be for height %d, got %d", tt.height, noBlockMsg.Height) - } - } - } -} - -/* -// NOTE: This is too hard to test without -// an easy way to add test peer to switch -// or without significant refactoring of the module. -// Alternatively we could actually dial a TCP conn but -// that seems extreme. 
-func TestBadBlockStopsPeer(t *testing.T) { - maxBlockHeight := int64(20) - - bcr := newBlockchainReactor(log.TestingLogger(), maxBlockHeight) - bcr.Start() - defer bcr.Stop() - - // Add some peers in - peer := newbcrTestPeer(p2p.ID(cmn.RandStr(12))) - - // XXX: This doesn't add the peer to anything, - // so it's hard to check that it's later removed - bcr.AddPeer(peer) - assert.True(t, bcr.Switch.Peers().Size() > 0) - - // send a bad block from the peer - // default blocks already dont have commits, so should fail - block := bcr.store.LoadBlock(3) - msg := &bcBlockResponseMessage{Block: block} - peer.Send(BlockchainChannel, struct{ BlockchainMessage }{msg}) - - ticker := time.NewTicker(time.Millisecond * 10) - timer := time.NewTimer(time.Second * 2) -LOOP: - for { - select { - case <-ticker.C: - if bcr.Switch.Peers().Size() == 0 { - break LOOP - } - case <-timer.C: - t.Fatal("Timed out waiting to disconnect peer") - } - } -} -*/ - -//---------------------------------------------- -// utility funcs - -func makeTxs(height int64) (txs []types.Tx) { - for i := 0; i < 10; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) - } - return txs -} - -func makeBlock(height int64, state sm.State) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(height), new(types.Commit)) - return block -} - -// The Test peer -type bcrTestPeer struct { - cmn.BaseService - id p2p.ID - ch chan interface{} -} - -var _ p2p.Peer = (*bcrTestPeer)(nil) - -func newbcrTestPeer(id p2p.ID) *bcrTestPeer { - bcr := &bcrTestPeer{ - id: id, - ch: make(chan interface{}, 2), - } - bcr.BaseService = *cmn.NewBaseService(nil, "bcrTestPeer", bcr) - return bcr -} - -func (tp *bcrTestPeer) lastBlockchainMessage() interface{} { return <-tp.ch } - -func (tp *bcrTestPeer) TrySend(chID byte, msgBytes []byte) bool { - var msg BlockchainMessage - err := cdc.UnmarshalBinaryBare(msgBytes, &msg) - if err != nil { - panic(cmn.ErrorWrap(err, "Error while trying to parse a BlockchainMessage")) - } - if _, ok := msg.(*bcStatusResponseMessage); ok { - // Discard status response messages since they skew our results - // We only want to deal with: - // + bcBlockResponseMessage - // + bcNoBlockResponseMessage - } else { - tp.ch <- msg - } - return true -} - -func (tp *bcrTestPeer) Send(chID byte, msgBytes []byte) bool { return tp.TrySend(chID, msgBytes) } -func (tp *bcrTestPeer) NodeInfo() p2p.NodeInfo { return p2p.NodeInfo{} } -func (tp *bcrTestPeer) Status() p2p.ConnectionStatus { return p2p.ConnectionStatus{} } -func (tp *bcrTestPeer) ID() p2p.ID { return tp.id } -func (tp *bcrTestPeer) IsOutbound() bool { return false } -func (tp *bcrTestPeer) IsPersistent() bool { return true } -func (tp *bcrTestPeer) Get(s string) interface{} { return s } -func (tp *bcrTestPeer) Set(string, interface{}) {} -func (tp *bcrTestPeer) RemoteIP() net.IP { return []byte{127, 0, 0, 1} } diff --git a/blockchain/store.go b/blockchain/store.go deleted file mode 100644 index e7608b2c..00000000 --- a/blockchain/store.go +++ /dev/null @@ -1,247 +0,0 @@ -package blockchain - -import ( - "fmt" - "sync" - - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - - "github.com/tendermint/tendermint/types" -) - -/* -BlockStore is a simple low level store for blocks. 
- -There are three types of information stored: - - BlockMeta: Meta information about each block - - Block part: Parts of each block, aggregated w/ PartSet - - Commit: The commit part of each block, for gossiping precommit votes - -Currently the precommit signatures are duplicated in the Block parts as -well as the Commit. In the future this may change, perhaps by moving -the Commit data outside the Block. (TODO) - -// NOTE: BlockStore methods will panic if they encounter errors -// deserializing loaded data, indicating probable corruption on disk. -*/ -type BlockStore struct { - db dbm.DB - - mtx sync.RWMutex - height int64 -} - -// NewBlockStore returns a new BlockStore with the given DB, -// initialized to the last height that was committed to the DB. -func NewBlockStore(db dbm.DB) *BlockStore { - bsjson := LoadBlockStoreStateJSON(db) - return &BlockStore{ - height: bsjson.Height, - db: db, - } -} - -// Height returns the last known contiguous block height. -func (bs *BlockStore) Height() int64 { - bs.mtx.RLock() - defer bs.mtx.RUnlock() - return bs.height -} - -// LoadBlock returns the block with the given height. -// If no block is found for that height, it returns nil. -func (bs *BlockStore) LoadBlock(height int64) *types.Block { - var blockMeta = bs.LoadBlockMeta(height) - if blockMeta == nil { - return nil - } - - var block = new(types.Block) - buf := []byte{} - for i := 0; i < blockMeta.BlockID.PartsHeader.Total; i++ { - part := bs.LoadBlockPart(height, i) - buf = append(buf, part.Bytes...) - } - err := cdc.UnmarshalBinary(buf, block) - if err != nil { - // NOTE: The existence of meta should imply the existence of the - // block. So, make sure meta is only saved after blocks are saved. - panic(cmn.ErrorWrap(err, "Error reading block")) - } - return block -} - -// LoadBlockPart returns the Part at the given index -// from the block at the given height. -// If no part is found for the given height and index, it returns nil. -func (bs *BlockStore) LoadBlockPart(height int64, index int) *types.Part { - var part = new(types.Part) - bz := bs.db.Get(calcBlockPartKey(height, index)) - if len(bz) == 0 { - return nil - } - err := cdc.UnmarshalBinaryBare(bz, part) - if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block part")) - } - return part -} - -// LoadBlockMeta returns the BlockMeta for the given height. -// If no block is found for the given height, it returns nil. -func (bs *BlockStore) LoadBlockMeta(height int64) *types.BlockMeta { - var blockMeta = new(types.BlockMeta) - bz := bs.db.Get(calcBlockMetaKey(height)) - if len(bz) == 0 { - return nil - } - err := cdc.UnmarshalBinaryBare(bz, blockMeta) - if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block meta")) - } - return blockMeta -} - -// LoadBlockCommit returns the Commit for the given height. -// This commit consists of the +2/3 and other Precommit-votes for block at `height`, -// and it comes from the block.LastCommit for `height+1`. -// If no commit is found for the given height, it returns nil. -func (bs *BlockStore) LoadBlockCommit(height int64) *types.Commit { - var commit = new(types.Commit) - bz := bs.db.Get(calcBlockCommitKey(height)) - if len(bz) == 0 { - return nil - } - err := cdc.UnmarshalBinaryBare(bz, commit) - if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block commit")) - } - return commit -} - -// LoadSeenCommit returns the locally seen Commit for the given height. 
-// This is useful when we've seen a commit, but there has not yet been -// a new block at `height + 1` that includes this commit in its block.LastCommit. -func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { - var commit = new(types.Commit) - bz := bs.db.Get(calcSeenCommitKey(height)) - if len(bz) == 0 { - return nil - } - err := cdc.UnmarshalBinaryBare(bz, commit) - if err != nil { - panic(cmn.ErrorWrap(err, "Error reading block seen commit")) - } - return commit -} - -// SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. -// blockParts: Must be parts of the block -// seenCommit: The +2/3 precommits that were seen which committed at height. -// If all the nodes restart after committing a block, -// we need this to reload the precommits to catch-up nodes to the -// most recent height. Otherwise they'd stall at H-1. -func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { - if block == nil { - cmn.PanicSanity("BlockStore can only save a non-nil block") - } - height := block.Height - if g, w := height, bs.Height()+1; g != w { - cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) - } - if !blockParts.IsComplete() { - cmn.PanicSanity(cmn.Fmt("BlockStore can only save complete block part sets")) - } - - // Save block meta - blockMeta := types.NewBlockMeta(block, blockParts) - metaBytes := cdc.MustMarshalBinaryBare(blockMeta) - bs.db.Set(calcBlockMetaKey(height), metaBytes) - - // Save block parts - for i := 0; i < blockParts.Total(); i++ { - part := blockParts.GetPart(i) - bs.saveBlockPart(height, i, part) - } - - // Save block commit (duplicate and separate from the Block) - blockCommitBytes := cdc.MustMarshalBinaryBare(block.LastCommit) - bs.db.Set(calcBlockCommitKey(height-1), blockCommitBytes) - - // Save seen commit (seen +2/3 precommits for block) - // NOTE: we can delete this at a later height - seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit) - bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) - - // Save new BlockStoreStateJSON descriptor - BlockStoreStateJSON{Height: height}.Save(bs.db) - - // Done! - bs.mtx.Lock() - bs.height = height - bs.mtx.Unlock() - - // Flush - bs.db.SetSync(nil, nil) -} - -func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { - if height != bs.Height()+1 { - cmn.PanicSanity(cmn.Fmt("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) - } - partBytes := cdc.MustMarshalBinaryBare(part) - bs.db.Set(calcBlockPartKey(height, index), partBytes) -} - -//----------------------------------------------------------------------------- - -func calcBlockMetaKey(height int64) []byte { - return []byte(fmt.Sprintf("H:%v", height)) -} - -func calcBlockPartKey(height int64, partIndex int) []byte { - return []byte(fmt.Sprintf("P:%v:%v", height, partIndex)) -} - -func calcBlockCommitKey(height int64) []byte { - return []byte(fmt.Sprintf("C:%v", height)) -} - -func calcSeenCommitKey(height int64) []byte { - return []byte(fmt.Sprintf("SC:%v", height)) -} - -//----------------------------------------------------------------------------- - -var blockStoreKey = []byte("blockStore") - -type BlockStoreStateJSON struct { - Height int64 `json:"height"` -} - -// Save persists the blockStore state to the database as JSON. 
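-// Save overwrites whatever was stored before, so the persisted height only
-// ever moves forward as blocks are saved.
-//
-// A minimal round-trip sketch (illustrative only; `db` stands for any
-// dbm.DB, e.g. dbm.NewMemDB()):
-//
-//	BlockStoreStateJSON{Height: 42}.Save(db) // JSON under the "blockStore" key
-//	bsj := LoadBlockStoreStateJSON(db)       // bsj.Height == 42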
-func (bsj BlockStoreStateJSON) Save(db dbm.DB) { - bytes, err := cdc.MarshalJSON(bsj) - if err != nil { - cmn.PanicSanity(cmn.Fmt("Could not marshal state bytes: %v", err)) - } - db.SetSync(blockStoreKey, bytes) -} - -// LoadBlockStoreStateJSON returns the BlockStoreStateJSON as loaded from disk. -// If no BlockStoreStateJSON was previously persisted, it returns the zero value. -func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { - bytes := db.Get(blockStoreKey) - if len(bytes) == 0 { - return BlockStoreStateJSON{ - Height: 0, - } - } - bsj := BlockStoreStateJSON{} - err := cdc.UnmarshalJSON(bytes, &bsj) - if err != nil { - panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) - } - return bsj -} diff --git a/blockchain/store_test.go b/blockchain/store_test.go deleted file mode 100644 index a1bd0fd5..00000000 --- a/blockchain/store_test.go +++ /dev/null @@ -1,383 +0,0 @@ -package blockchain - -import ( - "bytes" - "fmt" - "runtime/debug" - "strings" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/types" -) - -func TestLoadBlockStoreStateJSON(t *testing.T) { - db := db.NewMemDB() - - bsj := &BlockStoreStateJSON{Height: 1000} - bsj.Save(db) - - retrBSJ := LoadBlockStoreStateJSON(db) - - assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") -} - -func TestNewBlockStore(t *testing.T) { - db := db.NewMemDB() - db.Set(blockStoreKey, []byte(`{"height": 10000}`)) - bs := NewBlockStore(db) - require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore") - - panicCausers := []struct { - data []byte - wantErr string - }{ - {[]byte("artful-doger"), "not unmarshal bytes"}, - {[]byte(" "), "unmarshal bytes"}, - } - - for i, tt := range panicCausers { - // Expecting a panic here on trying to parse an invalid blockStore - _, _, panicErr := doFn(func() (interface{}, error) { - db.Set(blockStoreKey, tt.data) - _ = NewBlockStore(db) - return nil, nil - }) - require.NotNil(t, panicErr, "#%d panicCauser: %q expected a panic", i, tt.data) - assert.Contains(t, panicErr.Error(), tt.wantErr, "#%d data: %q", i, tt.data) - } - - db.Set(blockStoreKey, nil) - bs = NewBlockStore(db) - assert.Equal(t, bs.Height(), int64(0), "expecting nil bytes to be unmarshaled alright") -} - -func freshBlockStore() (*BlockStore, db.DB) { - db := db.NewMemDB() - return NewBlockStore(db), db -} - -var ( - state, _ = makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) - - block = makeBlock(1, state) - partSet = block.MakePartSet(2) - part1 = partSet.GetPart(0) - part2 = partSet.GetPart(1) - seenCommit1 = &types.Commit{Precommits: []*types.Vote{{Height: 10, - Timestamp: time.Now().UTC()}}} -) - -// TODO: This test should be simplified ... 
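-// The tests below route calls through doFn (defined at the bottom of this
-// file), which recovers panics and hands them back as ordinary errors. A
-// typical use, sketched here for illustration:
-//
-//	_, _, panicErr := doFn(func() (interface{}, error) {
-//		bs.LoadBlockMeta(height) // may panic on corrupt data
-//		return nil, nil
-//	})
-//	// panicErr != nil iff the wrapped call panicked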
-
-func TestBlockStoreSaveLoadBlock(t *testing.T) {
-	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
-	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
-
-	// check there are no blocks at various heights
-	noBlockHeights := []int64{0, -1, 100, 1000, 2}
-	for i, height := range noBlockHeights {
-		if g := bs.LoadBlock(height); g != nil {
-			t.Errorf("#%d: height(%d) got a block; want nil", i, height)
-		}
-	}
-
-	// save a block
-	block := makeBlock(bs.Height()+1, state)
-	validPartSet := block.MakePartSet(2)
-	seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
-	bs.SaveBlock(block, validPartSet, seenCommit)
-	require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
-
-	incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2})
-	uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0})
-	uncontiguousPartSet.AddPart(part2)
-
-	header1 := types.Header{
-		Height:  1,
-		NumTxs:  100,
-		ChainID: "block_test",
-		Time:    time.Now(),
-	}
-	header2 := header1
-	header2.Height = 4
-
-	// End of setup, test data
-
-	commitAtH10 := &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
-	tuples := []struct {
-		block      *types.Block
-		parts      *types.PartSet
-		seenCommit *types.Commit
-		wantErr    bool
-		wantPanic  string
-
-		corruptBlockInDB      bool
-		corruptCommitInDB     bool
-		corruptSeenCommitInDB bool
-		eraseCommitInDB       bool
-		eraseSeenCommitInDB   bool
-	}{
-		{
-			block:      newBlock(&header1, commitAtH10),
-			parts:      validPartSet,
-			seenCommit: seenCommit1,
-		},
-
-		{
-			block:     nil,
-			wantPanic: "only save a non-nil block",
-		},
-
-		{
-			block:     newBlock(&header2, commitAtH10),
-			parts:     uncontiguousPartSet,
-			wantPanic: "only save contiguous blocks", // and incomplete and uncontiguous parts
-		},
-
-		{
-			block:     newBlock(&header1, commitAtH10),
-			parts:     incompletePartSet,
-			wantPanic: "only save complete block", // incomplete parts
-		},
-
-		{
-			block:             newBlock(&header1, commitAtH10),
-			parts:             validPartSet,
-			seenCommit:        seenCommit1,
-			corruptCommitInDB: true, // Corrupt the DB's commit entry
-			wantPanic:         "Error reading block commit",
-		},
-
-		{
-			block:            newBlock(&header1, commitAtH10),
-			parts:            validPartSet,
-			seenCommit:       seenCommit1,
-			wantPanic:        "Error reading block",
-			corruptBlockInDB: true, // Corrupt the DB's block entry
-		},
-
-		{
-			block:      newBlock(&header1, commitAtH10),
-			parts:      validPartSet,
-			seenCommit: seenCommit1,
-
-			// Expecting no error and we want a nil back
-			eraseSeenCommitInDB: true,
-		},
-
-		{
-			block:      newBlock(&header1, commitAtH10),
-			parts:      validPartSet,
-			seenCommit: seenCommit1,
-
-			corruptSeenCommitInDB: true,
-			wantPanic:             "Error reading block seen commit",
-		},
-
-		{
-			block:      newBlock(&header1, commitAtH10),
-			parts:      validPartSet,
-			seenCommit: seenCommit1,
-
-			// Expecting no error and we want a nil back
-			eraseCommitInDB: true,
-		},
-	}
-
-	type quad struct {
-		block  *types.Block
-		commit *types.Commit
-		meta   *types.BlockMeta
-
-		seenCommit *types.Commit
-	}
-
-	for i, tuple := range tuples {
-		bs, db := freshBlockStore()
-		// SaveBlock
-		res, err, panicErr := doFn(func() (interface{}, error) {
-			bs.SaveBlock(tuple.block, tuple.parts, tuple.seenCommit)
-			if tuple.block == nil {
-				return nil, nil
-			}
-
-			if tuple.corruptBlockInDB {
-				db.Set(calcBlockMetaKey(tuple.block.Height), []byte("block-bogus"))
-			}
-			bBlock := bs.LoadBlock(tuple.block.Height)
-			bBlockMeta := 
bs.LoadBlockMeta(tuple.block.Height) - - if tuple.eraseSeenCommitInDB { - db.Delete(calcSeenCommitKey(tuple.block.Height)) - } - if tuple.corruptSeenCommitInDB { - db.Set(calcSeenCommitKey(tuple.block.Height), []byte("bogus-seen-commit")) - } - bSeenCommit := bs.LoadSeenCommit(tuple.block.Height) - - commitHeight := tuple.block.Height - 1 - if tuple.eraseCommitInDB { - db.Delete(calcBlockCommitKey(commitHeight)) - } - if tuple.corruptCommitInDB { - db.Set(calcBlockCommitKey(commitHeight), []byte("foo-bogus")) - } - bCommit := bs.LoadBlockCommit(commitHeight) - return &quad{block: bBlock, seenCommit: bSeenCommit, commit: bCommit, - meta: bBlockMeta}, nil - }) - - if subStr := tuple.wantPanic; subStr != "" { - if panicErr == nil { - t.Errorf("#%d: want a non-nil panic", i) - } else if got := panicErr.Error(); !strings.Contains(got, subStr) { - t.Errorf("#%d:\n\tgotErr: %q\nwant substring: %q", i, got, subStr) - } - continue - } - - if tuple.wantErr { - if err == nil { - t.Errorf("#%d: got nil error", i) - } - continue - } - - assert.Nil(t, panicErr, "#%d: unexpected panic", i) - assert.Nil(t, err, "#%d: expecting a non-nil error", i) - qua, ok := res.(*quad) - if !ok || qua == nil { - t.Errorf("#%d: got nil quad back; gotType=%T", i, res) - continue - } - if tuple.eraseSeenCommitInDB { - assert.Nil(t, qua.seenCommit, - "erased the seenCommit in the DB hence we should get back a nil seenCommit") - } - if tuple.eraseCommitInDB { - assert.Nil(t, qua.commit, - "erased the commit in the DB hence we should get back a nil commit") - } - } -} - -func TestLoadBlockPart(t *testing.T) { - bs, db := freshBlockStore() - height, index := int64(10), 1 - loadPart := func() (interface{}, error) { - part := bs.LoadBlockPart(height, index) - return part, nil - } - - // Initially no contents. - // 1. Requesting for a non-existent block shouldn't fail - res, _, panicErr := doFn(loadPart) - require.Nil(t, panicErr, "a non-existent block part shouldn't cause a panic") - require.Nil(t, res, "a non-existent block part should return nil") - - // 2. Next save a corrupted block then try to load it - db.Set(calcBlockPartKey(height, index), []byte("Tendermint")) - res, _, panicErr = doFn(loadPart) - require.NotNil(t, panicErr, "expecting a non-nil panic") - require.Contains(t, panicErr.Error(), "Error reading block part") - - // 3. A good block serialized and saved to the DB should be retrievable - db.Set(calcBlockPartKey(height, index), cdc.MustMarshalBinaryBare(part1)) - gotPart, _, panicErr := doFn(loadPart) - require.Nil(t, panicErr, "an existent and proper block should not panic") - require.Nil(t, res, "a properly saved block should return a proper block") - require.Equal(t, gotPart.(*types.Part).Hash(), part1.Hash(), - "expecting successful retrieval of previously saved block") -} - -func TestLoadBlockMeta(t *testing.T) { - bs, db := freshBlockStore() - height := int64(10) - loadMeta := func() (interface{}, error) { - meta := bs.LoadBlockMeta(height) - return meta, nil - } - - // Initially no contents. - // 1. Requesting for a non-existent blockMeta shouldn't fail - res, _, panicErr := doFn(loadMeta) - require.Nil(t, panicErr, "a non-existent blockMeta shouldn't cause a panic") - require.Nil(t, res, "a non-existent blockMeta should return nil") - - // 2. 
Next save a corrupted blockMeta then try to load it
-	db.Set(calcBlockMetaKey(height), []byte("Tendermint-Meta"))
-	res, _, panicErr = doFn(loadMeta)
-	require.NotNil(t, panicErr, "expecting a non-nil panic")
-	require.Contains(t, panicErr.Error(), "Error reading block meta")
-
-	// 3. A good blockMeta serialized and saved to the DB should be retrievable
-	meta := &types.BlockMeta{}
-	db.Set(calcBlockMetaKey(height), cdc.MustMarshalBinaryBare(meta))
-	gotMeta, _, panicErr := doFn(loadMeta)
-	require.Nil(t, panicErr, "an existent and proper blockMeta should not panic")
-	require.Nil(t, res, "a properly saved blockMeta should return a proper blockMeta")
-	require.Equal(t, cdc.MustMarshalBinaryBare(meta), cdc.MustMarshalBinaryBare(gotMeta),
-		"expecting successful retrieval of previously saved blockMeta")
-}
-
-func TestBlockFetchAtHeight(t *testing.T) {
-	state, bs := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer)))
-	require.Equal(t, bs.Height(), int64(0), "initially the height should be zero")
-	block := makeBlock(bs.Height()+1, state)
-
-	partSet := block.MakePartSet(2)
-	seenCommit := &types.Commit{Precommits: []*types.Vote{{Height: 10,
-		Timestamp: time.Now().UTC()}}}
-
-	bs.SaveBlock(block, partSet, seenCommit)
-	require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed")
-
-	blockAtHeight := bs.LoadBlock(bs.Height())
-	bz1 := cdc.MustMarshalBinaryBare(block)
-	bz2 := cdc.MustMarshalBinaryBare(blockAtHeight)
-	require.Equal(t, bz1, bz2)
-	require.Equal(t, block.Hash(), blockAtHeight.Hash(),
-		"expecting a successful load of the last saved block")
-
-	blockAtHeightPlus1 := bs.LoadBlock(bs.Height() + 1)
-	require.Nil(t, blockAtHeightPlus1, "expecting an unsuccessful load of Height()+1")
-	blockAtHeightPlus2 := bs.LoadBlock(bs.Height() + 2)
-	require.Nil(t, blockAtHeightPlus2, "expecting an unsuccessful load of Height()+2")
-}
-
-func doFn(fn func() (interface{}, error)) (res interface{}, err error, panicErr error) {
-	defer func() {
-		if r := recover(); r != nil {
-			switch e := r.(type) {
-			case error:
-				panicErr = e
-			case string:
-				panicErr = fmt.Errorf("%s", e)
-			default:
-				if st, ok := r.(fmt.Stringer); ok {
-					panicErr = fmt.Errorf("%s", st)
-				} else {
-					panicErr = fmt.Errorf("%s", debug.Stack())
-				}
-			}
-		}
-	}()
-
-	res, err = fn()
-	return res, err, panicErr
-}
-
-func newBlock(hdr *types.Header, lastCommit *types.Commit) *types.Block {
-	return &types.Block{
-		Header:     hdr,
-		LastCommit: lastCommit,
-	}
-}
diff --git a/blockchain/wire.go b/blockchain/wire.go
deleted file mode 100644
index 55b4e60a..00000000
--- a/blockchain/wire.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package blockchain
-
-import (
-	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	RegisterBlockchainMessages(cdc)
-	crypto.RegisterAmino(cdc)
-}
diff --git a/cmd/priv_val_server/main.go b/cmd/priv_val_server/main.go
deleted file mode 100644
index c78adeb8..00000000
--- a/cmd/priv_val_server/main.go
+++ /dev/null
@@ -1,53 +0,0 @@
-package main
-
-import (
-	"flag"
-	"os"
-
-	crypto "github.com/tendermint/go-crypto"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
-
-	"github.com/tendermint/tendermint/privval"
-)
-
-func main() {
-	var (
-		addr        = flag.String("addr", ":26659", "Address of client to connect to")
-		chainID     = flag.String("chain-id", "mychain", "chain id")
-		privValPath = flag.String("priv", "", "priv val file path")
-
-		logger = log.NewTMLogger(
log.NewSyncWriter(os.Stdout), - ).With("module", "priv_val") - ) - flag.Parse() - - logger.Info( - "Starting private validator", - "addr", *addr, - "chainID", *chainID, - "privPath", *privValPath, - ) - - pv := privval.LoadFilePV(*privValPath) - - rs := privval.NewRemoteSigner( - logger, - *chainID, - *addr, - pv, - crypto.GenPrivKeyEd25519(), - ) - err := rs.Start() - if err != nil { - panic(err) - } - - cmn.TrapSignal(func() { - err := rs.Stop() - if err != nil { - panic(err) - } - }) -} diff --git a/cmd/tendermint/commands/gen_node_key.go b/cmd/tendermint/commands/gen_node_key.go deleted file mode 100644 index 4990be47..00000000 --- a/cmd/tendermint/commands/gen_node_key.go +++ /dev/null @@ -1,32 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" -) - -// GenNodeKeyCmd allows the generation of a node key. It prints node's ID to -// the standard output. -var GenNodeKeyCmd = &cobra.Command{ - Use: "gen_node_key", - Short: "Generate a node key for this node and print its ID", - RunE: genNodeKey, -} - -func genNodeKey(cmd *cobra.Command, args []string) error { - nodeKeyFile := config.NodeKeyFile() - if cmn.FileExists(nodeKeyFile) { - return fmt.Errorf("node key at %s already exists", nodeKeyFile) - } - - nodeKey, err := p2p.LoadOrGenNodeKey(nodeKeyFile) - if err != nil { - return err - } - fmt.Println(nodeKey.ID()) - return nil -} diff --git a/cmd/tendermint/commands/gen_validator.go b/cmd/tendermint/commands/gen_validator.go deleted file mode 100644 index 20d43d4d..00000000 --- a/cmd/tendermint/commands/gen_validator.go +++ /dev/null @@ -1,27 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/privval" -) - -// GenValidatorCmd allows the generation of a keypair for a -// validator. -var GenValidatorCmd = &cobra.Command{ - Use: "gen_validator", - Short: "Generate new validator keypair", - Run: genValidator, -} - -func genValidator(cmd *cobra.Command, args []string) { - pv := privval.GenFilePV("") - jsbz, err := cdc.MarshalJSON(pv) - if err != nil { - panic(err) - } - fmt.Printf(`%v -`, string(jsbz)) -} diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go deleted file mode 100644 index 45812b9e..00000000 --- a/cmd/tendermint/commands/init.go +++ /dev/null @@ -1,70 +0,0 @@ -package commands - -import ( - "time" - - "github.com/spf13/cobra" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -// InitFilesCmd initialises a fresh Tendermint Core instance. 
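-// It writes a private validator file, a node key and a genesis document under
-// the configured root, skipping each one that already exists.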
-var InitFilesCmd = &cobra.Command{ - Use: "init", - Short: "Initialize Tendermint", - RunE: initFiles, -} - -func initFiles(cmd *cobra.Command, args []string) error { - return initFilesWithConfig(config) -} - -func initFilesWithConfig(config *cfg.Config) error { - // private validator - privValFile := config.PrivValidatorFile() - var pv *privval.FilePV - if cmn.FileExists(privValFile) { - pv = privval.LoadFilePV(privValFile) - logger.Info("Found private validator", "path", privValFile) - } else { - pv = privval.GenFilePV(privValFile) - pv.Save() - logger.Info("Generated private validator", "path", privValFile) - } - - nodeKeyFile := config.NodeKeyFile() - if cmn.FileExists(nodeKeyFile) { - logger.Info("Found node key", "path", nodeKeyFile) - } else { - if _, err := p2p.LoadOrGenNodeKey(nodeKeyFile); err != nil { - return err - } - logger.Info("Generated node key", "path", nodeKeyFile) - } - - // genesis file - genFile := config.GenesisFile() - if cmn.FileExists(genFile) { - logger.Info("Found genesis file", "path", genFile) - } else { - genDoc := types.GenesisDoc{ - ChainID: cmn.Fmt("test-chain-%v", cmn.RandStr(6)), - GenesisTime: time.Now(), - } - genDoc.Validators = []types.GenesisValidator{{ - PubKey: pv.GetPubKey(), - Power: 10, - }} - - if err := genDoc.SaveAs(genFile); err != nil { - return err - } - logger.Info("Generated genesis file", "path", genFile) - } - - return nil -} diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go deleted file mode 100644 index 6987b7f1..00000000 --- a/cmd/tendermint/commands/lite.go +++ /dev/null @@ -1,87 +0,0 @@ -package commands - -import ( - "fmt" - "net/url" - - "github.com/spf13/cobra" - - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/lite/proxy" - rpcclient "github.com/tendermint/tendermint/rpc/client" -) - -// LiteCmd represents the base command when called without any subcommands -var LiteCmd = &cobra.Command{ - Use: "lite", - Short: "Run lite-client proxy server, verifying tendermint rpc", - Long: `This node will run a secure proxy to a tendermint rpc server. - -All calls that can be tracked back to a block header by a proof -will be verified before passing them back to the caller. 
Other than
-that it will present the same interface as a full tendermint node,
-just with added trust and running locally.`,
-	RunE:         runProxy,
-	SilenceUsage: true,
-}
-
-var (
-	listenAddr string
-	nodeAddr   string
-	chainID    string
-	home       string
-)
-
-func init() {
-	LiteCmd.Flags().StringVar(&listenAddr, "laddr", "tcp://localhost:8888", "Serve the proxy on the given address")
-	LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address")
-	LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
-	LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
-}
-
-func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
-	u, err := url.Parse(addr)
-	if err != nil {
-		return "", err
-	}
-	switch u.Scheme {
-	case "tcp", "unix":
-	case "":
-		u.Scheme = "tcp"
-	default:
-		return "", fmt.Errorf("unknown scheme %q, use either tcp or unix", u.Scheme)
-	}
-	return u.String(), nil
-}
-
-func runProxy(cmd *cobra.Command, args []string) error {
-	nodeAddr, err := ensureAddrHasSchemeOrDefaultToTCP(nodeAddr)
-	if err != nil {
-		return err
-	}
-	listenAddr, err := ensureAddrHasSchemeOrDefaultToTCP(listenAddr)
-	if err != nil {
-		return err
-	}
-
-	// First, connect a client
-	node := rpcclient.NewHTTP(nodeAddr, "/websocket")
-
-	cert, err := proxy.GetCertifier(chainID, home, nodeAddr)
-	if err != nil {
-		return err
-	}
-	sc := proxy.SecureClient(node, cert)
-
-	err = proxy.StartProxy(sc, listenAddr, logger)
-	if err != nil {
-		return err
-	}
-
-	cmn.TrapSignal(func() {
-		// TODO: close up shop
-	})
-
-	return nil
-}
diff --git a/cmd/tendermint/commands/probe_upnp.go b/cmd/tendermint/commands/probe_upnp.go
deleted file mode 100644
index 35c3c354..00000000
--- a/cmd/tendermint/commands/probe_upnp.go
+++ /dev/null
@@ -1,31 +0,0 @@
-package commands
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	"github.com/tendermint/tendermint/p2p/upnp"
-)
-
-// ProbeUpnpCmd adds capabilities to test the UPnP functionality.
-var ProbeUpnpCmd = &cobra.Command{
-	Use:   "probe_upnp",
-	Short: "Test UPnP functionality",
-	RunE:  probeUpnp,
-}
-
-func probeUpnp(cmd *cobra.Command, args []string) error {
-	capabilities, err := upnp.Probe(logger)
-	if err != nil {
-		fmt.Println("Probe failed:", err)
-	} else {
-		fmt.Println("Probe success!")
-		jsonBytes, err := cdc.MarshalJSON(capabilities)
-		if err != nil {
-			return err
-		}
-		fmt.Println(string(jsonBytes))
-	}
-	return nil
-}
diff --git a/cmd/tendermint/commands/replay.go b/cmd/tendermint/commands/replay.go
deleted file mode 100644
index 303ccba6..00000000
--- a/cmd/tendermint/commands/replay.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package commands
-
-import (
-	"github.com/spf13/cobra"
-
-	"github.com/tendermint/tendermint/consensus"
-)
-
-// ReplayCmd allows replaying of messages from the WAL.
-var ReplayCmd = &cobra.Command{
-	Use:   "replay",
-	Short: "Replay messages from WAL",
-	Run: func(cmd *cobra.Command, args []string) {
-		consensus.RunReplayFile(config.BaseConfig, config.Consensus, false)
-	},
-}
-
-// ReplayConsoleCmd allows replaying of messages from the WAL in a
-// console. 
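-// The only difference from ReplayCmd above is the final true argument to
-// consensus.RunReplayFile, which selects the console (interactive) mode.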
-var ReplayConsoleCmd = &cobra.Command{ - Use: "replay_console", - Short: "Replay messages from WAL in a console", - Run: func(cmd *cobra.Command, args []string) { - consensus.RunReplayFile(config.BaseConfig, config.Consensus, true) - }, -} diff --git a/cmd/tendermint/commands/reset_priv_validator.go b/cmd/tendermint/commands/reset_priv_validator.go deleted file mode 100644 index 32d7b143..00000000 --- a/cmd/tendermint/commands/reset_priv_validator.go +++ /dev/null @@ -1,69 +0,0 @@ -package commands - -import ( - "os" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tmlibs/log" -) - -// ResetAllCmd removes the database of this Tendermint core -// instance. -var ResetAllCmd = &cobra.Command{ - Use: "unsafe_reset_all", - Short: "(unsafe) Remove all the data and WAL, reset this node's validator to genesis state", - Run: resetAll, -} - -// ResetPrivValidatorCmd resets the private validator files. -var ResetPrivValidatorCmd = &cobra.Command{ - Use: "unsafe_reset_priv_validator", - Short: "(unsafe) Reset this node's validator to genesis state", - Run: resetPrivValidator, -} - -// XXX: this is totally unsafe. -// it's only suitable for testnets. -func resetAll(cmd *cobra.Command, args []string) { - ResetAll(config.DBDir(), config.P2P.AddrBookFile(), config.PrivValidatorFile(), logger) -} - -// XXX: this is totally unsafe. -// it's only suitable for testnets. -func resetPrivValidator(cmd *cobra.Command, args []string) { - resetFilePV(config.PrivValidatorFile(), logger) -} - -// ResetAll removes the privValidator and address book files plus all data. -// Exported so other CLI tools can use it. -func ResetAll(dbDir, addrBookFile, privValFile string, logger log.Logger) { - resetFilePV(privValFile, logger) - removeAddrBook(addrBookFile, logger) - if err := os.RemoveAll(dbDir); err == nil { - logger.Info("Removed all blockchain history", "dir", dbDir) - } else { - logger.Error("Error removing all blockchain history", "dir", dbDir, "err", err) - } -} - -func resetFilePV(privValFile string, logger log.Logger) { - if _, err := os.Stat(privValFile); err == nil { - pv := privval.LoadFilePV(privValFile) - pv.Reset() - logger.Info("Reset private validator file to genesis state", "file", privValFile) - } else { - pv := privval.GenFilePV(privValFile) - pv.Save() - logger.Info("Generated private validator file", "file", privValFile) - } -} - -func removeAddrBook(addrBookFile string, logger log.Logger) { - if err := os.Remove(addrBookFile); err == nil { - logger.Info("Removed existing address book", "file", addrBookFile) - } else if !os.IsNotExist(err) { - logger.Info("Error removing address book", "file", addrBookFile, "err", err) - } -} diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go deleted file mode 100644 index f229a788..00000000 --- a/cmd/tendermint/commands/root.go +++ /dev/null @@ -1,63 +0,0 @@ -package commands - -import ( - "os" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tmlibs/cli" - tmflags "github.com/tendermint/tmlibs/cli/flags" - "github.com/tendermint/tmlibs/log" -) - -var ( - config = cfg.DefaultConfig() - logger = log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -) - -func init() { - registerFlagsRootCmd(RootCmd) -} - -func registerFlagsRootCmd(cmd *cobra.Command) { - cmd.PersistentFlags().String("log_level", config.LogLevel, "Log level") -} - -// ParseConfig retrieves the default environment configuration, -// sets up the 
Tendermint root and ensures that the root exists -func ParseConfig() (*cfg.Config, error) { - conf := cfg.DefaultConfig() - err := viper.Unmarshal(conf) - if err != nil { - return nil, err - } - conf.SetRoot(conf.RootDir) - cfg.EnsureRoot(conf.RootDir) - return conf, err -} - -// RootCmd is the root command for Tendermint core. -var RootCmd = &cobra.Command{ - Use: "tendermint", - Short: "Tendermint Core (BFT Consensus) in Go", - PersistentPreRunE: func(cmd *cobra.Command, args []string) (err error) { - if cmd.Name() == VersionCmd.Name() { - return nil - } - config, err = ParseConfig() - if err != nil { - return err - } - logger, err = tmflags.ParseLogLevel(config.LogLevel, logger, cfg.DefaultLogLevel()) - if err != nil { - return err - } - if viper.GetBool(cli.TraceFlag) { - logger = log.NewTracingLogger(logger) - } - logger = logger.With("module", "main") - return nil - }, -} diff --git a/cmd/tendermint/commands/root_test.go b/cmd/tendermint/commands/root_test.go deleted file mode 100644 index 59d258af..00000000 --- a/cmd/tendermint/commands/root_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package commands - -import ( - "fmt" - "io/ioutil" - "os" - "path/filepath" - "strconv" - "testing" - - "github.com/spf13/cobra" - "github.com/spf13/viper" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tmlibs/cli" - cmn "github.com/tendermint/tmlibs/common" -) - -var ( - defaultRoot = os.ExpandEnv("$HOME/.some/test/dir") -) - -const ( - rootName = "root" -) - -// clearConfig clears env vars, the given root dir, and resets viper. -func clearConfig(dir string) { - if err := os.Unsetenv("TMHOME"); err != nil { - panic(err) - } - if err := os.Unsetenv("TM_HOME"); err != nil { - panic(err) - } - - if err := os.RemoveAll(dir); err != nil { - panic(err) - } - viper.Reset() - config = cfg.DefaultConfig() -} - -// prepare new rootCmd -func testRootCmd() *cobra.Command { - rootCmd := &cobra.Command{ - Use: RootCmd.Use, - PersistentPreRunE: RootCmd.PersistentPreRunE, - Run: func(cmd *cobra.Command, args []string) {}, - } - registerFlagsRootCmd(rootCmd) - var l string - rootCmd.PersistentFlags().String("log", l, "Log") - return rootCmd -} - -func testSetup(rootDir string, args []string, env map[string]string) error { - clearConfig(defaultRoot) - - rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot) - - // run with the args and env - args = append([]string{rootCmd.Use}, args...) 
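-	// cli.RunWithArgs applies the env map and then executes the command with
-	// the given argv, so each case runs like a fresh CLI invocation.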
- return cli.RunWithArgs(cmd, args, env) -} - -func TestRootHome(t *testing.T) { - newRoot := filepath.Join(defaultRoot, "something-else") - cases := []struct { - args []string - env map[string]string - root string - }{ - {nil, nil, defaultRoot}, - {[]string{"--home", newRoot}, nil, newRoot}, - {nil, map[string]string{"TMHOME": newRoot}, newRoot}, - } - - for i, tc := range cases { - idxString := strconv.Itoa(i) - - err := testSetup(defaultRoot, tc.args, tc.env) - require.Nil(t, err, idxString) - - assert.Equal(t, tc.root, config.RootDir, idxString) - assert.Equal(t, tc.root, config.P2P.RootDir, idxString) - assert.Equal(t, tc.root, config.Consensus.RootDir, idxString) - assert.Equal(t, tc.root, config.Mempool.RootDir, idxString) - } -} - -func TestRootFlagsEnv(t *testing.T) { - - // defaults - defaults := cfg.DefaultConfig() - defaultLogLvl := defaults.LogLevel - - cases := []struct { - args []string - env map[string]string - logLevel string - }{ - {[]string{"--log", "debug"}, nil, defaultLogLvl}, // wrong flag - {[]string{"--log_level", "debug"}, nil, "debug"}, // right flag - {nil, map[string]string{"TM_LOW": "debug"}, defaultLogLvl}, // wrong env flag - {nil, map[string]string{"MT_LOG_LEVEL": "debug"}, defaultLogLvl}, // wrong env prefix - {nil, map[string]string{"TM_LOG_LEVEL": "debug"}, "debug"}, // right env - } - - for i, tc := range cases { - idxString := strconv.Itoa(i) - - err := testSetup(defaultRoot, tc.args, tc.env) - require.Nil(t, err, idxString) - - assert.Equal(t, tc.logLevel, config.LogLevel, idxString) - } -} - -func TestRootConfig(t *testing.T) { - - // write non-default config - nonDefaultLogLvl := "abc:debug" - cvals := map[string]string{ - "log_level": nonDefaultLogLvl, - } - - cases := []struct { - args []string - env map[string]string - - logLvl string - }{ - {nil, nil, nonDefaultLogLvl}, // should load config - {[]string{"--log_level=abc:info"}, nil, "abc:info"}, // flag over rides - {nil, map[string]string{"TM_LOG_LEVEL": "abc:info"}, "abc:info"}, // env over rides - } - - for i, tc := range cases { - idxString := strconv.Itoa(i) - clearConfig(defaultRoot) - - // XXX: path must match cfg.defaultConfigPath - configFilePath := filepath.Join(defaultRoot, "config") - err := cmn.EnsureDir(configFilePath, 0700) - require.Nil(t, err) - - // write the non-defaults to a different path - // TODO: support writing sub configs so we can test that too - err = WriteConfigVals(configFilePath, cvals) - require.Nil(t, err) - - rootCmd := testRootCmd() - cmd := cli.PrepareBaseCmd(rootCmd, "TM", defaultRoot) - - // run with the args and env - tc.args = append([]string{rootCmd.Use}, tc.args...) - err = cli.RunWithArgs(cmd, tc.args, tc.env) - require.Nil(t, err, idxString) - - assert.Equal(t, tc.logLvl, config.LogLevel, idxString) - } -} - -// WriteConfigVals writes a toml file with the given values. -// It returns an error if writing was impossible. 
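-//
-// For example (hypothetical values, not taken from any test here):
-//
-//	WriteConfigVals("/tmp/node/config", map[string]string{"log_level": "info"})
-//
-// writes a config.toml in that directory containing the single line
-// log_level = "info".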
-func WriteConfigVals(dir string, vals map[string]string) error {
-	data := ""
-	for k, v := range vals {
-		data = data + fmt.Sprintf("%s = \"%s\"\n", k, v)
-	}
-	cfile := filepath.Join(dir, "config.toml")
-	return ioutil.WriteFile(cfile, []byte(data), 0666)
-}
diff --git a/cmd/tendermint/commands/run_node.go b/cmd/tendermint/commands/run_node.go
deleted file mode 100644
index 0d50f9e4..00000000
--- a/cmd/tendermint/commands/run_node.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package commands
-
-import (
-	"fmt"
-
-	"github.com/spf13/cobra"
-
-	nm "github.com/tendermint/tendermint/node"
-)
-
-// AddNodeFlags exposes some common configuration options on the command-line.
-// These are exposed for convenience of commands embedding a tendermint node.
-func AddNodeFlags(cmd *cobra.Command) {
-	// bind flags
-	cmd.Flags().String("moniker", config.Moniker, "Node Name")
-
-	// priv val flags
-	cmd.Flags().String("priv_validator_laddr", config.PrivValidatorListenAddr, "Socket address to listen on for connections from external priv_validator process")
-
-	// node flags
-	cmd.Flags().Bool("fast_sync", config.FastSync, "Fast blockchain syncing")
-
-	// abci flags
-	cmd.Flags().String("proxy_app", config.ProxyApp, "Proxy app address, or 'nilapp' or 'kvstore' for local testing.")
-	cmd.Flags().String("abci", config.ABCI, "Specify abci transport (socket | grpc)")
-
-	// rpc flags
-	cmd.Flags().String("rpc.laddr", config.RPC.ListenAddress, "RPC listen address. Port required")
-	cmd.Flags().String("rpc.grpc_laddr", config.RPC.GRPCListenAddress, "GRPC listen address (BroadcastTx only). Port required")
-	cmd.Flags().Bool("rpc.unsafe", config.RPC.Unsafe, "Enable unsafe RPC methods")
-
-	// p2p flags
-	cmd.Flags().String("p2p.laddr", config.P2P.ListenAddress, "Node listen address. (0.0.0.0:0 means any interface, any port)")
-	cmd.Flags().String("p2p.seeds", config.P2P.Seeds, "Comma-delimited ID@host:port seed nodes")
-	cmd.Flags().String("p2p.persistent_peers", config.P2P.PersistentPeers, "Comma-delimited ID@host:port persistent peers")
-	cmd.Flags().Bool("p2p.skip_upnp", config.P2P.SkipUPNP, "Skip UPNP configuration")
-	cmd.Flags().Bool("p2p.pex", config.P2P.PexReactor, "Enable/disable Peer-Exchange")
-	cmd.Flags().Bool("p2p.seed_mode", config.P2P.SeedMode, "Enable/disable seed mode")
-	cmd.Flags().String("p2p.private_peer_ids", config.P2P.PrivatePeerIDs, "Comma-delimited private peer IDs")
-
-	// consensus flags
-	cmd.Flags().Bool("consensus.create_empty_blocks", config.Consensus.CreateEmptyBlocks, "Set this to false to only produce blocks when there are txs or when the AppHash changes")
-}
-
-// NewRunNodeCmd returns the command that allows the CLI to start a node.
-// It can be used with a custom PrivValidator and in-process ABCI application.
-func NewRunNodeCmd(nodeProvider nm.NodeProvider) *cobra.Command {
-	cmd := &cobra.Command{
-		Use:   "node",
-		Short: "Run the tendermint node",
-		RunE: func(cmd *cobra.Command, args []string) error {
-			// Create & start node
-			n, err := nodeProvider(config, logger)
-			if err != nil {
-				return fmt.Errorf("Failed to create node: %v", err)
-			}
-
-			if err := n.Start(); err != nil {
-				return fmt.Errorf("Failed to start node: %v", err)
-			}
-			logger.Info("Started node", "nodeInfo", n.Switch().NodeInfo())
-
-			// Trap signal, run forever. 
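-			// RunForever blocks here; the node keeps running until the
-			// process receives an interrupt or termination signal.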
- n.RunForever() - - return nil - }, - } - - AddNodeFlags(cmd) - return cmd -} diff --git a/cmd/tendermint/commands/show_node_id.go b/cmd/tendermint/commands/show_node_id.go deleted file mode 100644 index 02ab1a9b..00000000 --- a/cmd/tendermint/commands/show_node_id.go +++ /dev/null @@ -1,27 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/p2p" -) - -// ShowNodeIDCmd dumps node's ID to the standard output. -var ShowNodeIDCmd = &cobra.Command{ - Use: "show_node_id", - Short: "Show this node's ID", - RunE: showNodeID, -} - -func showNodeID(cmd *cobra.Command, args []string) error { - - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) - if err != nil { - return err - } - fmt.Println(nodeKey.ID()) - - return nil -} diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go deleted file mode 100644 index 54765164..00000000 --- a/cmd/tendermint/commands/show_validator.go +++ /dev/null @@ -1,22 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/privval" -) - -// ShowValidatorCmd adds capabilities for showing the validator info. -var ShowValidatorCmd = &cobra.Command{ - Use: "show_validator", - Short: "Show this node's validator info", - Run: showValidator, -} - -func showValidator(cmd *cobra.Command, args []string) { - privValidator := privval.LoadOrGenFilePV(config.PrivValidatorFile()) - pubKeyJSONBytes, _ := cdc.MarshalJSON(privValidator.GetPubKey()) - fmt.Println(string(pubKeyJSONBytes)) -} diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go deleted file mode 100644 index 29d29502..00000000 --- a/cmd/tendermint/commands/testnet.go +++ /dev/null @@ -1,183 +0,0 @@ -package commands - -import ( - "fmt" - "net" - "os" - "path/filepath" - "strings" - "time" - - "github.com/spf13/cobra" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -var ( - nValidators int - nNonValidators int - outputDir string - nodeDirPrefix string - - populatePersistentPeers bool - hostnamePrefix string - startingIPAddress string - p2pPort int -) - -const ( - nodeDirPerm = 0755 -) - -func init() { - TestnetFilesCmd.Flags().IntVar(&nValidators, "v", 4, - "Number of validators to initialize the testnet with") - TestnetFilesCmd.Flags().IntVar(&nNonValidators, "n", 0, - "Number of non-validators to initialize the testnet with") - TestnetFilesCmd.Flags().StringVar(&outputDir, "o", "./mytestnet", - "Directory to store initialization data for the testnet") - TestnetFilesCmd.Flags().StringVar(&nodeDirPrefix, "node-dir-prefix", "node", - "Prefix the directory name for each node with (node results in node0, node1, ...)") - - TestnetFilesCmd.Flags().BoolVar(&populatePersistentPeers, "populate-persistent-peers", true, - "Update config of each node with the list of persistent peers build using either hostname-prefix or starting-ip-address") - TestnetFilesCmd.Flags().StringVar(&hostnamePrefix, "hostname-prefix", "node", - "Hostname prefix (node results in persistent peers list ID0@node0:26656, ID1@node1:26656, ...)") - TestnetFilesCmd.Flags().StringVar(&startingIPAddress, "starting-ip-address", "", - "Starting IP address (192.168.0.1 results in persistent peers list ID0@192.168.0.1:26656, ID1@192.168.0.2:26656, ...)") - TestnetFilesCmd.Flags().IntVar(&p2pPort, 
"p2p-port", 26656, - "P2P Port") -} - -// TestnetFilesCmd allows initialisation of files for a Tendermint testnet. -var TestnetFilesCmd = &cobra.Command{ - Use: "testnet", - Short: "Initialize files for a Tendermint testnet", - Long: `testnet will create "v" + "n" number of directories and populate each with -necessary files (private validator, genesis, config, etc.). - -Note, strict routability for addresses is turned off in the config file. - -Optionally, it will fill in persistent_peers list in config file using either hostnames or IPs. - -Example: - - tendermint testnet --v 4 --o ./output --populate-persistent-peers --starting-ip-address 192.168.10.2 - `, - RunE: testnetFiles, -} - -func testnetFiles(cmd *cobra.Command, args []string) error { - config := cfg.DefaultConfig() - genVals := make([]types.GenesisValidator, nValidators) - - for i := 0; i < nValidators; i++ { - nodeDirName := cmn.Fmt("%s%d", nodeDirPrefix, i) - nodeDir := filepath.Join(outputDir, nodeDirName) - config.SetRoot(nodeDir) - - err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } - - initFilesWithConfig(config) - - pvFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidator) - pv := privval.LoadFilePV(pvFile) - genVals[i] = types.GenesisValidator{ - PubKey: pv.GetPubKey(), - Power: 1, - Name: nodeDirName, - } - } - - for i := 0; i < nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i+nValidators)) - config.SetRoot(nodeDir) - - err := os.MkdirAll(filepath.Join(nodeDir, "config"), nodeDirPerm) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } - - initFilesWithConfig(config) - } - - // Generate genesis doc from generated validators - genDoc := &types.GenesisDoc{ - GenesisTime: time.Now(), - ChainID: "chain-" + cmn.RandStr(6), - Validators: genVals, - } - - // Write genesis file. 
- for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) - if err := genDoc.SaveAs(filepath.Join(nodeDir, config.BaseConfig.Genesis)); err != nil { - _ = os.RemoveAll(outputDir) - return err - } - } - - if populatePersistentPeers { - err := populatePersistentPeersInConfigAndWriteIt(config) - if err != nil { - _ = os.RemoveAll(outputDir) - return err - } - } - - fmt.Printf("Successfully initialized %v node directories\n", nValidators+nNonValidators) - return nil -} - -func hostnameOrIP(i int) string { - if startingIPAddress != "" { - ip := net.ParseIP(startingIPAddress) - ip = ip.To4() - if ip == nil { - fmt.Printf("%v: non ipv4 address\n", startingIPAddress) - os.Exit(1) - } - - for j := 0; j < i; j++ { - ip[3]++ - } - return ip.String() - } - - return fmt.Sprintf("%s%d", hostnamePrefix, i) -} - -func populatePersistentPeersInConfigAndWriteIt(config *cfg.Config) error { - persistentPeers := make([]string, nValidators+nNonValidators) - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) - config.SetRoot(nodeDir) - nodeKey, err := p2p.LoadNodeKey(config.NodeKeyFile()) - if err != nil { - return err - } - persistentPeers[i] = p2p.IDAddressString(nodeKey.ID(), fmt.Sprintf("%s:%d", hostnameOrIP(i), p2pPort)) - } - persistentPeersList := strings.Join(persistentPeers, ",") - - for i := 0; i < nValidators+nNonValidators; i++ { - nodeDir := filepath.Join(outputDir, cmn.Fmt("%s%d", nodeDirPrefix, i)) - config.SetRoot(nodeDir) - config.P2P.PersistentPeers = persistentPeersList - config.P2P.AddrBookStrict = false - - // overwrite default config - cfg.WriteConfigFile(filepath.Join(nodeDir, "config", "config.toml"), config) - } - - return nil -} diff --git a/cmd/tendermint/commands/version.go b/cmd/tendermint/commands/version.go deleted file mode 100644 index f9f545e5..00000000 --- a/cmd/tendermint/commands/version.go +++ /dev/null @@ -1,18 +0,0 @@ -package commands - -import ( - "fmt" - - "github.com/spf13/cobra" - - "github.com/tendermint/tendermint/version" -) - -// VersionCmd ... 
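-// It prints the version string compiled into the binary (version.Version).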
-var VersionCmd = &cobra.Command{ - Use: "version", - Short: "Show version info", - Run: func(cmd *cobra.Command, args []string) { - fmt.Println(version.Version) - }, -} diff --git a/cmd/tendermint/commands/wire.go b/cmd/tendermint/commands/wire.go deleted file mode 100644 index 4c133a8c..00000000 --- a/cmd/tendermint/commands/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package commands - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go deleted file mode 100644 index 8c7f0cd1..00000000 --- a/cmd/tendermint/main.go +++ /dev/null @@ -1,48 +0,0 @@ -package main - -import ( - "os" - "path/filepath" - - "github.com/tendermint/tmlibs/cli" - - cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" - cfg "github.com/tendermint/tendermint/config" - nm "github.com/tendermint/tendermint/node" -) - -func main() { - rootCmd := cmd.RootCmd - rootCmd.AddCommand( - cmd.GenValidatorCmd, - cmd.InitFilesCmd, - cmd.ProbeUpnpCmd, - cmd.LiteCmd, - cmd.ReplayCmd, - cmd.ReplayConsoleCmd, - cmd.ResetAllCmd, - cmd.ResetPrivValidatorCmd, - cmd.ShowValidatorCmd, - cmd.TestnetFilesCmd, - cmd.ShowNodeIDCmd, - cmd.GenNodeKeyCmd, - cmd.VersionCmd) - - // NOTE: - // Users wishing to: - // * Use an external signer for their validators - // * Supply an in-proc abci app - // * Supply a genesis doc file from another source - // * Provide their own DB implementation - // can copy this file and use something other than the - // DefaultNewNode function - nodeFunc := nm.DefaultNewNode - - // Create & start node - rootCmd.AddCommand(cmd.NewRunNodeCmd(nodeFunc)) - - cmd := cli.PrepareBaseCmd(rootCmd, "TM", os.ExpandEnv(filepath.Join("$HOME", cfg.DefaultTendermintDir))) - if err := cmd.Execute(); err != nil { - panic(err) - } -} diff --git a/codecov.yml b/codecov.yml deleted file mode 100644 index b190853d..00000000 --- a/codecov.yml +++ /dev/null @@ -1,23 +0,0 @@ -coverage: - precision: 2 - round: down - range: "70...100" - - status: - project: - default: - threshold: 1% - patch: on - changes: off - -comment: - layout: "diff, files" - behavior: default - require_changes: no - require_base: no - require_head: yes - -ignore: - - "docs" - - "DOCKER" - - "scripts" diff --git a/config/config.go b/config/config.go deleted file mode 100644 index 6a283a82..00000000 --- a/config/config.go +++ /dev/null @@ -1,632 +0,0 @@ -package config - -import ( - "fmt" - "os" - "path/filepath" - "time" -) - -const ( - // FuzzModeDrop is a mode in which we randomly drop reads/writes, connections or sleep - FuzzModeDrop = iota - // FuzzModeDelay is a mode in which we randomly sleep - FuzzModeDelay -) - -// NOTE: Most of the structs & relevant comments + the -// default configuration options were used to manually -// generate the config.toml. Please reflect any changes -// made here in the defaultConfigTemplate constant in -// config/toml.go -// NOTE: tmlibs/cli must know to look in the config dir! 
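-//
-// With the defaults below, the paths resolve to:
-//
-//	.tendermint/
-//	├── config/
-//	│   ├── config.toml
-//	│   ├── genesis.json
-//	│   ├── priv_validator.json
-//	│   ├── node_key.json
-//	│   └── addrbook.json
-//	└── data/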
-var ( - DefaultTendermintDir = ".tendermint" - defaultConfigDir = "config" - defaultDataDir = "data" - - defaultConfigFileName = "config.toml" - defaultGenesisJSONName = "genesis.json" - - defaultPrivValName = "priv_validator.json" - defaultNodeKeyName = "node_key.json" - defaultAddrBookName = "addrbook.json" - - defaultConfigFilePath = filepath.Join(defaultConfigDir, defaultConfigFileName) - defaultGenesisJSONPath = filepath.Join(defaultConfigDir, defaultGenesisJSONName) - defaultPrivValPath = filepath.Join(defaultConfigDir, defaultPrivValName) - defaultNodeKeyPath = filepath.Join(defaultConfigDir, defaultNodeKeyName) - defaultAddrBookPath = filepath.Join(defaultConfigDir, defaultAddrBookName) -) - -// Config defines the top level configuration for a Tendermint node -type Config struct { - // Top level options use an anonymous struct - BaseConfig `mapstructure:",squash"` - - // Options for services - RPC *RPCConfig `mapstructure:"rpc"` - P2P *P2PConfig `mapstructure:"p2p"` - Mempool *MempoolConfig `mapstructure:"mempool"` - Consensus *ConsensusConfig `mapstructure:"consensus"` - TxIndex *TxIndexConfig `mapstructure:"tx_index"` - Instrumentation *InstrumentationConfig `mapstructure:"instrumentation"` -} - -// DefaultConfig returns a default configuration for a Tendermint node -func DefaultConfig() *Config { - return &Config{ - BaseConfig: DefaultBaseConfig(), - RPC: DefaultRPCConfig(), - P2P: DefaultP2PConfig(), - Mempool: DefaultMempoolConfig(), - Consensus: DefaultConsensusConfig(), - TxIndex: DefaultTxIndexConfig(), - Instrumentation: DefaultInstrumentationConfig(), - } -} - -// TestConfig returns a configuration that can be used for testing -func TestConfig() *Config { - return &Config{ - BaseConfig: TestBaseConfig(), - RPC: TestRPCConfig(), - P2P: TestP2PConfig(), - Mempool: TestMempoolConfig(), - Consensus: TestConsensusConfig(), - TxIndex: TestTxIndexConfig(), - Instrumentation: TestInstrumentationConfig(), - } -} - -// SetRoot sets the RootDir for all Config structs -func (cfg *Config) SetRoot(root string) *Config { - cfg.BaseConfig.RootDir = root - cfg.RPC.RootDir = root - cfg.P2P.RootDir = root - cfg.Mempool.RootDir = root - cfg.Consensus.RootDir = root - return cfg -} - -//----------------------------------------------------------------------------- -// BaseConfig - -// BaseConfig defines the base configuration for a Tendermint node -type BaseConfig struct { - - // chainID is unexposed and immutable but here for convenience - chainID string - - // The root directory for all data. 
- // This should be set in viper so it can unmarshal into this struct - RootDir string `mapstructure:"home"` - - // Path to the JSON file containing the initial validator set and other meta data - Genesis string `mapstructure:"genesis_file"` - - // Path to the JSON file containing the private key to use as a validator in the consensus protocol - PrivValidator string `mapstructure:"priv_validator_file"` - - // A JSON file containing the private key to use for p2p authenticated encryption - NodeKey string `mapstructure:"node_key_file"` - - // A custom human readable name for this node - Moniker string `mapstructure:"moniker"` - - // TCP or UNIX socket address for Tendermint to listen on for - // connections from an external PrivValidator process - PrivValidatorListenAddr string `mapstructure:"priv_validator_laddr"` - - // TCP or UNIX socket address of the ABCI application, - // or the name of an ABCI application compiled in with the Tendermint binary - ProxyApp string `mapstructure:"proxy_app"` - - // Mechanism to connect to the ABCI application: socket | grpc - ABCI string `mapstructure:"abci"` - - // Output level for logging - LogLevel string `mapstructure:"log_level"` - - // TCP or UNIX socket address for the profiling server to listen on - ProfListenAddress string `mapstructure:"prof_laddr"` - - // If this node is many blocks behind the tip of the chain, FastSync - // allows them to catchup quickly by downloading blocks in parallel - // and verifying their commits - FastSync bool `mapstructure:"fast_sync"` - - // If true, query the ABCI app on connecting to a new peer - // so the app can decide if we should keep the connection or not - FilterPeers bool `mapstructure:"filter_peers"` // false - - // Database backend: leveldb | memdb - DBBackend string `mapstructure:"db_backend"` - - // Database directory - DBPath string `mapstructure:"db_dir"` -} - -// DefaultBaseConfig returns a default base configuration for a Tendermint node -func DefaultBaseConfig() BaseConfig { - return BaseConfig{ - Genesis: defaultGenesisJSONPath, - PrivValidator: defaultPrivValPath, - NodeKey: defaultNodeKeyPath, - Moniker: defaultMoniker, - ProxyApp: "tcp://127.0.0.1:26658", - ABCI: "socket", - LogLevel: DefaultPackageLogLevels(), - ProfListenAddress: "", - FastSync: true, - FilterPeers: false, - DBBackend: "leveldb", - DBPath: "data", - } -} - -// TestBaseConfig returns a base configuration for testing a Tendermint node -func TestBaseConfig() BaseConfig { - cfg := DefaultBaseConfig() - cfg.chainID = "tendermint_test" - cfg.ProxyApp = "kvstore" - cfg.FastSync = false - cfg.DBBackend = "memdb" - return cfg -} - -func (cfg BaseConfig) ChainID() string { - return cfg.chainID -} - -// GenesisFile returns the full path to the genesis.json file -func (cfg BaseConfig) GenesisFile() string { - return rootify(cfg.Genesis, cfg.RootDir) -} - -// PrivValidatorFile returns the full path to the priv_validator.json file -func (cfg BaseConfig) PrivValidatorFile() string { - return rootify(cfg.PrivValidator, cfg.RootDir) -} - -// NodeKeyFile returns the full path to the node_key.json file -func (cfg BaseConfig) NodeKeyFile() string { - return rootify(cfg.NodeKey, cfg.RootDir) -} - -// DBDir returns the full path to the database directory -func (cfg BaseConfig) DBDir() string { - return rootify(cfg.DBPath, cfg.RootDir) -} - -// DefaultLogLevel returns a default log level of "error" -func DefaultLogLevel() string { - return "error" -} - -// DefaultPackageLogLevels returns a default log level setting so all packages -// log at 
"error", while the `state` and `main` packages log at "info" -func DefaultPackageLogLevels() string { - return fmt.Sprintf("main:info,state:info,*:%s", DefaultLogLevel()) -} - -//----------------------------------------------------------------------------- -// RPCConfig - -// RPCConfig defines the configuration options for the Tendermint RPC server -type RPCConfig struct { - RootDir string `mapstructure:"home"` - - // TCP or UNIX socket address for the RPC server to listen on - ListenAddress string `mapstructure:"laddr"` - - // TCP or UNIX socket address for the gRPC server to listen on - // NOTE: This server only supports /broadcast_tx_commit - GRPCListenAddress string `mapstructure:"grpc_laddr"` - - // Activate unsafe RPC commands like /dial_persistent_peers and /unsafe_flush_mempool - Unsafe bool `mapstructure:"unsafe"` -} - -// DefaultRPCConfig returns a default configuration for the RPC server -func DefaultRPCConfig() *RPCConfig { - return &RPCConfig{ - ListenAddress: "tcp://0.0.0.0:26657", - GRPCListenAddress: "", - Unsafe: false, - } -} - -// TestRPCConfig returns a configuration for testing the RPC server -func TestRPCConfig() *RPCConfig { - cfg := DefaultRPCConfig() - cfg.ListenAddress = "tcp://0.0.0.0:36657" - cfg.GRPCListenAddress = "tcp://0.0.0.0:36658" - cfg.Unsafe = true - return cfg -} - -//----------------------------------------------------------------------------- -// P2PConfig - -// P2PConfig defines the configuration options for the Tendermint peer-to-peer networking layer -type P2PConfig struct { - RootDir string `mapstructure:"home"` - - // Address to listen for incoming connections - ListenAddress string `mapstructure:"laddr"` - - // Comma separated list of seed nodes to connect to - // We only use these if we can’t connect to peers in the addrbook - Seeds string `mapstructure:"seeds"` - - // Comma separated list of nodes to keep persistent connections to - // Do not add private peers to this list if you don't want them advertised - PersistentPeers string `mapstructure:"persistent_peers"` - - // Skip UPNP port forwarding - SkipUPNP bool `mapstructure:"skip_upnp"` - - // Path to address book - AddrBook string `mapstructure:"addr_book_file"` - - // Set true for strict address routability rules - AddrBookStrict bool `mapstructure:"addr_book_strict"` - - // Maximum number of peers to connect to - MaxNumPeers int `mapstructure:"max_num_peers"` - - // Time to wait before flushing messages out on the connection, in ms - FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` - - // Maximum size of a message packet payload, in bytes - MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` - - // Rate at which packets can be sent, in bytes/second - SendRate int64 `mapstructure:"send_rate"` - - // Rate at which packets can be received, in bytes/second - RecvRate int64 `mapstructure:"recv_rate"` - - // Set true to enable the peer-exchange reactor - PexReactor bool `mapstructure:"pex"` - - // Seed mode, in which node constantly crawls the network and looks for - // peers. If another node asks it for addresses, it responds and disconnects. - // - // Does not work if the peer-exchange reactor is disabled. - SeedMode bool `mapstructure:"seed_mode"` - - // Comma separated list of peer IDs to keep private (will not be gossiped to - // other peers) - PrivatePeerIDs string `mapstructure:"private_peer_ids"` - - // Toggle to disable guard against peers connecting from the same ip. 
- AllowDuplicateIP bool `mapstructure:"allow_duplicate_ip"`
-
- // Peer connection configuration.
- HandshakeTimeout time.Duration `mapstructure:"handshake_timeout"`
- DialTimeout time.Duration `mapstructure:"dial_timeout"`
-
- // Testing params.
- // Force dial to fail
- TestDialFail bool `mapstructure:"test_dial_fail"`
- // Fuzz the connection
- TestFuzz bool `mapstructure:"test_fuzz"`
- TestFuzzConfig *FuzzConnConfig `mapstructure:"test_fuzz_config"`
-}
-
-// DefaultP2PConfig returns a default configuration for the peer-to-peer layer
-func DefaultP2PConfig() *P2PConfig {
- return &P2PConfig{
- ListenAddress: "tcp://0.0.0.0:26656",
- AddrBook: defaultAddrBookPath,
- AddrBookStrict: true,
- MaxNumPeers: 50,
- FlushThrottleTimeout: 100,
- MaxPacketMsgPayloadSize: 1024, // 1 kB
- SendRate: 512000, // 500 kB/s
- RecvRate: 512000, // 500 kB/s
- PexReactor: true,
- SeedMode: false,
- AllowDuplicateIP: true, // true for now, so the change is non-breaking
- HandshakeTimeout: 20 * time.Second,
- DialTimeout: 3 * time.Second,
- TestDialFail: false,
- TestFuzz: false,
- TestFuzzConfig: DefaultFuzzConnConfig(),
- }
-}
-
-// TestP2PConfig returns a configuration for testing the peer-to-peer layer
-func TestP2PConfig() *P2PConfig {
- cfg := DefaultP2PConfig()
- cfg.ListenAddress = "tcp://0.0.0.0:36656"
- cfg.SkipUPNP = true
- cfg.FlushThrottleTimeout = 10
- cfg.AllowDuplicateIP = true
- return cfg
-}
-
-// AddrBookFile returns the full path to the address book
-func (cfg *P2PConfig) AddrBookFile() string {
- return rootify(cfg.AddrBook, cfg.RootDir)
-}
-
-// FuzzConnConfig is a FuzzedConnection configuration.
-type FuzzConnConfig struct {
- Mode int
- MaxDelay time.Duration
- ProbDropRW float64
- ProbDropConn float64
- ProbSleep float64
-}
-
-// DefaultFuzzConnConfig returns the default config.
-func DefaultFuzzConnConfig() *FuzzConnConfig {
- return &FuzzConnConfig{
- Mode: FuzzModeDrop,
- MaxDelay: 3 * time.Second,
- ProbDropRW: 0.2,
- ProbDropConn: 0.00,
- ProbSleep: 0.00,
- }
-}
-
-//-----------------------------------------------------------------------------
-// MempoolConfig
-
-// MempoolConfig defines the configuration options for the Tendermint mempool
-type MempoolConfig struct {
- RootDir string `mapstructure:"home"`
- Recheck bool `mapstructure:"recheck"`
- RecheckEmpty bool `mapstructure:"recheck_empty"`
- Broadcast bool `mapstructure:"broadcast"`
- WalPath string `mapstructure:"wal_dir"`
- Size int `mapstructure:"size"`
- CacheSize int `mapstructure:"cache_size"`
-}
-
-// DefaultMempoolConfig returns a default configuration for the Tendermint mempool
-func DefaultMempoolConfig() *MempoolConfig {
- return &MempoolConfig{
- Recheck: true,
- RecheckEmpty: true,
- Broadcast: true,
- WalPath: filepath.Join(defaultDataDir, "mempool.wal"),
- Size: 100000,
- CacheSize: 100000,
- }
-}
-
-// TestMempoolConfig returns a configuration for testing the Tendermint mempool
-func TestMempoolConfig() *MempoolConfig {
- cfg := DefaultMempoolConfig()
- cfg.CacheSize = 1000
- return cfg
-}
-
-// WalDir returns the full path to the mempool's write-ahead log
-func (cfg *MempoolConfig) WalDir() string {
- return rootify(cfg.WalPath, cfg.RootDir)
-}
-
-//-----------------------------------------------------------------------------
-// ConsensusConfig
-
-// ConsensusConfig defines the configuration for the Tendermint consensus service,
-// including timeouts and details about the WAL and the block structure.
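-//
-// As a rough illustration of the timeout schedule (a sketch based on the
-// accessor methods below and the values in DefaultConsensusConfig): the
-// per-round timeouts grow linearly, e.g.
-//
-//   Propose(round) = TimeoutPropose + TimeoutProposeDelta*round // in ms
-//   Propose(0) = 3000ms, Propose(1) = 3500ms, Propose(2) = 4000ms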
-type ConsensusConfig struct { - RootDir string `mapstructure:"home"` - WalPath string `mapstructure:"wal_file"` - walFile string // overrides WalPath if set - - // All timeouts are in milliseconds - TimeoutPropose int `mapstructure:"timeout_propose"` - TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"` - TimeoutPrevote int `mapstructure:"timeout_prevote"` - TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"` - TimeoutPrecommit int `mapstructure:"timeout_precommit"` - TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"` - TimeoutCommit int `mapstructure:"timeout_commit"` - - // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) - SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` - - // BlockSize - MaxBlockSizeTxs int `mapstructure:"max_block_size_txs"` - MaxBlockSizeBytes int `mapstructure:"max_block_size_bytes"` - - // EmptyBlocks mode and possible interval between empty blocks in seconds - CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` - CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"` - - // Reactor sleep duration parameters are in milliseconds - PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"` - PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"` -} - -// DefaultConsensusConfig returns a default configuration for the consensus service -func DefaultConsensusConfig() *ConsensusConfig { - return &ConsensusConfig{ - WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), - TimeoutPropose: 3000, - TimeoutProposeDelta: 500, - TimeoutPrevote: 1000, - TimeoutPrevoteDelta: 500, - TimeoutPrecommit: 1000, - TimeoutPrecommitDelta: 500, - TimeoutCommit: 1000, - SkipTimeoutCommit: false, - MaxBlockSizeTxs: 10000, - MaxBlockSizeBytes: 1, // TODO - CreateEmptyBlocks: true, - CreateEmptyBlocksInterval: 0, - PeerGossipSleepDuration: 100, - PeerQueryMaj23SleepDuration: 2000, - } -} - -// TestConsensusConfig returns a configuration for testing the consensus service -func TestConsensusConfig() *ConsensusConfig { - cfg := DefaultConsensusConfig() - cfg.TimeoutPropose = 100 - cfg.TimeoutProposeDelta = 1 - cfg.TimeoutPrevote = 10 - cfg.TimeoutPrevoteDelta = 1 - cfg.TimeoutPrecommit = 10 - cfg.TimeoutPrecommitDelta = 1 - cfg.TimeoutCommit = 10 - cfg.SkipTimeoutCommit = true - cfg.PeerGossipSleepDuration = 5 - cfg.PeerQueryMaj23SleepDuration = 250 - return cfg -} - -// WaitForTxs returns true if the consensus should wait for transactions before entering the propose step -func (cfg *ConsensusConfig) WaitForTxs() bool { - return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 -} - -// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available -func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration { - return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second -} - -// Propose returns the amount of time to wait for a proposal -func (cfg *ConsensusConfig) Propose(round int) time.Duration { - return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond -} - -// Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes -func (cfg *ConsensusConfig) Prevote(round int) time.Duration { - return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond -} - -// Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits 
-func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
- return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
-}
-
-// Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (i.e. a commit).
-func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
- return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
-}
-
-// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor
-func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration {
- return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond
-}
-
-// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor
-func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
- return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
-}
-
-// WalFile returns the full path to the write-ahead log file
-func (cfg *ConsensusConfig) WalFile() string {
- if cfg.walFile != "" {
- return cfg.walFile
- }
- return rootify(cfg.WalPath, cfg.RootDir)
-}
-
-// SetWalFile sets the path to the write-ahead log file
-func (cfg *ConsensusConfig) SetWalFile(walFile string) {
- cfg.walFile = walFile
-}
-
-//-----------------------------------------------------------------------------
-// TxIndexConfig
-
-// TxIndexConfig defines the configuration for the transaction
-// indexer, including tags to index.
-type TxIndexConfig struct {
- // What indexer to use for transactions
- //
- // Options:
- // 1) "null"
- // 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
- Indexer string `mapstructure:"indexer"`
-
- // Comma-separated list of tags to index (by default the only tag is tx hash)
- //
- // It's recommended to index only a subset of tags due to possible memory
- // bloat. This, of course, depends on the indexer's DB and the volume of
- // transactions.
- IndexTags string `mapstructure:"index_tags"`
-
- // When set to true, tells the indexer to index all tags. Note this may not
- // be desirable (see the comment above). IndexTags takes precedence over
- // IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
- IndexAllTags bool `mapstructure:"index_all_tags"`
-}
-
-// DefaultTxIndexConfig returns a default configuration for the transaction indexer.
-func DefaultTxIndexConfig() *TxIndexConfig {
- return &TxIndexConfig{
- Indexer: "kv",
- IndexTags: "",
- IndexAllTags: false,
- }
-}
-
-// TestTxIndexConfig returns a default configuration for the transaction indexer.
-func TestTxIndexConfig() *TxIndexConfig {
- return DefaultTxIndexConfig()
-}
-
-//-----------------------------------------------------------------------------
-// InstrumentationConfig
-
-// InstrumentationConfig defines the configuration for metrics reporting.
-type InstrumentationConfig struct {
- // When true, Prometheus metrics are served under /metrics on
- // PrometheusListenAddr.
- // Check out the documentation for the list of available metrics.
- Prometheus bool `mapstructure:"prometheus"`
-
- // Address to listen for Prometheus collector(s) connections.
- PrometheusListenAddr string `mapstructure:"prometheus_listen_addr"`
-}
-
-// DefaultInstrumentationConfig returns a default configuration for metrics
-// reporting.
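-//
-// A usage sketch (illustrative only):
-//
-//   cfg := DefaultInstrumentationConfig()
-//   cfg.Prometheus = true // serve metrics under /metrics on PrometheusListenAddr (":26660" below)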
-func DefaultInstrumentationConfig() *InstrumentationConfig { - return &InstrumentationConfig{ - Prometheus: false, - PrometheusListenAddr: ":26660", - } -} - -// TestInstrumentationConfig returns a default configuration for metrics -// reporting. -func TestInstrumentationConfig() *InstrumentationConfig { - return DefaultInstrumentationConfig() -} - -//----------------------------------------------------------------------------- -// Utils - -// helper function to make config creation independent of root dir -func rootify(path, root string) string { - if filepath.IsAbs(path) { - return path - } - return filepath.Join(root, path) -} - -//----------------------------------------------------------------------------- -// Moniker - -var defaultMoniker = getDefaultMoniker() - -// getDefaultMoniker returns a default moniker, which is the host name. If runtime -// fails to get the host name, "anonymous" will be returned. -func getDefaultMoniker() string { - moniker, err := os.Hostname() - if err != nil { - moniker = "anonymous" - } - return moniker -} diff --git a/config/config_test.go b/config/config_test.go deleted file mode 100644 index 6379960f..00000000 --- a/config/config_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package config - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestDefaultConfig(t *testing.T) { - assert := assert.New(t) - - // set up some defaults - cfg := DefaultConfig() - assert.NotNil(cfg.P2P) - assert.NotNil(cfg.Mempool) - assert.NotNil(cfg.Consensus) - - // check the root dir stuff... - cfg.SetRoot("/foo") - cfg.Genesis = "bar" - cfg.DBPath = "/opt/data" - cfg.Mempool.WalPath = "wal/mem/" - - assert.Equal("/foo/bar", cfg.GenesisFile()) - assert.Equal("/opt/data", cfg.DBDir()) - assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) - -} diff --git a/config/toml.go b/config/toml.go deleted file mode 100644 index c3d41a9b..00000000 --- a/config/toml.go +++ /dev/null @@ -1,324 +0,0 @@ -package config - -import ( - "bytes" - "os" - "path/filepath" - "text/template" - - cmn "github.com/tendermint/tmlibs/common" -) - -var configTemplate *template.Template - -func init() { - var err error - if configTemplate, err = template.New("configFileTemplate").Parse(defaultConfigTemplate); err != nil { - panic(err) - } -} - -/****** these are for production settings ***********/ - -// EnsureRoot creates the root, config, and data directories if they don't exist, -// and panics if it fails. -func EnsureRoot(rootDir string) { - if err := cmn.EnsureDir(rootDir, 0700); err != nil { - cmn.PanicSanity(err.Error()) - } - if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil { - cmn.PanicSanity(err.Error()) - } - if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil { - cmn.PanicSanity(err.Error()) - } - - configFilePath := filepath.Join(rootDir, defaultConfigFilePath) - - // Write default config file if missing. - if !cmn.FileExists(configFilePath) { - writeDefaultConfigFile(configFilePath) - } -} - -// XXX: this func should probably be called by cmd/tendermint/commands/init.go -// alongside the writing of the genesis.json and priv_validator.json -func writeDefaultConfigFile(configFilePath string) { - WriteConfigFile(configFilePath, DefaultConfig()) -} - -// WriteConfigFile renders config using the template and writes it to configFilePath. 
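-//
-// A usage sketch (illustrative; the path is hypothetical):
-//
-//   WriteConfigFile("/tmp/node1/config/config.toml", DefaultConfig())
-//
-// EnsureRoot relies on this via writeDefaultConfigFile when config.toml is
-// missing.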
-func WriteConfigFile(configFilePath string, config *Config) { - var buffer bytes.Buffer - - if err := configTemplate.Execute(&buffer, config); err != nil { - panic(err) - } - - cmn.MustWriteFile(configFilePath, buffer.Bytes(), 0644) -} - -// Note: any changes to the comments/variables/mapstructure -// must be reflected in the appropriate struct in config/config.go -const defaultConfigTemplate = `# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "{{ .BaseConfig.ProxyApp }}" - -# A custom human readable name for this node -moniker = "{{ .BaseConfig.Moniker }}" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = {{ .BaseConfig.FastSync }} - -# Database backend: leveldb | memdb -db_backend = "{{ .BaseConfig.DBBackend }}" - -# Database directory -db_path = "{{ js .BaseConfig.DBPath }}" - -# Output level for logging, including package level options -log_level = "{{ .BaseConfig.LogLevel }}" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "{{ js .BaseConfig.Genesis }}" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "{{ js .BaseConfig.PrivValidator }}" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "{{ js .BaseConfig.NodeKey}}" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "{{ .BaseConfig.ABCI }}" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "{{ .BaseConfig.ProfListenAddress }}" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = {{ .BaseConfig.FilterPeers }} - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "{{ .RPC.ListenAddress }}" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "{{ .RPC.GRPCListenAddress }}" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = {{ .RPC.Unsafe }} - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "{{ .P2P.ListenAddress }}" - -# Comma separated list of seed nodes to connect to -seeds = "{{ .P2P.Seeds }}" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "{{ .P2P.PersistentPeers }}" - -# Path to address book -addr_book_file = "{{ js .P2P.AddrBook }}" - -# Set true for strict address routability rules -addr_book_strict = {{ .P2P.AddrBookStrict }} - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }} - -# Maximum number of peers to connect to -max_num_peers = {{ .P2P.MaxNumPeers }} - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} - -# Rate at which 
packets can be sent, in bytes/second
-send_rate = {{ .P2P.SendRate }}
-
-# Rate at which packets can be received, in bytes/second
-recv_rate = {{ .P2P.RecvRate }}
-
-# Set true to enable the peer-exchange reactor
-pex = {{ .P2P.PexReactor }}
-
-# Seed mode, in which node constantly crawls the network and looks for
-# peers. If another node asks it for addresses, it responds and disconnects.
-#
-# Does not work if the peer-exchange reactor is disabled.
-seed_mode = {{ .P2P.SeedMode }}
-
-# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
-private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"
-
-##### mempool configuration options #####
-[mempool]
-
-recheck = {{ .Mempool.Recheck }}
-recheck_empty = {{ .Mempool.RecheckEmpty }}
-broadcast = {{ .Mempool.Broadcast }}
-wal_dir = "{{ js .Mempool.WalPath }}"
-
-# size of the mempool
-size = {{ .Mempool.Size }}
-
-# size of the cache (used to filter transactions we saw earlier)
-cache_size = {{ .Mempool.CacheSize }}
-
-##### consensus configuration options #####
-[consensus]
-
-wal_file = "{{ js .Consensus.WalPath }}"
-
-# All timeouts are in milliseconds
-timeout_propose = {{ .Consensus.TimeoutPropose }}
-timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }}
-timeout_prevote = {{ .Consensus.TimeoutPrevote }}
-timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }}
-timeout_precommit = {{ .Consensus.TimeoutPrecommit }}
-timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }}
-timeout_commit = {{ .Consensus.TimeoutCommit }}
-
-# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
-skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}
-
-# BlockSize
-max_block_size_txs = {{ .Consensus.MaxBlockSizeTxs }}
-max_block_size_bytes = {{ .Consensus.MaxBlockSizeBytes }}
-
-# EmptyBlocks mode and possible interval between empty blocks in seconds
-create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
-create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
-
-# Reactor sleep duration parameters are in milliseconds
-peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }}
-peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
-
-##### transactions indexer configuration options #####
-[tx_index]
-
-# What indexer to use for transactions
-#
-# Options:
-# 1) "null"
-# 2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
-indexer = "{{ .TxIndex.Indexer }}"
-
-# Comma-separated list of tags to index (by default the only tag is tx hash)
-#
-# It's recommended to index only a subset of tags due to possible memory
-# bloat. This, of course, depends on the indexer's DB and the volume of
-# transactions.
-index_tags = "{{ .TxIndex.IndexTags }}"
-
-# When set to true, tells the indexer to index all tags. Note this may not
-# be desirable (see the comment above). IndexTags takes precedence over
-# IndexAllTags (i.e. when both are given, only IndexTags will be indexed).
-index_all_tags = {{ .TxIndex.IndexAllTags }}
-
-##### instrumentation configuration options #####
-[instrumentation]
-
-# When true, Prometheus metrics are served under /metrics on
-# PrometheusListenAddr.
-# Check out the documentation for the list of available metrics.
-prometheus = {{ .Instrumentation.Prometheus }}
-
-# Address to listen for Prometheus collector(s) connections
-prometheus_listen_addr = "{{ .Instrumentation.PrometheusListenAddr }}"
-`
-
-/****** these are for test settings ***********/
-
-func ResetTestRoot(testName string) *Config {
- rootDir := os.ExpandEnv("$HOME/.tendermint_test")
- rootDir = filepath.Join(rootDir, testName)
- // Remove the previous backup dir, if present
- if cmn.FileExists(rootDir + "_bak") {
- if err := os.RemoveAll(rootDir + "_bak"); err != nil {
- cmn.PanicSanity(err.Error())
- }
- }
- // Move the existing test root to the backup dir
- if cmn.FileExists(rootDir) {
- if err := os.Rename(rootDir, rootDir+"_bak"); err != nil {
- cmn.PanicSanity(err.Error())
- }
- }
- // Create new dir
- if err := cmn.EnsureDir(rootDir, 0700); err != nil {
- cmn.PanicSanity(err.Error())
- }
- if err := cmn.EnsureDir(filepath.Join(rootDir, defaultConfigDir), 0700); err != nil {
- cmn.PanicSanity(err.Error())
- }
- if err := cmn.EnsureDir(filepath.Join(rootDir, defaultDataDir), 0700); err != nil {
- cmn.PanicSanity(err.Error())
- }
-
- baseConfig := DefaultBaseConfig()
- configFilePath := filepath.Join(rootDir, defaultConfigFilePath)
- genesisFilePath := filepath.Join(rootDir, baseConfig.Genesis)
- privFilePath := filepath.Join(rootDir, baseConfig.PrivValidator)
-
- // Write default config file if missing.
- if !cmn.FileExists(configFilePath) {
- writeDefaultConfigFile(configFilePath)
- }
- if !cmn.FileExists(genesisFilePath) {
- cmn.MustWriteFile(genesisFilePath, []byte(testGenesis), 0644)
- }
- // we always overwrite the priv val
- cmn.MustWriteFile(privFilePath, []byte(testPrivValidator), 0644)
-
- config := TestConfig().SetRoot(rootDir)
- return config
-}
-
-var testGenesis = `{
- "genesis_time": "0001-01-01T00:00:00.000Z",
- "chain_id": "tendermint_test",
- "validators": [
- {
- "pub_key": {
- "type": "AC26791624DE60",
- "value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
- },
- "power": 10,
- "name": ""
- }
- ],
- "app_hash": ""
-}`
-
-var testPrivValidator = `{
- "address": "849CB2C877F87A20925F35D00AE6688342D25B47",
- "pub_key": {
- "type": "AC26791624DE60",
- "value": "AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="
- },
- "priv_key": {
- "type": "954568A3288910",
- "value": "EVkqJO/jIXp3rkASXfh9YnyToYXRXhBr6g9cQVxPFnQBP/5povV4HTjvsy530kybxKHwEi85iU8YL0qQhSYVoQ=="
- },
- "last_height": 0,
- "last_round": 0,
- "last_step": 0
-}`
diff --git a/config/toml_test.go b/config/toml_test.go
deleted file mode 100644
index a1637f67..00000000
--- a/config/toml_test.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package config
-
-import (
- "io/ioutil"
- "os"
- "path/filepath"
- "strings"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
-)
-
-func ensureFiles(t *testing.T, rootDir string, files ...string) {
- for _, f := range files {
- p := rootify(f, rootDir) // NOTE: rootify takes (path, root)
- _, err := os.Stat(p)
- assert.Nil(t, err, p)
- }
-}
-
-func TestEnsureRoot(t *testing.T) {
- require := require.New(t)
-
- // setup temp dir for test
- tmpDir, err := ioutil.TempDir("", "config-test")
- require.Nil(err)
- defer os.RemoveAll(tmpDir) // nolint: errcheck
-
- // create root dir
- EnsureRoot(tmpDir)
-
- // make sure config is set properly
- data, err := ioutil.ReadFile(filepath.Join(tmpDir, defaultConfigFilePath))
- require.Nil(err)
-
- if !checkConfig(string(data)) {
- t.Fatalf("config file missing some information")
- }
-
- ensureFiles(t, tmpDir, "data")
-}
-
-func TestEnsureTestRoot(t *testing.T) {
- require := require.New(t)
-
- testName :=
"ensureTestRoot" - - // create root dir - cfg := ResetTestRoot(testName) - rootDir := cfg.RootDir - - // make sure config is set properly - data, err := ioutil.ReadFile(filepath.Join(rootDir, defaultConfigFilePath)) - require.Nil(err) - - if !checkConfig(string(data)) { - t.Fatalf("config file missing some information") - } - - // TODO: make sure the cfg returned and testconfig are the same! - baseConfig := DefaultBaseConfig() - ensureFiles(t, rootDir, defaultDataDir, baseConfig.Genesis, baseConfig.PrivValidator) -} - -func checkConfig(configFile string) bool { - var valid bool - - // list of words we expect in the config - var elems = []string{ - "moniker", - "seeds", - "proxy_app", - "fast_sync", - "create_empty_blocks", - "peer", - "timeout", - "broadcast", - "send", - "addr", - "wal", - "propose", - "max", - "genesis", - } - for _, e := range elems { - if !strings.Contains(configFile, e) { - valid = false - } else { - valid = true - } - } - return valid -} diff --git a/consensus/README.md b/consensus/README.md deleted file mode 100644 index 1111317d..00000000 --- a/consensus/README.md +++ /dev/null @@ -1 +0,0 @@ -See the [consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/consensus) and the [reactor consensus spec](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus) for more information. diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go deleted file mode 100644 index d3be8c35..00000000 --- a/consensus/byzantine_test.go +++ /dev/null @@ -1,267 +0,0 @@ -package consensus - -import ( - "context" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -func init() { - config = ResetConfig("consensus_byzantine_test") -} - -//---------------------------------------------- -// byzantine failures - -// 4 validators. 1 is byzantine. The other three are partitioned into A (1 val) and B (2 vals). -// byzantine validator sends conflicting proposals into A and B, -// and prevotes/precommits on both of them. -// B sees a commit, A doesn't. -// Byzantine validator refuses to prevote. -// Heal partition and ensure A sees the commit -func TestByzantine(t *testing.T) { - N := 4 - logger := consensusLogger().With("test", "byzantine") - css := randConsensusNet(N, "consensus_byzantine_test", newMockTickerFunc(false), newCounter) - - // give the byzantine validator a normal ticker - ticker := NewTimeoutTicker() - ticker.SetLogger(css[0].Logger) - css[0].SetTimeoutTicker(ticker) - - switches := make([]*p2p.Switch, N) - p2pLogger := logger.With("module", "p2p") - for i := 0; i < N; i++ { - switches[i] = p2p.NewSwitch(config.P2P) - switches[i].SetLogger(p2pLogger.With("validator", i)) - } - - eventChans := make([]chan interface{}, N) - reactors := make([]p2p.Reactor, N) - for i := 0; i < N; i++ { - // make first val byzantine - if i == 0 { - // NOTE: Now, test validators are MockPV, which by default doesn't - // do any safety checks. 
- css[i].privValidator.(*types.MockPV).DisableChecks() - css[i].decideProposal = func(j int) func(int64, int) { - return func(height int64, round int) { - byzantineDecideProposalFunc(t, height, round, css[j], switches[j]) - } - }(i) - css[i].doPrevote = func(height int64, round int) {} - } - - eventBus := css[i].eventBus - eventBus.SetLogger(logger.With("module", "events", "validator", i)) - - eventChans[i] = make(chan interface{}, 1) - err := eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i]) - require.NoError(t, err) - - conR := NewConsensusReactor(css[i], true) // so we dont start the consensus states - conR.SetLogger(logger.With("validator", i)) - conR.SetEventBus(eventBus) - - var conRI p2p.Reactor // nolint: gotype, gosimple - conRI = conR - - // make first val byzantine - if i == 0 { - conRI = NewByzantineReactor(conR) - } - - reactors[i] = conRI - } - - defer func() { - for _, r := range reactors { - if rr, ok := r.(*ByzantineReactor); ok { - rr.reactor.Switch.Stop() - } else { - r.(*ConsensusReactor).Switch.Stop() - } - } - }() - - p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { - // ignore new switch s, we already made ours - switches[i].AddReactor("CONSENSUS", reactors[i]) - return switches[i] - }, func(sws []*p2p.Switch, i, j int) { - // the network starts partitioned with globally active adversary - if i != 0 { - return - } - p2p.Connect2Switches(sws, i, j) - }) - - // start the non-byz state machines. - // note these must be started before the byz - for i := 1; i < N; i++ { - cr := reactors[i].(*ConsensusReactor) - cr.SwitchToConsensus(cr.conS.GetState(), 0) - } - - // start the byzantine state machine - byzR := reactors[0].(*ByzantineReactor) - s := byzR.reactor.conS.GetState() - byzR.reactor.SwitchToConsensus(s, 0) - - // byz proposer sends one block to peers[0] - // and the other block to peers[1] and peers[2]. - // note peers and switches order don't match. - peers := switches[0].Peers().List() - - // partition A - ind0 := getSwitchIndex(switches, peers[0]) - - // partition B - ind1 := getSwitchIndex(switches, peers[1]) - ind2 := getSwitchIndex(switches, peers[2]) - p2p.Connect2Switches(switches, ind1, ind2) - - // wait for someone in the big partition (B) to make a block - <-eventChans[ind2] - - t.Log("A block has been committed. Healing partition") - p2p.Connect2Switches(switches, ind0, ind1) - p2p.Connect2Switches(switches, ind0, ind2) - - // wait till everyone makes the first new block - // (one of them already has) - wg := new(sync.WaitGroup) - wg.Add(2) - for i := 1; i < N-1; i++ { - go func(j int) { - <-eventChans[j] - wg.Done() - }(i) - } - - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - tick := time.NewTicker(time.Second * 10) - select { - case <-done: - case <-tick.C: - for i, reactor := range reactors { - t.Log(cmn.Fmt("Consensus Reactor %v", i)) - t.Log(cmn.Fmt("%v", reactor)) - } - t.Fatalf("Timed out waiting for all validators to commit first block") - } -} - -//------------------------------- -// byzantine consensus functions - -func byzantineDecideProposalFunc(t *testing.T, height int64, round int, cs *ConsensusState, sw *p2p.Switch) { - // byzantine user should create two proposals and try to split the vote. - // Avoid sending on internalMsgQueue and running consensus state. - - // Create a new proposal block from state/txs from the mempool. 
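- // (Both proposals below are signed for the same height and round, i.e.
- // an equivocation; an honest validator would never sign twice like this.)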
- block1, blockParts1 := cs.createProposalBlock() - polRound, polBlockID := cs.Votes.POLInfo() - proposal1 := types.NewProposal(height, round, blockParts1.Header(), polRound, polBlockID) - if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal1); err != nil { - t.Error(err) - } - - // Create a new proposal block from state/txs from the mempool. - block2, blockParts2 := cs.createProposalBlock() - polRound, polBlockID = cs.Votes.POLInfo() - proposal2 := types.NewProposal(height, round, blockParts2.Header(), polRound, polBlockID) - if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal2); err != nil { - t.Error(err) - } - - block1Hash := block1.Hash() - block2Hash := block2.Hash() - - // broadcast conflicting proposals/block parts to peers - peers := sw.Peers().List() - t.Logf("Byzantine: broadcasting conflicting proposals to %d peers", len(peers)) - for i, peer := range peers { - if i < len(peers)/2 { - go sendProposalAndParts(height, round, cs, peer, proposal1, block1Hash, blockParts1) - } else { - go sendProposalAndParts(height, round, cs, peer, proposal2, block2Hash, blockParts2) - } - } -} - -func sendProposalAndParts(height int64, round int, cs *ConsensusState, peer p2p.Peer, proposal *types.Proposal, blockHash []byte, parts *types.PartSet) { - // proposal - msg := &ProposalMessage{Proposal: proposal} - peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) - - // parts - for i := 0; i < parts.Total(); i++ { - part := parts.GetPart(i) - msg := &BlockPartMessage{ - Height: height, // This tells peer that this part applies to us. - Round: round, // This tells peer that this part applies to us. - Part: part, - } - peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) - } - - // votes - cs.mtx.Lock() - prevote, _ := cs.signVote(types.VoteTypePrevote, blockHash, parts.Header()) - precommit, _ := cs.signVote(types.VoteTypePrecommit, blockHash, parts.Header()) - cs.mtx.Unlock() - - peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{prevote})) - peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(&VoteMessage{precommit})) -} - -//---------------------------------------- -// byzantine consensus reactor - -type ByzantineReactor struct { - cmn.Service - reactor *ConsensusReactor -} - -func NewByzantineReactor(conR *ConsensusReactor) *ByzantineReactor { - return &ByzantineReactor{ - Service: conR, - reactor: conR, - } -} - -func (br *ByzantineReactor) SetSwitch(s *p2p.Switch) { br.reactor.SetSwitch(s) } -func (br *ByzantineReactor) GetChannels() []*p2p.ChannelDescriptor { return br.reactor.GetChannels() } -func (br *ByzantineReactor) AddPeer(peer p2p.Peer) { - if !br.reactor.IsRunning() { - return - } - - // Create peerState for peer - peerState := NewPeerState(peer).SetLogger(br.reactor.Logger) - peer.Set(types.PeerStateKey, peerState) - - // Send our state to peer. - // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). 
- if !br.reactor.fastSync { - br.reactor.sendNewRoundStepMessages(peer) - } -} -func (br *ByzantineReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - br.reactor.RemovePeer(peer, reason) -} -func (br *ByzantineReactor) Receive(chID byte, peer p2p.Peer, msgBytes []byte) { - br.reactor.Receive(chID, peer, msgBytes) -} diff --git a/consensus/common_test.go b/consensus/common_test.go deleted file mode 100644 index f50e5769..00000000 --- a/consensus/common_test.go +++ /dev/null @@ -1,495 +0,0 @@ -package consensus - -import ( - "bytes" - "context" - "fmt" - "io/ioutil" - "os" - "path" - "sort" - "sync" - "testing" - "time" - - abcicli "github.com/tendermint/abci/client" - abci "github.com/tendermint/abci/types" - bc "github.com/tendermint/tendermint/blockchain" - cfg "github.com/tendermint/tendermint/config" - cstypes "github.com/tendermint/tendermint/consensus/types" - mempl "github.com/tendermint/tendermint/mempool" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/privval" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/abci/example/counter" - "github.com/tendermint/abci/example/kvstore" - - "github.com/go-kit/kit/log/term" -) - -const ( - testSubscriber = "test-client" -) - -// genesis, chain_id, priv_val -var config *cfg.Config // NOTE: must be reset for each _test.go file -var ensureTimeout = time.Second * 1 // must be in seconds because CreateEmptyBlocksInterval is - -func ensureDir(dir string, mode os.FileMode) { - if err := cmn.EnsureDir(dir, mode); err != nil { - panic(err) - } -} - -func ResetConfig(name string) *cfg.Config { - return cfg.ResetTestRoot(name) -} - -//------------------------------------------------------------------------------- -// validator stub (a kvstore consensus peer we control) - -type validatorStub struct { - Index int // Validator index. NOTE: we don't assume validator set changes. 
- Height int64 - Round int - types.PrivValidator -} - -var testMinPower int64 = 10 - -func NewValidatorStub(privValidator types.PrivValidator, valIndex int) *validatorStub { - return &validatorStub{ - Index: valIndex, - PrivValidator: privValidator, - } -} - -func (vs *validatorStub) signVote(voteType byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { - vote := &types.Vote{ - ValidatorIndex: vs.Index, - ValidatorAddress: vs.PrivValidator.GetAddress(), - Height: vs.Height, - Round: vs.Round, - Timestamp: time.Now().UTC(), - Type: voteType, - BlockID: types.BlockID{hash, header}, - } - err := vs.PrivValidator.SignVote(config.ChainID(), vote) - return vote, err -} - -// Sign vote for type/hash/header -func signVote(vs *validatorStub, voteType byte, hash []byte, header types.PartSetHeader) *types.Vote { - v, err := vs.signVote(voteType, hash, header) - if err != nil { - panic(fmt.Errorf("failed to sign vote: %v", err)) - } - return v -} - -func signVotes(voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) []*types.Vote { - votes := make([]*types.Vote, len(vss)) - for i, vs := range vss { - votes[i] = signVote(vs, voteType, hash, header) - } - return votes -} - -func incrementHeight(vss ...*validatorStub) { - for _, vs := range vss { - vs.Height++ - } -} - -func incrementRound(vss ...*validatorStub) { - for _, vs := range vss { - vs.Round++ - } -} - -//------------------------------------------------------------------------------- -// Functions for transitioning the consensus state - -func startTestRound(cs *ConsensusState, height int64, round int) { - cs.enterNewRound(height, round) - cs.startRoutines(0) -} - -// Create proposal block from cs1 but sign it with vs -func decideProposal(cs1 *ConsensusState, vs *validatorStub, height int64, round int) (proposal *types.Proposal, block *types.Block) { - block, blockParts := cs1.createProposalBlock() - if block == nil { // on error - panic("error creating proposal block") - } - - // Make proposal - polRound, polBlockID := cs1.Votes.POLInfo() - proposal = types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) - if err := vs.SignProposal(cs1.state.ChainID, proposal); err != nil { - panic(err) - } - return -} - -func addVotes(to *ConsensusState, votes ...*types.Vote) { - for _, vote := range votes { - to.peerMsgQueue <- msgInfo{Msg: &VoteMessage{vote}} - } -} - -func signAddVotes(to *ConsensusState, voteType byte, hash []byte, header types.PartSetHeader, vss ...*validatorStub) { - votes := signVotes(voteType, hash, header, vss...) - addVotes(to, votes...) 
-} - -func validatePrevote(t *testing.T, cs *ConsensusState, round int, privVal *validatorStub, blockHash []byte) { - prevotes := cs.Votes.Prevotes(round) - var vote *types.Vote - if vote = prevotes.GetByAddress(privVal.GetAddress()); vote == nil { - panic("Failed to find prevote from validator") - } - if blockHash == nil { - if vote.BlockID.Hash != nil { - panic(fmt.Sprintf("Expected prevote to be for nil, got %X", vote.BlockID.Hash)) - } - } else { - if !bytes.Equal(vote.BlockID.Hash, blockHash) { - panic(fmt.Sprintf("Expected prevote to be for %X, got %X", blockHash, vote.BlockID.Hash)) - } - } -} - -func validateLastPrecommit(t *testing.T, cs *ConsensusState, privVal *validatorStub, blockHash []byte) { - votes := cs.LastCommit - var vote *types.Vote - if vote = votes.GetByAddress(privVal.GetAddress()); vote == nil { - panic("Failed to find precommit from validator") - } - if !bytes.Equal(vote.BlockID.Hash, blockHash) { - panic(fmt.Sprintf("Expected precommit to be for %X, got %X", blockHash, vote.BlockID.Hash)) - } -} - -func validatePrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) { - precommits := cs.Votes.Precommits(thisRound) - var vote *types.Vote - if vote = precommits.GetByAddress(privVal.GetAddress()); vote == nil { - panic("Failed to find precommit from validator") - } - - if votedBlockHash == nil { - if vote.BlockID.Hash != nil { - panic("Expected precommit to be for nil") - } - } else { - if !bytes.Equal(vote.BlockID.Hash, votedBlockHash) { - panic("Expected precommit to be for proposal block") - } - } - - if lockedBlockHash == nil { - if cs.LockedRound != lockRound || cs.LockedBlock != nil { - panic(fmt.Sprintf("Expected to be locked on nil at round %d. Got locked at round %d with block %v", lockRound, cs.LockedRound, cs.LockedBlock)) - } - } else { - if cs.LockedRound != lockRound || !bytes.Equal(cs.LockedBlock.Hash(), lockedBlockHash) { - panic(fmt.Sprintf("Expected block to be locked on round %d, got %d. 
Got locked block %X, expected %X", lockRound, cs.LockedRound, cs.LockedBlock.Hash(), lockedBlockHash)) - } - } - -} - -func validatePrevoteAndPrecommit(t *testing.T, cs *ConsensusState, thisRound, lockRound int, privVal *validatorStub, votedBlockHash, lockedBlockHash []byte) { - // verify the prevote - validatePrevote(t, cs, thisRound, privVal, votedBlockHash) - // verify precommit - cs.mtx.Lock() - validatePrecommit(t, cs, thisRound, lockRound, privVal, votedBlockHash, lockedBlockHash) - cs.mtx.Unlock() -} - -// genesis -func subscribeToVoter(cs *ConsensusState, addr []byte) chan interface{} { - voteCh0 := make(chan interface{}) - err := cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryVote, voteCh0) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, types.EventQueryVote)) - } - voteCh := make(chan interface{}) - go func() { - for v := range voteCh0 { - vote := v.(types.EventDataVote) - // we only fire for our own votes - if bytes.Equal(addr, vote.Vote.ValidatorAddress) { - voteCh <- v - } - } - }() - return voteCh -} - -//------------------------------------------------------------------------------- -// consensus states - -func newConsensusState(state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { - return newConsensusStateWithConfig(config, state, pv, app) -} - -func newConsensusStateWithConfig(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application) *ConsensusState { - blockDB := dbm.NewMemDB() - return newConsensusStateWithConfigAndBlockStore(thisConfig, state, pv, app, blockDB) -} - -func newConsensusStateWithConfigAndBlockStore(thisConfig *cfg.Config, state sm.State, pv types.PrivValidator, app abci.Application, blockDB dbm.DB) *ConsensusState { - // Get BlockStore - blockStore := bc.NewBlockStore(blockDB) - - // one for mempool, one for consensus - mtx := new(sync.Mutex) - proxyAppConnMem := abcicli.NewLocalClient(mtx, app) - proxyAppConnCon := abcicli.NewLocalClient(mtx, app) - - // Make Mempool - mempool := mempl.NewMempool(thisConfig.Mempool, proxyAppConnMem, 0) - mempool.SetLogger(log.TestingLogger().With("module", "mempool")) - if thisConfig.Consensus.WaitForTxs() { - mempool.EnableTxsAvailable() - } - - // mock the evidence pool - evpool := sm.MockEvidencePool{} - - // Make ConsensusState - stateDB := dbm.NewMemDB() - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) - cs := NewConsensusState(thisConfig.Consensus, state, blockExec, blockStore, mempool, evpool) - cs.SetLogger(log.TestingLogger().With("module", "consensus")) - cs.SetPrivValidator(pv) - - eventBus := types.NewEventBus() - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - eventBus.Start() - cs.SetEventBus(eventBus) - return cs -} - -func loadPrivValidator(config *cfg.Config) *privval.FilePV { - privValidatorFile := config.PrivValidatorFile() - ensureDir(path.Dir(privValidatorFile), 0700) - privValidator := privval.LoadOrGenFilePV(privValidatorFile) - privValidator.Reset() - return privValidator -} - -func randConsensusState(nValidators int) (*ConsensusState, []*validatorStub) { - // Get State - state, privVals := randGenesisState(nValidators, false, 10) - - vss := make([]*validatorStub, nValidators) - - cs := newConsensusState(state, privVals[0], counter.NewCounterApplication(true)) - - for i := 0; i < nValidators; i++ { - vss[i] = NewValidatorStub(privVals[i], i) - } - // since cs1 starts at 1 - incrementHeight(vss[1:]...) 
- - return cs, vss -} - -//------------------------------------------------------------------------------- - -func ensureNoNewStep(stepCh <-chan interface{}) { - timer := time.NewTimer(ensureTimeout) - select { - case <-timer.C: - break - case <-stepCh: - panic("We should be stuck waiting, not moving to the next step") - } -} - -func ensureNewStep(stepCh <-chan interface{}) { - timer := time.NewTimer(ensureTimeout) - select { - case <-timer.C: - panic("We shouldnt be stuck waiting") - case <-stepCh: - break - } -} - -//------------------------------------------------------------------------------- -// consensus nets - -// consensusLogger is a TestingLogger which uses a different -// color for each validator ("validator" key must exist). -func consensusLogger() log.Logger { - return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "validator" { - return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} - } - } - return term.FgBgColor{} - }).With("module", "consensus") -} - -func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) []*ConsensusState { - genDoc, privVals := randGenesisDoc(nValidators, false, 30) - css := make([]*ConsensusState, nValidators) - logger := consensusLogger() - for i := 0; i < nValidators; i++ { - stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) - for _, opt := range configOpts { - opt(thisConfig) - } - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - app := appFunc() - vals := types.TM2PB.Validators(state.Validators) - app.InitChain(abci.RequestInitChain{Validators: vals}) - - css[i] = newConsensusStateWithConfig(thisConfig, state, privVals[i], app) - css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) - } - return css -} - -// nPeers = nValidators + nNotValidator -func randConsensusNetWithPeers(nValidators, nPeers int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application) []*ConsensusState { - genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower) - css := make([]*ConsensusState, nPeers) - logger := consensusLogger() - for i := 0; i < nPeers; i++ { - stateDB := dbm.NewMemDB() // each state needs its own db - state, _ := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - thisConfig := ResetConfig(cmn.Fmt("%s_%d", testName, i)) - ensureDir(path.Dir(thisConfig.Consensus.WalFile()), 0700) // dir for wal - var privVal types.PrivValidator - if i < nValidators { - privVal = privVals[i] - } else { - _, tempFilePath := cmn.Tempfile("priv_validator_") - privVal = privval.GenFilePV(tempFilePath) - } - - app := appFunc() - vals := types.TM2PB.Validators(state.Validators) - app.InitChain(abci.RequestInitChain{Validators: vals}) - - css[i] = newConsensusStateWithConfig(thisConfig, state, privVal, app) - css[i].SetTimeoutTicker(tickerFunc()) - css[i].SetLogger(logger.With("validator", i, "module", "consensus")) - } - return css -} - -func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { - for i, s := range switches { - if peer.NodeInfo().ID == s.NodeInfo().ID { - return i - } - } - panic("didnt find peer in switches") - return -1 -} - -//------------------------------------------------------------------------------- -// 
genesis - -func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) { - validators := make([]types.GenesisValidator, numValidators) - privValidators := make([]types.PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privVal := types.RandValidator(randPower, minPower) - validators[i] = types.GenesisValidator{ - PubKey: val.PubKey, - Power: val.VotingPower, - } - privValidators[i] = privVal - } - sort.Sort(types.PrivValidatorsByAddress(privValidators)) - - return &types.GenesisDoc{ - GenesisTime: time.Now(), - ChainID: config.ChainID(), - Validators: validators, - }, privValidators -} - -func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) { - genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) - s0, _ := sm.MakeGenesisState(genDoc) - db := dbm.NewMemDB() - sm.SaveState(db, s0) - return s0, privValidators -} - -//------------------------------------ -// mock ticker - -func newMockTickerFunc(onlyOnce bool) func() TimeoutTicker { - return func() TimeoutTicker { - return &mockTicker{ - c: make(chan timeoutInfo, 10), - onlyOnce: onlyOnce, - } - } -} - -// mock ticker only fires on RoundStepNewHeight -// and only once if onlyOnce=true -type mockTicker struct { - c chan timeoutInfo - - mtx sync.Mutex - onlyOnce bool - fired bool -} - -func (m *mockTicker) Start() error { - return nil -} - -func (m *mockTicker) Stop() error { - return nil -} - -func (m *mockTicker) ScheduleTimeout(ti timeoutInfo) { - m.mtx.Lock() - defer m.mtx.Unlock() - if m.onlyOnce && m.fired { - return - } - if ti.Step == cstypes.RoundStepNewHeight { - m.c <- ti - m.fired = true - } -} - -func (m *mockTicker) Chan() <-chan timeoutInfo { - return m.c -} - -func (mockTicker) SetLogger(log.Logger) { -} - -//------------------------------------ - -func newCounter() abci.Application { - return counter.NewCounterApplication(true) -} - -func newPersistentKVStore() abci.Application { - dir, _ := ioutil.TempDir("/tmp", "persistent-kvstore") - return kvstore.NewPersistentKVStoreApplication(dir) -} diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go deleted file mode 100644 index 3c818006..00000000 --- a/consensus/mempool_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package consensus - -import ( - "encoding/binary" - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/abci/example/code" - abci "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/types" -) - -func init() { - config = ResetConfig("consensus_mempool_test") -} - -func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { - config := ResetConfig("consensus_mempool_txs_available_test") - config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(1, false, 10) - cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) - cs.mempool.EnableTxsAvailable() - height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) - - ensureNewStep(newBlockCh) // first block gets committed - ensureNoNewStep(newBlockCh) - deliverTxsRange(cs, 0, 1) - ensureNewStep(newBlockCh) // commit txs - ensureNewStep(newBlockCh) // commit updated app hash - ensureNoNewStep(newBlockCh) -} - -func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { - config := 
ResetConfig("consensus_mempool_txs_available_test") - config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds()) - state, privVals := randGenesisState(1, false, 10) - cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) - cs.mempool.EnableTxsAvailable() - height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - startTestRound(cs, height, round) - - ensureNewStep(newBlockCh) // first block gets committed - ensureNoNewStep(newBlockCh) // then we dont make a block ... - ensureNewStep(newBlockCh) // until the CreateEmptyBlocksInterval has passed -} - -func TestMempoolProgressInHigherRound(t *testing.T) { - config := ResetConfig("consensus_mempool_txs_available_test") - config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(1, false, 10) - cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) - cs.mempool.EnableTxsAvailable() - height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - cs.setProposal = func(proposal *types.Proposal) error { - if cs.Height == 2 && cs.Round == 0 { - // dont set the proposal in round 0 so we timeout and - // go to next round - cs.Logger.Info("Ignoring set proposal at height 2, round 0") - return nil - } - return cs.defaultSetProposal(proposal) - } - startTestRound(cs, height, round) - - ensureNewStep(newRoundCh) // first round at first height - ensureNewStep(newBlockCh) // first block gets committed - ensureNewStep(newRoundCh) // first round at next height - deliverTxsRange(cs, 0, 1) // we deliver txs, but dont set a proposal so we get the next round - <-timeoutCh - ensureNewStep(newRoundCh) // wait for the next round - ensureNewStep(newBlockCh) // now we can commit the block -} - -func deliverTxsRange(cs *ConsensusState, start, end int) { - // Deliver some txs. - for i := start; i < end; i++ { - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(i)) - err := cs.mempool.CheckTx(txBytes, nil) - if err != nil { - panic(cmn.Fmt("Error after CheckTx: %v", err)) - } - } -} - -func TestMempoolTxConcurrentWithCommit(t *testing.T) { - state, privVals := randGenesisState(1, false, 10) - cs := newConsensusState(state, privVals[0], NewCounterApplication()) - height, round := cs.Height, cs.Round - newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) - - NTxs := 10000 - go deliverTxsRange(cs, 0, NTxs) - - startTestRound(cs, height, round) - for nTxs := 0; nTxs < NTxs; { - ticker := time.NewTicker(time.Second * 30) - select { - case b := <-newBlockCh: - evt := b.(types.EventDataNewBlock) - nTxs += int(evt.Block.Header.NumTxs) - case <-ticker.C: - panic("Timed out waiting to commit blocks with transactions") - } - } -} - -func TestMempoolRmBadTx(t *testing.T) { - state, privVals := randGenesisState(1, false, 10) - app := NewCounterApplication() - cs := newConsensusState(state, privVals[0], app) - - // increment the counter by 1 - txBytes := make([]byte, 8) - binary.BigEndian.PutUint64(txBytes, uint64(0)) - - resDeliver := app.DeliverTx(txBytes) - assert.False(t, resDeliver.IsErr(), cmn.Fmt("expected no error. 
got %v", resDeliver)) - - resCommit := app.Commit() - assert.True(t, len(resCommit.Data) > 0) - - emptyMempoolCh := make(chan struct{}) - checkTxRespCh := make(chan struct{}) - go func() { - // Try to send the tx through the mempool. - // CheckTx should not err, but the app should return a bad abci code - // and the tx should get removed from the pool - err := cs.mempool.CheckTx(txBytes, func(r *abci.Response) { - if r.GetCheckTx().Code != code.CodeTypeBadNonce { - t.Fatalf("expected checktx to return bad nonce, got %v", r) - } - checkTxRespCh <- struct{}{} - }) - if err != nil { - t.Fatalf("Error after CheckTx: %v", err) - } - - // check for the tx - for { - txs := cs.mempool.Reap(1) - if len(txs) == 0 { - emptyMempoolCh <- struct{}{} - return - } - time.Sleep(10 * time.Millisecond) - } - }() - - // Wait until the tx returns - ticker := time.After(time.Second * 5) - select { - case <-checkTxRespCh: - // success - case <-ticker: - t.Fatalf("Timed out waiting for tx to return") - } - - // Wait until the tx is removed - ticker = time.After(time.Second * 5) - select { - case <-emptyMempoolCh: - // success - case <-ticker: - t.Fatalf("Timed out waiting for tx to be removed") - } -} - -// CounterApplication that maintains a mempool state and resets it upon commit -type CounterApplication struct { - abci.BaseApplication - - txCount int - mempoolTxCount int -} - -func NewCounterApplication() *CounterApplication { - return &CounterApplication{} -} - -func (app *CounterApplication) Info(req abci.RequestInfo) abci.ResponseInfo { - return abci.ResponseInfo{Data: cmn.Fmt("txs:%v", app.txCount)} -} - -func (app *CounterApplication) DeliverTx(tx []byte) abci.ResponseDeliverTx { - txValue := txAsUint64(tx) - if txValue != uint64(app.txCount) { - return abci.ResponseDeliverTx{ - Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.txCount, txValue)} - } - app.txCount++ - return abci.ResponseDeliverTx{Code: code.CodeTypeOK} -} - -func (app *CounterApplication) CheckTx(tx []byte) abci.ResponseCheckTx { - txValue := txAsUint64(tx) - if txValue != uint64(app.mempoolTxCount) { - return abci.ResponseCheckTx{ - Code: code.CodeTypeBadNonce, - Log: fmt.Sprintf("Invalid nonce. Expected %v, got %v", app.mempoolTxCount, txValue)} - } - app.mempoolTxCount++ - return abci.ResponseCheckTx{Code: code.CodeTypeOK} -} - -func txAsUint64(tx []byte) uint64 { - tx8 := make([]byte, 8) - copy(tx8[len(tx8)-len(tx):], tx) - return binary.BigEndian.Uint64(tx8) -} - -func (app *CounterApplication) Commit() abci.ResponseCommit { - app.mempoolTxCount = app.txCount - if app.txCount == 0 { - return abci.ResponseCommit{} - } - hash := make([]byte, 8) - binary.BigEndian.PutUint64(hash, uint64(app.txCount)) - return abci.ResponseCommit{Data: hash} -} diff --git a/consensus/metrics.go b/consensus/metrics.go deleted file mode 100644 index 253880e8..00000000 --- a/consensus/metrics.go +++ /dev/null @@ -1,133 +0,0 @@ -package consensus - -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - - prometheus "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -// Metrics contains metrics exposed by this package. -type Metrics struct { - // Height of the chain. - Height metrics.Gauge - - // Number of rounds. - Rounds metrics.Gauge - - // Number of validators. - Validators metrics.Gauge - // Total power of all validators. - ValidatorsPower metrics.Gauge - // Number of validators who did not sign. 
- MissingValidators metrics.Gauge - // Total power of the missing validators. - MissingValidatorsPower metrics.Gauge - // Number of validators who tried to double sign. - ByzantineValidators metrics.Gauge - // Total power of the byzantine validators. - ByzantineValidatorsPower metrics.Gauge - - // Time between this and the last block. - BlockIntervalSeconds metrics.Histogram - - // Number of transactions. - NumTxs metrics.Gauge - // Size of the block. - BlockSizeBytes metrics.Gauge - // Total number of transactions. - TotalTxs metrics.Gauge -} - -// PrometheusMetrics returns Metrics built using the Prometheus client library. -func PrometheusMetrics() *Metrics { - return &Metrics{ - Height: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "height", - Help: "Height of the chain.", - }, []string{}), - Rounds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "rounds", - Help: "Number of rounds.", - }, []string{}), - - Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "validators", - Help: "Number of validators.", - }, []string{}), - ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "validators_power", - Help: "Total power of all validators.", - }, []string{}), - MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "missing_validators", - Help: "Number of validators who did not sign.", - }, []string{}), - MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "missing_validators_power", - Help: "Total power of the missing validators.", - }, []string{}), - ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "byzantine_validators", - Help: "Number of validators who tried to double sign.", - }, []string{}), - ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "byzantine_validators_power", - Help: "Total power of the byzantine validators.", - }, []string{}), - - BlockIntervalSeconds: prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{ - Subsystem: "consensus", - Name: "block_interval_seconds", - Help: "Time between this and the last block.", - Buckets: []float64{1, 2.5, 5, 10, 60}, - }, []string{}), - - NumTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "num_txs", - Help: "Number of transactions.", - }, []string{}), - BlockSizeBytes: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "block_size_bytes", - Help: "Size of the block.", - }, []string{}), - TotalTxs: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "consensus", - Name: "total_txs", - Help: "Total number of transactions.", - }, []string{}), - } -}
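The two constructors pair naturally: Prometheus-backed metrics when monitoring is enabled, no-op metrics otherwise. A minimal selection sketch, assuming a hypothetical `metricsEnabled` flag (the real node config may expose this differently):

```go
// metricsProvider is a hypothetical helper: it picks the Prometheus-backed
// Metrics when monitoring is enabled and the no-op Metrics otherwise.
func metricsProvider(metricsEnabled bool) *Metrics {
	if metricsEnabled {
		return PrometheusMetrics() // registers collectors with the default registry
	}
	return NopMetrics() // every observation is discarded
}
```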
-// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - Height: discard.NewGauge(), - - Rounds: discard.NewGauge(), - - Validators: discard.NewGauge(), - ValidatorsPower: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), - - BlockIntervalSeconds: discard.NewHistogram(), - - NumTxs: discard.NewGauge(), - BlockSizeBytes: discard.NewGauge(), - TotalTxs: discard.NewGauge(), - } -} diff --git a/consensus/reactor.go b/consensus/reactor.go deleted file mode 100644 index 2034ad34..00000000 --- a/consensus/reactor.go +++ /dev/null @@ -1,1459 +0,0 @@ -package consensus - -import ( - "fmt" - "reflect" - "sync" - "time" - - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - cstypes "github.com/tendermint/tendermint/consensus/types" - tmevents "github.com/tendermint/tendermint/libs/events" - "github.com/tendermint/tendermint/p2p" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -const ( - StateChannel = byte(0x20) - DataChannel = byte(0x21) - VoteChannel = byte(0x22) - VoteSetBitsChannel = byte(0x23) - - maxMsgSize = 1048576 // 1MB; NOTE/TODO: keep in sync with types.PartSet sizes. - - blocksToContributeToBecomeGoodPeer = 10000 -) - -//----------------------------------------------------------------------------- - -// ConsensusReactor defines a reactor for the consensus service. -type ConsensusReactor struct { - p2p.BaseReactor // BaseService + p2p.Switch - - conS *ConsensusState - - mtx sync.RWMutex - fastSync bool - eventBus *types.EventBus -} - -// NewConsensusReactor returns a new ConsensusReactor with the given -// consensusState. -func NewConsensusReactor(consensusState *ConsensusState, fastSync bool) *ConsensusReactor { - conR := &ConsensusReactor{ - conS: consensusState, - fastSync: fastSync, - } - conR.BaseReactor = *p2p.NewBaseReactor("ConsensusReactor", conR) - return conR -} - -// OnStart implements BaseService by subscribing to events (which are later -// broadcast to other peers) and by starting the consensus state if we're not in fast sync. -func (conR *ConsensusReactor) OnStart() error { - conR.Logger.Info("ConsensusReactor ", "fastSync", conR.FastSync()) - if err := conR.BaseReactor.OnStart(); err != nil { - return err - } - - conR.subscribeToBroadcastEvents() - - if !conR.FastSync() { - err := conR.conS.Start() - if err != nil { - return err - } - } - - return nil -} - -// OnStop implements BaseService by unsubscribing from events and stopping -// state. -func (conR *ConsensusReactor) OnStop() { - conR.BaseReactor.OnStop() - conR.unsubscribeFromBroadcastEvents() - conR.conS.Stop() -} - -// SwitchToConsensus switches from fast_sync mode to consensus mode. -// It resets the state, turns off fast_sync, and starts the consensus state machine. -func (conR *ConsensusReactor) SwitchToConsensus(state sm.State, blocksSynced int) { - conR.Logger.Info("SwitchToConsensus") - conR.conS.reconstructLastCommit(state) - // NOTE: The line below causes broadcastNewRoundStepRoutine() to - // broadcast a NewRoundStepMessage. 
- conR.conS.updateToState(state) - - conR.mtx.Lock() - conR.fastSync = false - conR.mtx.Unlock() - - if blocksSynced > 0 { - // don't bother with the WAL if we fast synced - conR.conS.doWALCatchup = false - } - err := conR.conS.Start() - if err != nil { - conR.Logger.Error("Error starting conS", "err", err) - return - } -} - -// GetChannels implements Reactor -func (conR *ConsensusReactor) GetChannels() []*p2p.ChannelDescriptor { - // TODO optimize - return []*p2p.ChannelDescriptor{ - { - ID: StateChannel, - Priority: 5, - SendQueueCapacity: 100, - RecvMessageCapacity: maxMsgSize, - }, - { - ID: DataChannel, // maybe split between gossiping current block and catchup stuff - Priority: 10, // once we gossip the whole block there's nothing left to send until next height or round - SendQueueCapacity: 100, - RecvBufferCapacity: 50 * 4096, - RecvMessageCapacity: maxMsgSize, - }, - { - ID: VoteChannel, - Priority: 5, - SendQueueCapacity: 100, - RecvBufferCapacity: 100 * 100, - RecvMessageCapacity: maxMsgSize, - }, - { - ID: VoteSetBitsChannel, - Priority: 1, - SendQueueCapacity: 2, - RecvBufferCapacity: 1024, - RecvMessageCapacity: maxMsgSize, - }, - } -} - -// AddPeer implements Reactor -func (conR *ConsensusReactor) AddPeer(peer p2p.Peer) { - if !conR.IsRunning() { - return - } - - // Create peerState for peer - peerState := NewPeerState(peer).SetLogger(conR.Logger) - peer.Set(types.PeerStateKey, peerState) - - // Begin routines for this peer. - go conR.gossipDataRoutine(peer, peerState) - go conR.gossipVotesRoutine(peer, peerState) - go conR.queryMaj23Routine(peer, peerState) - - // Send our state to peer. - // If we're fast_syncing, broadcast a RoundStepMessage later upon SwitchToConsensus(). - if !conR.FastSync() { - conR.sendNewRoundStepMessages(peer) - } -} - -// RemovePeer implements Reactor -func (conR *ConsensusReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - if !conR.IsRunning() { - return - } - // TODO - //peer.Get(PeerStateKey).(*PeerState).Disconnect() -} - -// Receive implements Reactor -// NOTE: We process these messages even when we're fast_syncing. -// Messages affect either a peer state or the consensus state. 
-// Peer state updates can happen in parallel, but processing of -// proposals, block parts, and votes is ordered by the receiveRoutine -// NOTE: blocks on consensus state for proposals, block parts, and votes -func (conR *ConsensusReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - if !conR.IsRunning() { - conR.Logger.Debug("Receive", "src", src, "chId", chID, "bytes", msgBytes) - return - } - - msg, err := DecodeMessage(msgBytes) - if err != nil { - conR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) - conR.Switch.StopPeerForError(src, err) - return - } - conR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) - - // Get peer states - ps := src.Get(types.PeerStateKey).(*PeerState) - - switch chID { - case StateChannel: - switch msg := msg.(type) { - case *NewRoundStepMessage: - ps.ApplyNewRoundStepMessage(msg) - case *CommitStepMessage: - ps.ApplyCommitStepMessage(msg) - case *HasVoteMessage: - ps.ApplyHasVoteMessage(msg) - case *VoteSetMaj23Message: - cs := conR.conS - cs.mtx.Lock() - height, votes := cs.Height, cs.Votes - cs.mtx.Unlock() - if height != msg.Height { - return - } - // Peer claims to have a maj23 for some BlockID at H,R,S, - err := votes.SetPeerMaj23(msg.Round, msg.Type, ps.peer.ID(), msg.BlockID) - if err != nil { - conR.Switch.StopPeerForError(src, err) - return - } - // Respond with a VoteSetBitsMessage showing which votes we have. - // (and consequently shows which we don't have) - var ourVotes *cmn.BitArray - switch msg.Type { - case types.VoteTypePrevote: - ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case types.VoteTypePrecommit: - ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) - default: - conR.Logger.Error("Bad VoteSetBitsMessage field Type") - return - } - src.TrySend(VoteSetBitsChannel, cdc.MustMarshalBinaryBare(&VoteSetBitsMessage{ - Height: msg.Height, - Round: msg.Round, - Type: msg.Type, - BlockID: msg.BlockID, - Votes: ourVotes, - })) - case *ProposalHeartbeatMessage: - hb := msg.Heartbeat - conR.Logger.Debug("Received proposal heartbeat message", - "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence, - "valIdx", hb.ValidatorIndex, "valAddr", hb.ValidatorAddress) - default: - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) - } - - case DataChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) - return - } - switch msg := msg.(type) { - case *ProposalMessage: - ps.SetHasProposal(msg.Proposal) - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} - case *ProposalPOLMessage: - ps.ApplyProposalPOLMessage(msg) - case *BlockPartMessage: - ps.SetHasProposalBlockPart(msg.Height, msg.Round, msg.Part.Index) - if numBlocks := ps.RecordBlockPart(msg); numBlocks%blocksToContributeToBecomeGoodPeer == 0 { - conR.Switch.MarkPeerAsGood(src) - } - conR.conS.peerMsgQueue <- msgInfo{msg, src.ID()} - default: - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) - } - - case VoteChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) - return - } - switch msg := msg.(type) { - case *VoteMessage: - cs := conR.conS - cs.mtx.Lock() - height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() - cs.mtx.Unlock() - ps.EnsureVoteBitArrays(height, valSize) - ps.EnsureVoteBitArrays(height-1, lastCommitSize) - ps.SetHasVote(msg.Vote) - if blocks := 
ps.RecordVote(msg.Vote); blocks%blocksToContributeToBecomeGoodPeer == 0 { - conR.Switch.MarkPeerAsGood(src) - } - - cs.peerMsgQueue <- msgInfo{msg, src.ID()} - - default: - // don't punish (leave room for soft upgrades) - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) - } - - case VoteSetBitsChannel: - if conR.FastSync() { - conR.Logger.Info("Ignoring message received during fastSync", "msg", msg) - return - } - switch msg := msg.(type) { - case *VoteSetBitsMessage: - cs := conR.conS - cs.mtx.Lock() - height, votes := cs.Height, cs.Votes - cs.mtx.Unlock() - - if height == msg.Height { - var ourVotes *cmn.BitArray - switch msg.Type { - case types.VoteTypePrevote: - ourVotes = votes.Prevotes(msg.Round).BitArrayByBlockID(msg.BlockID) - case types.VoteTypePrecommit: - ourVotes = votes.Precommits(msg.Round).BitArrayByBlockID(msg.BlockID) - default: - conR.Logger.Error("Bad VoteSetBitsMessage field Type") - return - } - ps.ApplyVoteSetBitsMessage(msg, ourVotes) - } else { - ps.ApplyVoteSetBitsMessage(msg, nil) - } - default: - // don't punish (leave room for soft upgrades) - conR.Logger.Error(cmn.Fmt("Unknown message type %v", reflect.TypeOf(msg))) - } - - default: - conR.Logger.Error(cmn.Fmt("Unknown chId %X", chID)) - } - - if err != nil { - conR.Logger.Error("Error in Receive()", "err", err) - } -} - -// SetEventBus sets event bus. -func (conR *ConsensusReactor) SetEventBus(b *types.EventBus) { - conR.eventBus = b - conR.conS.SetEventBus(b) -} - -// FastSync returns whether the consensus reactor is in fast-sync mode. -func (conR *ConsensusReactor) FastSync() bool { - conR.mtx.RLock() - defer conR.mtx.RUnlock() - return conR.fastSync -} - -//-------------------------------------- - -// subscribeToBroadcastEvents subscribes for new round steps, votes and -// proposal heartbeats using internal pubsub defined on state to broadcast -// them to peers upon receiving. -func (conR *ConsensusReactor) subscribeToBroadcastEvents() { - const subscriber = "consensus-reactor" - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventNewRoundStep, - func(data tmevents.EventData) { - conR.broadcastNewRoundStepMessages(data.(*cstypes.RoundState)) - }) - - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventVote, - func(data tmevents.EventData) { - conR.broadcastHasVoteMessage(data.(*types.Vote)) - }) - - conR.conS.evsw.AddListenerForEvent(subscriber, types.EventProposalHeartbeat, - func(data tmevents.EventData) { - conR.broadcastProposalHeartbeatMessage(data.(*types.Heartbeat)) - }) -} - -func (conR *ConsensusReactor) unsubscribeFromBroadcastEvents() { - const subscriber = "consensus-reactor" - conR.conS.evsw.RemoveListener(subscriber) -} - -func (conR *ConsensusReactor) broadcastProposalHeartbeatMessage(hb *types.Heartbeat) { - conR.Logger.Debug("Broadcasting proposal heartbeat message", - "height", hb.Height, "round", hb.Round, "sequence", hb.Sequence) - msg := &ProposalHeartbeatMessage{hb} - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg)) -} - -func (conR *ConsensusReactor) broadcastNewRoundStepMessages(rs *cstypes.RoundState) { - nrsMsg, csMsg := makeRoundStepMessages(rs) - if nrsMsg != nil { - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) - } - if csMsg != nil { - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) - } -} - -// Broadcasts HasVoteMessage to peers that care. 
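The broadcast helpers here are driven by listeners on the consensus state's internal event switch, as wired in subscribeToBroadcastEvents above. The same mechanism can feed other package-internal consumers; a minimal sketch before the HasVoteMessage helper below (the subscriber name and function are hypothetical):

```go
// logRoundSteps is a hypothetical package-internal helper: it attaches an
// extra listener to the consensus state's event switch and logs every new
// round step, mirroring how subscribeToBroadcastEvents hooks up broadcasts.
func logRoundSteps(conS *ConsensusState, logger log.Logger) {
	conS.evsw.AddListenerForEvent("round-step-logger", types.EventNewRoundStep,
		func(data tmevents.EventData) {
			rs := data.(*cstypes.RoundState)
			logger.Info("New round step", "height", rs.Height, "round", rs.Round, "step", rs.Step)
		})
}
```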
-func (conR *ConsensusReactor) broadcastHasVoteMessage(vote *types.Vote) { - msg := &HasVoteMessage{ - Height: vote.Height, - Round: vote.Round, - Type: vote.Type, - Index: vote.ValidatorIndex, - } - conR.Switch.Broadcast(StateChannel, cdc.MustMarshalBinaryBare(msg)) - /* - // TODO: Make this broadcast more selective. - for _, peer := range conR.Switch.Peers().List() { - ps := peer.Get(PeerStateKey).(*PeerState) - prs := ps.GetRoundState() - if prs.Height == vote.Height { - // TODO: Also filter on round? - peer.TrySend(StateChannel, struct{ ConsensusMessage }{msg}) - } else { - // Height doesn't match - // TODO: check a field, maybe CatchupCommitRound? - // TODO: But that requires changing the struct field comment. - } - } - */ -} - -func makeRoundStepMessages(rs *cstypes.RoundState) (nrsMsg *NewRoundStepMessage, csMsg *CommitStepMessage) { - nrsMsg = &NewRoundStepMessage{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step, - SecondsSinceStartTime: int(time.Since(rs.StartTime).Seconds()), - LastCommitRound: rs.LastCommit.Round(), - } - if rs.Step == cstypes.RoundStepCommit { - csMsg = &CommitStepMessage{ - Height: rs.Height, - BlockPartsHeader: rs.ProposalBlockParts.Header(), - BlockParts: rs.ProposalBlockParts.BitArray(), - } - } - return -} - -func (conR *ConsensusReactor) sendNewRoundStepMessages(peer p2p.Peer) { - rs := conR.conS.GetRoundState() - nrsMsg, csMsg := makeRoundStepMessages(rs) - if nrsMsg != nil { - peer.Send(StateChannel, cdc.MustMarshalBinaryBare(nrsMsg)) - } - if csMsg != nil { - peer.Send(StateChannel, cdc.MustMarshalBinaryBare(csMsg)) - } -} - -func (conR *ConsensusReactor) gossipDataRoutine(peer p2p.Peer, ps *PeerState) { - logger := conR.Logger.With("peer", peer) - -OUTER_LOOP: - for { - // Manage disconnects from self or peer. - if !peer.IsRunning() || !conR.IsRunning() { - logger.Info("Stopping gossipDataRoutine for peer") - return - } - rs := conR.conS.GetRoundState() - prs := ps.GetRoundState() - - // Send proposal Block parts? - if rs.ProposalBlockParts.HasHeader(prs.ProposalBlockPartsHeader) { - if index, ok := rs.ProposalBlockParts.BitArray().Sub(prs.ProposalBlockParts.Copy()).PickRandom(); ok { - part := rs.ProposalBlockParts.GetPart(index) - msg := &BlockPartMessage{ - Height: rs.Height, // This tells peer that this part applies to us. - Round: rs.Round, // This tells peer that this part applies to us. - Part: part, - } - logger.Debug("Sending block part", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { - ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) - } - continue OUTER_LOOP - } - } - - // If the peer is on a previous height, help catch up. - if (0 < prs.Height) && (prs.Height < rs.Height) { - heightLogger := logger.With("height", prs.Height) - - // if we never received the commit message from the peer, the block parts won't be initialized - if prs.ProposalBlockParts == nil { - blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) - if blockMeta == nil { - cmn.PanicCrisis(cmn.Fmt("Failed to load block %d when blockStore is at %d", - prs.Height, conR.conS.blockStore.Height())) - } - ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) - // continue the loop since prs is a copy and not affected by this initialization - continue OUTER_LOOP - } - conR.gossipDataForCatchup(heightLogger, rs, prs, ps, peer) - continue OUTER_LOOP - } - - // If height and round don't match, sleep. 
- if (rs.Height != prs.Height) || (rs.Round != prs.Round) { - //logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) - time.Sleep(conR.conS.config.PeerGossipSleep()) - continue OUTER_LOOP - } - - // By here, height and round match. - // Proposal block parts were already matched and sent if any were wanted. - // (These can match on hash so the round doesn't matter) - // Now consider sending other things, like the Proposal itself. - - // Send Proposal && ProposalPOL BitArray? - if rs.Proposal != nil && !prs.Proposal { - // Proposal: share the proposal metadata with peer. - { - msg := &ProposalMessage{Proposal: rs.Proposal} - logger.Debug("Sending proposal", "height", prs.Height, "round", prs.Round) - if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { - ps.SetHasProposal(rs.Proposal) - } - } - // ProposalPOL: lets peer know which POL votes we have so far. - // Peer must receive ProposalMessage first. - // rs.Proposal was validated, so rs.Proposal.POLRound <= rs.Round, - // so we definitely have rs.Votes.Prevotes(rs.Proposal.POLRound). - if 0 <= rs.Proposal.POLRound { - msg := &ProposalPOLMessage{ - Height: rs.Height, - ProposalPOLRound: rs.Proposal.POLRound, - ProposalPOL: rs.Votes.Prevotes(rs.Proposal.POLRound).BitArray(), - } - logger.Debug("Sending POL", "height", prs.Height, "round", prs.Round) - peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) - } - continue OUTER_LOOP - } - - // Nothing to do. Sleep. - time.Sleep(conR.conS.config.PeerGossipSleep()) - continue OUTER_LOOP - } -} - -func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundState, - prs *cstypes.PeerRoundState, ps *PeerState, peer p2p.Peer) { - - if index, ok := prs.ProposalBlockParts.Not().PickRandom(); ok { - // Ensure that the peer's PartSetHeader is correct - blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) - if blockMeta == nil { - logger.Error("Failed to load block meta", - "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height()) - time.Sleep(conR.conS.config.PeerGossipSleep()) - return - } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { - logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping", - "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) - time.Sleep(conR.conS.config.PeerGossipSleep()) - return - } - // Load the part - part := conR.conS.blockStore.LoadBlockPart(prs.Height, index) - if part == nil { - logger.Error("Could not load part", "index", index, - "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) - time.Sleep(conR.conS.config.PeerGossipSleep()) - return - } - // Send the part - msg := &BlockPartMessage{ - Height: prs.Height, // Not our height, so it doesn't matter. - Round: prs.Round, // Not our height, so it doesn't matter. - Part: part, - } - logger.Debug("Sending block part for catchup", "round", prs.Round, "index", index) - if peer.Send(DataChannel, cdc.MustMarshalBinaryBare(msg)) { - ps.SetHasProposalBlockPart(prs.Height, prs.Round, index) - } else { - logger.Debug("Sending block part for catchup failed") - } - return - } - //logger.Info("No parts to send in catch-up, sleeping") - time.Sleep(conR.conS.config.PeerGossipSleep()) -} - -func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { - logger := conR.Logger.With("peer", peer) - - // Simple hack to throttle logs upon sleep. 
- var sleeping = 0 - -OUTER_LOOP: - for { - // Manage disconnects from self or peer. - if !peer.IsRunning() || !conR.IsRunning() { - logger.Info("Stopping gossipVotesRoutine for peer") - return - } - rs := conR.conS.GetRoundState() - prs := ps.GetRoundState() - - switch sleeping { - case 1: // First sleep - sleeping = 2 - case 2: // No more sleep - sleeping = 0 - } - - //logger.Debug("gossipVotesRoutine", "rsHeight", rs.Height, "rsRound", rs.Round, - // "prsHeight", prs.Height, "prsRound", prs.Round, "prsStep", prs.Step) - - // If height matches, then send LastCommit, Prevotes, Precommits. - if rs.Height == prs.Height { - heightLogger := logger.With("height", prs.Height) - if conR.gossipVotesForHeight(heightLogger, rs, prs, ps) { - continue OUTER_LOOP - } - } - - // Special catchup logic. - // If peer is lagging by height 1, send LastCommit. - if prs.Height != 0 && rs.Height == prs.Height+1 { - if ps.PickSendVote(rs.LastCommit) { - logger.Debug("Picked rs.LastCommit to send", "height", prs.Height) - continue OUTER_LOOP - } - } - - // Catchup logic - // If peer is lagging by more than 1, send Commit. - if prs.Height != 0 && rs.Height >= prs.Height+2 { - // Load the block commit for prs.Height, - // which contains precommit signatures for prs.Height. - commit := conR.conS.blockStore.LoadBlockCommit(prs.Height) - if ps.PickSendVote(commit) { - logger.Debug("Picked Catchup commit to send", "height", prs.Height) - continue OUTER_LOOP - } - } - - if sleeping == 0 { - // We sent nothing. Sleep... - sleeping = 1 - logger.Debug("No votes to send, sleeping", "rs.Height", rs.Height, "prs.Height", prs.Height, - "localPV", rs.Votes.Prevotes(rs.Round).BitArray(), "peerPV", prs.Prevotes, - "localPC", rs.Votes.Precommits(rs.Round).BitArray(), "peerPC", prs.Precommits) - } else if sleeping == 2 { - // Continued sleep... - sleeping = 1 - } - - time.Sleep(conR.conS.config.PeerGossipSleep()) - continue OUTER_LOOP - } -} - -func (conR *ConsensusReactor) gossipVotesForHeight(logger log.Logger, rs *cstypes.RoundState, prs *cstypes.PeerRoundState, ps *PeerState) bool { - - // If there are lastCommits to send... - if prs.Step == cstypes.RoundStepNewHeight { - if ps.PickSendVote(rs.LastCommit) { - logger.Debug("Picked rs.LastCommit to send") - return true - } - } - // If there are POL prevotes to send... - if prs.Step <= cstypes.RoundStepPropose && prs.Round != -1 && prs.Round <= rs.Round && prs.ProposalPOLRound != -1 { - if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if ps.PickSendVote(polPrevotes) { - logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", - "round", prs.ProposalPOLRound) - return true - } - } - } - // If there are prevotes to send... - if prs.Step <= cstypes.RoundStepPrevoteWait && prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { - logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true - } - } - // If there are precommits to send... 
- if prs.Step <= cstypes.RoundStepPrecommitWait && prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Precommits(prs.Round)) { - logger.Debug("Picked rs.Precommits(prs.Round) to send", "round", prs.Round) - return true - } - } - // If there are prevotes to send...Needed because of validBlock mechanism - if prs.Round != -1 && prs.Round <= rs.Round { - if ps.PickSendVote(rs.Votes.Prevotes(prs.Round)) { - logger.Debug("Picked rs.Prevotes(prs.Round) to send", "round", prs.Round) - return true - } - } - // If there are POLPrevotes to send... - if prs.ProposalPOLRound != -1 { - if polPrevotes := rs.Votes.Prevotes(prs.ProposalPOLRound); polPrevotes != nil { - if ps.PickSendVote(polPrevotes) { - logger.Debug("Picked rs.Prevotes(prs.ProposalPOLRound) to send", - "round", prs.ProposalPOLRound) - return true - } - } - } - - return false -} - -// NOTE: `queryMaj23Routine` has a simple crude design since it only comes -// into play for liveness when there's a signature DDoS attack happening. -func (conR *ConsensusReactor) queryMaj23Routine(peer p2p.Peer, ps *PeerState) { - logger := conR.Logger.With("peer", peer) - -OUTER_LOOP: - for { - // Manage disconnects from self or peer. - if !peer.IsRunning() || !conR.IsRunning() { - logger.Info("Stopping queryMaj23Routine for peer") - return - } - - // Maybe send Height/Round/Prevotes - { - rs := conR.conS.GetRoundState() - prs := ps.GetRoundState() - if rs.Height == prs.Height { - if maj23, ok := rs.Votes.Prevotes(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: types.VoteTypePrevote, - BlockID: maj23, - })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) - } - } - } - - // Maybe send Height/Round/Precommits - { - rs := conR.conS.GetRoundState() - prs := ps.GetRoundState() - if rs.Height == prs.Height { - if maj23, ok := rs.Votes.Precommits(prs.Round).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.Round, - Type: types.VoteTypePrecommit, - BlockID: maj23, - })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) - } - } - } - - // Maybe send Height/Round/ProposalPOL - { - rs := conR.conS.GetRoundState() - prs := ps.GetRoundState() - if rs.Height == prs.Height && prs.ProposalPOLRound >= 0 { - if maj23, ok := rs.Votes.Prevotes(prs.ProposalPOLRound).TwoThirdsMajority(); ok { - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ - Height: prs.Height, - Round: prs.ProposalPOLRound, - Type: types.VoteTypePrevote, - BlockID: maj23, - })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) - } - } - } - - // Little point sending LastCommitRound/LastCommit, - // These are fleeting and non-blocking. - - // Maybe send Height/CatchupCommitRound/CatchupCommit. - { - prs := ps.GetRoundState() - if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() { - commit := conR.conS.LoadCommit(prs.Height) - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ - Height: prs.Height, - Round: commit.Round(), - Type: types.VoteTypePrecommit, - BlockID: commit.BlockID, - })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) - } - } - - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) - - continue OUTER_LOOP - } -} - -// String returns a string representation of the ConsensusReactor. 
-// NOTE: For now, it is just a hard-coded string to avoid accessing unprotected shared variables. -// TODO: improve! -func (conR *ConsensusReactor) String() string { - // better not to access shared variables - return "ConsensusReactor" // conR.StringIndented("") -} - -// StringIndented returns an indented string representation of the ConsensusReactor -func (conR *ConsensusReactor) StringIndented(indent string) string { - s := "ConsensusReactor{\n" - s += indent + " " + conR.conS.StringIndented(indent+" ") + "\n" - for _, peer := range conR.Switch.Peers().List() { - ps := peer.Get(types.PeerStateKey).(*PeerState) - s += indent + " " + ps.StringIndented(indent+" ") + "\n" - } - s += indent + "}" - return s -} - -//----------------------------------------------------------------------------- - -var ( - ErrPeerStateHeightRegression = errors.New("Error peer state height regression") - ErrPeerStateInvalidStartTime = errors.New("Error peer state invalid startTime") -) - -// PeerState contains the known state of a peer, including its connection and -// threadsafe access to its PeerRoundState. -// NOTE: THIS GETS DUMPED WITH rpc/core/consensus.go. -// Be mindful of what you Expose. -type PeerState struct { - peer p2p.Peer - logger log.Logger - - mtx sync.Mutex `json:"-"` // NOTE: Modify below using setters, never directly. - PRS cstypes.PeerRoundState `json:"round_state"` // Exposed. - Stats *peerStateStats `json:"stats"` // Exposed. -} - -// peerStateStats holds internal statistics for a peer. -type peerStateStats struct { - LastVoteHeight int64 `json:"last_vote_height"` - Votes int `json:"votes"` - LastBlockPartHeight int64 `json:"last_block_part_height"` - BlockParts int `json:"block_parts"` -} - -func (pss peerStateStats) String() string { - return fmt.Sprintf("peerStateStats{lvh: %d, votes: %d, lbph: %d, blockParts: %d}", - pss.LastVoteHeight, pss.Votes, pss.LastBlockPartHeight, pss.BlockParts) -} - -// NewPeerState returns a new PeerState for the given Peer -func NewPeerState(peer p2p.Peer) *PeerState { - return &PeerState{ - peer: peer, - logger: log.NewNopLogger(), - PRS: cstypes.PeerRoundState{ - Round: -1, - ProposalPOLRound: -1, - LastCommitRound: -1, - CatchupCommitRound: -1, - }, - Stats: &peerStateStats{}, - } -} - -// SetLogger allows setting a logger on the peer state. Returns the peer state -// itself. -func (ps *PeerState) SetLogger(logger log.Logger) *PeerState { - ps.logger = logger - return ps -} - -// GetRoundState returns a shallow copy of the PeerRoundState. -// There's no point in mutating it since it won't change PeerState. -func (ps *PeerState) GetRoundState() *cstypes.PeerRoundState { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - prs := ps.PRS // copy - return &prs -} - -// ToJSON returns a JSON representation of the PeerState, marshalled using go-amino. -func (ps *PeerState) ToJSON() ([]byte, error) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return cdc.MarshalJSON(ps) -} - -// GetHeight returns an atomic snapshot of the PeerRoundState's height, -// used by the mempool to ensure peers are caught up before broadcasting new txs. -func (ps *PeerState) GetHeight() int64 { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.PRS.Height -} - -// SetHasProposal sets the given proposal as known for the peer. 
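A usage note on the accessors above, before the setters that follow: GetRoundState hands back a value copy, so callers can inspect peer state with no locking and no risk of mutating the shared struct. A minimal sketch (the helper name is hypothetical):

```go
// peerIsCaughtUp is a hypothetical helper showing the copy semantics of
// GetRoundState: prs is a private copy, safe to read without ps's mutex.
func peerIsCaughtUp(ps *PeerState, ourHeight int64) bool {
	prs := ps.GetRoundState()
	return prs.Height >= ourHeight
}
```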
-func (ps *PeerState) SetHasProposal(proposal *types.Proposal) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.PRS.Height != proposal.Height || ps.PRS.Round != proposal.Round { - return - } - if ps.PRS.Proposal { - return - } - - ps.PRS.Proposal = true - ps.PRS.ProposalBlockPartsHeader = proposal.BlockPartsHeader - ps.PRS.ProposalBlockParts = cmn.NewBitArray(proposal.BlockPartsHeader.Total) - ps.PRS.ProposalPOLRound = proposal.POLRound - ps.PRS.ProposalPOL = nil // Nil until ProposalPOLMessage received. -} - -// InitProposalBlockParts initializes the peer's proposal block parts header and bit array. -func (ps *PeerState) InitProposalBlockParts(partsHeader types.PartSetHeader) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.PRS.ProposalBlockParts != nil { - return - } - - ps.PRS.ProposalBlockPartsHeader = partsHeader - ps.PRS.ProposalBlockParts = cmn.NewBitArray(partsHeader.Total) -} - -// SetHasProposalBlockPart sets the given block part index as known for the peer. -func (ps *PeerState) SetHasProposalBlockPart(height int64, round int, index int) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.PRS.Height != height || ps.PRS.Round != round { - return - } - - ps.PRS.ProposalBlockParts.SetIndex(index, true) -} - -// PickSendVote picks a vote and sends it to the peer. -// Returns true if vote was sent. -func (ps *PeerState) PickSendVote(votes types.VoteSetReader) bool { - if vote, ok := ps.PickVoteToSend(votes); ok { - msg := &VoteMessage{vote} - ps.logger.Debug("Sending vote message", "ps", ps, "vote", vote) - return ps.peer.Send(VoteChannel, cdc.MustMarshalBinaryBare(msg)) - } - return false -} - -// PickVoteToSend picks a vote to send to the peer. -// Returns true if a vote was picked. -// NOTE: `votes` must be the correct Size() for the Height(). -func (ps *PeerState) PickVoteToSend(votes types.VoteSetReader) (vote *types.Vote, ok bool) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if votes.Size() == 0 { - return nil, false - } - - height, round, type_, size := votes.Height(), votes.Round(), votes.Type(), votes.Size() - - // Lazily set data using 'votes'. - if votes.IsCommit() { - ps.ensureCatchupCommitRound(height, round, size) - } - ps.ensureVoteBitArrays(height, size) - - psVotes := ps.getVoteBitArray(height, round, type_) - if psVotes == nil { - return nil, false // Not something worth sending - } - if index, ok := votes.BitArray().Sub(psVotes).PickRandom(); ok { - ps.setHasVote(height, round, type_, index) - return votes.GetByIndex(index), true - } - return nil, false -} - -func (ps *PeerState) getVoteBitArray(height int64, round int, type_ byte) *cmn.BitArray { - if !types.IsVoteTypeValid(type_) { - return nil - } - - if ps.PRS.Height == height { - if ps.PRS.Round == round { - switch type_ { - case types.VoteTypePrevote: - return ps.PRS.Prevotes - case types.VoteTypePrecommit: - return ps.PRS.Precommits - } - } - if ps.PRS.CatchupCommitRound == round { - switch type_ { - case types.VoteTypePrevote: - return nil - case types.VoteTypePrecommit: - return ps.PRS.CatchupCommit - } - } - if ps.PRS.ProposalPOLRound == round { - switch type_ { - case types.VoteTypePrevote: - return ps.PRS.ProposalPOL - case types.VoteTypePrecommit: - return nil - } - } - return nil - } - if ps.PRS.Height == height+1 { - if ps.PRS.LastCommitRound == round { - switch type_ { - case types.VoteTypePrevote: - return nil - case types.VoteTypePrecommit: - return ps.PRS.LastCommit - } - } - return nil - } - return nil -} - -// 'round': A round for which we have a +2/3 commit. 
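Stepping back to PickVoteToSend above: its selection step is plain bit-array set subtraction followed by a random pick, the same primitive gossipDataRoutine uses for block parts. That core in isolation, before the catchup-commit helper below (the wrapper is hypothetical):

```go
// pickMissingIndex is a hypothetical distillation of the selection step in
// PickVoteToSend: take the votes we have (ours), drop those the peer already
// has (theirs), and pick one of the remaining indices at random.
func pickMissingIndex(ours, theirs *cmn.BitArray) (int, bool) {
	missing := ours.Sub(theirs) // bits set in ours but not in theirs
	return missing.PickRandom() // (index, ok); ok is false if none remain
}
```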
-func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) { - if ps.PRS.Height != height { - return - } - /* - NOTE: This is wrong, 'round' could change. - e.g. if orig round is not the same as block LastCommit round. - if ps.CatchupCommitRound != -1 && ps.CatchupCommitRound != round { - cmn.PanicSanity(cmn.Fmt("Conflicting CatchupCommitRound. Height: %v, Orig: %v, New: %v", height, ps.CatchupCommitRound, round)) - } - */ - if ps.PRS.CatchupCommitRound == round { - return // Nothing to do! - } - ps.PRS.CatchupCommitRound = round - if round == ps.PRS.Round { - ps.PRS.CatchupCommit = ps.PRS.Precommits - } else { - ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators) - } -} - -// EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking -// what votes this peer has received. -// NOTE: It's important to make sure that numValidators actually matches -// what the node sees as the number of validators for height. -func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - ps.ensureVoteBitArrays(height, numValidators) -} - -func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { - if ps.PRS.Height == height { - if ps.PRS.Prevotes == nil { - ps.PRS.Prevotes = cmn.NewBitArray(numValidators) - } - if ps.PRS.Precommits == nil { - ps.PRS.Precommits = cmn.NewBitArray(numValidators) - } - if ps.PRS.CatchupCommit == nil { - ps.PRS.CatchupCommit = cmn.NewBitArray(numValidators) - } - if ps.PRS.ProposalPOL == nil { - ps.PRS.ProposalPOL = cmn.NewBitArray(numValidators) - } - } else if ps.PRS.Height == height+1 { - if ps.PRS.LastCommit == nil { - ps.PRS.LastCommit = cmn.NewBitArray(numValidators) - } - } -} - -// RecordVote updates internal statistics for this peer by recording the vote. -// It returns the total number of votes (1 per block). This essentially means -// the number of blocks for which peer has been sending us votes. -func (ps *PeerState) RecordVote(vote *types.Vote) int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.Stats.LastVoteHeight >= vote.Height { - return ps.Stats.Votes - } - ps.Stats.LastVoteHeight = vote.Height - ps.Stats.Votes++ - return ps.Stats.Votes -} - -// VotesSent returns the number of blocks for which peer has been sending us -// votes. -func (ps *PeerState) VotesSent() int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return ps.Stats.Votes -} - -// RecordBlockPart updates internal statistics for this peer by recording the -// block part. It returns the total number of block parts (1 per block). This -// essentially means the number of blocks for which peer has been sending us -// block parts. -func (ps *PeerState) RecordBlockPart(bp *BlockPartMessage) int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.Stats.LastBlockPartHeight >= bp.Height { - return ps.Stats.BlockParts - } - - ps.Stats.LastBlockPartHeight = bp.Height - ps.Stats.BlockParts++ - return ps.Stats.BlockParts -} - -// BlockPartsSent returns the number of blocks for which peer has been sending -// us block parts. 
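RecordVote and RecordBlockPart above exist to feed the peer-quality check in Receive: each counts at most one contribution per height, and every blocksToContributeToBecomeGoodPeer-th contribution re-marks the peer as good. A condensed sketch of that consumer side, before the accessor below (the wrapper is hypothetical; its body mirrors the VoteChannel case in Receive):

```go
// maybeMarkGood is a hypothetical wrapper around the good-peer check that
// Receive performs inline for votes (and, analogously, for block parts).
func maybeMarkGood(conR *ConsensusReactor, ps *PeerState, src p2p.Peer, vote *types.Vote) {
	if blocks := ps.RecordVote(vote); blocks%blocksToContributeToBecomeGoodPeer == 0 {
		conR.Switch.MarkPeerAsGood(src)
	}
}
```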
-func (ps *PeerState) BlockPartsSent() int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return ps.Stats.BlockParts -} - -// SetHasVote sets the given vote as known by the peer -func (ps *PeerState) SetHasVote(vote *types.Vote) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - ps.setHasVote(vote.Height, vote.Round, vote.Type, vote.ValidatorIndex) -} - -func (ps *PeerState) setHasVote(height int64, round int, type_ byte, index int) { - logger := ps.logger.With("peerH/R", cmn.Fmt("%d/%d", ps.PRS.Height, ps.PRS.Round), "H/R", cmn.Fmt("%d/%d", height, round)) - logger.Debug("setHasVote", "type", type_, "index", index) - - // NOTE: some may be nil BitArrays -> no side effects. - psVotes := ps.getVoteBitArray(height, round, type_) - if psVotes != nil { - psVotes.SetIndex(index, true) - } -} - -// ApplyNewRoundStepMessage updates the peer state for the new round. -func (ps *PeerState) ApplyNewRoundStepMessage(msg *NewRoundStepMessage) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - // Ignore duplicates or decreases - if CompareHRS(msg.Height, msg.Round, msg.Step, ps.PRS.Height, ps.PRS.Round, ps.PRS.Step) <= 0 { - return - } - - // Just remember these values. - psHeight := ps.PRS.Height - psRound := ps.PRS.Round - //psStep := ps.PRS.Step - psCatchupCommitRound := ps.PRS.CatchupCommitRound - psCatchupCommit := ps.PRS.CatchupCommit - - startTime := time.Now().Add(-1 * time.Duration(msg.SecondsSinceStartTime) * time.Second) - ps.PRS.Height = msg.Height - ps.PRS.Round = msg.Round - ps.PRS.Step = msg.Step - ps.PRS.StartTime = startTime - if psHeight != msg.Height || psRound != msg.Round { - ps.PRS.Proposal = false - ps.PRS.ProposalBlockPartsHeader = types.PartSetHeader{} - ps.PRS.ProposalBlockParts = nil - ps.PRS.ProposalPOLRound = -1 - ps.PRS.ProposalPOL = nil - // We'll update the BitArray capacity later. - ps.PRS.Prevotes = nil - ps.PRS.Precommits = nil - } - if psHeight == msg.Height && psRound != msg.Round && msg.Round == psCatchupCommitRound { - // Peer caught up to CatchupCommitRound. - // Preserve psCatchupCommit! - // NOTE: We prefer to use prs.Precommits if - // pr.Round matches pr.CatchupCommitRound. - ps.PRS.Precommits = psCatchupCommit - } - if psHeight != msg.Height { - // Shift Precommits to LastCommit. - if psHeight+1 == msg.Height && psRound == msg.LastCommitRound { - ps.PRS.LastCommitRound = msg.LastCommitRound - ps.PRS.LastCommit = ps.PRS.Precommits - } else { - ps.PRS.LastCommitRound = msg.LastCommitRound - ps.PRS.LastCommit = nil - } - // We'll update the BitArray capacity later. - ps.PRS.CatchupCommitRound = -1 - ps.PRS.CatchupCommit = nil - } -} - -// ApplyCommitStepMessage updates the peer state for the new commit. -func (ps *PeerState) ApplyCommitStepMessage(msg *CommitStepMessage) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.PRS.Height != msg.Height { - return - } - - ps.PRS.ProposalBlockPartsHeader = msg.BlockPartsHeader - ps.PRS.ProposalBlockParts = msg.BlockParts -} - -// ApplyProposalPOLMessage updates the peer state for the new proposal POL. -func (ps *PeerState) ApplyProposalPOLMessage(msg *ProposalPOLMessage) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.PRS.Height != msg.Height { - return - } - if ps.PRS.ProposalPOLRound != msg.ProposalPOLRound { - return - } - - // TODO: Merge onto existing ps.PRS.ProposalPOL? - // We might have sent some prevotes in the meantime. - ps.PRS.ProposalPOL = msg.ProposalPOL -} - -// ApplyHasVoteMessage updates the peer state for the new vote. 
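One more note on ApplyNewRoundStepMessage above: its duplicate/regression filter is a lexicographic comparison over (height, round, step). The same guard in isolation, before the vote-message handler below, using the package's CompareHRS helper (the wrapper is hypothetical):

```go
// isFreshStep is a hypothetical restatement of the staleness guard in
// ApplyNewRoundStepMessage: apply a step message only if its
// (height, round, step) triple is strictly greater than the peer's current one.
func isFreshStep(msg *NewRoundStepMessage, prs *cstypes.PeerRoundState) bool {
	return CompareHRS(msg.Height, msg.Round, msg.Step,
		prs.Height, prs.Round, prs.Step) > 0
}
```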
-func (ps *PeerState) ApplyHasVoteMessage(msg *HasVoteMessage) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.PRS.Height != msg.Height { - return - } - - ps.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index) -} - -// ApplyVoteSetBitsMessage updates the peer state for the bit-array of votes -// it claims to have for the corresponding BlockID. -// `ourVotes` is a BitArray of votes we have for msg.BlockID -// NOTE: if ourVotes is nil (e.g. msg.Height < rs.Height), -// we conservatively overwrite ps's votes w/ msg.Votes. -func (ps *PeerState) ApplyVoteSetBitsMessage(msg *VoteSetBitsMessage, ourVotes *cmn.BitArray) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - votes := ps.getVoteBitArray(msg.Height, msg.Round, msg.Type) - if votes != nil { - if ourVotes == nil { - votes.Update(msg.Votes) - } else { - otherVotes := votes.Sub(ourVotes) - hasVotes := otherVotes.Or(msg.Votes) - votes.Update(hasVotes) - } - } -} - -// String returns a string representation of the PeerState -func (ps *PeerState) String() string { - return ps.StringIndented("") -} - -// StringIndented returns a string representation of the PeerState -func (ps *PeerState) StringIndented(indent string) string { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return fmt.Sprintf(`PeerState{ -%s Key %v -%s RoundState %v -%s Stats %v -%s}`, - indent, ps.peer.ID(), - indent, ps.PRS.StringIndented(indent+" "), - indent, ps.Stats, - indent) -} - -//----------------------------------------------------------------------------- -// Messages - -// ConsensusMessage is a message that can be sent and received on the ConsensusReactor -type ConsensusMessage interface{} - -func RegisterConsensusMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*ConsensusMessage)(nil), nil) - cdc.RegisterConcrete(&NewRoundStepMessage{}, "tendermint/NewRoundStepMessage", nil) - cdc.RegisterConcrete(&CommitStepMessage{}, "tendermint/CommitStep", nil) - cdc.RegisterConcrete(&ProposalMessage{}, "tendermint/Proposal", nil) - cdc.RegisterConcrete(&ProposalPOLMessage{}, "tendermint/ProposalPOL", nil) - cdc.RegisterConcrete(&BlockPartMessage{}, "tendermint/BlockPart", nil) - cdc.RegisterConcrete(&VoteMessage{}, "tendermint/Vote", nil) - cdc.RegisterConcrete(&HasVoteMessage{}, "tendermint/HasVote", nil) - cdc.RegisterConcrete(&VoteSetMaj23Message{}, "tendermint/VoteSetMaj23", nil) - cdc.RegisterConcrete(&VoteSetBitsMessage{}, "tendermint/VoteSetBits", nil) - cdc.RegisterConcrete(&ProposalHeartbeatMessage{}, "tendermint/ProposalHeartbeat", nil) -} - -// DecodeMessage decodes the given bytes into a ConsensusMessage. -func DecodeMessage(bz []byte) (msg ConsensusMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return -} - -//------------------------------------- - -// NewRoundStepMessage is sent for every step taken in the ConsensusState. -// For every height/round/step transition -type NewRoundStepMessage struct { - Height int64 - Round int - Step cstypes.RoundStepType - SecondsSinceStartTime int - LastCommitRound int -} - -// String returns a string representation. -func (m *NewRoundStepMessage) String() string { - return fmt.Sprintf("[NewRoundStep H:%v R:%v S:%v LCR:%v]", - m.Height, m.Round, m.Step, m.LastCommitRound) -} - -//------------------------------------- - -// CommitStepMessage is sent when a block is committed. 
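Before the commit-step type below, a note on the wire format shared by all of these messages: each travels as amino-encoded bytes keyed by the route names registered in RegisterConsensusMessages, and DecodeMessage recovers the concrete value through the ConsensusMessage interface. A minimal round-trip sketch using the package codec (the function is hypothetical):

```go
// roundTripExample is a hypothetical demonstration of the wire round-trip:
// marshal a concrete message with the package-level amino codec, then decode
// it back into a ConsensusMessage via DecodeMessage.
func roundTripExample() (ConsensusMessage, error) {
	msg := &HasVoteMessage{Height: 10, Round: 0, Type: types.VoteTypePrevote, Index: 3}
	bz := cdc.MustMarshalBinaryBare(msg)
	return DecodeMessage(bz) // yields *HasVoteMessage as a ConsensusMessage
}
```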
-type CommitStepMessage struct { - Height int64 - BlockPartsHeader types.PartSetHeader - BlockParts *cmn.BitArray -} - -// String returns a string representation. -func (m *CommitStepMessage) String() string { - return fmt.Sprintf("[CommitStep H:%v BP:%v BA:%v]", m.Height, m.BlockPartsHeader, m.BlockParts) -} - -//------------------------------------- - -// ProposalMessage is sent when a new block is proposed. -type ProposalMessage struct { - Proposal *types.Proposal -} - -// String returns a string representation. -func (m *ProposalMessage) String() string { - return fmt.Sprintf("[Proposal %v]", m.Proposal) -} - -//------------------------------------- - -// ProposalPOLMessage is sent when a previous proposal is re-proposed. -type ProposalPOLMessage struct { - Height int64 - ProposalPOLRound int - ProposalPOL *cmn.BitArray -} - -// String returns a string representation. -func (m *ProposalPOLMessage) String() string { - return fmt.Sprintf("[ProposalPOL H:%v POLR:%v POL:%v]", m.Height, m.ProposalPOLRound, m.ProposalPOL) -} - -//------------------------------------- - -// BlockPartMessage is sent when gossiping a piece of the proposed block. -type BlockPartMessage struct { - Height int64 - Round int - Part *types.Part -} - -// String returns a string representation. -func (m *BlockPartMessage) String() string { - return fmt.Sprintf("[BlockPart H:%v R:%v P:%v]", m.Height, m.Round, m.Part) -} - -//------------------------------------- - -// VoteMessage is sent when voting for a proposal (or lack thereof). -type VoteMessage struct { - Vote *types.Vote -} - -// String returns a string representation. -func (m *VoteMessage) String() string { - return fmt.Sprintf("[Vote %v]", m.Vote) -} - -//------------------------------------- - -// HasVoteMessage is sent to indicate that a particular vote has been received. -type HasVoteMessage struct { - Height int64 - Round int - Type byte - Index int -} - -// String returns a string representation. -func (m *HasVoteMessage) String() string { - return fmt.Sprintf("[HasVote VI:%v V:{%v/%02d/%v}]", m.Index, m.Height, m.Round, m.Type) -} - -//------------------------------------- - -// VoteSetMaj23Message is sent to indicate that a given BlockID has seen +2/3 votes. -type VoteSetMaj23Message struct { - Height int64 - Round int - Type byte - BlockID types.BlockID -} - -// String returns a string representation. -func (m *VoteSetMaj23Message) String() string { - return fmt.Sprintf("[VSM23 %v/%02d/%v %v]", m.Height, m.Round, m.Type, m.BlockID) -} - -//------------------------------------- - -// VoteSetBitsMessage is sent to communicate the bit-array of votes seen for the BlockID. -type VoteSetBitsMessage struct { - Height int64 - Round int - Type byte - BlockID types.BlockID - Votes *cmn.BitArray -} - -// String returns a string representation. -func (m *VoteSetBitsMessage) String() string { - return fmt.Sprintf("[VSB %v/%02d/%v %v %v]", m.Height, m.Round, m.Type, m.BlockID, m.Votes) -} - -//------------------------------------- - -// ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions for a proposal. -type ProposalHeartbeatMessage struct { - Heartbeat *types.Heartbeat -} - -// String returns a string representation. 
-func (m *ProposalHeartbeatMessage) String() string { - return fmt.Sprintf("[HEARTBEAT %v]", m.Heartbeat) -} diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go deleted file mode 100644 index 0d997119..00000000 --- a/consensus/reactor_test.go +++ /dev/null @@ -1,538 +0,0 @@ -package consensus - -import ( - "context" - "fmt" - "os" - "runtime" - "runtime/pprof" - "sync" - "testing" - "time" - - "github.com/tendermint/abci/example/kvstore" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - p2pdummy "github.com/tendermint/tendermint/p2p/dummy" - "github.com/tendermint/tendermint/types" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func init() { - config = ResetConfig("consensus_reactor_test") -} - -//---------------------------------------------- -// in-process testnets - -func startConsensusNet(t *testing.T, css []*ConsensusState, N int) ([]*ConsensusReactor, []chan interface{}, []*types.EventBus) { - reactors := make([]*ConsensusReactor, N) - eventChans := make([]chan interface{}, N) - eventBuses := make([]*types.EventBus, N) - for i := 0; i < N; i++ { - /*logger, err := tmflags.ParseLogLevel("consensus:info,*:error", logger, "info") - if err != nil { t.Fatal(err)}*/ - reactors[i] = NewConsensusReactor(css[i], true) // so we don't start the consensus states - reactors[i].SetLogger(css[i].Logger) - - // eventBus is already started with the cs - eventBuses[i] = css[i].eventBus - reactors[i].SetEventBus(eventBuses[i]) - - eventChans[i] = make(chan interface{}, 1) - err := eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, eventChans[i]) - require.NoError(t, err) - } - // make connected switches and start all reactors - p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("CONSENSUS", reactors[i]) - s.SetLogger(reactors[i].conS.Logger.With("module", "p2p")) - return s - }, p2p.Connect2Switches) - - // now that everyone is connected, start the state machines - // If we started the state machines before everyone was connected, - // we'd block when the cs fires NewBlockEvent and the peers are trying to start their reactors - // TODO: is this still true with new pubsub? 
- for i := 0; i < N; i++ { - s := reactors[i].conS.GetState() - reactors[i].SwitchToConsensus(s, 0) - } - return reactors, eventChans, eventBuses -} - -func stopConsensusNet(logger log.Logger, reactors []*ConsensusReactor, eventBuses []*types.EventBus) { - logger.Info("stopConsensusNet", "n", len(reactors)) - for i, r := range reactors { - logger.Info("stopConsensusNet: Stopping ConsensusReactor", "i", i) - r.Switch.Stop() - } - for i, b := range eventBuses { - logger.Info("stopConsensusNet: Stopping eventBus", "i", i) - b.Stop() - } - logger.Info("stopConsensusNet: DONE", "n", len(reactors)) -} - -// Ensure a testnet makes blocks -func TestReactorBasic(t *testing.T) { - N := 4 - css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) - reactors, eventChans, eventBuses := startConsensusNet(t, css, N) - defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) - // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { - <-eventChans[j] - }, css) -} - -// Ensure a testnet sends proposal heartbeats and makes blocks when there are txs -func TestReactorProposalHeartbeats(t *testing.T) { - N := 4 - css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter, - func(c *cfg.Config) { - c.Consensus.CreateEmptyBlocks = false - }) - reactors, eventChans, eventBuses := startConsensusNet(t, css, N) - defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) - heartbeatChans := make([]chan interface{}, N) - var err error - for i := 0; i < N; i++ { - heartbeatChans[i] = make(chan interface{}, 1) - err = eventBuses[i].Subscribe(context.Background(), testSubscriber, types.EventQueryProposalHeartbeat, heartbeatChans[i]) - require.NoError(t, err) - } - // wait till everyone sends a proposal heartbeat - timeoutWaitGroup(t, N, func(j int) { - <-heartbeatChans[j] - }, css) - - // send a tx - if err := css[3].mempool.CheckTx([]byte{1, 2, 3}, nil); err != nil { - //t.Fatal(err) - } - - // wait till everyone makes the first new block - timeoutWaitGroup(t, N, func(j int) { - <-eventChans[j] - }, css) -} - -// Test we record block parts from other peers -func TestReactorRecordsBlockParts(t *testing.T) { - // create dummy peer - peer := p2pdummy.NewPeer() - ps := NewPeerState(peer).SetLogger(log.TestingLogger()) - peer.Set(types.PeerStateKey, ps) - - // create reactor - css := randConsensusNet(1, "consensus_reactor_records_block_parts_test", newMockTickerFunc(true), newPersistentKVStore) - reactor := NewConsensusReactor(css[0], false) // so we don't start the consensus states - reactor.SetEventBus(css[0].eventBus) - reactor.SetLogger(log.TestingLogger()) - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) - reactor.SetSwitch(sw) - err := reactor.Start() - require.NoError(t, err) - defer reactor.Stop() - - // 1) new block part - parts := types.NewPartSetFromData(cmn.RandBytes(100), 10) - msg := &BlockPartMessage{ - Height: 2, - Round: 0, - Part: parts.GetPart(0), - } - bz, err := cdc.MarshalBinaryBare(msg) - require.NoError(t, err) - - reactor.Receive(DataChannel, peer, bz) - require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should have increased by 1") - - // 2) block part with the same height, but different round - msg.Round = 1 - - bz, err = cdc.MarshalBinaryBare(msg) - require.NoError(t, err) - - reactor.Receive(DataChannel, peer, bz) - require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should 
stay the same") - - // 3) block part from earlier height - msg.Height = 1 - msg.Round = 0 - - bz, err = cdc.MarshalBinaryBare(msg) - require.NoError(t, err) - - reactor.Receive(DataChannel, peer, bz) - require.Equal(t, 1, ps.BlockPartsSent(), "number of block parts sent should stay the same") -} - -// Test we record votes from other peers -func TestReactorRecordsVotes(t *testing.T) { - // create dummy peer - peer := p2pdummy.NewPeer() - ps := NewPeerState(peer).SetLogger(log.TestingLogger()) - peer.Set(types.PeerStateKey, ps) - - // create reactor - css := randConsensusNet(1, "consensus_reactor_records_votes_test", newMockTickerFunc(true), newPersistentKVStore) - reactor := NewConsensusReactor(css[0], false) // so we dont start the consensus states - reactor.SetEventBus(css[0].eventBus) - reactor.SetLogger(log.TestingLogger()) - sw := p2p.MakeSwitch(cfg.DefaultP2PConfig(), 1, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) - reactor.SetSwitch(sw) - err := reactor.Start() - require.NoError(t, err) - defer reactor.Stop() - _, val := css[0].state.Validators.GetByIndex(0) - - // 1) new vote - vote := &types.Vote{ - ValidatorIndex: 0, - ValidatorAddress: val.Address, - Height: 2, - Round: 0, - Timestamp: time.Now().UTC(), - Type: types.VoteTypePrevote, - BlockID: types.BlockID{}, - } - bz, err := cdc.MarshalBinaryBare(&VoteMessage{vote}) - require.NoError(t, err) - - reactor.Receive(VoteChannel, peer, bz) - assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should have increased by 1") - - // 2) vote with the same height, but different round - vote.Round = 1 - - bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote}) - require.NoError(t, err) - - reactor.Receive(VoteChannel, peer, bz) - assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same") - - // 3) vote from earlier height - vote.Height = 1 - vote.Round = 0 - - bz, err = cdc.MarshalBinaryBare(&VoteMessage{vote}) - require.NoError(t, err) - - reactor.Receive(VoteChannel, peer, bz) - assert.Equal(t, 1, ps.VotesSent(), "number of votes sent should stay the same") -} - -//------------------------------------------------------------- -// ensure we can make blocks despite cycling a validator set - -func TestReactorVotingPowerChange(t *testing.T) { - nVals := 4 - logger := log.TestingLogger() - css := randConsensusNet(nVals, "consensus_voting_power_changes_test", newMockTickerFunc(true), newPersistentKVStore) - reactors, eventChans, eventBuses := startConsensusNet(t, css, nVals) - defer stopConsensusNet(logger, reactors, eventBuses) - - // map of active validators - activeVals := make(map[string]struct{}) - for i := 0; i < nVals; i++ { - activeVals[string(css[i].privValidator.GetAddress())] = struct{}{} - } - - // wait till everyone makes block 1 - timeoutWaitGroup(t, nVals, func(j int) { - <-eventChans[j] - }, css) - - //--------------------------------------------------------------------------- - logger.Debug("---------------------------- Testing changing the voting power of one validator a few times") - - val1PubKey := css[0].privValidator.GetPubKey() - val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey) - updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) - previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() - - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) - waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx) - waitForAndValidateBlock(t, nVals, activeVals, eventChans, 
css) - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) - - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { - t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) - } - - updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) - previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() - - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) - waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx) - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) - - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { - t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) - } - - updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) - previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() - - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css, updateValidatorTx) - waitForAndValidateBlockWithTx(t, nVals, activeVals, eventChans, css, updateValidatorTx) - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) - waitForAndValidateBlock(t, nVals, activeVals, eventChans, css) - - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { - t.Fatalf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[0].GetRoundState().LastValidators.TotalVotingPower()) - } -} - -func TestReactorValidatorSetChanges(t *testing.T) { - nPeers := 7 - nVals := 4 - css := randConsensusNetWithPeers(nVals, nPeers, "consensus_val_set_changes_test", newMockTickerFunc(true), newPersistentKVStore) - - logger := log.TestingLogger() - - reactors, eventChans, eventBuses := startConsensusNet(t, css, nPeers) - defer stopConsensusNet(logger, reactors, eventBuses) - - // map of active validators - activeVals := make(map[string]struct{}) - for i := 0; i < nVals; i++ { - activeVals[string(css[i].privValidator.GetAddress())] = struct{}{} - } - - // wait till everyone makes block 1 - timeoutWaitGroup(t, nPeers, func(j int) { - <-eventChans[j] - }, css) - - //--------------------------------------------------------------------------- - logger.Info("---------------------------- Testing adding one validator") - - newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() - valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) - newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - - // wait till everyone makes block 2 - // ensure the commit includes all validators - // send newValTx to change vals in block 3 - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx1) - - // wait till everyone makes block 3. - // it includes the commit for block 2, which is by the original validator set - waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx1) - - // wait till everyone makes block 4. 
- // it includes the commit for block 3, which is by the original validator set - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) - - // the commits for block 4 should be with the updated validator set - activeVals[string(newValidatorPubKey1.Address())] = struct{}{} - - // wait till everyone makes block 5 - // it includes the commit for block 4, which should have the updated validator set - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) - - //--------------------------------------------------------------------------- - logger.Info("---------------------------- Testing changing the voting power of one validator") - - updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() - updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) - updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() - - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, updateValidatorTx1) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, updateValidatorTx1) - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) - - if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { - t.Errorf("expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, css[nVals].GetRoundState().LastValidators.TotalVotingPower()) - } - - //--------------------------------------------------------------------------- - logger.Info("---------------------------- Testing adding two validators at once") - - newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() - newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) - newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - - newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() - newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) - newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) - - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, newValidatorTx2, newValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) - activeVals[string(newValidatorPubKey2.Address())] = struct{}{} - activeVals[string(newValidatorPubKey3.Address())] = struct{}{} - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) - - //--------------------------------------------------------------------------- - logger.Info("---------------------------- Testing removing two validators at once") - - removeValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, 0) - removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlockWithTx(t, nPeers, activeVals, eventChans, css, removeValidatorTx2, removeValidatorTx3) - waitForAndValidateBlock(t, nPeers, activeVals, eventChans, css) - delete(activeVals, string(newValidatorPubKey2.Address())) - delete(activeVals, string(newValidatorPubKey3.Address())) - waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, eventChans, css) -} - -// Check we can make blocks with skip_timeout_commit=false -func TestReactorWithTimeoutCommit(t *testing.T) { - N := 4 - css := randConsensusNet(N, 
"consensus_reactor_with_timeout_commit_test", newMockTickerFunc(false), newCounter) - // override default SkipTimeoutCommit == true for tests - for i := 0; i < N; i++ { - css[i].config.SkipTimeoutCommit = false - } - - reactors, eventChans, eventBuses := startConsensusNet(t, css, N-1) - defer stopConsensusNet(log.TestingLogger(), reactors, eventBuses) - - // wait till everyone makes the first new block - timeoutWaitGroup(t, N-1, func(j int) { - <-eventChans[j] - }, css) -} - -func waitForAndValidateBlock(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) { - timeoutWaitGroup(t, n, func(j int) { - css[j].Logger.Debug("waitForAndValidateBlock") - newBlockI, ok := <-eventChans[j] - if !ok { - return - } - newBlock := newBlockI.(types.EventDataNewBlock).Block - css[j].Logger.Debug("waitForAndValidateBlock: Got block", "height", newBlock.Height) - err := validateBlock(newBlock, activeVals) - assert.Nil(t, err) - for _, tx := range txs { - css[j].mempool.CheckTx(tx, nil) - assert.Nil(t, err) - } - }, css) -} - -func waitForAndValidateBlockWithTx(t *testing.T, n int, activeVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState, txs ...[]byte) { - timeoutWaitGroup(t, n, func(j int) { - ntxs := 0 - BLOCK_TX_LOOP: - for { - css[j].Logger.Debug("waitForAndValidateBlockWithTx", "ntxs", ntxs) - newBlockI, ok := <-eventChans[j] - if !ok { - return - } - newBlock := newBlockI.(types.EventDataNewBlock).Block - css[j].Logger.Debug("waitForAndValidateBlockWithTx: Got block", "height", newBlock.Height) - err := validateBlock(newBlock, activeVals) - assert.Nil(t, err) - - // check that txs match the txs we're waiting for. - // note they could be spread over multiple blocks, - // but they should be in order. - for _, tx := range newBlock.Data.Txs { - assert.EqualValues(t, txs[ntxs], tx) - ntxs++ - } - - if ntxs == len(txs) { - break BLOCK_TX_LOOP - } - } - - }, css) -} - -func waitForBlockWithUpdatedValsAndValidateIt(t *testing.T, n int, updatedVals map[string]struct{}, eventChans []chan interface{}, css []*ConsensusState) { - timeoutWaitGroup(t, n, func(j int) { - - var newBlock *types.Block - LOOP: - for { - css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt") - newBlockI, ok := <-eventChans[j] - if !ok { - return - } - newBlock = newBlockI.(types.EventDataNewBlock).Block - if newBlock.LastCommit.Size() == len(updatedVals) { - css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block", "height", newBlock.Height) - break LOOP - } else { - css[j].Logger.Debug("waitForBlockWithUpdatedValsAndValidateIt: Got block with no new validators. Skipping", "height", newBlock.Height) - } - } - - err := validateBlock(newBlock, updatedVals) - assert.Nil(t, err) - }, css) -} - -// expects high synchrony! -func validateBlock(block *types.Block, activeVals map[string]struct{}) error { - if block.LastCommit.Size() != len(activeVals) { - return fmt.Errorf("Commit size doesn't match number of active validators. 
Got %d, expected %d", block.LastCommit.Size(), len(activeVals)) - } - - for _, vote := range block.LastCommit.Precommits { - if _, ok := activeVals[string(vote.ValidatorAddress)]; !ok { - return fmt.Errorf("Found vote for unactive validator %X", vote.ValidatorAddress) - } - } - return nil -} - -func timeoutWaitGroup(t *testing.T, n int, f func(int), css []*ConsensusState) { - wg := new(sync.WaitGroup) - wg.Add(n) - for i := 0; i < n; i++ { - go func(j int) { - f(j) - wg.Done() - }(i) - } - - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - // we're running many nodes in-process, possibly in in a virtual machine, - // and spewing debug messages - making a block could take a while, - timeout := time.Second * 300 - - select { - case <-done: - case <-time.After(timeout): - for i, cs := range css { - t.Log("#################") - t.Log("Validator", i) - t.Log(cs.GetRoundState()) - t.Log("") - } - os.Stdout.Write([]byte("pprof.Lookup('goroutine'):\n")) - pprof.Lookup("goroutine").WriteTo(os.Stdout, 1) - capture() - panic("Timed out waiting for all validators to commit a block") - } -} - -func capture() { - trace := make([]byte, 10240000) - count := runtime.Stack(trace, true) - fmt.Printf("Stack of %d bytes: %s\n", count, trace) -} diff --git a/consensus/replay.go b/consensus/replay.go deleted file mode 100644 index 13ec9e40..00000000 --- a/consensus/replay.go +++ /dev/null @@ -1,469 +0,0 @@ -package consensus - -import ( - "bytes" - "fmt" - "hash/crc32" - "io" - "reflect" - //"strconv" - //"strings" - "time" - - abci "github.com/tendermint/abci/types" - //auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" -) - -var crc32c = crc32.MakeTable(crc32.Castagnoli) - -// Functionality to replay blocks and messages on recovery from a crash. -// There are two general failure scenarios: -// -// 1. failure during consensus -// 2. failure while applying the block -// -// The former is handled by the WAL, the latter by the proxyApp Handshake on -// restart, which ultimately hands off the work to the WAL. - -//----------------------------------------- -// 1. Recover from failure during consensus -// (by replaying messages from the WAL) -//----------------------------------------- - -// Unmarshal and apply a single message to the consensus state as if it were -// received in receiveRoutine. Lines that start with "#" are ignored. -// NOTE: receiveRoutine should not be running. -func (cs *ConsensusState) readReplayMessage(msg *TimedWALMessage, newStepCh chan interface{}) error { - // Skip meta messages which exist for demarcating boundaries. - if _, ok := msg.Msg.(EndHeightMessage); ok { - return nil - } - - // for logging - switch m := msg.Msg.(type) { - case types.EventDataRoundState: - cs.Logger.Info("Replay: New Step", "height", m.Height, "round", m.Round, "step", m.Step) - // these are playback checks - ticker := time.After(time.Second * 2) - if newStepCh != nil { - select { - case mi := <-newStepCh: - m2 := mi.(types.EventDataRoundState) - if m.Height != m2.Height || m.Round != m2.Round || m.Step != m2.Step { - return fmt.Errorf("RoundState mismatch. 
Got %v; Expected %v", m2, m) - } - case <-ticker: - return fmt.Errorf("Failed to read off newStepCh") - } - } - case msgInfo: - peerID := m.PeerID - if peerID == "" { - peerID = "local" - } - switch msg := m.Msg.(type) { - case *ProposalMessage: - p := msg.Proposal - cs.Logger.Info("Replay: Proposal", "height", p.Height, "round", p.Round, "header", - p.BlockPartsHeader, "pol", p.POLRound, "peer", peerID) - case *BlockPartMessage: - cs.Logger.Info("Replay: BlockPart", "height", msg.Height, "round", msg.Round, "peer", peerID) - case *VoteMessage: - v := msg.Vote - cs.Logger.Info("Replay: Vote", "height", v.Height, "round", v.Round, "type", v.Type, - "blockID", v.BlockID, "peer", peerID) - } - - cs.handleMsg(m) - case timeoutInfo: - cs.Logger.Info("Replay: Timeout", "height", m.Height, "round", m.Round, "step", m.Step, "dur", m.Duration) - cs.handleTimeout(m, cs.RoundState) - default: - return fmt.Errorf("Replay: Unknown TimedWALMessage type: %v", reflect.TypeOf(msg.Msg)) - } - return nil -} - -// Replay only those messages since the last block. `timeoutRoutine` should -// run concurrently to read off tickChan. -func (cs *ConsensusState) catchupReplay(csHeight int64) error { - - // Set replayMode to true so we don't log signing errors. - cs.replayMode = true - defer func() { cs.replayMode = false }() - - // Ensure that #ENDHEIGHT for this height doesn't exist. - // NOTE: This is just a sanity check. As far as we know things work fine - // without it, and Handshake could reuse ConsensusState if it weren't for - // this check (since we can crash after writing #ENDHEIGHT). - // - // Ignore data corruption errors since this is a sanity check. - gr, found, err := cs.wal.SearchForEndHeight(csHeight, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) - if err != nil { - return err - } - if gr != nil { - if err := gr.Close(); err != nil { - return err - } - } - if found { - return fmt.Errorf("WAL should not contain #ENDHEIGHT %d", csHeight) - } - - // Search for last height marker. - // - // Ignore data corruption errors in previous heights because we only care about last height - gr, found, err = cs.wal.SearchForEndHeight(csHeight-1, &WALSearchOptions{IgnoreDataCorruptionErrors: true}) - if err == io.EOF { - cs.Logger.Error("Replay: wal.group.Search returned EOF", "#ENDHEIGHT", csHeight-1) - } else if err != nil { - return err - } - if !found { - return fmt.Errorf("Cannot replay height %d. 
-//--------------------------------------------------------------------------------
-
-// Parses marker lines of the form:
-// #ENDHEIGHT: 12345
-/*
-func makeHeightSearchFunc(height int64) auto.SearchFunc {
-	return func(line string) (int, error) {
-		line = strings.TrimRight(line, "\n")
-		parts := strings.Split(line, " ")
-		if len(parts) != 2 {
-			return -1, errors.New("Line did not have 2 parts")
-		}
-		i, err := strconv.Atoi(parts[1])
-		if err != nil {
-			return -1, errors.New("Failed to parse INFO: " + err.Error())
-		}
-		if height < i {
-			return 1, nil
-		} else if height == i {
-			return 0, nil
-		} else {
-			return -1, nil
-		}
-	}
-}*/
-
-//---------------------------------------------------
-// 2. Recover from failure while applying the block.
-// (by handshaking with the app to figure out where
-// we were last, and using the WAL to recover there.)
-//---------------------------------------------------
-
-type Handshaker struct {
-	stateDB      dbm.DB
-	initialState sm.State
-	store        sm.BlockStore
-	genDoc       *types.GenesisDoc
-	logger       log.Logger
-
-	nBlocks int // number of blocks applied to the state
-}
-
-func NewHandshaker(stateDB dbm.DB, state sm.State,
-	store sm.BlockStore, genDoc *types.GenesisDoc) *Handshaker {
-
-	return &Handshaker{
-		stateDB:      stateDB,
-		initialState: state,
-		store:        store,
-		genDoc:       genDoc,
-		logger:       log.NewNopLogger(),
-		nBlocks:      0,
-	}
-}
-
-func (h *Handshaker) SetLogger(l log.Logger) {
-	h.logger = l
-}
-
-func (h *Handshaker) NBlocks() int {
-	return h.nBlocks
-}
-
-// TODO: retry the handshake/replay if it fails ?
-func (h *Handshaker) Handshake(proxyApp proxy.AppConns) error {
-
-	// Handshake is done via ABCI Info on the query conn.
-	res, err := proxyApp.Query().InfoSync(abci.RequestInfo{version.Version})
-	if err != nil {
-		return fmt.Errorf("Error calling Info: %v", err)
-	}
-
-	blockHeight := int64(res.LastBlockHeight)
-	if blockHeight < 0 {
-		return fmt.Errorf("Got a negative last block height (%d) from the app", blockHeight)
-	}
-	appHash := res.LastBlockAppHash
-
-	h.logger.Info("ABCI Handshake", "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash))
-
-	// TODO: check app version.
-
-	// Replay blocks up to the latest in the blockstore.
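-	// NOTE (added commentary): ReplayBlocks reconciles three heights: the
-	// app's (from the Info response above), the block store's, and the saved
-	// state's. An app height of 0 is treated as a brand new app, and
-	// InitChain is sent before any blocks are replayed.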
- _, err = h.ReplayBlocks(h.initialState, appHash, blockHeight, proxyApp) - if err != nil { - return fmt.Errorf("Error on replay: %v", err) - } - - h.logger.Info("Completed ABCI Handshake - Tendermint and App are synced", - "appHeight", blockHeight, "appHash", fmt.Sprintf("%X", appHash)) - - // TODO: (on restart) replay mempool - - return nil -} - -// Replay all blocks since appBlockHeight and ensure the result matches the current state. -// Returns the final AppHash or an error. -func (h *Handshaker) ReplayBlocks(state sm.State, appHash []byte, appBlockHeight int64, proxyApp proxy.AppConns) ([]byte, error) { - - storeBlockHeight := h.store.Height() - stateBlockHeight := state.LastBlockHeight - h.logger.Info("ABCI Replay Blocks", "appHeight", appBlockHeight, "storeHeight", storeBlockHeight, "stateHeight", stateBlockHeight) - - // If appBlockHeight == 0 it means that we are at genesis and hence should send InitChain - if appBlockHeight == 0 { - validators := types.TM2PB.Validators(state.Validators) - csParams := types.TM2PB.ConsensusParams(h.genDoc.ConsensusParams) - req := abci.RequestInitChain{ - Time: h.genDoc.GenesisTime.Unix(), // TODO - ChainId: h.genDoc.ChainID, - ConsensusParams: csParams, - Validators: validators, - AppStateBytes: h.genDoc.AppStateJSON, - } - res, err := proxyApp.Consensus().InitChainSync(req) - if err != nil { - return nil, err - } - - // if the app returned validators - // or consensus params, update the state - // with the them - if len(res.Validators) > 0 { - vals, err := types.PB2TM.Validators(res.Validators) - if err != nil { - return nil, err - } - state.Validators = types.NewValidatorSet(vals) - } - if res.ConsensusParams != nil { - state.ConsensusParams = types.PB2TM.ConsensusParams(res.ConsensusParams) - } - sm.SaveState(h.stateDB, state) - } - - // First handle edge cases and constraints on the storeBlockHeight - if storeBlockHeight == 0 { - return appHash, checkAppHash(state, appHash) - - } else if storeBlockHeight < appBlockHeight { - // the app should never be ahead of the store (but this is under app's control) - return appHash, sm.ErrAppBlockHeightTooHigh{storeBlockHeight, appBlockHeight} - - } else if storeBlockHeight < stateBlockHeight { - // the state should never be ahead of the store (this is under tendermint's control) - cmn.PanicSanity(cmn.Fmt("StateBlockHeight (%d) > StoreBlockHeight (%d)", stateBlockHeight, storeBlockHeight)) - - } else if storeBlockHeight > stateBlockHeight+1 { - // store should be at most one ahead of the state (this is under tendermint's control) - cmn.PanicSanity(cmn.Fmt("StoreBlockHeight (%d) > StateBlockHeight + 1 (%d)", storeBlockHeight, stateBlockHeight+1)) - } - - var err error - // Now either store is equal to state, or one ahead. - // For each, consider all cases of where the app could be, given app <= store - if storeBlockHeight == stateBlockHeight { - // Tendermint ran Commit and saved the state. - // Either the app is asking for replay, or we're all synced up. - if appBlockHeight < storeBlockHeight { - // the app is behind, so replay blocks, but no need to go through WAL (state is already synced to store) - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, false) - - } else if appBlockHeight == storeBlockHeight { - // We're good! - return appHash, checkAppHash(state, appHash) - } - - } else if storeBlockHeight == stateBlockHeight+1 { - // We saved the block in the store but haven't updated the state, - // so we'll need to replay a block using the WAL. 
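-		// NOTE (added commentary): three positions are possible for the app
-		// here, each handled by a branch below:
-		//   appBlockHeight < stateBlockHeight  -> replay from the store,
-		//                                         leaving the final block to the WAL
-		//   appBlockHeight == stateBlockHeight -> Commit never ran; replay the
-		//                                         last block against the real app
-		//   appBlockHeight == storeBlockHeight -> Commit ran but the state was
-		//                                         not saved; replay against a mock app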
- if appBlockHeight < stateBlockHeight { - // the app is further behind than it should be, so replay blocks - // but leave the last block to go through the WAL - return h.replayBlocks(state, proxyApp, appBlockHeight, storeBlockHeight, true) - - } else if appBlockHeight == stateBlockHeight { - // We haven't run Commit (both the state and app are one block behind), - // so replayBlock with the real app. - // NOTE: We could instead use the cs.WAL on cs.Start, - // but we'd have to allow the WAL to replay a block that wrote it's #ENDHEIGHT - h.logger.Info("Replay last block using real app") - state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) - return state.AppHash, err - - } else if appBlockHeight == storeBlockHeight { - // We ran Commit, but didn't save the state, so replayBlock with mock app - abciResponses, err := sm.LoadABCIResponses(h.stateDB, storeBlockHeight) - if err != nil { - return nil, err - } - mockApp := newMockProxyApp(appHash, abciResponses) - h.logger.Info("Replay last block using mock app") - state, err = h.replayBlock(state, storeBlockHeight, mockApp) - return state.AppHash, err - } - - } - - cmn.PanicSanity("Should never happen") - return nil, nil -} - -func (h *Handshaker) replayBlocks(state sm.State, proxyApp proxy.AppConns, appBlockHeight, storeBlockHeight int64, mutateState bool) ([]byte, error) { - // App is further behind than it should be, so we need to replay blocks. - // We replay all blocks from appBlockHeight+1. - // - // Note that we don't have an old version of the state, - // so we by-pass state validation/mutation using sm.ExecCommitBlock. - // This also means we won't be saving validator sets if they change during this period. - // TODO: Load the historical information to fix this and just use state.ApplyBlock - // - // If mutateState == true, the final block is replayed with h.replayBlock() - - var appHash []byte - var err error - finalBlock := storeBlockHeight - if mutateState { - finalBlock-- - } - for i := appBlockHeight + 1; i <= finalBlock; i++ { - h.logger.Info("Applying block", "height", i) - block := h.store.LoadBlock(i) - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, state.LastValidators, h.stateDB) - if err != nil { - return nil, err - } - - h.nBlocks++ - } - - if mutateState { - // sync the final block - state, err = h.replayBlock(state, storeBlockHeight, proxyApp.Consensus()) - if err != nil { - return nil, err - } - appHash = state.AppHash - } - - return appHash, checkAppHash(state, appHash) -} - -// ApplyBlock on the proxyApp with the last block. -func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.AppConnConsensus) (sm.State, error) { - block := h.store.LoadBlock(height) - meta := h.store.LoadBlockMeta(height) - - blockExec := sm.NewBlockExecutor(h.stateDB, h.logger, proxyApp, sm.MockMempool{}, sm.MockEvidencePool{}) - - var err error - state, err = blockExec.ApplyBlock(state, meta.BlockID, block) - if err != nil { - return sm.State{}, err - } - - h.nBlocks++ - - return state, nil -} - -func checkAppHash(state sm.State, appHash []byte) error { - if !bytes.Equal(state.AppHash, appHash) { - panic(fmt.Errorf("Tendermint state.AppHash does not match AppHash after replay. 
Got %X, expected %X", appHash, state.AppHash).Error()) - } - return nil -} - -//-------------------------------------------------------------------------------- -// mockProxyApp uses ABCIResponses to give the right results -// Useful because we don't want to call Commit() twice for the same block on the real app. - -func newMockProxyApp(appHash []byte, abciResponses *sm.ABCIResponses) proxy.AppConnConsensus { - clientCreator := proxy.NewLocalClientCreator(&mockProxyApp{ - appHash: appHash, - abciResponses: abciResponses, - }) - cli, _ := clientCreator.NewABCIClient() - err := cli.Start() - if err != nil { - panic(err) - } - return proxy.NewAppConnConsensus(cli) -} - -type mockProxyApp struct { - abci.BaseApplication - - appHash []byte - txCount int - abciResponses *sm.ABCIResponses -} - -func (mock *mockProxyApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { - r := mock.abciResponses.DeliverTx[mock.txCount] - mock.txCount++ - return *r -} - -func (mock *mockProxyApp) EndBlock(req abci.RequestEndBlock) abci.ResponseEndBlock { - mock.txCount = 0 - return *mock.abciResponses.EndBlock -} - -func (mock *mockProxyApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{Data: mock.appHash} -} diff --git a/consensus/replay_file.go b/consensus/replay_file.go deleted file mode 100644 index 57204b01..00000000 --- a/consensus/replay_file.go +++ /dev/null @@ -1,321 +0,0 @@ -package consensus - -import ( - "bufio" - "context" - "fmt" - "io" - "os" - "strconv" - "strings" - - "github.com/pkg/errors" - - bc "github.com/tendermint/tendermint/blockchain" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" -) - -const ( - // event bus subscriber - subscriber = "replay-file" -) - -//-------------------------------------------------------- -// replay messages interactively or all at once - -// replay the wal file -func RunReplayFile(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig, console bool) { - consensusState := newConsensusStateForReplay(config, csConfig) - - if err := consensusState.ReplayFile(csConfig.WalFile(), console); err != nil { - cmn.Exit(cmn.Fmt("Error during consensus replay: %v", err)) - } -} - -// Replay msgs in file or start the console -func (cs *ConsensusState) ReplayFile(file string, console bool) error { - - if cs.IsRunning() { - return errors.New("cs is already running, cannot replay") - } - if cs.wal != nil { - return errors.New("cs wal is open, cannot replay") - } - - cs.startForReplay() - - // ensure all new step events are regenerated as expected - newStepCh := make(chan interface{}, 1) - - ctx := context.Background() - err := cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh) - if err != nil { - return errors.Errorf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep) - } - defer cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) - - // just open the file for reading, no need to use wal - fp, err := os.OpenFile(file, os.O_RDONLY, 0600) - if err != nil { - return err - } - - pb := newPlayback(file, fp, cs, cs.state.Copy()) - defer pb.fp.Close() // nolint: errcheck - - var nextN int // apply N msgs in a row - var msg *TimedWALMessage - for { - if nextN == 0 && console { - nextN = pb.replayConsoleLoop() - } - - msg, err = pb.dec.Decode() - if err == io.EOF { 
- return nil - } else if err != nil { - return err - } - - if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { - return err - } - - if nextN > 0 { - nextN-- - } - pb.count++ - } - return nil -} - -//------------------------------------------------ -// playback manager - -type playback struct { - cs *ConsensusState - - fp *os.File - dec *WALDecoder - count int // how many lines/msgs into the file are we - - // replays can be reset to beginning - fileName string // so we can close/reopen the file - genesisState sm.State // so the replay session knows where to restart from -} - -func newPlayback(fileName string, fp *os.File, cs *ConsensusState, genState sm.State) *playback { - return &playback{ - cs: cs, - fp: fp, - fileName: fileName, - genesisState: genState, - dec: NewWALDecoder(fp), - } -} - -// go back count steps by resetting the state and running (pb.count - count) steps -func (pb *playback) replayReset(count int, newStepCh chan interface{}) error { - pb.cs.Stop() - pb.cs.Wait() - - newCS := NewConsensusState(pb.cs.config, pb.genesisState.Copy(), pb.cs.blockExec, - pb.cs.blockStore, pb.cs.mempool, pb.cs.evpool) - newCS.SetEventBus(pb.cs.eventBus) - newCS.startForReplay() - - if err := pb.fp.Close(); err != nil { - return err - } - fp, err := os.OpenFile(pb.fileName, os.O_RDONLY, 0600) - if err != nil { - return err - } - pb.fp = fp - pb.dec = NewWALDecoder(fp) - count = pb.count - count - fmt.Printf("Reseting from %d to %d\n", pb.count, count) - pb.count = 0 - pb.cs = newCS - var msg *TimedWALMessage - for i := 0; i < count; i++ { - msg, err = pb.dec.Decode() - if err == io.EOF { - return nil - } else if err != nil { - return err - } - if err := pb.cs.readReplayMessage(msg, newStepCh); err != nil { - return err - } - pb.count++ - } - return nil -} - -func (cs *ConsensusState) startForReplay() { - cs.Logger.Error("Replay commands are disabled until someone updates them and writes tests") - /* TODO:! 
- // since we replay tocks we just ignore ticks - go func() { - for { - select { - case <-cs.tickChan: - case <-cs.Quit: - return - } - } - }()*/ -} - -// console function for parsing input and running commands -func (pb *playback) replayConsoleLoop() int { - for { - fmt.Printf("> ") - bufReader := bufio.NewReader(os.Stdin) - line, more, err := bufReader.ReadLine() - if more { - cmn.Exit("input is too long") - } else if err != nil { - cmn.Exit(err.Error()) - } - - tokens := strings.Split(string(line), " ") - if len(tokens) == 0 { - continue - } - - switch tokens[0] { - case "next": - // "next" -> replay next message - // "next N" -> replay next N messages - - if len(tokens) == 1 { - return 0 - } - i, err := strconv.Atoi(tokens[1]) - if err != nil { - fmt.Println("next takes an integer argument") - } else { - return i - } - - case "back": - // "back" -> go back one message - // "back N" -> go back N messages - - // NOTE: "back" is not supported in the state machine design, - // so we restart and replay up to - - ctx := context.Background() - // ensure all new step events are regenerated as expected - newStepCh := make(chan interface{}, 1) - - err := pb.cs.eventBus.Subscribe(ctx, subscriber, types.EventQueryNewRoundStep, newStepCh) - if err != nil { - cmn.Exit(fmt.Sprintf("failed to subscribe %s to %v", subscriber, types.EventQueryNewRoundStep)) - } - defer pb.cs.eventBus.Unsubscribe(ctx, subscriber, types.EventQueryNewRoundStep) - - if len(tokens) == 1 { - if err := pb.replayReset(1, newStepCh); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) - } - } else { - i, err := strconv.Atoi(tokens[1]) - if err != nil { - fmt.Println("back takes an integer argument") - } else if i > pb.count { - fmt.Printf("argument to back must not be larger than the current count (%d)\n", pb.count) - } else { - if err := pb.replayReset(i, newStepCh); err != nil { - pb.cs.Logger.Error("Replay reset error", "err", err) - } - } - } - - case "rs": - // "rs" -> print entire round state - // "rs short" -> print height/round/step - // "rs " -> print another field of the round state - - rs := pb.cs.RoundState - if len(tokens) == 1 { - fmt.Println(rs) - } else { - switch tokens[1] { - case "short": - fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step) - case "validators": - fmt.Println(rs.Validators) - case "proposal": - fmt.Println(rs.Proposal) - case "proposal_block": - fmt.Printf("%v %v\n", rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort()) - case "locked_round": - fmt.Println(rs.LockedRound) - case "locked_block": - fmt.Printf("%v %v\n", rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort()) - case "votes": - fmt.Println(rs.Votes.StringIndented(" ")) - - default: - fmt.Println("Unknown option", tokens[1]) - } - } - case "n": - fmt.Println(pb.count) - } - } - return 0 -} - -//-------------------------------------------------------------------------------- - -// convenience for replay mode -func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusConfig) *ConsensusState { - dbType := dbm.DBBackendType(config.DBBackend) - // Get BlockStore - blockStoreDB := dbm.NewDB("blockstore", dbType, config.DBDir()) - blockStore := bc.NewBlockStore(blockStoreDB) - - // Get State - stateDB := dbm.NewDB("state", dbType, config.DBDir()) - gdoc, err := sm.MakeGenesisDocFromFile(config.GenesisFile()) - if err != nil { - cmn.Exit(err.Error()) - } - state, err := sm.MakeGenesisState(gdoc) - if err != nil { - cmn.Exit(err.Error()) - } - - // Create proxyAppConn connection 
(consensus, mempool, query) - clientCreator := proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()) - proxyApp := proxy.NewAppConns(clientCreator, - NewHandshaker(stateDB, state, blockStore, gdoc)) - err = proxyApp.Start() - if err != nil { - cmn.Exit(cmn.Fmt("Error starting proxy app conns: %v", err)) - } - - eventBus := types.NewEventBus() - if err := eventBus.Start(); err != nil { - cmn.Exit(cmn.Fmt("Failed to start event bus: %v", err)) - } - - mempool, evpool := sm.MockMempool{}, sm.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) - - consensusState := NewConsensusState(csConfig, state.Copy(), blockExec, - blockStore, mempool, evpool) - - consensusState.SetEventBus(eventBus) - return consensusState -} diff --git a/consensus/replay_test.go b/consensus/replay_test.go deleted file mode 100644 index 725568ed..00000000 --- a/consensus/replay_test.go +++ /dev/null @@ -1,687 +0,0 @@ -package consensus - -import ( - "bytes" - "context" - "errors" - "fmt" - "io" - "io/ioutil" - "os" - "path" - "runtime" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/abci/example/kvstore" - abci "github.com/tendermint/abci/types" - crypto "github.com/tendermint/go-crypto" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tmlibs/log" -) - -var consensusReplayConfig *cfg.Config - -func init() { - consensusReplayConfig = ResetConfig("consensus_replay_test") -} - -// These tests ensure we can always recover from failure at any part of the consensus process. -// There are two general failure scenarios: failure during consensus, and failure while applying the block. -// Only the latter interacts with the app and store, -// but the former has to deal with restrictions on re-use of priv_validator keys. -// The `WAL Tests` are for failures during the consensus; -// the `Handshake Tests` are for failures in applying the block. -// With the help of the WAL, we can recover from it all! - -//------------------------------------------------------------------------------------------ -// WAL Tests - -// TODO: It would be better to verify explicitly which states we can recover from without the wal -// and which ones we need the wal for - then we'd also be able to only flush the -// wal writer when we need to, instead of with every message. - -func startNewConsensusStateAndWaitForBlock(t *testing.T, lastBlockHeight int64, blockDB dbm.DB, stateDB dbm.DB) { - logger := log.TestingLogger() - state, _ := sm.LoadStateFromDBOrGenesisFile(stateDB, consensusReplayConfig.GenesisFile()) - privValidator := loadPrivValidator(consensusReplayConfig) - cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB) - cs.SetLogger(logger) - - bytes, _ := ioutil.ReadFile(cs.config.WalFile()) - // fmt.Printf("====== WAL: \n\r%s\n", bytes) - t.Logf("====== WAL: \n\r%X\n", bytes) - - err := cs.Start() - require.NoError(t, err) - defer cs.Stop() - - // This is just a signal that we haven't halted; its not something contained - // in the WAL itself. 
Assuming the consensus state is running, replay of any
-	// WAL, including the empty one, should eventually be followed by a new
-	// block, or else something is wrong.
-	newBlockCh := make(chan interface{}, 1)
-	err = cs.eventBus.Subscribe(context.Background(), testSubscriber, types.EventQueryNewBlock, newBlockCh)
-	require.NoError(t, err)
-	select {
-	case <-newBlockCh:
-	case <-time.After(60 * time.Second):
-		t.Fatalf("Timed out waiting for new block (see trace above)")
-	}
-}
-
-func sendTxs(cs *ConsensusState, ctx context.Context) {
-	for i := 0; i < 256; i++ {
-		select {
-		case <-ctx.Done():
-			return
-		default:
-			tx := []byte{byte(i)}
-			cs.mempool.CheckTx(tx, nil)
-		}
-	}
-}
-
-// TestWALCrash uses a crashing WAL to test that we can recover from any WAL failure.
-func TestWALCrash(t *testing.T) {
-	testCases := []struct {
-		name         string
-		initFn       func(dbm.DB, *ConsensusState, context.Context)
-		heightToStop int64
-	}{
-		{"empty block",
-			func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {},
-			1},
-		{"block with a smaller part size",
-			func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
-				// XXX: is there a better way to change BlockPartSizeBytes?
-				cs.state.ConsensusParams.BlockPartSizeBytes = 512
-				sm.SaveState(stateDB, cs.state)
-				go sendTxs(cs, ctx)
-			},
-			1},
-		{"many non-empty blocks",
-			func(stateDB dbm.DB, cs *ConsensusState, ctx context.Context) {
-				go sendTxs(cs, ctx)
-			},
-			3},
-	}
-
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			crashWALandCheckLiveness(t, tc.initFn, tc.heightToStop)
-		})
-	}
-}
-
-func crashWALandCheckLiveness(t *testing.T, initFn func(dbm.DB, *ConsensusState, context.Context), heightToStop int64) {
-	walPanicked := make(chan error)
-	crashingWal := &crashingWAL{panicCh: walPanicked, heightToStop: heightToStop}
-
-	i := 1
-LOOP:
-	for {
-		// fmt.Printf("====== LOOP %d\n", i)
-		t.Logf("====== LOOP %d\n", i)
-
-		// create consensus state from a clean slate
-		logger := log.NewNopLogger()
-		stateDB := dbm.NewMemDB()
-		state, _ := sm.MakeGenesisStateFromFile(consensusReplayConfig.GenesisFile())
-		privValidator := loadPrivValidator(consensusReplayConfig)
-		blockDB := dbm.NewMemDB()
-		cs := newConsensusStateWithConfigAndBlockStore(consensusReplayConfig, state, privValidator, kvstore.NewKVStoreApplication(), blockDB)
-		cs.SetLogger(logger)
-
-		// start sending transactions
-		ctx, cancel := context.WithCancel(context.Background())
-		initFn(stateDB, cs, ctx)
-
-		// clean up WAL file from the previous iteration
-		walFile := cs.config.WalFile()
-		os.Remove(walFile)
-
-		// set crashing WAL
-		csWal, err := cs.OpenWAL(walFile)
-		require.NoError(t, err)
-		crashingWal.next = csWal
-		// reset the message counter
-		crashingWal.msgIndex = 1
-		cs.wal = crashingWal
-
-		// start consensus state
-		err = cs.Start()
-		require.NoError(t, err)
-
-		i++
-
-		select {
-		case err := <-walPanicked:
-			t.Logf("WAL panicked: %v", err)
-
-			// make sure we can make blocks after a crash
-			startNewConsensusStateAndWaitForBlock(t, cs.Height, blockDB, stateDB)
-
-			// stop consensus state and transactions sender (initFn)
-			cs.Stop()
-			cancel()
-
-			// if we reached the required height, exit
-			if _, ok := err.(ReachedHeightToStopError); ok {
-				break LOOP
-			}
-		case <-time.After(10 * time.Second):
-			t.Fatal("WAL did not panic within 10 seconds (check the log)")
-		}
-	}
-}
-
-// crashingWAL is a WAL which crashes or rather simulates a crash during Save
-// (before and after). 
It remembers a message for which we last panicked -// (lastPanicedForMsgIndex), so we don't panic for it in subsequent iterations. -type crashingWAL struct { - next WAL - panicCh chan error - heightToStop int64 - - msgIndex int // current message index - lastPanicedForMsgIndex int // last message for which we panicked -} - -// WALWriteError indicates a WAL crash. -type WALWriteError struct { - msg string -} - -func (e WALWriteError) Error() string { - return e.msg -} - -// ReachedHeightToStopError indicates we've reached the required consensus -// height and may exit. -type ReachedHeightToStopError struct { - height int64 -} - -func (e ReachedHeightToStopError) Error() string { - return fmt.Sprintf("reached height to stop %d", e.height) -} - -// Write simulate WAL's crashing by sending an error to the panicCh and then -// exiting the cs.receiveRoutine. -func (w *crashingWAL) Write(m WALMessage) { - if endMsg, ok := m.(EndHeightMessage); ok { - if endMsg.Height == w.heightToStop { - w.panicCh <- ReachedHeightToStopError{endMsg.Height} - runtime.Goexit() - } else { - w.next.Write(m) - } - return - } - - if w.msgIndex > w.lastPanicedForMsgIndex { - w.lastPanicedForMsgIndex = w.msgIndex - _, file, line, _ := runtime.Caller(1) - w.panicCh <- WALWriteError{fmt.Sprintf("failed to write %T to WAL (fileline: %s:%d)", m, file, line)} - runtime.Goexit() - } else { - w.msgIndex++ - w.next.Write(m) - } -} - -func (w *crashingWAL) WriteSync(m WALMessage) { - w.Write(m) -} - -func (w *crashingWAL) Group() *auto.Group { return w.next.Group() } -func (w *crashingWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) { - return w.next.SearchForEndHeight(height, options) -} - -func (w *crashingWAL) Start() error { return w.next.Start() } -func (w *crashingWAL) Stop() error { return w.next.Stop() } -func (w *crashingWAL) Wait() { w.next.Wait() } - -//------------------------------------------------------------------------------------------ -// Handshake Tests - -const ( - NUM_BLOCKS = 6 -) - -var ( - mempool = sm.MockMempool{} - evpool = sm.MockEvidencePool{} -) - -//--------------------------------------- -// Test handshake/replay - -// 0 - all synced up -// 1 - saved block but app and state are behind -// 2 - save block and committed but state is behind -var modes = []uint{0, 1, 2} - -// Sync from scratch -func TestHandshakeReplayAll(t *testing.T) { - for _, m := range modes { - testHandshakeReplay(t, 0, m) - } -} - -// Sync many, not from scratch -func TestHandshakeReplaySome(t *testing.T) { - for _, m := range modes { - testHandshakeReplay(t, 1, m) - } -} - -// Sync from lagging by one -func TestHandshakeReplayOne(t *testing.T) { - for _, m := range modes { - testHandshakeReplay(t, NUM_BLOCKS-1, m) - } -} - -// Sync from caught up -func TestHandshakeReplayNone(t *testing.T) { - for _, m := range modes { - testHandshakeReplay(t, NUM_BLOCKS, m) - } -} - -func tempWALWithData(data []byte) string { - walFile, err := ioutil.TempFile("", "wal") - if err != nil { - panic(fmt.Errorf("failed to create temp WAL file: %v", err)) - } - _, err = walFile.Write(data) - if err != nil { - panic(fmt.Errorf("failed to write to temp WAL file: %v", err)) - } - if err := walFile.Close(); err != nil { - panic(fmt.Errorf("failed to close temp WAL file: %v", err)) - } - return walFile.Name() -} - -// Make some blocks. Start a fresh app and apply nBlocks blocks. 
Then restart the app and sync it up with the remaining blocks -func testHandshakeReplay(t *testing.T, nBlocks int, mode uint) { - config := ResetConfig("proxy_test_") - - walBody, err := WALWithNBlocks(NUM_BLOCKS) - if err != nil { - t.Fatal(err) - } - walFile := tempWALWithData(walBody) - config.Consensus.SetWalFile(walFile) - - privVal := privval.LoadFilePV(config.PrivValidatorFile()) - - wal, err := NewWAL(walFile) - if err != nil { - t.Fatal(err) - } - wal.SetLogger(log.TestingLogger()) - if err := wal.Start(); err != nil { - t.Fatal(err) - } - defer wal.Stop() - - chain, commits, err := makeBlockchainFromWAL(wal) - if err != nil { - t.Fatalf(err.Error()) - } - - stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) - store.chain = chain - store.commits = commits - - // run the chain through state.ApplyBlock to build up the tendermint state - state = buildTMStateFromChain(config, stateDB, state, chain, mode) - latestAppHash := state.AppHash - - // make a new client creator - kvstoreApp := kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "2")) - clientCreator2 := proxy.NewLocalClientCreator(kvstoreApp) - if nBlocks > 0 { - // run nBlocks against a new client to build up the app state. - // use a throwaway tendermint state - proxyApp := proxy.NewAppConns(clientCreator2, nil) - stateDB, state, _ := stateAndStore(config, privVal.GetPubKey()) - buildAppStateFromChain(proxyApp, stateDB, state, chain, nBlocks, mode) - } - - // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateDB, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator2, handshaker) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } - defer proxyApp.Stop() - - // get the latest app hash from the app - res, err := proxyApp.Query().InfoSync(abci.RequestInfo{""}) - if err != nil { - t.Fatal(err) - } - - // the app hash should be synced up - if !bytes.Equal(latestAppHash, res.LastBlockAppHash) { - t.Fatalf("Expected app hashes to match after handshake/replay. 
got %X, expected %X", res.LastBlockAppHash, latestAppHash) - } - - expectedBlocksToSync := NUM_BLOCKS - nBlocks - if nBlocks == NUM_BLOCKS && mode > 0 { - expectedBlocksToSync++ - } else if nBlocks > 0 && mode == 1 { - expectedBlocksToSync++ - } - - if handshaker.NBlocks() != expectedBlocksToSync { - t.Fatalf("Expected handshake to sync %d blocks, got %d", expectedBlocksToSync, handshaker.NBlocks()) - } -} - -func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.AppConns) sm.State { - testPartSize := st.ConsensusParams.BlockPartSizeBytes - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) - - blkID := types.BlockID{blk.Hash(), blk.MakePartSet(testPartSize).Header()} - newState, err := blockExec.ApplyBlock(st, blkID, blk) - if err != nil { - panic(err) - } - return newState -} - -func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, - state sm.State, chain []*types.Block, nBlocks int, mode uint) { - // start a new app without handshake, play nBlocks blocks - if err := proxyApp.Start(); err != nil { - panic(err) - } - defer proxyApp.Stop() - - validators := types.TM2PB.Validators(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ - Validators: validators, - }); err != nil { - panic(err) - } - - switch mode { - case 0: - for i := 0; i < nBlocks; i++ { - block := chain[i] - state = applyBlock(stateDB, state, block, proxyApp) - } - case 1, 2: - for i := 0; i < nBlocks-1; i++ { - block := chain[i] - state = applyBlock(stateDB, state, block, proxyApp) - } - - if mode == 2 { - // update the kvstore height and apphash - // as if we ran commit but not - state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) - } - } - -} - -func buildTMStateFromChain(config *cfg.Config, stateDB dbm.DB, state sm.State, chain []*types.Block, mode uint) sm.State { - // run the whole chain against this client to build up the tendermint state - clientCreator := proxy.NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(path.Join(config.DBDir(), "1"))) - proxyApp := proxy.NewAppConns(clientCreator, nil) // sm.NewHandshaker(config, state, store, ReplayLastBlock)) - if err := proxyApp.Start(); err != nil { - panic(err) - } - defer proxyApp.Stop() - - validators := types.TM2PB.Validators(state.Validators) - if _, err := proxyApp.Consensus().InitChainSync(abci.RequestInitChain{ - Validators: validators, - }); err != nil { - panic(err) - } - - switch mode { - case 0: - // sync right up - for _, block := range chain { - state = applyBlock(stateDB, state, block, proxyApp) - } - - case 1, 2: - // sync up to the penultimate as if we stored the block. 
- // whether we commit or not depends on the appHash - for _, block := range chain[:len(chain)-1] { - state = applyBlock(stateDB, state, block, proxyApp) - } - - // apply the final block to a state copy so we can - // get the right next appHash but keep the state back - applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) - } - - return state -} - -//-------------------------- -// utils for making blocks - -func makeBlockchainFromWAL(wal WAL) ([]*types.Block, []*types.Commit, error) { - // Search for height marker - gr, found, err := wal.SearchForEndHeight(0, &WALSearchOptions{}) - if err != nil { - return nil, nil, err - } - if !found { - return nil, nil, errors.New(cmn.Fmt("WAL does not contain height %d.", 1)) - } - defer gr.Close() // nolint: errcheck - - // log.Notice("Build a blockchain by reading from the WAL") - - var blocks []*types.Block - var commits []*types.Commit - - var thisBlockParts *types.PartSet - var thisBlockCommit *types.Commit - var height int64 - - dec := NewWALDecoder(gr) - for { - msg, err := dec.Decode() - if err == io.EOF { - break - } else if err != nil { - return nil, nil, err - } - - piece := readPieceFromWAL(msg) - if piece == nil { - continue - } - - switch p := piece.(type) { - case EndHeightMessage: - // if its not the first one, we have a full block - if thisBlockParts != nil { - var block = new(types.Block) - _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0) - if err != nil { - panic(err) - } - if block.Height != height+1 { - panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1)) - } - commitHeight := thisBlockCommit.Precommits[0].Height - if commitHeight != height+1 { - panic(cmn.Fmt("commit doesnt match. got height %d, expected %d", commitHeight, height+1)) - } - blocks = append(blocks, block) - commits = append(commits, thisBlockCommit) - height++ - } - case *types.PartSetHeader: - thisBlockParts = types.NewPartSetFromHeader(*p) - case *types.Part: - _, err := thisBlockParts.AddPart(p) - if err != nil { - return nil, nil, err - } - case *types.Vote: - if p.Type == types.VoteTypePrecommit { - thisBlockCommit = &types.Commit{ - BlockID: p.BlockID, - Precommits: []*types.Vote{p}, - } - } - } - } - // grab the last block too - var block = new(types.Block) - _, err = cdc.UnmarshalBinaryReader(thisBlockParts.GetReader(), block, 0) - if err != nil { - panic(err) - } - if block.Height != height+1 { - panic(cmn.Fmt("read bad block from wal. got height %d, expected %d", block.Height, height+1)) - } - commitHeight := thisBlockCommit.Precommits[0].Height - if commitHeight != height+1 { - panic(cmn.Fmt("commit doesnt match. 
got height %d, expected %d", commitHeight, height+1)) - } - blocks = append(blocks, block) - commits = append(commits, thisBlockCommit) - return blocks, commits, nil -} - -func readPieceFromWAL(msg *TimedWALMessage) interface{} { - // for logging - switch m := msg.Msg.(type) { - case msgInfo: - switch msg := m.Msg.(type) { - case *ProposalMessage: - return &msg.Proposal.BlockPartsHeader - case *BlockPartMessage: - return msg.Part - case *VoteMessage: - return msg.Vote - } - case EndHeightMessage: - return m - } - - return nil -} - -// fresh state and mock store -func stateAndStore(config *cfg.Config, pubKey crypto.PubKey) (dbm.DB, sm.State, *mockBlockStore) { - stateDB := dbm.NewMemDB() - state, _ := sm.MakeGenesisStateFromFile(config.GenesisFile()) - store := NewMockBlockStore(config, state.ConsensusParams) - return stateDB, state, store -} - -//---------------------------------- -// mock block store - -type mockBlockStore struct { - config *cfg.Config - params types.ConsensusParams - chain []*types.Block - commits []*types.Commit -} - -// TODO: NewBlockStore(db.NewMemDB) ... -func NewMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{config, params, nil, nil} -} - -func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } -func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } -func (bs *mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { - block := bs.chain[height-1] - return &types.BlockMeta{ - BlockID: types.BlockID{block.Hash(), block.MakePartSet(bs.params.BlockPartSizeBytes).Header()}, - Header: block.Header, - } -} -func (bs *mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } -func (bs *mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { -} -func (bs *mockBlockStore) LoadBlockCommit(height int64) *types.Commit { - return bs.commits[height-1] -} -func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { - return bs.commits[height-1] -} - -//---------------------------------------- - -func TestInitChainUpdateValidators(t *testing.T) { - val, _ := types.RandValidator(true, 10) - vals := types.NewValidatorSet([]*types.Validator{val}) - app := &initChainApp{vals: types.TM2PB.Validators(vals)} - clientCreator := proxy.NewLocalClientCreator(app) - - config := ResetConfig("proxy_test_") - privVal := privval.LoadFilePV(config.PrivValidatorFile()) - stateDB, state, store := stateAndStore(config, privVal.GetPubKey()) - - oldValAddr := state.Validators.Validators[0].Address - - // now start the app using the handshake - it should sync - genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - handshaker := NewHandshaker(stateDB, state, store, genDoc) - proxyApp := proxy.NewAppConns(clientCreator, handshaker) - if err := proxyApp.Start(); err != nil { - t.Fatalf("Error starting proxy app connections: %v", err) - } - defer proxyApp.Stop() - - // reload the state, check the validator set was updated - state = sm.LoadState(stateDB) - - newValAddr := state.Validators.Validators[0].Address - expectValAddr := val.Address - assert.NotEqual(t, oldValAddr, newValAddr) - assert.Equal(t, newValAddr, expectValAddr) -} - -func newInitChainApp(vals []abci.Validator) *initChainApp { - return &initChainApp{ - vals: vals, - } -} - -// returns the vals on InitChain -type initChainApp struct { - abci.BaseApplication - vals []abci.Validator -} - -func (ica *initChainApp) InitChain(req 
abci.RequestInitChain) abci.ResponseInitChain { - return abci.ResponseInitChain{ - Validators: ica.vals, - } -} diff --git a/consensus/state.go b/consensus/state.go deleted file mode 100644 index aab82296..00000000 --- a/consensus/state.go +++ /dev/null @@ -1,1673 +0,0 @@ -package consensus - -import ( - "bytes" - "errors" - "fmt" - "reflect" - "runtime/debug" - "sync" - "time" - - fail "github.com/ebuchman/fail-test" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - cstypes "github.com/tendermint/tendermint/consensus/types" - tmevents "github.com/tendermint/tendermint/libs/events" - "github.com/tendermint/tendermint/p2p" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -//----------------------------------------------------------------------------- -// Config - -const ( - proposalHeartbeatIntervalSeconds = 2 -) - -//----------------------------------------------------------------------------- -// Errors - -var ( - ErrInvalidProposalSignature = errors.New("Error invalid proposal signature") - ErrInvalidProposalPOLRound = errors.New("Error invalid proposal POL round") - ErrAddingVote = errors.New("Error adding vote") - ErrVoteHeightMismatch = errors.New("Error vote height mismatch") -) - -//----------------------------------------------------------------------------- - -var ( - msgQueueSize = 1000 -) - -// msgs from the reactor which may update the state -type msgInfo struct { - Msg ConsensusMessage `json:"msg"` - PeerID p2p.ID `json:"peer_key"` -} - -// internally generated messages which may update the state -type timeoutInfo struct { - Duration time.Duration `json:"duration"` - Height int64 `json:"height"` - Round int `json:"round"` - Step cstypes.RoundStepType `json:"step"` -} - -func (ti *timeoutInfo) String() string { - return fmt.Sprintf("%v ; %d/%d %v", ti.Duration, ti.Height, ti.Round, ti.Step) -} - -// ConsensusState handles execution of the consensus algorithm. -// It processes votes and proposals, and upon reaching agreement, -// commits blocks to the chain and executes them against the application. -// The internal state machine receives input from peers, the internal validator, and from a timer. -type ConsensusState struct { - cmn.BaseService - - // config details - config *cfg.ConsensusConfig - privValidator types.PrivValidator // for signing votes - - // services for creating and executing blocks - // TODO: encapsulate all of this in one "BlockManager" - blockExec *sm.BlockExecutor - blockStore sm.BlockStore - mempool sm.Mempool - evpool sm.EvidencePool - - // internal state - mtx sync.Mutex - cstypes.RoundState - state sm.State // State until height-1. - - // state changes may be triggered by: msgs from peers, - // msgs from ourself, or by timeouts - peerMsgQueue chan msgInfo - internalMsgQueue chan msgInfo - timeoutTicker TimeoutTicker - - // we use eventBus to trigger msg broadcasts in the reactor, - // and to notify external subscribers, eg. 
through a websocket - eventBus *types.EventBus - - // a Write-Ahead Log ensures we can recover from any kind of crash - // and helps us avoid signing conflicting votes - wal WAL - replayMode bool // so we don't log signing errors during replay - doWALCatchup bool // determines if we even try to do the catchup - - // for tests where we want to limit the number of transitions the state makes - nSteps int - - // some functions can be overwritten for testing - decideProposal func(height int64, round int) - doPrevote func(height int64, round int) - setProposal func(proposal *types.Proposal) error - - // closed when we finish shutting down - done chan struct{} - - // synchronous pubsub between consensus state and reactor. - // state only emits EventNewRoundStep, EventVote and EventProposalHeartbeat - evsw tmevents.EventSwitch - - // for reporting metrics - metrics *Metrics -} - -// CSOption sets an optional parameter on the ConsensusState. -type CSOption func(*ConsensusState) - -// NewConsensusState returns a new ConsensusState. -func NewConsensusState( - config *cfg.ConsensusConfig, - state sm.State, - blockExec *sm.BlockExecutor, - blockStore sm.BlockStore, - mempool sm.Mempool, - evpool sm.EvidencePool, - options ...CSOption, -) *ConsensusState { - cs := &ConsensusState{ - config: config, - blockExec: blockExec, - blockStore: blockStore, - mempool: mempool, - peerMsgQueue: make(chan msgInfo, msgQueueSize), - internalMsgQueue: make(chan msgInfo, msgQueueSize), - timeoutTicker: NewTimeoutTicker(), - done: make(chan struct{}), - doWALCatchup: true, - wal: nilWAL{}, - evpool: evpool, - evsw: tmevents.NewEventSwitch(), - metrics: NopMetrics(), - } - // set function defaults (may be overwritten before calling Start) - cs.decideProposal = cs.defaultDecideProposal - cs.doPrevote = cs.defaultDoPrevote - cs.setProposal = cs.defaultSetProposal - - cs.updateToState(state) - // Don't call scheduleRound0 yet. - // We do that upon Start(). - cs.reconstructLastCommit(state) - cs.BaseService = *cmn.NewBaseService(nil, "ConsensusState", cs) - for _, option := range options { - option(cs) - } - return cs -} - -//---------------------------------------- -// Public interface - -// SetLogger implements Service. -func (cs *ConsensusState) SetLogger(l log.Logger) { - cs.BaseService.Logger = l - cs.timeoutTicker.SetLogger(l) -} - -// SetEventBus sets event bus. -func (cs *ConsensusState) SetEventBus(b *types.EventBus) { - cs.eventBus = b - cs.blockExec.SetEventBus(b) -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) CSOption { - return func(cs *ConsensusState) { cs.metrics = metrics } -} - -// String returns a string. -func (cs *ConsensusState) String() string { - // better not to access shared variables - return cmn.Fmt("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) -} - -// GetState returns a copy of the chain state. -func (cs *ConsensusState) GetState() sm.State { - cs.mtx.Lock() - defer cs.mtx.Unlock() - return cs.state.Copy() -} - -// GetRoundState returns a shallow copy of the internal consensus state. -func (cs *ConsensusState) GetRoundState() *cstypes.RoundState { - cs.mtx.Lock() - defer cs.mtx.Unlock() - - rs := cs.RoundState // copy - return &rs -} - -// GetRoundStateJSON returns a json of RoundState, marshalled using go-amino. 
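-// For example, a debug endpoint could serve the result directly (a sketch only;
-// the real wiring for /dump_consensus_state-style endpoints lives in the rpc package):
-//
-//	data, err := cs.GetRoundStateJSON()
-//	if err != nil {
-//		return err
-//	}
-//	w.Write(data) // w being e.g. an http.ResponseWriter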
-func (cs *ConsensusState) GetRoundStateJSON() ([]byte, error) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-
-	return cdc.MarshalJSON(cs.RoundState)
-}
-
-// GetRoundStateSimpleJSON returns a json of RoundStateSimple, marshalled using go-amino.
-func (cs *ConsensusState) GetRoundStateSimpleJSON() ([]byte, error) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-
-	return cdc.MarshalJSON(cs.RoundState.RoundStateSimple())
-}
-
-// GetValidators returns a copy of the current validators.
-func (cs *ConsensusState) GetValidators() (int64, []*types.Validator) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-	return cs.state.LastBlockHeight, cs.state.Validators.Copy().Validators
-}
-
-// SetPrivValidator sets the private validator account for signing votes.
-func (cs *ConsensusState) SetPrivValidator(priv types.PrivValidator) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-	cs.privValidator = priv
-}
-
-// SetTimeoutTicker sets the local timer. It may be useful to overwrite for testing.
-func (cs *ConsensusState) SetTimeoutTicker(timeoutTicker TimeoutTicker) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-	cs.timeoutTicker = timeoutTicker
-}
-
-// LoadCommit loads the commit for a given height.
-func (cs *ConsensusState) LoadCommit(height int64) *types.Commit {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-	if height == cs.blockStore.Height() {
-		return cs.blockStore.LoadSeenCommit(height)
-	}
-	return cs.blockStore.LoadBlockCommit(height)
-}
-
-// OnStart implements cmn.Service.
-// It loads the latest state via the WAL, and starts the timeout and receive routines.
-func (cs *ConsensusState) OnStart() error {
-	if err := cs.evsw.Start(); err != nil {
-		return err
-	}
-
-	// we may set the WAL in testing before calling Start,
-	// so only OpenWAL if it's still the nilWAL
-	if _, ok := cs.wal.(nilWAL); ok {
-		walFile := cs.config.WalFile()
-		wal, err := cs.OpenWAL(walFile)
-		if err != nil {
-			cs.Logger.Error("Error loading ConsensusState wal", "err", err.Error())
-			return err
-		}
-		cs.wal = wal
-	}
-
-	// we need the timeoutRoutine for replay so
-	// we don't block on the tick chan.
-	// NOTE: we will get a build-up of garbage go routines
-	// firing on the tockChan until the receiveRoutine is started
-	// to deal with them (by that point, at most one will be valid)
-	if err := cs.timeoutTicker.Start(); err != nil {
-		return err
-	}
-
-	// we may have lost some votes if the process crashed
-	// reload from consensus log to catch up
-	if cs.doWALCatchup {
-		if err := cs.catchupReplay(cs.Height); err != nil {
-			cs.Logger.Error("Error on catchup replay. Proceeding to start ConsensusState anyway", "err", err.Error())
-			// NOTE: if we ever do return an error here,
-			// make sure to stop the timeoutTicker
-		}
-	}
-
-	// now start the receiveRoutine
-	go cs.receiveRoutine(0)
-
-	// schedule the first round!
-	// use GetRoundState so we don't race the receiveRoutine for access
-	cs.scheduleRound0(cs.GetRoundState())
-
-	return nil
-}
-
-// timeoutRoutine: receive requests for timeouts on tickChan and fire timeouts on tockChan
-// receiveRoutine: serializes processing of proposals, block parts, votes; coordinates state transitions
-func (cs *ConsensusState) startRoutines(maxSteps int) {
-	err := cs.timeoutTicker.Start()
-	if err != nil {
-		cs.Logger.Error("Error starting timeout ticker", "err", err)
-		return
-	}
-	go cs.receiveRoutine(maxSteps)
-}
-
-// OnStop implements cmn.Service. It stops all routines and waits for the WAL to finish.
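-// Note the shutdown order below: the event switch and timeout ticker are
-// stopped before waiting on the WAL, and the WAL itself is stopped by the
-// receiveRoutine when it sees Quit(), so nothing writes to it afterwards.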
-func (cs *ConsensusState) OnStop() {
-	cs.BaseService.OnStop()
-
-	cs.evsw.Stop()
-
-	cs.timeoutTicker.Stop()
-
-	// Make BaseService.Wait() wait until cs.wal.Wait()
-	if cs.IsRunning() {
-		cs.wal.Wait()
-	}
-}
-
-// Wait waits for the main routine to return.
-// NOTE: be sure to Stop() the event switch and drain
-// any event channels or this may deadlock
-func (cs *ConsensusState) Wait() {
-	<-cs.done
-}
-
-// OpenWAL opens a file to log all consensus messages and timeouts for deterministic accountability
-func (cs *ConsensusState) OpenWAL(walFile string) (WAL, error) {
-	wal, err := NewWAL(walFile)
-	if err != nil {
-		cs.Logger.Error("Failed to open WAL for consensus state", "wal", walFile, "err", err)
-		return nil, err
-	}
-	wal.SetLogger(cs.Logger.With("wal", walFile))
-	if err := wal.Start(); err != nil {
-		return nil, err
-	}
-	return wal, nil
-}
-
-//------------------------------------------------------------
-// Public interface for passing messages into the consensus state, possibly causing a state transition.
-// If peerID == "", the msg is considered internal.
-// Messages are added to the appropriate queue (peer or internal).
-// If the queue is full, the function may block.
-// TODO: should these return anything or let callers just use events?
-
-// AddVote inputs a vote.
-func (cs *ConsensusState) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) {
-	if peerID == "" {
-		cs.internalMsgQueue <- msgInfo{&VoteMessage{vote}, ""}
-	} else {
-		cs.peerMsgQueue <- msgInfo{&VoteMessage{vote}, peerID}
-	}
-
-	// TODO: wait for event?!
-	return false, nil
-}
-
-// SetProposal inputs a proposal.
-func (cs *ConsensusState) SetProposal(proposal *types.Proposal, peerID p2p.ID) error {
-
-	if peerID == "" {
-		cs.internalMsgQueue <- msgInfo{&ProposalMessage{proposal}, ""}
-	} else {
-		cs.peerMsgQueue <- msgInfo{&ProposalMessage{proposal}, peerID}
-	}
-
-	// TODO: wait for event?!
-	return nil
-}
-
-// AddProposalBlockPart inputs a part of the proposal block.
-func (cs *ConsensusState) AddProposalBlockPart(height int64, round int, part *types.Part, peerID p2p.ID) error {
-
-	if peerID == "" {
-		cs.internalMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, ""}
-	} else {
-		cs.peerMsgQueue <- msgInfo{&BlockPartMessage{height, round, part}, peerID}
-	}
-
-	// TODO: wait for event?!
-	return nil
-}
-
-// SetProposalAndBlock inputs the proposal and all block parts.
-func (cs *ConsensusState) SetProposalAndBlock(proposal *types.Proposal, block *types.Block, parts *types.PartSet, peerID p2p.ID) error {
-	if err := cs.SetProposal(proposal, peerID); err != nil {
-		return err
-	}
-	for i := 0; i < parts.Total(); i++ {
-		part := parts.GetPart(i)
-		if err := cs.AddProposalBlockPart(proposal.Height, proposal.Round, part, peerID); err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-//------------------------------------------------------------
-// internal functions for managing the state
-
-func (cs *ConsensusState) updateHeight(height int64) {
-	cs.metrics.Height.Set(float64(height))
-	cs.Height = height
-}
-
-func (cs *ConsensusState) updateRoundStep(round int, step cstypes.RoundStepType) {
-	cs.Round = round
-	cs.Step = step
-}
-
-// enterNewRound(height, 0) at cs.StartTime.
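-// For example, with timeout_commit = 1s and CommitTime = 12:00:00.000,
-// StartTime is roughly 12:00:01.000 and the sleep below covers whatever
-// remains until then (a non-positive duration fires the timeout immediately).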
-func (cs *ConsensusState) scheduleRound0(rs *cstypes.RoundState) { - //cs.Logger.Info("scheduleRound0", "now", time.Now(), "startTime", cs.StartTime) - sleepDuration := rs.StartTime.Sub(time.Now()) // nolint: gotype, gosimple - cs.scheduleTimeout(sleepDuration, rs.Height, 0, cstypes.RoundStepNewHeight) -} - -// Attempt to schedule a timeout (by sending timeoutInfo on the tickChan) -func (cs *ConsensusState) scheduleTimeout(duration time.Duration, height int64, round int, step cstypes.RoundStepType) { - cs.timeoutTicker.ScheduleTimeout(timeoutInfo{duration, height, round, step}) -} - -// send a msg into the receiveRoutine regarding our own proposal, block part, or vote -func (cs *ConsensusState) sendInternalMessage(mi msgInfo) { - select { - case cs.internalMsgQueue <- mi: - default: - // NOTE: using the go-routine means our votes can - // be processed out of order. - // TODO: use CList here for strict determinism and - // attempt push to internalMsgQueue in receiveRoutine - cs.Logger.Info("Internal msg queue is full. Using a go-routine") - go func() { cs.internalMsgQueue <- mi }() - } -} - -// Reconstruct LastCommit from SeenCommit, which we saved along with the block, -// (which happens even before saving the state) -func (cs *ConsensusState) reconstructLastCommit(state sm.State) { - if state.LastBlockHeight == 0 { - return - } - seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.NewVoteSet(state.ChainID, state.LastBlockHeight, seenCommit.Round(), types.VoteTypePrecommit, state.LastValidators) - for _, precommit := range seenCommit.Precommits { - if precommit == nil { - continue - } - added, err := lastPrecommits.AddVote(precommit) - if !added || err != nil { - cmn.PanicCrisis(cmn.Fmt("Failed to reconstruct LastCommit: %v", err)) - } - } - if !lastPrecommits.HasTwoThirdsMajority() { - cmn.PanicSanity("Failed to reconstruct LastCommit: Does not have +2/3 maj") - } - cs.LastCommit = lastPrecommits -} - -// Updates ConsensusState and increments height to match that of state. -// The round becomes 0 and cs.Step becomes cstypes.RoundStepNewHeight. -func (cs *ConsensusState) updateToState(state sm.State) { - if cs.CommitRound > -1 && 0 < cs.Height && cs.Height != state.LastBlockHeight { - cmn.PanicSanity(cmn.Fmt("updateToState() expected state height of %v but found %v", - cs.Height, state.LastBlockHeight)) - } - if !cs.state.IsEmpty() && cs.state.LastBlockHeight+1 != cs.Height { - // This might happen when someone else is mutating cs.state. - // Someone forgot to pass in state.Copy() somewhere?! - cmn.PanicSanity(cmn.Fmt("Inconsistent cs.state.LastBlockHeight+1 %v vs cs.Height %v", - cs.state.LastBlockHeight+1, cs.Height)) - } - - // If state isn't further out than cs.state, just ignore. - // This happens when SwitchToConsensus() is called in the reactor. - // We don't want to reset e.g. the Votes. - if !cs.state.IsEmpty() && (state.LastBlockHeight <= cs.state.LastBlockHeight) { - cs.Logger.Info("Ignoring updateToState()", "newHeight", state.LastBlockHeight+1, "oldHeight", cs.state.LastBlockHeight+1) - return - } - - // Reset fields based on state. 
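-	// In short: all per-height state (proposal, locks, valid block, vote sets)
-	// is cleared below, while LastCommit and LastValidators carry over from
-	// the height that was just committed.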
-	validators := state.Validators
-	lastPrecommits := (*types.VoteSet)(nil)
-	if cs.CommitRound > -1 && cs.Votes != nil {
-		if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() {
-			cmn.PanicSanity("updateToState(state) called but last Precommit round didn't have +2/3")
-		}
-		lastPrecommits = cs.Votes.Precommits(cs.CommitRound)
-	}
-
-	// Next desired block height
-	height := state.LastBlockHeight + 1
-
-	// RoundState fields
-	cs.updateHeight(height)
-	cs.updateRoundStep(0, cstypes.RoundStepNewHeight)
-	if cs.CommitTime.IsZero() {
-		// "Now" makes it easier to sync up dev nodes.
-		// We add timeoutCommit to allow transactions
-		// to be gathered for the first block.
-		// An alternative solution that relies on clocks:
-		// cs.StartTime = state.LastBlockTime.Add(timeoutCommit)
-		cs.StartTime = cs.config.Commit(time.Now())
-	} else {
-		cs.StartTime = cs.config.Commit(cs.CommitTime)
-	}
-	cs.Validators = validators
-	cs.Proposal = nil
-	cs.ProposalBlock = nil
-	cs.ProposalBlockParts = nil
-	cs.LockedRound = 0
-	cs.LockedBlock = nil
-	cs.LockedBlockParts = nil
-	cs.ValidRound = 0
-	cs.ValidBlock = nil
-	cs.ValidBlockParts = nil
-	cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators)
-	cs.CommitRound = -1
-	cs.LastCommit = lastPrecommits
-	cs.LastValidators = state.LastValidators
-
-	cs.state = state
-
-	// Finally, broadcast RoundState
-	cs.newStep()
-}
-
-func (cs *ConsensusState) newStep() {
-	rs := cs.RoundStateEvent()
-	cs.wal.Write(rs)
-	cs.nSteps++
-	// newStep is called by updateToState in NewConsensusState before the eventBus is set!
-	if cs.eventBus != nil {
-		cs.eventBus.PublishEventNewRoundStep(rs)
-		cs.evsw.FireEvent(types.EventNewRoundStep, &cs.RoundState)
-	}
-}
-
-//-----------------------------------------
-// the main go routines
-
-// receiveRoutine handles messages which may cause state transitions.
-// its argument (n) is the number of messages to process before exiting - use 0 to run forever
-// It keeps the RoundState and is the only thing that updates it.
-// Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
-// ConsensusState must be locked before any internal state is updated.
-func (cs *ConsensusState) receiveRoutine(maxSteps int) {
-	defer func() {
-		if r := recover(); r != nil {
-			cs.Logger.Error("CONSENSUS FAILURE!!!", "err", r, "stack", string(debug.Stack()))
-		}
-	}()
-
-	for {
-		if maxSteps > 0 {
-			if cs.nSteps >= maxSteps {
-				cs.Logger.Info("reached max steps. exiting receive routine")
-				cs.nSteps = 0
-				return
-			}
-		}
-		rs := cs.RoundState
-		var mi msgInfo
-
-		select {
-		case height := <-cs.mempool.TxsAvailable():
-			cs.handleTxsAvailable(height)
-		case mi = <-cs.peerMsgQueue:
-			cs.wal.Write(mi)
-			// handles proposals, block parts, votes
-			// may generate internal events (votes, complete proposals, 2/3 majorities)
-			cs.handleMsg(mi)
-		case mi = <-cs.internalMsgQueue:
-			cs.wal.WriteSync(mi) // NOTE: fsync
-			// handles proposals, block parts, votes
-			cs.handleMsg(mi)
-		case ti := <-cs.timeoutTicker.Chan(): // tockChan:
-			cs.wal.Write(ti)
-			// if the timeout is relevant to the rs
-			// go to the next step
-			cs.handleTimeout(ti, rs)
-		case <-cs.Quit():
-
-			// NOTE: the internalMsgQueue may have signed messages from our
-			// priv_val that haven't hit the WAL, but it's ok because
-			// priv_val tracks LastSig
-
-			// close wal now that we're done writing to it
-			cs.wal.Stop()
-
-			close(cs.done)
-			return
-		}
-	}
-}
-
-// state transitions on complete-proposal, 2/3-any, 2/3-one
-func (cs *ConsensusState) handleMsg(mi msgInfo) {
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-
-	var err error
-	msg, peerID := mi.Msg, mi.PeerID
-	switch msg := msg.(type) {
-	case *ProposalMessage:
-		// will not cause transition.
-		// once proposal is set, we can receive block parts
-		err = cs.setProposal(msg.Proposal)
-	case *BlockPartMessage:
-		// if the proposal is complete, we'll enterPrevote or tryFinalizeCommit
-		_, err = cs.addProposalBlockPart(msg.Height, msg.Part)
-		if err != nil && msg.Round != cs.Round {
-			cs.Logger.Debug("Received block part from wrong round", "height", cs.Height, "csRound", cs.Round, "blockRound", msg.Round)
-			err = nil
-		}
-	case *VoteMessage:
-		// attempt to add the vote and dupeout the validator if it's a duplicate signature
-		// if the vote gives us a 2/3-any or 2/3-one, we transition
-		err := cs.tryAddVote(msg.Vote, peerID)
-		if err == ErrAddingVote {
-			// TODO: punish peer
-			// We probably don't want to stop the peer here. The vote does not
-			// necessarily come from a malicious peer but can be just broadcast by
-			// a typical peer.
-			// https://github.com/tendermint/tendermint/issues/1281
-		}
-
-		// NOTE: the vote is broadcast to peers by the reactor listening
-		// for vote events
-
-		// TODO: If rs.Height == vote.Height && rs.Round < vote.Round,
-		// the peer is sending us CatchupCommit precommits.
-		// We could make note of this and help filter in broadcastHasVoteMessage().
-	default:
-		cs.Logger.Error("Unknown msg type", "type", reflect.TypeOf(msg))
-	}
-	if err != nil {
-		cs.Logger.Error("Error with msg", "height", cs.Height, "round", cs.Round, "type", reflect.TypeOf(msg), "peer", peerID, "err", err, "msg", msg)
-	}
-}
-
-func (cs *ConsensusState) handleTimeout(ti timeoutInfo, rs cstypes.RoundState) {
-	cs.Logger.Debug("Received tock", "timeout", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step)
-
-	// timeouts must be for current height, round, step
-	if ti.Height != rs.Height || ti.Round < rs.Round || (ti.Round == rs.Round && ti.Step < rs.Step) {
-		cs.Logger.Debug("Ignoring tock because we're ahead", "height", rs.Height, "round", rs.Round, "step", rs.Step)
-		return
-	}
-
-	// the timeout will now cause a state transition
-	cs.mtx.Lock()
-	defer cs.mtx.Unlock()
-
-	switch ti.Step {
-	case cstypes.RoundStepNewHeight:
-		// NewRound event fired from enterNewRound.
-		// XXX: should we fire timeout here (for timeout commit)?
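-		// Step by step, the cases below map each expiring timeout to its
-		// transition: NewHeight -> round 0, NewRound -> propose,
-		// Propose -> prevote, PrevoteWait -> precommit, PrecommitWait -> next round.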
- cs.enterNewRound(ti.Height, 0) - case cstypes.RoundStepNewRound: - cs.enterPropose(ti.Height, 0) - case cstypes.RoundStepPropose: - cs.eventBus.PublishEventTimeoutPropose(cs.RoundStateEvent()) - cs.enterPrevote(ti.Height, ti.Round) - case cstypes.RoundStepPrevoteWait: - cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) - cs.enterPrecommit(ti.Height, ti.Round) - case cstypes.RoundStepPrecommitWait: - cs.eventBus.PublishEventTimeoutWait(cs.RoundStateEvent()) - cs.enterNewRound(ti.Height, ti.Round+1) - default: - panic(cmn.Fmt("Invalid timeout step: %v", ti.Step)) - } - -} - -func (cs *ConsensusState) handleTxsAvailable(height int64) { - cs.mtx.Lock() - defer cs.mtx.Unlock() - // we only need to do this for round 0 - cs.enterPropose(height, 0) -} - -//----------------------------------------------------------------------------- -// State functions -// Used internally by handleTimeout and handleMsg to make state transitions - -// Enter: `timeoutNewHeight` by startTime (commitTime+timeoutCommit), -// or, if SkipTimeout==true, after receiving all precommits from (height,round-1) -// Enter: `timeoutPrecommits` after any +2/3 precommits from (height,round-1) -// Enter: +2/3 precommits for nil at (height,round-1) -// Enter: +2/3 prevotes any or +2/3 precommits for block or any from (height, round) -// NOTE: cs.StartTime was already set for height. -func (cs *ConsensusState) enterNewRound(height int64, round int) { - logger := cs.Logger.With("height", height, "round", round) - - if cs.Height != height || round < cs.Round || (cs.Round == round && cs.Step != cstypes.RoundStepNewHeight) { - logger.Debug(cmn.Fmt("enterNewRound(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - return - } - - if now := time.Now(); cs.StartTime.After(now) { - logger.Info("Need to set a buffer and log message here for sanity.", "startTime", cs.StartTime, "now", now) - } - - logger.Info(cmn.Fmt("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - - // Increment validators if necessary - validators := cs.Validators - if cs.Round < round { - validators = validators.Copy() - validators.IncrementAccum(round - cs.Round) - } - - // Setup new round - // we don't fire newStep for this step, - // but we fire an event, so update the round step first - cs.updateRoundStep(round, cstypes.RoundStepNewRound) - cs.Validators = validators - if round == 0 { - // We've already reset these upon new height, - // and meanwhile we might have received a proposal - // for round 0. - } else { - logger.Info("Resetting Proposal info") - cs.Proposal = nil - cs.ProposalBlock = nil - cs.ProposalBlockParts = nil - } - cs.Votes.SetRound(round + 1) // also track next round (round+1) to allow round-skipping - - cs.eventBus.PublishEventNewRound(cs.RoundStateEvent()) - cs.metrics.Rounds.Set(float64(round)) - - // Wait for txs to be available in the mempool - // before we enterPropose in round 0. If the last block changed the app hash, - // we may need an empty "proof" block, and enterPropose immediately. 
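-	// Roughly: we wait for txs only in round 0, when the config asks us to
-	// (e.g. create_empty_blocks = false), and when no "proof" block is needed;
-	// otherwise we proceed straight to propose.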
- waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) - if waitForTxs { - if cs.config.CreateEmptyBlocksInterval > 0 { - cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound) - } - go cs.proposalHeartbeat(height, round) - } else { - cs.enterPropose(height, round) - } -} - -// needProofBlock returns true on the first height (so the genesis app hash is signed right away) -// and where the last block (height-1) caused the app hash to change -func (cs *ConsensusState) needProofBlock(height int64) bool { - if height == 1 { - return true - } - - lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) - return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) -} - -func (cs *ConsensusState) proposalHeartbeat(height int64, round int) { - counter := 0 - addr := cs.privValidator.GetAddress() - valIndex, _ := cs.Validators.GetByAddress(addr) - chainID := cs.state.ChainID - for { - rs := cs.GetRoundState() - // if we've already moved on, no need to send more heartbeats - if rs.Step > cstypes.RoundStepNewRound || rs.Round > round || rs.Height > height { - return - } - heartbeat := &types.Heartbeat{ - Height: rs.Height, - Round: rs.Round, - Sequence: counter, - ValidatorAddress: addr, - ValidatorIndex: valIndex, - } - cs.privValidator.SignHeartbeat(chainID, heartbeat) - cs.eventBus.PublishEventProposalHeartbeat(types.EventDataProposalHeartbeat{heartbeat}) - cs.evsw.FireEvent(types.EventProposalHeartbeat, heartbeat) - counter++ - time.Sleep(proposalHeartbeatIntervalSeconds * time.Second) - } -} - -// Enter (CreateEmptyBlocks): from enterNewRound(height,round) -// Enter (CreateEmptyBlocks, CreateEmptyBlocksInterval > 0 ): after enterNewRound(height,round), after timeout of CreateEmptyBlocksInterval -// Enter (!CreateEmptyBlocks) : after enterNewRound(height,round), once txs are in the mempool -func (cs *ConsensusState) enterPropose(height int64, round int) { - logger := cs.Logger.With("height", height, "round", round) - - if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPropose <= cs.Step) { - logger.Debug(cmn.Fmt("enterPropose(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - return - } - logger.Info(cmn.Fmt("enterPropose(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - - defer func() { - // Done enterPropose: - cs.updateRoundStep(round, cstypes.RoundStepPropose) - cs.newStep() - - // If we have the whole proposal + POL, then goto Prevote now. 
- // else, we'll enterPrevote when the rest of the proposal is received (in AddProposalBlockPart), - // or else after timeoutPropose - if cs.isProposalComplete() { - cs.enterPrevote(height, cs.Round) - } - }() - - // If we don't get the proposal and all block parts quick enough, enterPrevote - cs.scheduleTimeout(cs.config.Propose(round), height, round, cstypes.RoundStepPropose) - - // Nothing more to do if we're not a validator - if cs.privValidator == nil { - logger.Debug("This node is not a validator") - return - } - - // if not a validator, we're done - if !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { - logger.Debug("This node is not a validator", "addr", cs.privValidator.GetAddress(), "vals", cs.Validators) - return - } - logger.Debug("This node is a validator") - - if cs.isProposer() { - logger.Info("enterPropose: Our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) - cs.decideProposal(height, round) - } else { - logger.Info("enterPropose: Not our turn to propose", "proposer", cs.Validators.GetProposer().Address, "privValidator", cs.privValidator) - } -} - -func (cs *ConsensusState) isProposer() bool { - return bytes.Equal(cs.Validators.GetProposer().Address, cs.privValidator.GetAddress()) -} - -func (cs *ConsensusState) defaultDecideProposal(height int64, round int) { - var block *types.Block - var blockParts *types.PartSet - - // Decide on block - if cs.LockedBlock != nil { - // If we're locked onto a block, just choose that. - block, blockParts = cs.LockedBlock, cs.LockedBlockParts - } else if cs.ValidBlock != nil { - // If there is valid block, choose that. - block, blockParts = cs.ValidBlock, cs.ValidBlockParts - } else { - // Create a new proposal block from state/txs from the mempool. - block, blockParts = cs.createProposalBlock() - if block == nil { // on error - return - } - } - - // Make proposal - polRound, polBlockID := cs.Votes.POLInfo() - proposal := types.NewProposal(height, round, blockParts.Header(), polRound, polBlockID) - if err := cs.privValidator.SignProposal(cs.state.ChainID, proposal); err == nil { - // Set fields - /* fields set by setProposal and addBlockPart - cs.Proposal = proposal - cs.ProposalBlock = block - cs.ProposalBlockParts = blockParts - */ - - // send proposal and block parts on internal msg queue - cs.sendInternalMessage(msgInfo{&ProposalMessage{proposal}, ""}) - for i := 0; i < blockParts.Total(); i++ { - part := blockParts.GetPart(i) - cs.sendInternalMessage(msgInfo{&BlockPartMessage{cs.Height, cs.Round, part}, ""}) - } - cs.Logger.Info("Signed proposal", "height", height, "round", round, "proposal", proposal) - cs.Logger.Debug(cmn.Fmt("Signed proposal block: %v", block)) - } else { - if !cs.replayMode { - cs.Logger.Error("enterPropose: Error signing proposal", "height", height, "round", round, "err", err) - } - } -} - -// Returns true if the proposal block is complete && -// (if POLRound was proposed, we have +2/3 prevotes from there). -func (cs *ConsensusState) isProposalComplete() bool { - if cs.Proposal == nil || cs.ProposalBlock == nil { - return false - } - // we have the proposal. if there's a POLRound, - // make sure we have the prevotes from it too - if cs.Proposal.POLRound < 0 { - return true - } - // if this is false the proposer is lying or we haven't received the POL yet - return cs.Votes.Prevotes(cs.Proposal.POLRound).HasTwoThirdsMajority() - -} - -// Create the next block to propose and return it. -// Returns nil block upon error. 
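-// Its three inputs are the commit for the previous block (empty at height 1),
-// the txs reaped from the mempool, and any pending evidence from the evidence pool.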
-// NOTE: keep it side-effect free for clarity. -func (cs *ConsensusState) createProposalBlock() (block *types.Block, blockParts *types.PartSet) { - var commit *types.Commit - if cs.Height == 1 { - // We're creating a proposal for the first block. - // The commit is empty, but not nil. - commit = &types.Commit{} - } else if cs.LastCommit.HasTwoThirdsMajority() { - // Make the commit from LastCommit - commit = cs.LastCommit.MakeCommit() - } else { - // This shouldn't happen. - cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.") - return - } - - // Mempool validated transactions - txs := cs.mempool.Reap(cs.config.MaxBlockSizeTxs) - block, parts := cs.state.MakeBlock(cs.Height, txs, commit) - evidence := cs.evpool.PendingEvidence() - block.AddEvidence(evidence) - return block, parts -} - -// Enter: `timeoutPropose` after entering Propose. -// Enter: proposal block and POL is ready. -// Enter: any +2/3 prevotes for future round. -// Prevote for LockedBlock if we're locked, or ProposalBlock if valid. -// Otherwise vote nil. -func (cs *ConsensusState) enterPrevote(height int64, round int) { - if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevote <= cs.Step) { - cs.Logger.Debug(cmn.Fmt("enterPrevote(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - return - } - - defer func() { - // Done enterPrevote: - cs.updateRoundStep(round, cstypes.RoundStepPrevote) - cs.newStep() - }() - - // fire event for how we got here - if cs.isProposalComplete() { - cs.eventBus.PublishEventCompleteProposal(cs.RoundStateEvent()) - } else { - // we received +2/3 prevotes for a future round - // TODO: catchup event? - } - - cs.Logger.Info(cmn.Fmt("enterPrevote(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - - // Sign and broadcast vote as necessary - cs.doPrevote(height, round) - - // Once `addVote` hits any +2/3 prevotes, we will go to PrevoteWait - // (so we have more time to try and collect +2/3 prevotes for a single block) -} - -func (cs *ConsensusState) defaultDoPrevote(height int64, round int) { - logger := cs.Logger.With("height", height, "round", round) - // If a block is locked, prevote that. - if cs.LockedBlock != nil { - logger.Info("enterPrevote: Block was locked") - cs.signAddVote(types.VoteTypePrevote, cs.LockedBlock.Hash(), cs.LockedBlockParts.Header()) - return - } - - // If ProposalBlock is nil, prevote nil. - if cs.ProposalBlock == nil { - logger.Info("enterPrevote: ProposalBlock is nil") - cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) - return - } - - // Validate proposal block - err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock) - if err != nil { - // ProposalBlock is invalid, prevote nil. - logger.Error("enterPrevote: ProposalBlock is invalid", "err", err) - cs.signAddVote(types.VoteTypePrevote, nil, types.PartSetHeader{}) - return - } - - // Prevote cs.ProposalBlock - // NOTE: the proposal signature is validated when it is received, - // and the proposal block parts are validated as they are received (against the merkle hash in the proposal) - logger.Info("enterPrevote: ProposalBlock is valid") - cs.signAddVote(types.VoteTypePrevote, cs.ProposalBlock.Hash(), cs.ProposalBlockParts.Header()) -} - -// Enter: any +2/3 prevotes at next round. 
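-// PrevoteWait gives the remaining prevotes a chance to arrive: we have seen
-// +2/3 of prevotes for *something*, but not yet +2/3 for a single block or
-// nil, so we wait timeout_prevote before precommitting.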
-func (cs *ConsensusState) enterPrevoteWait(height int64, round int) {
-	logger := cs.Logger.With("height", height, "round", round)
-
-	if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrevoteWait <= cs.Step) {
-		logger.Debug(cmn.Fmt("enterPrevoteWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
-		return
-	}
-	if !cs.Votes.Prevotes(round).HasTwoThirdsAny() {
-		cmn.PanicSanity(cmn.Fmt("enterPrevoteWait(%v/%v), but Prevotes does not have any +2/3 votes", height, round))
-	}
-	logger.Info(cmn.Fmt("enterPrevoteWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
-
-	defer func() {
-		// Done enterPrevoteWait:
-		cs.updateRoundStep(round, cstypes.RoundStepPrevoteWait)
-		cs.newStep()
-	}()
-
-	// Wait for some more prevotes; enterPrecommit
-	cs.scheduleTimeout(cs.config.Prevote(round), height, round, cstypes.RoundStepPrevoteWait)
-}
-
-// Enter: `timeoutPrevote` after any +2/3 prevotes.
-// Enter: +2/3 precommits for block or nil.
-// Enter: any +2/3 precommits for next round.
-// Lock & precommit the ProposalBlock if we have enough prevotes for it (a POL in this round),
-// else, unlock an existing lock and precommit nil if +2/3 of prevotes were nil,
-// else, precommit nil.
-func (cs *ConsensusState) enterPrecommit(height int64, round int) {
-	logger := cs.Logger.With("height", height, "round", round)
-
-	if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommit <= cs.Step) {
-		logger.Debug(cmn.Fmt("enterPrecommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
-		return
-	}
-
-	logger.Info(cmn.Fmt("enterPrecommit(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step))
-
-	defer func() {
-		// Done enterPrecommit:
-		cs.updateRoundStep(round, cstypes.RoundStepPrecommit)
-		cs.newStep()
-	}()
-
-	// check for a polka
-	blockID, ok := cs.Votes.Prevotes(round).TwoThirdsMajority()
-
-	// If we don't have a polka, we must precommit nil.
-	if !ok {
-		if cs.LockedBlock != nil {
-			logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit while we're locked. Precommitting nil")
-		} else {
-			logger.Info("enterPrecommit: No +2/3 prevotes during enterPrecommit. Precommitting nil.")
-		}
-		cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
-		return
-	}
-
-	// At this point +2/3 prevoted for a particular block or nil.
-	cs.eventBus.PublishEventPolka(cs.RoundStateEvent())
-
-	// the latest POLRound should be this round.
-	polRound, _ := cs.Votes.POLInfo()
-	if polRound < round {
-		cmn.PanicSanity(cmn.Fmt("This POLRound should be %v but got %v", round, polRound))
-	}
-
-	// +2/3 prevoted nil. Unlock and precommit nil.
-	if len(blockID.Hash) == 0 {
-		if cs.LockedBlock == nil {
-			logger.Info("enterPrecommit: +2/3 prevoted for nil.")
-		} else {
-			logger.Info("enterPrecommit: +2/3 prevoted for nil. Unlocking")
-			cs.LockedRound = 0
-			cs.LockedBlock = nil
-			cs.LockedBlockParts = nil
-			cs.eventBus.PublishEventUnlock(cs.RoundStateEvent())
-		}
-		cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{})
-		return
-	}
-
-	// At this point, +2/3 prevoted for a particular block.
-
-	// If we're already locked on that block, precommit it, and update the LockedRound
-	if cs.LockedBlock.HashesTo(blockID.Hash) {
-		logger.Info("enterPrecommit: +2/3 prevoted locked block. 
Relocking") - cs.LockedRound = round - cs.eventBus.PublishEventRelock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) - return - } - - // If +2/3 prevoted for proposal block, stage and precommit it - if cs.ProposalBlock.HashesTo(blockID.Hash) { - logger.Info("enterPrecommit: +2/3 prevoted proposal block. Locking", "hash", blockID.Hash) - // Validate the block. - if err := cs.blockExec.ValidateBlock(cs.state, cs.ProposalBlock); err != nil { - cmn.PanicConsensus(cmn.Fmt("enterPrecommit: +2/3 prevoted for an invalid block: %v", err)) - } - cs.LockedRound = round - cs.LockedBlock = cs.ProposalBlock - cs.LockedBlockParts = cs.ProposalBlockParts - cs.eventBus.PublishEventLock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, blockID.Hash, blockID.PartsHeader) - return - } - - // There was a polka in this round for a block we don't have. - // Fetch that block, unlock, and precommit nil. - // The +2/3 prevotes for this round is the POL for our unlock. - // TODO: In the future save the POL prevotes for justification. - cs.LockedRound = 0 - cs.LockedBlock = nil - cs.LockedBlockParts = nil - if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) { - cs.ProposalBlock = nil - cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader) - } - cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) - cs.signAddVote(types.VoteTypePrecommit, nil, types.PartSetHeader{}) -} - -// Enter: any +2/3 precommits for next round. -func (cs *ConsensusState) enterPrecommitWait(height int64, round int) { - logger := cs.Logger.With("height", height, "round", round) - - if cs.Height != height || round < cs.Round || (cs.Round == round && cstypes.RoundStepPrecommitWait <= cs.Step) { - logger.Debug(cmn.Fmt("enterPrecommitWait(%v/%v): Invalid args. Current step: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - return - } - if !cs.Votes.Precommits(round).HasTwoThirdsAny() { - cmn.PanicSanity(cmn.Fmt("enterPrecommitWait(%v/%v), but Precommits does not have any +2/3 votes", height, round)) - } - logger.Info(cmn.Fmt("enterPrecommitWait(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) - - defer func() { - // Done enterPrecommitWait: - cs.updateRoundStep(round, cstypes.RoundStepPrecommitWait) - cs.newStep() - }() - - // Wait for some more precommits; enterNewRound - cs.scheduleTimeout(cs.config.Precommit(round), height, round, cstypes.RoundStepPrecommitWait) - -} - -// Enter: +2/3 precommits for block -func (cs *ConsensusState) enterCommit(height int64, commitRound int) { - logger := cs.Logger.With("height", height, "commitRound", commitRound) - - if cs.Height != height || cstypes.RoundStepCommit <= cs.Step { - logger.Debug(cmn.Fmt("enterCommit(%v/%v): Invalid args. Current step: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) - return - } - logger.Info(cmn.Fmt("enterCommit(%v/%v). Current: %v/%v/%v", height, commitRound, cs.Height, cs.Round, cs.Step)) - - defer func() { - // Done enterCommit: - // keep cs.Round the same, commitRound points to the right Precommits set. - cs.updateRoundStep(cs.Round, cstypes.RoundStepCommit) - cs.CommitRound = commitRound - cs.CommitTime = time.Now() - cs.newStep() - - // Maybe finalize immediately. - cs.tryFinalizeCommit(height) - }() - - blockID, ok := cs.Votes.Precommits(commitRound).TwoThirdsMajority() - if !ok { - cmn.PanicSanity("RunActionCommit() expects +2/3 precommits") - } - - // The Locked* fields no longer matter. 
-	// Move them over to ProposalBlock if they match the commit hash,
-	// otherwise they'll be cleared in updateToState.
-	if cs.LockedBlock.HashesTo(blockID.Hash) {
-		logger.Info("Commit is for locked block. Set ProposalBlock=LockedBlock", "blockHash", blockID.Hash)
-		cs.ProposalBlock = cs.LockedBlock
-		cs.ProposalBlockParts = cs.LockedBlockParts
-	}
-
-	// If we don't have the block being committed, set up to get it.
-	if !cs.ProposalBlock.HashesTo(blockID.Hash) {
-		if !cs.ProposalBlockParts.HasHeader(blockID.PartsHeader) {
-			logger.Info("Commit is for a block we don't know about. Set ProposalBlock=nil", "proposal", cs.ProposalBlock.Hash(), "commit", blockID.Hash)
-			// We're getting the wrong block.
-			// Set up ProposalBlockParts and keep waiting.
-			cs.ProposalBlock = nil
-			cs.ProposalBlockParts = types.NewPartSetFromHeader(blockID.PartsHeader)
-		} else {
-			// We just need to keep waiting.
-		}
-	}
-}
-
-// If we have the block AND +2/3 commits for it, finalize.
-func (cs *ConsensusState) tryFinalizeCommit(height int64) {
-	logger := cs.Logger.With("height", height)
-
-	if cs.Height != height {
-		cmn.PanicSanity(cmn.Fmt("tryFinalizeCommit() cs.Height: %v vs height: %v", cs.Height, height))
-	}
-
-	blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
-	if !ok || len(blockID.Hash) == 0 {
-		logger.Error("Attempt to finalize failed. There was no +2/3 majority, or +2/3 was for <nil>.")
-		return
-	}
-	if !cs.ProposalBlock.HashesTo(blockID.Hash) {
-		// TODO: this happens every time if we're not a validator (ugly logs)
-		// TODO: ^^ wait, why does it matter that we're a validator?
-		logger.Info("Attempt to finalize failed. We don't have the commit block.", "proposal-block", cs.ProposalBlock.Hash(), "commit-block", blockID.Hash)
-		return
-	}
-
-	// go
-	cs.finalizeCommit(height)
-}
-
-// Increment height and goto cstypes.RoundStepNewHeight
-func (cs *ConsensusState) finalizeCommit(height int64) {
-	if cs.Height != height || cs.Step != cstypes.RoundStepCommit {
-		cs.Logger.Debug(cmn.Fmt("finalizeCommit(%v): Invalid args. Current step: %v/%v/%v", height, cs.Height, cs.Round, cs.Step))
-		return
-	}
-
-	blockID, ok := cs.Votes.Precommits(cs.CommitRound).TwoThirdsMajority()
-	block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts
-
-	if !ok {
-		cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, commit does not have two thirds majority"))
-	}
-	if !blockParts.HasHeader(blockID.PartsHeader) {
-		cmn.PanicSanity(cmn.Fmt("Expected ProposalBlockParts header to be commit header"))
-	}
-	if !block.HashesTo(blockID.Hash) {
-		cmn.PanicSanity(cmn.Fmt("Cannot finalizeCommit, ProposalBlock does not hash to commit hash"))
-	}
-	if err := cs.blockExec.ValidateBlock(cs.state, block); err != nil {
-		cmn.PanicConsensus(cmn.Fmt("+2/3 committed an invalid block: %v", err))
-	}
-
-	cs.Logger.Info(cmn.Fmt("Finalizing commit of block with %d txs", block.NumTxs),
-		"height", block.Height, "hash", block.Hash(), "root", block.AppHash)
-	cs.Logger.Info(cmn.Fmt("%v", block))
-
-	fail.Fail() // XXX
-
-	// Save to blockStore.
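-	// The ordering here is what makes crash recovery work: first save the
-	// block, then fsync #ENDHEIGHT to the WAL, then ApplyBlock; a crash
-	// between any two of these steps is handled by WAL replay or by the
-	// ABCI handshake on restart (see the comments below).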
-	if cs.blockStore.Height() < block.Height {
-		// NOTE: the seenCommit is local justification to commit this block,
-		// but may differ from the LastCommit included in the next block
-		precommits := cs.Votes.Precommits(cs.CommitRound)
-		seenCommit := precommits.MakeCommit()
-		cs.blockStore.SaveBlock(block, blockParts, seenCommit)
-	} else {
-		// Happens during replay if we already saved the block but didn't commit
-		cs.Logger.Info("Calling finalizeCommit on already stored block", "height", block.Height)
-	}
-
-	fail.Fail() // XXX
-
-	// Write EndHeightMessage{} for this height, implying that the blockstore
-	// has saved the block.
-	//
-	// If we crash before writing this EndHeightMessage{}, we will recover by
-	// running ApplyBlock during the ABCI handshake when we restart. If we
-	// didn't save the block to the blockstore before writing
-	// EndHeightMessage{}, we'd have to change WAL replay -- currently it
-	// complains about replaying for heights where an #ENDHEIGHT entry already
-	// exists.
-	//
-	// Either way, the ConsensusState should not be resumed until we
-	// successfully call ApplyBlock (ie. later here, or in Handshake after
-	// restart).
-	cs.wal.WriteSync(EndHeightMessage{height}) // NOTE: fsync
-
-	fail.Fail() // XXX
-
-	// Create a copy of the state for staging and an event cache for txs.
-	stateCopy := cs.state.Copy()
-
-	// Execute and commit the block, update and save the state, and update the mempool.
-	// NOTE: The block.AppHash won't reflect these txs until the next block.
-	var err error
-	stateCopy, err = cs.blockExec.ApplyBlock(stateCopy, types.BlockID{block.Hash(), blockParts.Header()}, block)
-	if err != nil {
-		cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err)
-		err := cmn.Kill()
-		if err != nil {
-			cs.Logger.Error("Failed to kill this process - please do so manually", "err", err)
-		}
-		return
-	}
-
-	fail.Fail() // XXX
-
-	// must be called before we update state
-	cs.recordMetrics(height, block)
-
-	// NewHeightStep!
-	cs.updateToState(stateCopy)
-
-	fail.Fail() // XXX
-
-	// cs.StartTime is already set.
-	// Schedule Round0 to start soon.
-	cs.scheduleRound0(&cs.RoundState)
-
-	// By here,
-	// * cs.Height has been incremented to height+1
-	// * cs.Step is now cstypes.RoundStepNewHeight
-	// * cs.StartTime is set to when we will start round0.
-} - -func (cs *ConsensusState) recordMetrics(height int64, block *types.Block) { - cs.metrics.Validators.Set(float64(cs.Validators.Size())) - cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower())) - missingValidators := 0 - missingValidatorsPower := int64(0) - for i, val := range cs.Validators.Validators { - var vote *types.Vote - if i < len(block.LastCommit.Precommits) { - vote = block.LastCommit.Precommits[i] - } - if vote == nil { - missingValidators++ - missingValidatorsPower += val.VotingPower - } - } - cs.metrics.MissingValidators.Set(float64(missingValidators)) - cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower)) - cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence))) - byzantineValidatorsPower := int64(0) - for _, ev := range block.Evidence.Evidence { - if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil { - byzantineValidatorsPower += val.VotingPower - } - } - cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower)) - - if height > 1 { - lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) - cs.metrics.BlockIntervalSeconds.Observe( - block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), - ) - } - - cs.metrics.NumTxs.Set(float64(block.NumTxs)) - cs.metrics.BlockSizeBytes.Set(float64(block.Size())) - cs.metrics.TotalTxs.Set(float64(block.TotalTxs)) -} - -//----------------------------------------------------------------------------- - -func (cs *ConsensusState) defaultSetProposal(proposal *types.Proposal) error { - // Already have one - // TODO: possibly catch double proposals - if cs.Proposal != nil { - return nil - } - - // Does not apply - if proposal.Height != cs.Height || proposal.Round != cs.Round { - return nil - } - - // We don't care about the proposal if we're already in cstypes.RoundStepCommit. - if cstypes.RoundStepCommit <= cs.Step { - return nil - } - - // Verify POLRound, which must be -1 or between 0 and proposal.Round exclusive. - if proposal.POLRound != -1 && - (proposal.POLRound < 0 || proposal.Round <= proposal.POLRound) { - return ErrInvalidProposalPOLRound - } - - // Verify signature - if !cs.Validators.GetProposer().PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) { - return ErrInvalidProposalSignature - } - - cs.Proposal = proposal - cs.ProposalBlockParts = types.NewPartSetFromHeader(proposal.BlockPartsHeader) - cs.Logger.Info("Received proposal", "proposal", proposal) - return nil -} - -// NOTE: block is not necessarily valid. -// Asynchronously triggers either enterPrevote (before we timeout of propose) or tryFinalizeCommit, once we have the full block. -func (cs *ConsensusState) addProposalBlockPart(height int64, part *types.Part) (added bool, err error) { - // Blocks might be reused, so round mismatch is OK - if cs.Height != height { - cs.Logger.Debug("Received block part from wrong height", "height", height) - return false, nil - } - - // We're not expecting a block part. - if cs.ProposalBlockParts == nil { - cs.Logger.Info("Received a block part when we're not expecting any", "height", height) - return false, nil // TODO: bad peer? Return error? - } - - added, err = cs.ProposalBlockParts.AddPart(part) - if err != nil { - return added, err - } - if added && cs.ProposalBlockParts.IsComplete() { - // Added and completed! 
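-		// With the last part in place, the full block is decoded from the
-		// concatenated parts; each part was already verified against the
-		// merkle hash advertised in the proposal as it arrived.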
- _, err = cdc.UnmarshalBinaryReader(cs.ProposalBlockParts.GetReader(), &cs.ProposalBlock, int64(cs.state.ConsensusParams.BlockSize.MaxBytes)) - if err != nil { - return true, err - } - // NOTE: it's possible to receive complete proposal blocks for future rounds without having the proposal - cs.Logger.Info("Received complete proposal block", "height", cs.ProposalBlock.Height, "hash", cs.ProposalBlock.Hash()) - - // Update Valid* if we can. - prevotes := cs.Votes.Prevotes(cs.Round) - blockID, hasTwoThirds := prevotes.TwoThirdsMajority() - if hasTwoThirds && !blockID.IsZero() && (cs.ValidRound < cs.Round) { - if cs.ProposalBlock.HashesTo(blockID.Hash) { - cs.Logger.Info("Updating valid block to new proposal block", - "valid-round", cs.Round, "valid-block-hash", cs.ProposalBlock.Hash()) - cs.ValidRound = cs.Round - cs.ValidBlock = cs.ProposalBlock - cs.ValidBlockParts = cs.ProposalBlockParts - } - // TODO: In case there is +2/3 majority in Prevotes set for some - // block and cs.ProposalBlock contains different block, either - // proposer is faulty or voting power of faulty processes is more - // than 1/3. We should trigger in the future accountability - // procedure at this point. - } - - if cs.Step == cstypes.RoundStepPropose && cs.isProposalComplete() { - // Move onto the next step - cs.enterPrevote(height, cs.Round) - } else if cs.Step == cstypes.RoundStepCommit { - // If we're waiting on the proposal block... - cs.tryFinalizeCommit(height) - } - return true, nil - } - return added, nil -} - -// Attempt to add the vote. if its a duplicate signature, dupeout the validator -func (cs *ConsensusState) tryAddVote(vote *types.Vote, peerID p2p.ID) error { - _, err := cs.addVote(vote, peerID) - if err != nil { - // If the vote height is off, we'll just ignore it, - // But if it's a conflicting sig, add it to the cs.evpool. - // If it's otherwise invalid, punish peer. - if err == ErrVoteHeightMismatch { - return err - } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { - if bytes.Equal(vote.ValidatorAddress, cs.privValidator.GetAddress()) { - cs.Logger.Error("Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", vote.Height, "round", vote.Round, "type", vote.Type) - return err - } - cs.evpool.AddEvidence(voteErr.DuplicateVoteEvidence) - return err - } else { - // Probably an invalid signature / Bad peer. - // Seems this can also err sometimes with "Unexpected step" - perhaps not from a bad peer ? - cs.Logger.Error("Error attempting to add vote", "err", err) - return ErrAddingVote - } - } - return nil -} - -//----------------------------------------------------------------------------- - -func (cs *ConsensusState) addVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { - cs.Logger.Debug("addVote", "voteHeight", vote.Height, "voteType", vote.Type, "valIndex", vote.ValidatorIndex, "csHeight", cs.Height) - - // A precommit for the previous height? - // These come in while we wait timeoutCommit - if vote.Height+1 == cs.Height { - if !(cs.Step == cstypes.RoundStepNewHeight && vote.Type == types.VoteTypePrecommit) { - // TODO: give the reason .. 
- // fmt.Errorf("tryAddVote: Wrong height, not a LastCommit straggler commit.") - return added, ErrVoteHeightMismatch - } - added, err = cs.LastCommit.AddVote(vote) - if !added { - return added, err - } - - cs.Logger.Info(cmn.Fmt("Added to lastPrecommits: %v", cs.LastCommit.StringShort())) - cs.eventBus.PublishEventVote(types.EventDataVote{vote}) - cs.evsw.FireEvent(types.EventVote, vote) - - // if we can skip timeoutCommit and have all the votes now, - if cs.config.SkipTimeoutCommit && cs.LastCommit.HasAll() { - // go straight to new round (skip timeout commit) - // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) - cs.enterNewRound(cs.Height, 0) - } - - return - } - - // Height mismatch is ignored. - // Not necessarily a bad peer, but not favourable behaviour. - if vote.Height != cs.Height { - err = ErrVoteHeightMismatch - cs.Logger.Info("Vote ignored and not added", "voteHeight", vote.Height, "csHeight", cs.Height, "err", err) - return - } - - height := cs.Height - added, err = cs.Votes.AddVote(vote, peerID) - if !added { - // Either duplicate, or error upon cs.Votes.AddByIndex() - return - } - - cs.eventBus.PublishEventVote(types.EventDataVote{vote}) - cs.evsw.FireEvent(types.EventVote, vote) - - switch vote.Type { - case types.VoteTypePrevote: - prevotes := cs.Votes.Prevotes(vote.Round) - cs.Logger.Info("Added to prevote", "vote", vote, "prevotes", prevotes.StringShort()) - - // If +2/3 prevotes for a block or nil for *any* round: - if blockID, ok := prevotes.TwoThirdsMajority(); ok { - - // There was a polka! - // If we're locked but this is a recent polka, unlock. - // If it matches our ProposalBlock, update the ValidBlock - - // Unlock if `cs.LockedRound < vote.Round <= cs.Round` - // NOTE: If vote.Round > cs.Round, we'll deal with it when we get to vote.Round - if (cs.LockedBlock != nil) && - (cs.LockedRound < vote.Round) && - (vote.Round <= cs.Round) && - !cs.LockedBlock.HashesTo(blockID.Hash) { - - cs.Logger.Info("Unlocking because of POL.", "lockedRound", cs.LockedRound, "POLRound", vote.Round) - cs.LockedRound = 0 - cs.LockedBlock = nil - cs.LockedBlockParts = nil - cs.eventBus.PublishEventUnlock(cs.RoundStateEvent()) - } - - // Update Valid* if we can. - // NOTE: our proposal block may be nil or not what received a polka.. - // TODO: we may want to still update the ValidBlock and obtain it via gossipping - if !blockID.IsZero() && - (cs.ValidRound < vote.Round) && - (vote.Round <= cs.Round) && - cs.ProposalBlock.HashesTo(blockID.Hash) { - - cs.Logger.Info("Updating ValidBlock because of POL.", "validRound", cs.ValidRound, "POLRound", vote.Round) - cs.ValidRound = vote.Round - cs.ValidBlock = cs.ProposalBlock - cs.ValidBlockParts = cs.ProposalBlockParts - } - } - - // If +2/3 prevotes for *anything* for this or future round: - if cs.Round <= vote.Round && prevotes.HasTwoThirdsAny() { - // Round-skip over to PrevoteWait or goto Precommit. - cs.enterNewRound(height, vote.Round) // if the vote is ahead of us - if prevotes.HasTwoThirdsMajority() { - cs.enterPrecommit(height, vote.Round) - } else { - cs.enterPrevote(height, vote.Round) // if the vote is ahead of us - cs.enterPrevoteWait(height, vote.Round) - } - } else if cs.Proposal != nil && 0 <= cs.Proposal.POLRound && cs.Proposal.POLRound == vote.Round { - // If the proposal is now complete, enter prevote of cs.Round. 
- if cs.isProposalComplete() { - cs.enterPrevote(height, cs.Round) - } - } - - case types.VoteTypePrecommit: - precommits := cs.Votes.Precommits(vote.Round) - cs.Logger.Info("Added to precommit", "vote", vote, "precommits", precommits.StringShort()) - blockID, ok := precommits.TwoThirdsMajority() - if ok { - if len(blockID.Hash) == 0 { - cs.enterNewRound(height, vote.Round+1) - } else { - cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) - cs.enterCommit(height, vote.Round) - - if cs.config.SkipTimeoutCommit && precommits.HasAll() { - // if we have all the votes now, - // go straight to new round (skip timeout commit) - // cs.scheduleTimeout(time.Duration(0), cs.Height, 0, cstypes.RoundStepNewHeight) - cs.enterNewRound(cs.Height, 0) - } - - } - } else if cs.Round <= vote.Round && precommits.HasTwoThirdsAny() { - cs.enterNewRound(height, vote.Round) - cs.enterPrecommit(height, vote.Round) - cs.enterPrecommitWait(height, vote.Round) - } - default: - panic(cmn.Fmt("Unexpected vote type %X", vote.Type)) // go-wire should prevent this. - } - - return -} - -func (cs *ConsensusState) signVote(type_ byte, hash []byte, header types.PartSetHeader) (*types.Vote, error) { - addr := cs.privValidator.GetAddress() - valIndex, _ := cs.Validators.GetByAddress(addr) - vote := &types.Vote{ - ValidatorAddress: addr, - ValidatorIndex: valIndex, - Height: cs.Height, - Round: cs.Round, - Timestamp: time.Now().UTC(), - Type: type_, - BlockID: types.BlockID{hash, header}, - } - err := cs.privValidator.SignVote(cs.state.ChainID, vote) - return vote, err -} - -// sign the vote and publish on internalMsgQueue -func (cs *ConsensusState) signAddVote(type_ byte, hash []byte, header types.PartSetHeader) *types.Vote { - // if we don't have a key or we're not in the validator set, do nothing - if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetAddress()) { - return nil - } - vote, err := cs.signVote(type_, hash, header) - if err == nil { - cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) - cs.Logger.Info("Signed and pushed vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) - return vote - } - //if !cs.replayMode { - cs.Logger.Error("Error signing vote", "height", cs.Height, "round", cs.Round, "vote", vote, "err", err) - //} - return nil -} - -//--------------------------------------------------------- - -func CompareHRS(h1 int64, r1 int, s1 cstypes.RoundStepType, h2 int64, r2 int, s2 cstypes.RoundStepType) int { - if h1 < h2 { - return -1 - } else if h1 > h2 { - return 1 - } - if r1 < r2 { - return -1 - } else if r1 > r2 { - return 1 - } - if s1 < s2 { - return -1 - } else if s1 > s2 { - return 1 - } - return 0 -} diff --git a/consensus/state_test.go b/consensus/state_test.go deleted file mode 100644 index d0def630..00000000 --- a/consensus/state_test.go +++ /dev/null @@ -1,1099 +0,0 @@ -package consensus - -import ( - "bytes" - "context" - "fmt" - "testing" - "time" - - cstypes "github.com/tendermint/tendermint/consensus/types" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -func init() { - config = ResetConfig("consensus_state_test") -} - -func ensureProposeTimeout(timeoutPropose int) time.Duration { - return time.Duration(timeoutPropose*2) * time.Millisecond -} - -/* - -ProposeSuite -x * TestProposerSelection0 - round robin ordering, round 0 -x * TestProposerSelection2 - round robin ordering, round 2++ 
-x * TestEnterProposeNoValidator - timeout into prevote round
-x * TestEnterPropose - finish propose without timing out (we have the proposal)
-x * TestBadProposal - 2 vals, bad proposal (bad block state hash), should prevote and precommit nil
-FullRoundSuite
-x * TestFullRound1 - 1 val, full successful round
-x * TestFullRoundNil - 1 val, full round of nil
-x * TestFullRound2 - 2 vals, both required for full round
-LockSuite
-x * TestLockNoPOL - 2 vals, 4 rounds. one val locked, precommits nil every round except first.
-x * TestLockPOLRelock - 4 vals, one precommits, other 3 polka at next round, so we unlock and precommit the polka
-x * TestLockPOLUnlock - 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
-x * TestLockPOLSafety1 - 4 vals. We shouldn't change lock based on polka at earlier round
-x * TestLockPOLSafety2 - 4 vals. After unlocking, we shouldn't relock based on polka at earlier round
- * TestNetworkLock - once +1/3 precommits, network should be locked
- * TestNetworkLockPOL - once +1/3 precommits, the block with more recent polka is committed
-SlashingSuite
-x * TestSlashingPrevotes - a validator prevoting twice in a round gets slashed
-x * TestSlashingPrecommits - a validator precommitting twice in a round gets slashed
-CatchupSuite
- * TestCatchup - if we might be behind and we've seen any 2/3 prevotes, round skip to new round, precommit, or prevote
-HaltSuite
-x * TestHalt1 - if we see +2/3 precommits after timing out into new round, we should still commit
-
-*/
-
-//----------------------------------------------------------------------------------------------------
-// ProposeSuite
-
-func TestStateProposerSelection0(t *testing.T) {
-	cs1, vss := randConsensusState(4)
-	height, round := cs1.Height, cs1.Round
-
-	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
-	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
-
-	startTestRound(cs1, height, round)
-
-	// wait for new round so proposer is set
-	<-newRoundCh
-
-	// let's commit a block and ensure proposer for the next height is correct
-	prop := cs1.GetRoundState().Validators.GetProposer()
-	if !bytes.Equal(prop.Address, cs1.privValidator.GetAddress()) {
-		t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address)
-	}
-
-	// wait for complete proposal
-	<-proposalCh
-
-	rs := cs1.GetRoundState()
-	signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[1:]...)
-
-	// wait for new round so next validator is set
-	<-newRoundCh
-
-	prop = cs1.GetRoundState().Validators.GetProposer()
-	if !bytes.Equal(prop.Address, vss[1].GetAddress()) {
-		panic(cmn.Fmt("expected proposer to be validator %d. Got %X", 1, prop.Address))
-	}
-}
-
-// Now let's do it all again, but starting from round 2 instead of 0
-func TestStateProposerSelection2(t *testing.T) {
-	cs1, vss := randConsensusState(4) // test needs more work for more than 3 validators
-
-	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
-
-	// this time we jump in at round 2
-	incrementRound(vss[1:]...)
-	incrementRound(vss[1:]...)
-	startTestRound(cs1, cs1.Height, 2)
-
-	<-newRoundCh // wait for the new round
-
-	// everyone just votes nil; we get a new proposer each round
-	for i := 0; i < len(vss); i++ {
-		prop := cs1.GetRoundState().Validators.GetProposer()
-		if !bytes.Equal(prop.Address, vss[(i+2)%len(vss)].GetAddress()) {
-			panic(cmn.Fmt("expected proposer to be validator %d. 
Got %X", (i+2)%len(vss), prop.Address)) - } - - rs := cs1.GetRoundState() - signAddVotes(cs1, types.VoteTypePrecommit, nil, rs.ProposalBlockParts.Header(), vss[1:]...) - <-newRoundCh // wait for the new round event each round - - incrementRound(vss[1:]...) - } - -} - -// a non-validator should timeout into the prevote round -func TestStateEnterProposeNoPrivValidator(t *testing.T) { - cs, _ := randConsensusState(1) - cs.SetPrivValidator(nil) - height, round := cs.Height, cs.Round - - // Listen for propose timeout event - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - - startTestRound(cs, height, round) - - // if we're not a validator, EnterPropose should timeout - ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) - select { - case <-timeoutCh: - case <-ticker.C: - panic("Expected EnterPropose to timeout") - - } - - if cs.GetRoundState().Proposal != nil { - t.Error("Expected to make no proposal, since no privValidator") - } -} - -// a validator should not timeout of the prevote round (TODO: unless the block is really big!) -func TestStateEnterProposeYesPrivValidator(t *testing.T) { - cs, _ := randConsensusState(1) - height, round := cs.Height, cs.Round - - // Listen for propose timeout event - - timeoutCh := subscribe(cs.eventBus, types.EventQueryTimeoutPropose) - proposalCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - - cs.enterNewRound(height, round) - cs.startRoutines(3) - - <-proposalCh - - // Check that Proposal, ProposalBlock, ProposalBlockParts are set. - rs := cs.GetRoundState() - if rs.Proposal == nil { - t.Error("rs.Proposal should be set") - } - if rs.ProposalBlock == nil { - t.Error("rs.ProposalBlock should be set") - } - if rs.ProposalBlockParts.Total() == 0 { - t.Error("rs.ProposalBlockParts should be set") - } - - // if we're a validator, enterPropose should not timeout - ticker := time.NewTicker(ensureProposeTimeout(cs.config.TimeoutPropose)) - select { - case <-timeoutCh: - panic("Expected EnterPropose not to timeout") - case <-ticker.C: - - } -} - -func TestStateBadProposal(t *testing.T) { - cs1, vss := randConsensusState(2) - height, round := cs1.Height, cs1.Round - vs2 := vss[1] - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - - propBlock, _ := cs1.createProposalBlock() //changeProposer(t, cs1, vs2) - - // make the second validator the proposer by incrementing round - round = round + 1 - incrementRound(vss[1:]...) 
- - // make the block bad by tampering with statehash - stateHash := propBlock.AppHash - if len(stateHash) == 0 { - stateHash = make([]byte, 32) - } - stateHash[0] = byte((stateHash[0] + 1) % 255) - propBlock.AppHash = stateHash - propBlockParts := propBlock.MakePartSet(partSize) - proposal := types.NewProposal(vs2.Height, round, propBlockParts.Header(), -1, types.BlockID{}) - if err := vs2.SignProposal(config.ChainID(), proposal); err != nil { - t.Fatal("failed to sign bad proposal", err) - } - - // set the proposal block - if err := cs1.SetProposalAndBlock(proposal, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - - // start the machine - startTestRound(cs1, height, round) - - // wait for proposal - <-proposalCh - - // wait for prevote - <-voteCh - - validatePrevote(t, cs1, round, vss[0], nil) - - // add bad prevote from vs2 and wait for it - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh - - // wait for precommit - <-voteCh - - validatePrecommit(t, cs1, round, 0, vss[0], nil, nil) - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) -} - -//---------------------------------------------------------------------------------------------------- -// FullRoundSuite - -// propose, prevote, and precommit a block -func TestStateFullRound1(t *testing.T) { - cs, vss := randConsensusState(1) - height, round := cs.Height, cs.Round - - // NOTE: buffer capacity of 0 ensures we can validate prevote and last commit - // before consensus can move to the next height (and cause a race condition) - cs.eventBus.Stop() - eventBus := types.NewEventBusWithBufferCapacity(0) - eventBus.SetLogger(log.TestingLogger().With("module", "events")) - cs.SetEventBus(eventBus) - eventBus.Start() - - voteCh := subscribe(cs.eventBus, types.EventQueryVote) - propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) - - startTestRound(cs, height, round) - - <-newRoundCh - - // grab proposal - re := <-propCh - propBlockHash := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState).ProposalBlock.Hash() - - <-voteCh // wait for prevote - validatePrevote(t, cs, round, vss[0], propBlockHash) - - <-voteCh // wait for precommit - - // we're going to roll right into new height - <-newRoundCh - - validateLastPrecommit(t, cs, vss[0], propBlockHash) -} - -// nil is proposed, so prevote and precommit nil -func TestStateFullRoundNil(t *testing.T) { - cs, vss := randConsensusState(1) - height, round := cs.Height, cs.Round - - voteCh := subscribe(cs.eventBus, types.EventQueryVote) - - cs.enterPrevote(height, round) - cs.startRoutines(4) - - <-voteCh // prevote - <-voteCh // precommit - - // should prevote and precommit nil - validatePrevoteAndPrecommit(t, cs, round, 0, vss[0], nil, nil) -} - -// run through propose, prevote, precommit commit with two validators -// where the first validator has to wait for votes from the second -func TestStateFullRound2(t *testing.T) { - cs1, vss := randConsensusState(2) - vs2 := vss[1] - height, round := cs1.Height, cs1.Round - - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - - // start round and wait for propose and prevote - startTestRound(cs1, height, round) - - <-voteCh // prevote - - // we should be stuck in limbo waiting for more prevotes - rs := cs1.GetRoundState() - propBlockHash, 
propPartsHeader := rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header() - - // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propPartsHeader, vs2) - <-voteCh - - <-voteCh //precommit - - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlockHash, propBlockHash) - - // we should be stuck in limbo waiting for more precommits - - // precommit arrives from vs2: - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propPartsHeader, vs2) - <-voteCh - - // wait to finish commit, propose in next height - <-newBlockCh -} - -//------------------------------------------------------------------------------------------ -// LockSuite - -// two validators, 4 rounds. -// two vals take turns proposing. val1 locks on first one, precommits nil on everything else -func TestStateLockNoPOL(t *testing.T) { - cs1, vss := randConsensusState(2) - vs2 := vss[1] - height := cs1.Height - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes - - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - - /* - Round1 (cs1, B) // B B // B B2 - */ - - // start round and wait for prevote - cs1.enterNewRound(height, 0) - cs1.startRoutines(0) - - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - theBlockHash := rs.ProposalBlock.Hash() - - <-voteCh // prevote - - // we should now be stuck in limbo forever, waiting for more prevotes - // prevote arrives from vs2: - signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2) - <-voteCh // prevote - - <-voteCh // precommit - - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash) - - // we should now be stuck in limbo forever, waiting for more precommits - // lets add one for a different block - // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round - hash := make([]byte, len(theBlockHash)) - copy(hash, theBlockHash) - hash[0] = byte((hash[0] + 1) % 255) - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh // precommit - - // (note we're entering precommit for a second time this round) - // but with invalid args. then we enterPrecommitWait, and the timeout to new round - <-timeoutWaitCh - - /// - - <-newRoundCh - t.Log("#### ONTO ROUND 1") - /* - Round2 (cs1, B) // B B2 - */ - - incrementRound(vs2) - - // now we're on a new round and not the proposer, so wait for timeout - re = <-timeoutProposeCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - if rs.ProposalBlock != nil { - panic("Expected proposal block to be nil") - } - - // wait to finish prevote - <-voteCh - - // we should have prevoted our locked block - validatePrevote(t, cs1, 1, vss[0], rs.LockedBlock.Hash()) - - // add a conflicting prevote from the other validator - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh - - // now we're going to enter prevote again, but with invalid args - // and then prevote wait, which should timeout. 
then wait for precommit - <-timeoutWaitCh - - <-voteCh // precommit - - // the proposed block should still be locked and our precommit added - // we should precommit nil and be locked on the proposal - validatePrecommit(t, cs1, 1, 0, vss[0], nil, theBlockHash) - - // add conflicting precommit from vs2 - // NOTE: in practice we should never get to a point where there are precommits for different blocks at the same round - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.LockedBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh - - // (note we're entering precommit for a second time this round, but with invalid args - // then we enterPrecommitWait and timeout into NewRound - <-timeoutWaitCh - - <-newRoundCh - t.Log("#### ONTO ROUND 2") - /* - Round3 (vs2, _) // B, B2 - */ - - incrementRound(vs2) - - re = <-proposalCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - // now we're on a new round and are the proposer - if !bytes.Equal(rs.ProposalBlock.Hash(), rs.LockedBlock.Hash()) { - panic(cmn.Fmt("Expected proposal block to be locked block. Got %v, Expected %v", rs.ProposalBlock, rs.LockedBlock)) - } - - <-voteCh // prevote - - validatePrevote(t, cs1, 2, vss[0], rs.LockedBlock.Hash()) - - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh - - <-timeoutWaitCh // prevote wait - <-voteCh // precommit - - validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but be locked on proposal - - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - <-voteCh - - <-timeoutWaitCh - - // before we time out into new round, set next proposal block - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - if prop == nil || propBlock == nil { - t.Fatal("Failed to create proposal block with vs2") - } - - incrementRound(vs2) - - <-newRoundCh - t.Log("#### ONTO ROUND 3") - /* - Round4 (vs2, C) // B C // B C - */ - - // now we're on a new round and not the proposer - // so set the proposal block - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlock.MakePartSet(partSize), ""); err != nil { - t.Fatal(err) - } - - <-proposalCh - <-voteCh // prevote - - // prevote for locked block (not proposal) - validatePrevote(t, cs1, 0, vss[0], cs1.LockedBlock.Hash()) - - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) - <-voteCh - - <-timeoutWaitCh - <-voteCh - - validatePrecommit(t, cs1, 2, 0, vss[0], nil, theBlockHash) // precommit nil but locked on proposal - - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2) // NOTE: conflicting precommits at same height - <-voteCh -} - -// 4 vals, one precommits, other 3 polka at next round, so we unlock and precomit the polka -func TestStateLockPOLRelock(t *testing.T) { - cs1, vss := randConsensusState(4) - vs2, vs3, vs4 := vss[1], vss[2], vss[3] - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes - - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - - // everything done 
from perspective of cs1
-
-	/*
-		Round1 (cs1, B) // B B B B // B nil B nil
-
-		e.g. vs2 and vs4 didn't see the 2/3 prevotes
-	*/
-
-	// start round and wait for propose and prevote
-	startTestRound(cs1, cs1.Height, 0)
-
-	<-newRoundCh
-	re := <-proposalCh
-	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
-	theBlockHash := rs.ProposalBlock.Hash()
-
-	<-voteCh // prevote
-
-	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
-	// prevotes
-	discardFromChan(voteCh, 3)
-
-	<-voteCh // our precommit
-	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)
-
-	// add precommits from the rest
-	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
-	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
-	// precommits
-	discardFromChan(voteCh, 3)
-
-	// before we time out to the new round, set the new proposal
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
-	propBlockParts := propBlock.MakePartSet(partSize)
-	propBlockHash := propBlock.Hash()
-
-	incrementRound(vs2, vs3, vs4)
-
-	// timeout to new round
-	<-timeoutWaitCh
-
-	// XXX: this isn't guaranteed to get there before the timeoutPropose ...
-	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
-		t.Fatal(err)
-	}
-
-	<-newRoundCh
-	t.Log("### ONTO ROUND 1")
-
-	/*
-		Round2 (vs2, C) // B C C C // C C C _)
-
-		cs1 changes lock!
-	*/
-
-	// now we're on a new round and not the proposer
-	// but we should receive the proposal
-	select {
-	case <-proposalCh:
-	case <-timeoutProposeCh:
-		<-proposalCh
-	}
-
-	// go to prevote, prevote for locked block (not proposal), move on
-	<-voteCh
-	validatePrevote(t, cs1, 0, vss[0], theBlockHash)
-
-	// now let's add prevotes from everyone else for the new block
-	signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4)
-	// prevotes
-	discardFromChan(voteCh, 3)
-
-	// now either we go to PrevoteWait or Precommit
-	select {
-	case <-timeoutWaitCh: // we're in PrevoteWait, go to Precommit
-		// XXX: there's no guarantee we see the polka, this might be a precommit for nil,
-		// in which case the test fails!
-		<-voteCh
-	case <-voteCh: // we went straight to Precommit
-	}
-
-	// we should have unlocked and locked on the new block
-	validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash)
-
-	signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash, propBlockParts.Header(), vs2, vs3)
-	discardFromChan(voteCh, 2)
-
-	be := <-newBlockCh
-	b := be.(types.EventDataNewBlockHeader)
-	re = <-newRoundCh
-	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
-	if rs.Height != 2 {
-		panic("Expected height to increment")
-	}
-
-	if !bytes.Equal(b.Header.Hash(), propBlockHash) {
-		panic("Expected new block to be proposal block")
-	}
-}
-
-// 4 vals, one precommits, other 3 polka nil at next round, so we unlock and precommit nil
-func TestStateLockPOLUnlock(t *testing.T) {
-	cs1, vss := randConsensusState(4)
-	vs2, vs3, vs4 := vss[1], vss[2], vss[3]
-
-	partSize := cs1.state.ConsensusParams.BlockPartSizeBytes
-
-	proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal)
-	timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose)
-	timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait)
-	newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound)
-	unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock)
-	voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress())
-
-	// everything done from perspective of cs1
-
-	/*
-		Round1 (cs1, B) // B B B B // B nil B nil
-
-		e.g. didn't see the 2/3 prevotes
-	*/
-
-	// start round and wait for propose and prevote
-	startTestRound(cs1, cs1.Height, 0)
-	<-newRoundCh
-	re := <-proposalCh
-	rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
-	theBlockHash := rs.ProposalBlock.Hash()
-
-	<-voteCh // prevote
-
-	signAddVotes(cs1, types.VoteTypePrevote, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs2, vs3, vs4)
-
-	<-voteCh // precommit
-
-	// the proposed block should now be locked and our precommit added
-	validatePrecommit(t, cs1, 0, 0, vss[0], theBlockHash, theBlockHash)
-
-	rs = cs1.GetRoundState()
-
-	// add precommits from the rest
-	signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4)
-	signAddVotes(cs1, types.VoteTypePrecommit, cs1.ProposalBlock.Hash(), cs1.ProposalBlockParts.Header(), vs3)
-
-	// before we time out into new round, set next proposal block
-	prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1)
-	propBlockParts := propBlock.MakePartSet(partSize)
-
-	incrementRound(vs2, vs3, vs4)
-
-	// timeout to new round
-	re = <-timeoutWaitCh
-	rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState)
-	lockedBlockHash := rs.LockedBlock.Hash()
-
-	// XXX: this isn't guaranteed to get there before the timeoutPropose ...
-	if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil {
-		t.Fatal(err)
-	}
-
-	<-newRoundCh
-	t.Log("#### ONTO ROUND 1")
-	/*
-		Round2 (vs2, C) // B nil nil nil // nil nil nil _
-
-		cs1 unlocks!
-	*/
-
-	// now we're on a new round and not the proposer,
-	// but we should receive the proposal
-	select {
-	case <-proposalCh:
-	case <-timeoutProposeCh:
-		<-proposalCh
-	}
-
-	// go to prevote, prevote for locked block (not proposal)
-	<-voteCh
-	validatePrevote(t, cs1, 0, vss[0], lockedBlockHash)
-	// now let's add prevotes from everyone else for nil (a polka!)
- signAddVotes(cs1, types.VoteTypePrevote, nil, types.PartSetHeader{}, vs2, vs3, vs4) - - // the polka makes us unlock and precommit nil - <-unlockCh - <-voteCh // precommit - - // we should have unlocked and committed nil - // NOTE: since we don't relock on nil, the lock round is 0 - validatePrecommit(t, cs1, 1, 0, vss[0], nil, nil) - - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) - <-newRoundCh -} - -// 4 vals -// a polka at round 1 but we miss it -// then a polka at round 2 that we lock on -// then we see the polka from round 1 but shouldn't unlock -func TestStateLockPOLSafety1(t *testing.T) { - cs1, vss := randConsensusState(4) - vs2, vs3, vs4 := vss[1], vss[2], vss[3] - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - propBlock := rs.ProposalBlock - - <-voteCh // prevote - - validatePrevote(t, cs1, 0, vss[0], propBlock.Hash()) - - // the others sign a polka but we don't see it - prevotes := signVotes(types.VoteTypePrevote, propBlock.Hash(), propBlock.MakePartSet(partSize).Header(), vs2, vs3, vs4) - - // before we time out into new round, set next proposer - // and next proposal block - /* - _, v1 := cs1.Validators.GetByAddress(vss[0].Address) - v1.VotingPower = 1 - if updated := cs1.Validators.Update(v1); !updated { - panic("failed to update validator") - }*/ - - t.Logf("old prop hash %v", fmt.Sprintf("%X", propBlock.Hash())) - - // we do see them precommit nil - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3, vs4) - - prop, propBlock := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash := propBlock.Hash() - propBlockParts := propBlock.MakePartSet(partSize) - - incrementRound(vs2, vs3, vs4) - - //XXX: this isnt guaranteed to get there before the timeoutPropose ... - if err := cs1.SetProposalAndBlock(prop, propBlock, propBlockParts, "some peer"); err != nil { - t.Fatal(err) - } - - <-newRoundCh - t.Log("### ONTO ROUND 1") - /*Round2 - // we timeout and prevote our lock - // a polka happened but we didn't see it! 
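-	// since we missed that polka, we are not locked and are free to prevote the new proposal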
- */ - - // now we're on a new round and not the proposer, - // but we should receive the proposal - select { - case re = <-proposalCh: - case <-timeoutProposeCh: - re = <-proposalCh - } - - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - if rs.LockedBlock != nil { - panic("we should not be locked!") - } - t.Logf("new prop hash %v", fmt.Sprintf("%X", propBlockHash)) - // go to prevote, prevote for proposal block - <-voteCh - validatePrevote(t, cs1, 1, vss[0], propBlockHash) - - // now we see the others prevote for it, so we should lock on it - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash, propBlockParts.Header(), vs2, vs3, vs4) - - <-voteCh // precommit - - // we should have precommitted - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash, propBlockHash) - - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs3) - - <-timeoutWaitCh - - incrementRound(vs2, vs3, vs4) - - <-newRoundCh - - t.Log("### ONTO ROUND 2") - /*Round3 - we see the polka from round 1 but we shouldn't unlock! - */ - - // timeout of propose - <-timeoutProposeCh - - // finish prevote - <-voteCh - - // we should prevote what we're locked on - validatePrevote(t, cs1, 2, vss[0], propBlockHash) - - newStepCh := subscribe(cs1.eventBus, types.EventQueryNewRoundStep) - - // add prevotes from the earlier round - addVotes(cs1, prevotes...) - - t.Log("Done adding prevotes!") - - ensureNoNewStep(newStepCh) -} - -// 4 vals. -// polka P0 at R0, P1 at R1, and P2 at R2, -// we lock on P0 at R0, don't see P1, and unlock using P2 at R2 -// then we should make sure we don't lock using P1 - -// What we want: -// dont see P0, lock on P1 at R1, dont unlock using P0 at R2 -func TestStateLockPOLSafety2(t *testing.T) { - cs1, vss := randConsensusState(4) - vs2, vs3, vs4 := vss[1], vss[2], vss[3] - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // the block for R0: gets polkad but we miss it - // (even though we signed it, shhh) - _, propBlock0 := decideProposal(cs1, vss[0], cs1.Height, cs1.Round) - propBlockHash0 := propBlock0.Hash() - propBlockParts0 := propBlock0.MakePartSet(partSize) - - // the others sign a polka but we don't see it - prevotes := signVotes(types.VoteTypePrevote, propBlockHash0, propBlockParts0.Header(), vs2, vs3, vs4) - - // the block for round 1 - prop1, propBlock1 := decideProposal(cs1, vs2, vs2.Height, vs2.Round+1) - propBlockHash1 := propBlock1.Hash() - propBlockParts1 := propBlock1.MakePartSet(partSize) - propBlockID1 := types.BlockID{propBlockHash1, propBlockParts1.Header()} - - incrementRound(vs2, vs3, vs4) - - cs1.updateRoundStep(0, cstypes.RoundStepPrecommitWait) - - t.Log("### ONTO Round 1") - // jump in at round 1 - height := cs1.Height - startTestRound(cs1, height, 1) - <-newRoundCh - - if err := cs1.SetProposalAndBlock(prop1, propBlock1, propBlockParts1, "some peer"); err != nil { - t.Fatal(err) - } - <-proposalCh - - <-voteCh // prevote - - signAddVotes(cs1, types.VoteTypePrevote, propBlockHash1, propBlockParts1.Header(), vs2, vs3, vs4) - - <-voteCh // precommit - // the proposed block should now be locked and our 
precommit added - validatePrecommit(t, cs1, 1, 1, vss[0], propBlockHash1, propBlockHash1) - - // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2, vs4) - signAddVotes(cs1, types.VoteTypePrecommit, propBlockHash1, propBlockParts1.Header(), vs3) - - incrementRound(vs2, vs3, vs4) - - // timeout of precommit wait to new round - <-timeoutWaitCh - - // in round 2 we see the polkad block from round 0 - newProp := types.NewProposal(height, 2, propBlockParts0.Header(), 0, propBlockID1) - if err := vs3.SignProposal(config.ChainID(), newProp); err != nil { - t.Fatal(err) - } - if err := cs1.SetProposalAndBlock(newProp, propBlock0, propBlockParts0, "some peer"); err != nil { - t.Fatal(err) - } - - // Add the pol votes - addVotes(cs1, prevotes...) - - <-newRoundCh - t.Log("### ONTO Round 2") - /*Round2 - // now we see the polka from round 1, but we shouldnt unlock - */ - - select { - case <-timeoutProposeCh: - <-proposalCh - case <-proposalCh: - } - - select { - case <-unlockCh: - panic("validator unlocked using an old polka") - case <-voteCh: - // prevote our locked block - } - validatePrevote(t, cs1, 2, vss[0], propBlockHash1) - -} - -//------------------------------------------------------------------------------------------ -// SlashingSuite -// TODO: Slashing - -/* -func TestStateSlashingPrevotes(t *testing.T) { - cs1, vss := randConsensusState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.VoteTypePrevote, hash, rs.ProposalBlockParts.Header(), vs2) - - <-timeoutWaitCh - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) 
- - // add the conflicting vote - signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} - -func TestStateSlashingPrecommits(t *testing.T) { - cs1, vss := randConsensusState(2) - vs2 := vss[1] - - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - <-voteCh // prevote - - // add prevote from vs2 - signAddVotes(cs1, types.VoteTypePrevote, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - <-voteCh // precommit - - // we should now be stuck in limbo forever, waiting for more prevotes - // add one for a different block should cause us to go into prevote wait - hash := rs.ProposalBlock.Hash() - hash[0] = byte(hash[0]+1) % 255 - signAddVotes(cs1, types.VoteTypePrecommit, hash, rs.ProposalBlockParts.Header(), vs2) - - // NOTE: we have to send the vote for different block first so we don't just go into precommit round right - // away and ignore more prevotes (and thus fail to slash!) - - // add precommit from vs2 - signAddVotes(cs1, types.VoteTypePrecommit, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vs2) - - // XXX: Check for existence of Dupeout info -} -*/ - -//------------------------------------------------------------------------------------------ -// CatchupSuite - -//------------------------------------------------------------------------------------------ -// HaltSuite - -// 4 vals. -// we receive a final precommit after going into next round, but others might have gone to commit already! -func TestStateHalt1(t *testing.T) { - cs1, vss := randConsensusState(4) - vs2, vs3, vs4 := vss[1], vss[2], vss[3] - - partSize := cs1.state.ConsensusParams.BlockPartSizeBytes - - proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) - newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - voteCh := subscribeToVoter(cs1, cs1.privValidator.GetAddress()) - - // start round and wait for propose and prevote - startTestRound(cs1, cs1.Height, 0) - <-newRoundCh - re := <-proposalCh - rs := re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - propBlock := rs.ProposalBlock - propBlockParts := propBlock.MakePartSet(partSize) - - <-voteCh // prevote - - signAddVotes(cs1, types.VoteTypePrevote, propBlock.Hash(), propBlockParts.Header(), vs3, vs4) - <-voteCh // precommit - - // the proposed block should now be locked and our precommit added - validatePrecommit(t, cs1, 0, 0, vss[0], propBlock.Hash(), propBlock.Hash()) - - // add precommits from the rest - signAddVotes(cs1, types.VoteTypePrecommit, nil, types.PartSetHeader{}, vs2) // didnt receive proposal - signAddVotes(cs1, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header(), vs3) - // we receive this later, but vs3 might receive it earlier and with ours will go to commit! 
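-	// (we hold vs4's precommit back and only deliver it after we've entered the next round)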
- precommit4 := signVote(vs4, types.VoteTypePrecommit, propBlock.Hash(), propBlockParts.Header()) - - incrementRound(vs2, vs3, vs4) - - // timeout to new round - <-timeoutWaitCh - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - t.Log("### ONTO ROUND 1") - /*Round2 - // we timeout and prevote our lock - // a polka happened but we didn't see it! - */ - - // go to prevote, prevote for locked block - <-voteCh // prevote - validatePrevote(t, cs1, 0, vss[0], rs.LockedBlock.Hash()) - - // now we receive the precommit from the previous round - addVotes(cs1, precommit4) - - // receiving that precommit should take us straight to commit - <-newBlockCh - re = <-newRoundCh - rs = re.(types.EventDataRoundState).RoundState.(*cstypes.RoundState) - - if rs.Height != 2 { - panic("expected height to increment") - } -} - -// subscribe subscribes test client to the given query and returns a channel with cap = 1. -func subscribe(eventBus *types.EventBus, q tmpubsub.Query) <-chan interface{} { - out := make(chan interface{}, 1) - err := eventBus.Subscribe(context.Background(), testSubscriber, q, out) - if err != nil { - panic(fmt.Sprintf("failed to subscribe %s to %v", testSubscriber, q)) - } - return out -} - -// discardFromChan reads n values from the channel. -func discardFromChan(ch <-chan interface{}, n int) { - for i := 0; i < n; i++ { - <-ch - } -} diff --git a/consensus/test_data/many_blocks.cswal b/consensus/test_data/many_blocks.cswal deleted file mode 100644 index d443fff7..00000000 Binary files a/consensus/test_data/many_blocks.cswal and /dev/null differ diff --git a/consensus/ticker.go b/consensus/ticker.go deleted file mode 100644 index b37b7c49..00000000 --- a/consensus/ticker.go +++ /dev/null @@ -1,134 +0,0 @@ -package consensus - -import ( - "time" - - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -var ( - tickTockBufferSize = 10 -) - -// TimeoutTicker is a timer that schedules timeouts -// conditional on the height/round/step in the timeoutInfo. -// The timeoutInfo.Duration may be non-positive. -type TimeoutTicker interface { - Start() error - Stop() error - Chan() <-chan timeoutInfo // on which to receive a timeout - ScheduleTimeout(ti timeoutInfo) // reset the timer - - SetLogger(log.Logger) -} - -// timeoutTicker wraps time.Timer, -// scheduling timeouts only for greater height/round/step -// than what it's already seen. -// Timeouts are scheduled along the tickChan, -// and fired on the tockChan. -type timeoutTicker struct { - cmn.BaseService - - timer *time.Timer - tickChan chan timeoutInfo // for scheduling timeouts - tockChan chan timeoutInfo // for notifying about them -} - -// NewTimeoutTicker returns a new TimeoutTicker. -func NewTimeoutTicker() TimeoutTicker { - tt := &timeoutTicker{ - timer: time.NewTimer(0), - tickChan: make(chan timeoutInfo, tickTockBufferSize), - tockChan: make(chan timeoutInfo, tickTockBufferSize), - } - tt.BaseService = *cmn.NewBaseService(nil, "TimeoutTicker", tt) - tt.stopTimer() // don't want to fire until the first scheduled timeout - return tt -} - -// OnStart implements cmn.Service. It starts the timeout routine. -func (t *timeoutTicker) OnStart() error { - - go t.timeoutRoutine() - - return nil -} - -// OnStop implements cmn.Service. It stops the timeout routine. -func (t *timeoutTicker) OnStop() { - t.BaseService.OnStop() - t.stopTimer() -} - -// Chan returns a channel on which timeouts are sent. 
-func (t *timeoutTicker) Chan() <-chan timeoutInfo { - return t.tockChan -} - -// ScheduleTimeout schedules a new timeout by sending on the internal tickChan. -// The timeoutRoutine is always available to read from tickChan, so this won't block. -// The scheduling may fail if the timeoutRoutine has already scheduled a timeout for a later height/round/step. -func (t *timeoutTicker) ScheduleTimeout(ti timeoutInfo) { - t.tickChan <- ti -} - -//------------------------------------------------------------- - -// stop the timer and drain if necessary -func (t *timeoutTicker) stopTimer() { - // Stop() returns false if it was already fired or was stopped - if !t.timer.Stop() { - select { - case <-t.timer.C: - default: - t.Logger.Debug("Timer already stopped") - } - } -} - -// send on tickChan to start a new timer. -// timers are interupted and replaced by new ticks from later steps -// timeouts of 0 on the tickChan will be immediately relayed to the tockChan -func (t *timeoutTicker) timeoutRoutine() { - t.Logger.Debug("Starting timeout routine") - var ti timeoutInfo - for { - select { - case newti := <-t.tickChan: - t.Logger.Debug("Received tick", "old_ti", ti, "new_ti", newti) - - // ignore tickers for old height/round/step - if newti.Height < ti.Height { - continue - } else if newti.Height == ti.Height { - if newti.Round < ti.Round { - continue - } else if newti.Round == ti.Round { - if ti.Step > 0 && newti.Step <= ti.Step { - continue - } - } - } - - // stop the last timer - t.stopTimer() - - // update timeoutInfo and reset timer - // NOTE time.Timer allows duration to be non-positive - ti = newti - t.timer.Reset(ti.Duration) - t.Logger.Debug("Scheduled timeout", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) - case <-t.timer.C: - t.Logger.Info("Timed out", "dur", ti.Duration, "height", ti.Height, "round", ti.Round, "step", ti.Step) - // go routine here guarantees timeoutRoutine doesn't block. - // Determinism comes from playback in the receiveRoutine. - // We can eliminate it by merging the timeoutRoutine into receiveRoutine - // and managing the timeouts ourselves with a millisecond ticker - go func(toi timeoutInfo) { t.tockChan <- toi }(ti) - case <-t.Quit(): - return - } - } -} diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go deleted file mode 100644 index 3c986794..00000000 --- a/consensus/types/height_vote_set.go +++ /dev/null @@ -1,261 +0,0 @@ -package types - -import ( - "errors" - "fmt" - "strings" - "sync" - - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -type RoundVoteSet struct { - Prevotes *types.VoteSet - Precommits *types.VoteSet -} - -var ( - GotVoteFromUnwantedRoundError = errors.New("Peer has sent a vote that does not match our round for more than one round") -) - -/* -Keeps track of all VoteSets from round 0 to round 'round'. - -Also keeps track of up to one RoundVoteSet greater than -'round' from each peer, to facilitate catchup syncing of commits. - -A commit is +2/3 precommits for a block at a round, -but which round is not known in advance, so when a peer -provides a precommit for a round greater than mtx.round, -we create a new entry in roundVoteSets but also remember the -peer to prevent abuse. -We let each peer provide us with up to 2 unexpected "catchup" rounds. -One for their LastCommit round, and another for the official commit round. 
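-
-For example: if our round is 1 and a peer sends a precommit for round 5,
-we create the round-5 vote sets and record round 5 against that peer.
-A second unexpected round from the same peer is allowed, but a third is
-rejected with GotVoteFromUnwantedRoundError (see AddVote below).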
-*/ -type HeightVoteSet struct { - chainID string - height int64 - valSet *types.ValidatorSet - - mtx sync.Mutex - round int // max tracked round - roundVoteSets map[int]RoundVoteSet // keys: [0...round] - peerCatchupRounds map[p2p.ID][]int // keys: peer.ID; values: at most 2 rounds -} - -func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { - hvs := &HeightVoteSet{ - chainID: chainID, - } - hvs.Reset(height, valSet) - return hvs -} - -func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - - hvs.height = height - hvs.valSet = valSet - hvs.roundVoteSets = make(map[int]RoundVoteSet) - hvs.peerCatchupRounds = make(map[p2p.ID][]int) - - hvs.addRound(0) - hvs.round = 0 -} - -func (hvs *HeightVoteSet) Height() int64 { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - return hvs.height -} - -func (hvs *HeightVoteSet) Round() int { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - return hvs.round -} - -// Create more RoundVoteSets up to round. -func (hvs *HeightVoteSet) SetRound(round int) { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - if hvs.round != 0 && (round < hvs.round+1) { - cmn.PanicSanity("SetRound() must increment hvs.round") - } - for r := hvs.round + 1; r <= round; r++ { - if _, ok := hvs.roundVoteSets[r]; ok { - continue // Already exists because peerCatchupRounds. - } - hvs.addRound(r) - } - hvs.round = round -} - -func (hvs *HeightVoteSet) addRound(round int) { - if _, ok := hvs.roundVoteSets[round]; ok { - cmn.PanicSanity("addRound() for an existing round") - } - // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrevote, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.VoteTypePrecommit, hvs.valSet) - hvs.roundVoteSets[round] = RoundVoteSet{ - Prevotes: prevotes, - Precommits: precommits, - } -} - -// Duplicate votes return added=false, err=nil. -// By convention, peerID is "" if origin is self. -func (hvs *HeightVoteSet) AddVote(vote *types.Vote, peerID p2p.ID) (added bool, err error) { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - if !types.IsVoteTypeValid(vote.Type) { - return - } - voteSet := hvs.getVoteSet(vote.Round, vote.Type) - if voteSet == nil { - if rndz := hvs.peerCatchupRounds[peerID]; len(rndz) < 2 { - hvs.addRound(vote.Round) - voteSet = hvs.getVoteSet(vote.Round, vote.Type) - hvs.peerCatchupRounds[peerID] = append(rndz, vote.Round) - } else { - // punish peer - err = GotVoteFromUnwantedRoundError - return - } - } - added, err = voteSet.AddVote(vote) - return -} - -func (hvs *HeightVoteSet) Prevotes(round int) *types.VoteSet { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.VoteTypePrevote) -} - -func (hvs *HeightVoteSet) Precommits(round int) *types.VoteSet { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - return hvs.getVoteSet(round, types.VoteTypePrecommit) -} - -// Last round and blockID that has +2/3 prevotes for a particular block or nil. -// Returns -1 if no such round exists. 
-func (hvs *HeightVoteSet) POLInfo() (polRound int, polBlockID types.BlockID) { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - for r := hvs.round; r >= 0; r-- { - rvs := hvs.getVoteSet(r, types.VoteTypePrevote) - polBlockID, ok := rvs.TwoThirdsMajority() - if ok { - return r, polBlockID - } - } - return -1, types.BlockID{} -} - -func (hvs *HeightVoteSet) getVoteSet(round int, type_ byte) *types.VoteSet { - rvs, ok := hvs.roundVoteSets[round] - if !ok { - return nil - } - switch type_ { - case types.VoteTypePrevote: - return rvs.Prevotes - case types.VoteTypePrecommit: - return rvs.Precommits - default: - cmn.PanicSanity(cmn.Fmt("Unexpected vote type %X", type_)) - return nil - } -} - -// If a peer claims that it has 2/3 majority for given blockKey, call this. -// NOTE: if there are too many peers, or too much peer churn, -// this can cause memory issues. -// TODO: implement ability to remove peers too -func (hvs *HeightVoteSet) SetPeerMaj23(round int, type_ byte, peerID p2p.ID, blockID types.BlockID) error { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - if !types.IsVoteTypeValid(type_) { - return fmt.Errorf("SetPeerMaj23: Invalid vote type %v", type_) - } - voteSet := hvs.getVoteSet(round, type_) - if voteSet == nil { - return nil // something we don't know about yet - } - return voteSet.SetPeerMaj23(types.P2PID(peerID), blockID) -} - -//--------------------------------------------------------- -// string and json - -func (hvs *HeightVoteSet) String() string { - return hvs.StringIndented("") -} - -func (hvs *HeightVoteSet) StringIndented(indent string) string { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - vsStrings := make([]string, 0, (len(hvs.roundVoteSets)+1)*2) - // rounds 0 ~ hvs.round inclusive - for round := 0; round <= hvs.round; round++ { - voteSetString := hvs.roundVoteSets[round].Prevotes.StringShort() - vsStrings = append(vsStrings, voteSetString) - voteSetString = hvs.roundVoteSets[round].Precommits.StringShort() - vsStrings = append(vsStrings, voteSetString) - } - // all other peer catchup rounds - for round, roundVoteSet := range hvs.roundVoteSets { - if round <= hvs.round { - continue - } - voteSetString := roundVoteSet.Prevotes.StringShort() - vsStrings = append(vsStrings, voteSetString) - voteSetString = roundVoteSet.Precommits.StringShort() - vsStrings = append(vsStrings, voteSetString) - } - return cmn.Fmt(`HeightVoteSet{H:%v R:0~%v -%s %v -%s}`, - hvs.height, hvs.round, - indent, strings.Join(vsStrings, "\n"+indent+" "), - indent) -} - -func (hvs *HeightVoteSet) MarshalJSON() ([]byte, error) { - hvs.mtx.Lock() - defer hvs.mtx.Unlock() - - allVotes := hvs.toAllRoundVotes() - return cdc.MarshalJSON(allVotes) -} - -func (hvs *HeightVoteSet) toAllRoundVotes() []roundVotes { - totalRounds := hvs.round + 1 - allVotes := make([]roundVotes, totalRounds) - // rounds 0 ~ hvs.round inclusive - for round := 0; round < totalRounds; round++ { - allVotes[round] = roundVotes{ - Round: round, - Prevotes: hvs.roundVoteSets[round].Prevotes.VoteStrings(), - PrevotesBitArray: hvs.roundVoteSets[round].Prevotes.BitArrayString(), - Precommits: hvs.roundVoteSets[round].Precommits.VoteStrings(), - PrecommitsBitArray: hvs.roundVoteSets[round].Precommits.BitArrayString(), - } - } - // TODO: all other peer catchup rounds - return allVotes -} - -type roundVotes struct { - Round int `json:"round"` - Prevotes []string `json:"prevotes"` - PrevotesBitArray string `json:"prevotes_bit_array"` - Precommits []string `json:"precommits"` - PrecommitsBitArray string `json:"precommits_bit_array"` -} diff --git 
a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go deleted file mode 100644 index 678d3475..00000000 --- a/consensus/types/height_vote_set_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package types - -import ( - "testing" - "time" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -var config *cfg.Config // NOTE: must be reset for each _test.go file - -func init() { - config = cfg.ResetTestRoot("consensus_height_vote_set_test") -} - -func TestPeerCatchupRounds(t *testing.T) { - valSet, privVals := types.RandValidatorSet(10, 1) - - hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) - - vote999_0 := makeVoteHR(t, 1, 999, privVals, 0) - added, err := hvs.AddVote(vote999_0, "peer1") - if !added || err != nil { - t.Error("Expected to successfully add vote from peer", added, err) - } - - vote1000_0 := makeVoteHR(t, 1, 1000, privVals, 0) - added, err = hvs.AddVote(vote1000_0, "peer1") - if !added || err != nil { - t.Error("Expected to successfully add vote from peer", added, err) - } - - vote1001_0 := makeVoteHR(t, 1, 1001, privVals, 0) - added, err = hvs.AddVote(vote1001_0, "peer1") - if err != GotVoteFromUnwantedRoundError { - t.Errorf("Expected GotVoteFromUnwantedRoundError, but got %v", err) - } - if added { - t.Error("Expected to *not* add vote from peer, too many catchup rounds.") - } - - added, err = hvs.AddVote(vote1001_0, "peer2") - if !added || err != nil { - t.Error("Expected to successfully add vote from another peer") - } - -} - -func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote { - privVal := privVals[valIndex] - vote := &types.Vote{ - ValidatorAddress: privVal.GetAddress(), - ValidatorIndex: valIndex, - Height: height, - Round: round, - Timestamp: time.Now().UTC(), - Type: types.VoteTypePrecommit, - BlockID: types.BlockID{[]byte("fakehash"), types.PartSetHeader{}}, - } - chainID := config.ChainID() - err := privVal.SignVote(chainID, vote) - if err != nil { - panic(cmn.Fmt("Error signing vote: %v", err)) - return nil - } - return vote -} diff --git a/consensus/types/peer_round_state.go b/consensus/types/peer_round_state.go deleted file mode 100644 index dcb6c8e0..00000000 --- a/consensus/types/peer_round_state.go +++ /dev/null @@ -1,57 +0,0 @@ -package types - -import ( - "fmt" - "time" - - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -//----------------------------------------------------------------------------- - -// PeerRoundState contains the known state of a peer. -// NOTE: Read-only when returned by PeerState.GetRoundState(). -type PeerRoundState struct { - Height int64 `json:"height"` // Height peer is at - Round int `json:"round"` // Round peer is at, -1 if unknown. - Step RoundStepType `json:"step"` // Step peer is at - StartTime time.Time `json:"start_time"` // Estimated start of round 0 at this height - Proposal bool `json:"proposal"` // True if peer has proposal for this round - ProposalBlockPartsHeader types.PartSetHeader `json:"proposal_block_parts_header"` // - ProposalBlockParts *cmn.BitArray `json:"proposal_block_parts"` // - ProposalPOLRound int `json:"proposal_pol_round"` // Proposal's POL round. -1 if none. - ProposalPOL *cmn.BitArray `json:"proposal_pol"` // nil until ProposalPOLMessage received. 
- Prevotes *cmn.BitArray `json:"prevotes"` // All votes peer has for this round - Precommits *cmn.BitArray `json:"precommits"` // All precommits peer has for this round - LastCommitRound int `json:"last_commit_round"` // Round of commit for last height. -1 if none. - LastCommit *cmn.BitArray `json:"last_commit"` // All commit precommits of commit for last height. - CatchupCommitRound int `json:"catchup_commit_round"` // Round that we have commit for. Not necessarily unique. -1 if none. - CatchupCommit *cmn.BitArray `json:"catchup_commit"` // All commit precommits peer has for this height & CatchupCommitRound -} - -// String returns a string representation of the PeerRoundState -func (prs PeerRoundState) String() string { - return prs.StringIndented("") -} - -// StringIndented returns a string representation of the PeerRoundState -func (prs PeerRoundState) StringIndented(indent string) string { - return fmt.Sprintf(`PeerRoundState{ -%s %v/%v/%v @%v -%s Proposal %v -> %v -%s POL %v (round %v) -%s Prevotes %v -%s Precommits %v -%s LastCommit %v (round %v) -%s Catchup %v (round %v) -%s}`, - indent, prs.Height, prs.Round, prs.Step, prs.StartTime, - indent, prs.ProposalBlockPartsHeader, prs.ProposalBlockParts, - indent, prs.ProposalPOL, prs.ProposalPOLRound, - indent, prs.Prevotes, - indent, prs.Precommits, - indent, prs.LastCommit, prs.LastCommitRound, - indent, prs.CatchupCommit, prs.CatchupCommitRound, - indent) -} diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go deleted file mode 100644 index 14da1f14..00000000 --- a/consensus/types/round_state.go +++ /dev/null @@ -1,164 +0,0 @@ -package types - -import ( - "encoding/json" - "fmt" - "time" - - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -//----------------------------------------------------------------------------- -// RoundStepType enum type - -// RoundStepType enumerates the state of the consensus state machine -type RoundStepType uint8 // These must be numeric, ordered. - -// RoundStepType -const ( - RoundStepNewHeight = RoundStepType(0x01) // Wait til CommitTime + timeoutCommit - RoundStepNewRound = RoundStepType(0x02) // Setup new round and go to RoundStepPropose - RoundStepPropose = RoundStepType(0x03) // Did propose, gossip proposal - RoundStepPrevote = RoundStepType(0x04) // Did prevote, gossip prevotes - RoundStepPrevoteWait = RoundStepType(0x05) // Did receive any +2/3 prevotes, start timeout - RoundStepPrecommit = RoundStepType(0x06) // Did precommit, gossip precommits - RoundStepPrecommitWait = RoundStepType(0x07) // Did receive any +2/3 precommits, start timeout - RoundStepCommit = RoundStepType(0x08) // Entered commit state machine - // NOTE: RoundStepNewHeight acts as RoundStepCommitWait. -) - -// String returns a string -func (rs RoundStepType) String() string { - switch rs { - case RoundStepNewHeight: - return "RoundStepNewHeight" - case RoundStepNewRound: - return "RoundStepNewRound" - case RoundStepPropose: - return "RoundStepPropose" - case RoundStepPrevote: - return "RoundStepPrevote" - case RoundStepPrevoteWait: - return "RoundStepPrevoteWait" - case RoundStepPrecommit: - return "RoundStepPrecommit" - case RoundStepPrecommitWait: - return "RoundStepPrecommitWait" - case RoundStepCommit: - return "RoundStepCommit" - default: - return "RoundStepUnknown" // Cannot panic. - } -} - -//----------------------------------------------------------------------------- - -// RoundState defines the internal consensus state. -// NOTE: Not thread safe. 
Should only be manipulated by functions downstream -// of the cs.receiveRoutine -type RoundState struct { - Height int64 `json:"height"` // Height we are working on - Round int `json:"round"` - Step RoundStepType `json:"step"` - StartTime time.Time `json:"start_time"` - CommitTime time.Time `json:"commit_time"` // Subjective time when +2/3 precommits for Block at Round were found - Validators *types.ValidatorSet `json:"validators"` - Proposal *types.Proposal `json:"proposal"` - ProposalBlock *types.Block `json:"proposal_block"` - ProposalBlockParts *types.PartSet `json:"proposal_block_parts"` - LockedRound int `json:"locked_round"` - LockedBlock *types.Block `json:"locked_block"` - LockedBlockParts *types.PartSet `json:"locked_block_parts"` - ValidRound int `json:"valid_round"` // Last known round with POL for non-nil valid block. - ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. - ValidBlockParts *types.PartSet `json:"valid_block_parts"` // Last known block parts of POL mentioned above. - Votes *HeightVoteSet `json:"votes"` - CommitRound int `json:"commit_round"` // - LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 - LastValidators *types.ValidatorSet `json:"last_validators"` -} - -// Compressed version of the RoundState for use in RPC -type RoundStateSimple struct { - HeightRoundStep string `json:"height/round/step"` - StartTime time.Time `json:"start_time"` - ProposalBlockHash cmn.HexBytes `json:"proposal_block_hash"` - LockedBlockHash cmn.HexBytes `json:"locked_block_hash"` - ValidBlockHash cmn.HexBytes `json:"valid_block_hash"` - Votes json.RawMessage `json:"height_vote_set"` -} - -// Compress the RoundState to RoundStateSimple -func (rs *RoundState) RoundStateSimple() RoundStateSimple { - votesJSON, err := rs.Votes.MarshalJSON() - if err != nil { - panic(err) - } - return RoundStateSimple{ - HeightRoundStep: fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step), - StartTime: rs.StartTime, - ProposalBlockHash: rs.ProposalBlock.Hash(), - LockedBlockHash: rs.LockedBlock.Hash(), - ValidBlockHash: rs.ValidBlock.Hash(), - Votes: votesJSON, - } -} - -// RoundStateEvent returns the H/R/S of the RoundState as an event. 
-func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { - // XXX: copy the RoundState - // if we want to avoid this, we may need synchronous events after all - rsCopy := *rs - edrs := types.EventDataRoundState{ - Height: rs.Height, - Round: rs.Round, - Step: rs.Step.String(), - RoundState: &rsCopy, - } - return edrs -} - -// String returns a string -func (rs *RoundState) String() string { - return rs.StringIndented("") -} - -// StringIndented returns a string -func (rs *RoundState) StringIndented(indent string) string { - return fmt.Sprintf(`RoundState{ -%s H:%v R:%v S:%v -%s StartTime: %v -%s CommitTime: %v -%s Validators: %v -%s Proposal: %v -%s ProposalBlock: %v %v -%s LockedRound: %v -%s LockedBlock: %v %v -%s ValidRound: %v -%s ValidBlock: %v %v -%s Votes: %v -%s LastCommit: %v -%s LastValidators:%v -%s}`, - indent, rs.Height, rs.Round, rs.Step, - indent, rs.StartTime, - indent, rs.CommitTime, - indent, rs.Validators.StringIndented(indent+" "), - indent, rs.Proposal, - indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(), - indent, rs.LockedRound, - indent, rs.LockedBlockParts.StringShort(), rs.LockedBlock.StringShort(), - indent, rs.ValidRound, - indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(), - indent, rs.Votes.StringIndented(indent+" "), - indent, rs.LastCommit.StringShort(), - indent, rs.LastValidators.StringIndented(indent+" "), - indent) -} - -// StringShort returns a string -func (rs *RoundState) StringShort() string { - return fmt.Sprintf(`RoundState{H:%v R:%v S:%v ST:%v}`, - rs.Height, rs.Round, rs.Step, rs.StartTime) -} diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go deleted file mode 100644 index dc88c3a1..00000000 --- a/consensus/types/round_state_test.go +++ /dev/null @@ -1,95 +0,0 @@ -package types - -import ( - "testing" - "time" - - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -func BenchmarkRoundStateDeepCopy(b *testing.B) { - b.StopTimer() - - // Random validators - nval, ntxs := 100, 100 - vset, _ := types.RandValidatorSet(nval, 1) - precommits := make([]*types.Vote, nval) - blockID := types.BlockID{ - Hash: cmn.RandBytes(20), - PartsHeader: types.PartSetHeader{ - Hash: cmn.RandBytes(20), - }, - } - sig := crypto.SignatureEd25519{} - for i := 0; i < nval; i++ { - precommits[i] = &types.Vote{ - ValidatorAddress: types.Address(cmn.RandBytes(20)), - Timestamp: time.Now(), - BlockID: blockID, - Signature: sig, - } - } - txs := make([]types.Tx, ntxs) - for i := 0; i < ntxs; i++ { - txs[i] = cmn.RandBytes(100) - } - // Random block - block := &types.Block{ - Header: &types.Header{ - ChainID: cmn.RandStr(12), - Time: time.Now(), - LastBlockID: blockID, - LastCommitHash: cmn.RandBytes(20), - DataHash: cmn.RandBytes(20), - ValidatorsHash: cmn.RandBytes(20), - ConsensusHash: cmn.RandBytes(20), - AppHash: cmn.RandBytes(20), - LastResultsHash: cmn.RandBytes(20), - EvidenceHash: cmn.RandBytes(20), - }, - Data: &types.Data{ - Txs: txs, - }, - Evidence: types.EvidenceData{}, - LastCommit: &types.Commit{ - BlockID: blockID, - Precommits: precommits, - }, - } - parts := block.MakePartSet(4096) - // Random Proposal - proposal := &types.Proposal{ - Timestamp: time.Now(), - BlockPartsHeader: types.PartSetHeader{ - Hash: cmn.RandBytes(20), - }, - POLBlockID: blockID, - Signature: sig, - } - // Random HeightVoteSet - // TODO: hvs := - - rs := &RoundState{ - StartTime: 
time.Now(), - CommitTime: time.Now(), - Validators: vset, - Proposal: proposal, - ProposalBlock: block, - ProposalBlockParts: parts, - LockedBlock: block, - LockedBlockParts: parts, - ValidBlock: block, - ValidBlockParts: parts, - Votes: nil, // TODO - LastCommit: nil, // TODO - LastValidators: vset, - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - amino.DeepCopy(rs) - } -} diff --git a/consensus/types/wire.go b/consensus/types/wire.go deleted file mode 100644 index bd5c4497..00000000 --- a/consensus/types/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package types - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/consensus/version.go b/consensus/version.go deleted file mode 100644 index 2c137bf7..00000000 --- a/consensus/version.go +++ /dev/null @@ -1,13 +0,0 @@ -package consensus - -import ( - cmn "github.com/tendermint/tmlibs/common" -) - -// kind of arbitrary -var Spec = "1" // async -var Major = "0" // -var Minor = "2" // replay refactor -var Revision = "2" // validation -> commit - -var Version = cmn.Fmt("v%s/%s.%s.%s", Spec, Major, Minor, Revision) diff --git a/consensus/wal.go b/consensus/wal.go deleted file mode 100644 index 3d9bf8af..00000000 --- a/consensus/wal.go +++ /dev/null @@ -1,323 +0,0 @@ -package consensus - -import ( - "encoding/binary" - "fmt" - "hash/crc32" - "io" - "path/filepath" - "time" - - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tendermint/types" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" -) - -const ( - // must be greater than params.BlockGossip.BlockPartSizeBytes + a few bytes - maxMsgSizeBytes = 1024 * 1024 // 1MB -) - -//-------------------------------------------------------- -// types and functions for saving consensus messages - -type TimedWALMessage struct { - Time time.Time `json:"time"` // for debugging purposes - Msg WALMessage `json:"msg"` -} - -// EndHeightMessage marks the end of the given height inside WAL. -// @internal used by scripts/wal2json util. -type EndHeightMessage struct { - Height int64 `json:"height"` -} - -type WALMessage interface{} - -func RegisterWALMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*WALMessage)(nil), nil) - cdc.RegisterConcrete(types.EventDataRoundState{}, "tendermint/wal/EventDataRoundState", nil) - cdc.RegisterConcrete(msgInfo{}, "tendermint/wal/MsgInfo", nil) - cdc.RegisterConcrete(timeoutInfo{}, "tendermint/wal/TimeoutInfo", nil) - cdc.RegisterConcrete(EndHeightMessage{}, "tendermint/wal/EndHeightMessage", nil) -} - -//-------------------------------------------------------- -// Simple write-ahead logger - -// WAL is an interface for any write-ahead logger. -type WAL interface { - Write(WALMessage) - WriteSync(WALMessage) - Group() *auto.Group - SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) - - Start() error - Stop() error - Wait() -} - -// Write ahead logger writes msgs to disk before they are processed. 
-// Can be used for crash-recovery and deterministic replay -// TODO: currently the wal is overwritten during replay catchup -// give it a mode so it's either reading or appending - must read to end to start appending again -type baseWAL struct { - cmn.BaseService - - group *auto.Group - - enc *WALEncoder -} - -func NewWAL(walFile string) (*baseWAL, error) { - err := cmn.EnsureDir(filepath.Dir(walFile), 0700) - if err != nil { - return nil, errors.Wrap(err, "failed to ensure WAL directory is in place") - } - - group, err := auto.OpenGroup(walFile) - if err != nil { - return nil, err - } - wal := &baseWAL{ - group: group, - enc: NewWALEncoder(group), - } - wal.BaseService = *cmn.NewBaseService(nil, "baseWAL", wal) - return wal, nil -} - -func (wal *baseWAL) Group() *auto.Group { - return wal.group -} - -func (wal *baseWAL) OnStart() error { - size, err := wal.group.Head.Size() - if err != nil { - return err - } else if size == 0 { - wal.WriteSync(EndHeightMessage{0}) - } - err = wal.group.Start() - return err -} - -func (wal *baseWAL) OnStop() { - wal.group.Stop() - wal.group.Close() -} - -// Write is called in newStep and for each receive on the -// peerMsgQueue and the timeoutTicker. -// NOTE: does not call fsync() -func (wal *baseWAL) Write(msg WALMessage) { - if wal == nil { - return - } - - // Write the wal message - if err := wal.enc.Encode(&TimedWALMessage{time.Now(), msg}); err != nil { - panic(cmn.Fmt("Error writing msg to consensus wal: %v \n\nMessage: %v", err, msg)) - } -} - -// WriteSync is called when we receive a msg from ourselves -// so that we write to disk before sending signed messages. -// NOTE: calls fsync() -func (wal *baseWAL) WriteSync(msg WALMessage) { - if wal == nil { - return - } - - wal.Write(msg) - if err := wal.group.Flush(); err != nil { - panic(cmn.Fmt("Error flushing consensus wal buf to file. Error: %v \n", err)) - } -} - -// WALSearchOptions are optional arguments to SearchForEndHeight. -type WALSearchOptions struct { - // IgnoreDataCorruptionErrors set to true will result in skipping data corruption errors. - IgnoreDataCorruptionErrors bool -} - -// SearchForEndHeight searches for the EndHeightMessage with the given height -// and returns an auto.GroupReader, whenever it was found or not and an error. -// Group reader will be nil if found equals false. -// -// CONTRACT: caller must close group reader. -func (wal *baseWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) { - var msg *TimedWALMessage - lastHeightFound := int64(-1) - - // NOTE: starting from the last file in the group because we're usually - // searching for the last height. See replay.go - min, max := wal.group.MinIndex(), wal.group.MaxIndex() - wal.Logger.Debug("Searching for height", "height", height, "min", min, "max", max) - for index := max; index >= min; index-- { - gr, err = wal.group.NewReader(index) - if err != nil { - return nil, false, err - } - - dec := NewWALDecoder(gr) - for { - msg, err = dec.Decode() - if err == io.EOF { - // OPTIMISATION: no need to look for height in older files if we've seen h < height - if lastHeightFound > 0 && lastHeightFound < height { - gr.Close() - return nil, false, nil - } - // check next file - break - } - if options.IgnoreDataCorruptionErrors && IsDataCorruptionError(err) { - wal.Logger.Debug("Corrupted entry. 
Skipping...", "err", err) - // do nothing - continue - } else if err != nil { - gr.Close() - return nil, false, err - } - - if m, ok := msg.Msg.(EndHeightMessage); ok { - lastHeightFound = m.Height - if m.Height == height { // found - wal.Logger.Debug("Found", "height", height, "index", index) - return gr, true, nil - } - } - } - gr.Close() - } - - return nil, false, nil -} - -/////////////////////////////////////////////////////////////////////////////// - -// A WALEncoder writes custom-encoded WAL messages to an output stream. - -// -// Format: 4 bytes CRC sum + 4 bytes length + arbitrary-length value (go-amino encoded) -type WALEncoder struct { - wr io.Writer -} - -// NewWALEncoder returns a new encoder that writes to wr. -func NewWALEncoder(wr io.Writer) *WALEncoder { - return &WALEncoder{wr} -} - -// Encode writes the custom encoding of v to the stream. -func (enc *WALEncoder) Encode(v *TimedWALMessage) error { - data := cdc.MustMarshalBinaryBare(v) - - crc := crc32.Checksum(data, crc32c) - length := uint32(len(data)) - totalLength := 8 + int(length) - - msg := make([]byte, totalLength) - binary.BigEndian.PutUint32(msg[0:4], crc) - binary.BigEndian.PutUint32(msg[4:8], length) - copy(msg[8:], data) - - _, err := enc.wr.Write(msg) - - return err -} - -/////////////////////////////////////////////////////////////////////////////// - -// IsDataCorruptionError returns true if data has been corrupted inside WAL. -func IsDataCorruptionError(err error) bool { - _, ok := err.(DataCorruptionError) - return ok -} - -// DataCorruptionError is an error that occurs if data on disk was corrupted. -type DataCorruptionError struct { - cause error -} - -func (e DataCorruptionError) Error() string { - return fmt.Sprintf("DataCorruptionError[%v]", e.cause) -} - -func (e DataCorruptionError) Cause() error { - return e.cause -} - -// A WALDecoder reads and decodes custom-encoded WAL messages from an input -// stream. See WALEncoder for the format used. -// -// It will also compare the checksums and make sure data size is equal to the -// length from the header. If that is not the case, an error will be returned. -type WALDecoder struct { - rd io.Reader -} - -// NewWALDecoder returns a new decoder that reads from rd. -func NewWALDecoder(rd io.Reader) *WALDecoder { - return &WALDecoder{rd} -} - -// Decode reads the next custom-encoded value from its reader and returns it. 
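-// The frame layout expected here matches WALEncoder above: a 4-byte big-endian
-// CRC32 checksum (computed with the package's crc32c table), a 4-byte
-// big-endian length, then length bytes of go-amino encoded TimedWALMessage.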
-func (dec *WALDecoder) Decode() (*TimedWALMessage, error) { - b := make([]byte, 4) - - _, err := io.ReadFull(dec.rd, b) // read exactly 4 bytes; a bare Read may return fewer - if err == io.EOF { - return nil, err - } - if err != nil { - return nil, fmt.Errorf("failed to read checksum: %v", err) - } - crc := binary.BigEndian.Uint32(b) - - b = make([]byte, 4) - _, err = io.ReadFull(dec.rd, b) - if err != nil { - return nil, fmt.Errorf("failed to read length: %v", err) - } - length := binary.BigEndian.Uint32(b) - - if length > maxMsgSizeBytes { - return nil, fmt.Errorf("length %d exceeded maximum possible value of %d bytes", length, maxMsgSizeBytes) - } - - data := make([]byte, length) - _, err = io.ReadFull(dec.rd, data) - if err != nil { - return nil, fmt.Errorf("failed to read data: %v", err) - } - - // check checksum before decoding data - actualCRC := crc32.Checksum(data, crc32c) - if actualCRC != crc { - return nil, DataCorruptionError{fmt.Errorf("checksums do not match: (read: %v, actual: %v)", crc, actualCRC)} - } - - var res = new(TimedWALMessage) // nolint: gosimple - err = cdc.UnmarshalBinaryBare(data, res) - if err != nil { - return nil, DataCorruptionError{fmt.Errorf("failed to decode data: %v", err)} - } - - return res, err -} - -type nilWAL struct{} - -func (nilWAL) Write(m WALMessage) {} -func (nilWAL) WriteSync(m WALMessage) {} -func (nilWAL) Group() *auto.Group { return nil } -func (nilWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) { - return nil, false, nil -} -func (nilWAL) Start() error { return nil } -func (nilWAL) Stop() error { return nil } -func (nilWAL) Wait() {} diff --git a/consensus/wal_fuzz.go b/consensus/wal_fuzz.go deleted file mode 100644 index e15097c3..00000000 --- a/consensus/wal_fuzz.go +++ /dev/null @@ -1,31 +0,0 @@ -// +build gofuzz - -package consensus - -import ( - "bytes" - "io" -) - -func Fuzz(data []byte) int { - dec := NewWALDecoder(bytes.NewReader(data)) - for { - msg, err := dec.Decode() - if err == io.EOF { - break - } - if err != nil { - if msg != nil { - panic("msg != nil on error") - } - return 0 - } - var w bytes.Buffer - enc := NewWALEncoder(&w) - err = enc.Encode(msg) - if err != nil { - panic(err) - } - } - return 1 -} diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go deleted file mode 100644 index f61af15f..00000000 --- a/consensus/wal_generator.go +++ /dev/null @@ -1,205 +0,0 @@ -package consensus - -import ( - "bufio" - "bytes" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/pkg/errors" - "github.com/tendermint/abci/example/kvstore" - bc "github.com/tendermint/tendermint/blockchain" - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - auto "github.com/tendermint/tmlibs/autofile" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" -) - -// WALWithNBlocks generates a consensus WAL. It does this by spinning up a -// stripped-down version of the node (proxy app, event bus, consensus state) with a -// persistent kvstore application and special consensus wal instance -// (byteBufferWAL) and waits until numBlocks are created. Then it returns the WAL -// content. 
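-// NOTE: messages are written with a fixed timestamp (see byteBufferWAL below),
-// which keeps the encoded output deterministic.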
-func WALWithNBlocks(numBlocks int) (data []byte, err error) { - config := getConfig() - - app := kvstore.NewPersistentKVStoreApplication(filepath.Join(config.DBDir(), "wal_generator")) - - logger := log.TestingLogger().With("wal_generator", "wal_generator") - logger.Info("generating WAL (last height msg excluded)", "numBlocks", numBlocks) - - ///////////////////////////////////////////////////////////////////////////// - // COPY PASTE FROM node.go WITH A FEW MODIFICATIONS - // NOTE: we can't import node package because of circular dependency - privValidatorFile := config.PrivValidatorFile() - privValidator := privval.LoadOrGenFilePV(privValidatorFile) - genDoc, err := types.GenesisDocFromFile(config.GenesisFile()) - if err != nil { - return nil, errors.Wrap(err, "failed to read genesis file") - } - stateDB := db.NewMemDB() - blockStoreDB := db.NewMemDB() - state, err := sm.MakeGenesisState(genDoc) - if err != nil { - return nil, errors.Wrap(err, "failed to make genesis state") - } - blockStore := bc.NewBlockStore(blockStoreDB) - handshaker := NewHandshaker(stateDB, state, blockStore, genDoc) - proxyApp := proxy.NewAppConns(proxy.NewLocalClientCreator(app), handshaker) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start proxy app connections") - } - defer proxyApp.Stop() - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - if err := eventBus.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start event bus") - } - defer eventBus.Stop() - mempool := sm.MockMempool{} - evpool := sm.MockEvidencePool{} - blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) - consensusState := NewConsensusState(config.Consensus, state.Copy(), blockExec, blockStore, mempool, evpool) - consensusState.SetLogger(logger) - consensusState.SetEventBus(eventBus) - if privValidator != nil { - consensusState.SetPrivValidator(privValidator) - } - // END OF COPY PASTE - ///////////////////////////////////////////////////////////////////////////// - - // set consensus wal to buffered WAL, which will write all incoming msgs to buffer - var b bytes.Buffer - wr := bufio.NewWriter(&b) - numBlocksWritten := make(chan struct{}) - wal := newByteBufferWAL(logger, NewWALEncoder(wr), int64(numBlocks), numBlocksWritten) - // see wal.go#103 - wal.Write(EndHeightMessage{0}) - consensusState.wal = wal - - if err := consensusState.Start(); err != nil { - return nil, errors.Wrap(err, "failed to start consensus state") - } - defer consensusState.Stop() - - select { - case <-numBlocksWritten: - wr.Flush() - return b.Bytes(), nil - case <-time.After(1 * time.Minute): - wr.Flush() - return b.Bytes(), fmt.Errorf("waited too long for tendermint to produce %d blocks (grep logs for `wal_generator`)", numBlocks) - } -} - -// f**ing long, but unique for each test -func makePathname() string { - // get path - p, err := os.Getwd() - if err != nil { - panic(err) - } - // fmt.Println(p) - sep := string(filepath.Separator) - return strings.Replace(p, sep, "_", -1) -} - -func randPort() int { - // returns between base and base + spread - base, spread := 20000, 20000 - return base + cmn.RandIntn(spread) -} - -func makeAddrs() (string, string, string) { - start := randPort() - return fmt.Sprintf("tcp://0.0.0.0:%d", start), - fmt.Sprintf("tcp://0.0.0.0:%d", start+1), - fmt.Sprintf("tcp://0.0.0.0:%d", start+2) -} - -// getConfig returns a config for test cases -func 
getConfig() *cfg.Config { - pathname := makePathname() - c := cfg.ResetTestRoot(fmt.Sprintf("%s_%d", pathname, cmn.RandInt())) - - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - c.P2P.ListenAddress = tm - c.RPC.ListenAddress = rpc - c.RPC.GRPCListenAddress = grpc - return c -} - -// byteBufferWAL is a WAL which writes all msgs to a byte buffer. Writing stops -// when the heightToStop is reached. Client will be notified via -// signalWhenStopsTo channel. -type byteBufferWAL struct { - enc *WALEncoder - stopped bool - heightToStop int64 - signalWhenStopsTo chan<- struct{} - - logger log.Logger -} - -// needed for determinism -var fixedTime, _ = time.Parse(time.RFC3339, "2017-01-02T15:04:05Z") - -func newByteBufferWAL(logger log.Logger, enc *WALEncoder, nBlocks int64, signalStop chan<- struct{}) *byteBufferWAL { - return &byteBufferWAL{ - enc: enc, - heightToStop: nBlocks, - signalWhenStopsTo: signalStop, - logger: logger, - } -} - -// Write writes the message to the internal buffer except when heightToStop is -// reached, in which case it will signal the caller via signalWhenStopsTo and -// skip writing. -func (w *byteBufferWAL) Write(m WALMessage) { - if w.stopped { - w.logger.Debug("WAL already stopped. Not writing message", "msg", m) - return - } - - if endMsg, ok := m.(EndHeightMessage); ok { - w.logger.Debug("WAL write end height message", "height", endMsg.Height, "stopHeight", w.heightToStop) - if endMsg.Height == w.heightToStop { - w.logger.Debug("Stopping WAL at height", "height", endMsg.Height) - w.signalWhenStopsTo <- struct{}{} - w.stopped = true - return - } - } - - w.logger.Debug("WAL Write Message", "msg", m) - err := w.enc.Encode(&TimedWALMessage{fixedTime, m}) - if err != nil { - panic(fmt.Sprintf("failed to encode the msg %v", m)) - } -} - -func (w *byteBufferWAL) WriteSync(m WALMessage) { - w.Write(m) -} - -func (w *byteBufferWAL) Group() *auto.Group { - panic("not implemented") -} -func (w *byteBufferWAL) SearchForEndHeight(height int64, options *WALSearchOptions) (gr *auto.GroupReader, found bool, err error) { - return nil, false, nil -} - -func (w *byteBufferWAL) Start() error { return nil } -func (w *byteBufferWAL) Stop() error { return nil } -func (w *byteBufferWAL) Wait() {} diff --git a/consensus/wal_test.go b/consensus/wal_test.go deleted file mode 100644 index eebbc85a..00000000 --- a/consensus/wal_test.go +++ /dev/null @@ -1,133 +0,0 @@ -package consensus - -import ( - "bytes" - "crypto/rand" - // "sync" - "testing" - "time" - - "github.com/tendermint/tendermint/consensus/types" - tmtypes "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestWALEncoderDecoder(t *testing.T) { - now := time.Now() - msgs := []TimedWALMessage{ - TimedWALMessage{Time: now, Msg: EndHeightMessage{0}}, - TimedWALMessage{Time: now, Msg: timeoutInfo{Duration: time.Second, Height: 1, Round: 1, Step: types.RoundStepPropose}}, - } - - b := new(bytes.Buffer) - - for _, msg := range msgs { - b.Reset() - - enc := NewWALEncoder(b) - err := enc.Encode(&msg) - require.NoError(t, err) - - dec := NewWALDecoder(b) - decoded, err := dec.Decode() - require.NoError(t, err) - - assert.Equal(t, msg.Time.UTC(), decoded.Time) - assert.Equal(t, msg.Msg, decoded.Msg) - } -} - -func TestWALSearchForEndHeight(t *testing.T) { - walBody, err := WALWithNBlocks(6) - if err != nil { - t.Fatal(err) - } - walFile := tempWALWithData(walBody) - - wal, err := 
NewWAL(walFile) - if err != nil { - t.Fatal(err) - } - - h := int64(3) - gr, found, err := wal.SearchForEndHeight(h, &WALSearchOptions{}) - assert.NoError(t, err, cmn.Fmt("expected not to err on height %d", h)) - assert.True(t, found, cmn.Fmt("expected to find end height for %d", h)) - assert.NotNil(t, gr, "expected group not to be nil") - defer gr.Close() - - dec := NewWALDecoder(gr) - msg, err := dec.Decode() - assert.NoError(t, err, "expected to decode a message") - rs, ok := msg.Msg.(tmtypes.EventDataRoundState) - assert.True(t, ok, "expected message of type EventDataRoundState") - assert.Equal(t, rs.Height, h+1, cmn.Fmt("wrong height")) -} - -/* -var initOnce sync.Once - -func registerInterfacesOnce() { - initOnce.Do(func() { - var _ = wire.RegisterInterface( - struct{ WALMessage }{}, - wire.ConcreteType{[]byte{}, 0x10}, - ) - }) -} -*/ - -func nBytes(n int) []byte { - buf := make([]byte, n) - n, _ = rand.Read(buf) - return buf[:n] -} - -func benchmarkWalDecode(b *testing.B, n int) { - // registerInterfacesOnce() - - buf := new(bytes.Buffer) - enc := NewWALEncoder(buf) - - data := nBytes(n) - enc.Encode(&TimedWALMessage{Msg: data, Time: time.Now().Round(time.Second)}) - - encoded := buf.Bytes() - - b.ResetTimer() - for i := 0; i < b.N; i++ { - buf.Reset() - buf.Write(encoded) - dec := NewWALDecoder(buf) - if _, err := dec.Decode(); err != nil { - b.Fatal(err) - } - } - b.ReportAllocs() -} - -func BenchmarkWalDecode512B(b *testing.B) { - benchmarkWalDecode(b, 512) -} - -func BenchmarkWalDecode10KB(b *testing.B) { - benchmarkWalDecode(b, 10*1024) -} -func BenchmarkWalDecode100KB(b *testing.B) { - benchmarkWalDecode(b, 100*1024) -} -func BenchmarkWalDecode1MB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024) -} -func BenchmarkWalDecode10MB(b *testing.B) { - benchmarkWalDecode(b, 10*1024*1024) -} -func BenchmarkWalDecode100MB(b *testing.B) { - benchmarkWalDecode(b, 100*1024*1024) -} -func BenchmarkWalDecode1GB(b *testing.B) { - benchmarkWalDecode(b, 1024*1024*1024) -} diff --git a/consensus/wire.go b/consensus/wire.go deleted file mode 100644 index 81223c68..00000000 --- a/consensus/wire.go +++ /dev/null @@ -1,14 +0,0 @@ -package consensus - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterConsensusMessages(cdc) - RegisterWALMessages(cdc) - crypto.RegisterAmino(cdc) -} diff --git a/docker-compose.yml b/docker-compose.yml deleted file mode 100644 index 61862e5c..00000000 --- a/docker-compose.yml +++ /dev/null @@ -1,68 +0,0 @@ -version: '3' - -services: - node0: - container_name: node0 - image: "tendermint/localnode" - ports: - - "26656-26657:26656-26657" - environment: - - ID=0 - - LOG=$${LOG:-tendermint.log} - volumes: - - ./build:/tendermint:Z - networks: - localnet: - ipv4_address: 192.167.10.2 - - node1: - container_name: node1 - image: "tendermint/localnode" - ports: - - "26659-26660:26656-26657" - environment: - - ID=1 - - LOG=$${LOG:-tendermint.log} - volumes: - - ./build:/tendermint:Z - networks: - localnet: - ipv4_address: 192.167.10.3 - - node2: - container_name: node2 - image: "tendermint/localnode" - environment: - - ID=2 - - LOG=$${LOG:-tendermint.log} - ports: - - "26661-26662:26656-26657" - volumes: - - ./build:/tendermint:Z - networks: - localnet: - ipv4_address: 192.167.10.4 - - node3: - container_name: node3 - image: "tendermint/localnode" - environment: - - ID=3 - - LOG=$${LOG:-tendermint.log} - ports: - - "26663-26664:26656-26657" - volumes: - - ./build:/tendermint:Z - networks: 
- localnet: - ipv4_address: 192.167.10.5 - -networks: - localnet: - driver: bridge - ipam: - driver: default - config: - - - subnet: 192.167.10.0/16 - diff --git a/docs/.python-version b/docs/.python-version deleted file mode 100644 index 9bbf4924..00000000 --- a/docs/.python-version +++ /dev/null @@ -1 +0,0 @@ -2.7.14 diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index 442c9be6..00000000 --- a/docs/Makefile +++ /dev/null @@ -1,23 +0,0 @@ -# Minimal makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = python -msphinx -SPHINXPROJ = Tendermint -SOURCEDIR = . -BUILDDIR = _build - -# Put it first so that "make" without argument is like "make help". -help: - @$(SPHINXBUILD) -M help "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) - -install: - @pip install -r requirements.txt - -.PHONY: help Makefile - -# Catch-all target: route all unknown targets to Sphinx using the new -# "make mode" option. $(O) is meant as a shortcut for $(SPHINXOPTS). -%: Makefile - @$(SPHINXBUILD) -M $@ "$(SOURCEDIR)" "$(BUILDDIR)" $(SPHINXOPTS) $(O) diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 180acdcb..00000000 --- a/docs/README.md +++ /dev/null @@ -1,14 +0,0 @@ -Here lies our documentation. After making edits, run: - -``` -pip install -r requirements.txt -make html -``` - -to build the docs locally then open the file `_build/html/index.html` in your browser. - -**WARNING:** This documentation is intended to be viewed at: - -https://tendermint.readthedocs.io - -and may contain broken internal links when viewed from Github. diff --git a/docs/_static/custom_collapsible_code.css b/docs/_static/custom_collapsible_code.css deleted file mode 100644 index 695268a8..00000000 --- a/docs/_static/custom_collapsible_code.css +++ /dev/null @@ -1,17 +0,0 @@ -.toggle { - padding-bottom: 1em ; -} - -.toggle .header { - display: block; - clear: both; - cursor: pointer; -} - -.toggle .header:after { - content: " ▼"; -} - -.toggle .header.open:after { - content: " ▲"; -} \ No newline at end of file diff --git a/docs/_static/custom_collapsible_code.js b/docs/_static/custom_collapsible_code.js deleted file mode 100644 index f4ff22ad..00000000 --- a/docs/_static/custom_collapsible_code.js +++ /dev/null @@ -1,10 +0,0 @@ -let makeCodeBlocksCollapsible = function() { - $(".toggle > *").hide(); - $(".toggle .header").show(); - $(".toggle .header").click(function() { - $(this).parent().children().not(".header").toggle({"duration": 400}); - $(this).parent().children(".header").toggleClass("open"); - }); -}; -// we could use the }(); way if we would have access to jQuery in HEAD, i.e. 
we would need to force the theme -// to load jQuery before our custom scripts diff --git a/docs/_templates/layout.html b/docs/_templates/layout.html deleted file mode 100644 index 736460bc..00000000 --- a/docs/_templates/layout.html +++ /dev/null @@ -1,20 +0,0 @@ -{% extends "!layout.html" %} - -{% set css_files = css_files + ["_static/custom_collapsible_code.css"] %} - -# sadly, I didn't find a css style way to add custom JS to a list that is automagically added to head like CSS (above) #} -{% block extrahead %} - -{% endblock %} - -{% block footer %} - -{% endblock %} - - diff --git a/docs/abci-cli.md b/docs/abci-cli.md deleted file mode 100644 index 14095d16..00000000 --- a/docs/abci-cli.md +++ /dev/null @@ -1,329 +0,0 @@ -# Using ABCI-CLI - -To facilitate testing and debugging of ABCI servers and simple apps, we -built a CLI, the `abci-cli`, for sending ABCI messages from the command -line. - -## Install - -Make sure you [have Go installed](https://golang.org/doc/install). - -Next, install the `abci-cli` tool and example applications: - - go get -u github.com/tendermint/abci/cmd/abci-cli - -If this fails, you may need to use [dep](https://github.com/golang/dep) -to get vendored dependencies: - - cd $GOPATH/src/github.com/tendermint/abci - make get_tools - make get_vendor_deps - make install - -Now run `abci-cli` to see the list of commands: - - Usage: - abci-cli [command] - - Available Commands: - batch Run a batch of abci commands against an application - check_tx Validate a tx - commit Commit the application state and return the Merkle root hash - console Start an interactive abci console for multiple commands - counter ABCI demo example - deliver_tx Deliver a new tx to the application - kvstore ABCI demo example - echo Have the application echo a message - help Help about any command - info Get some info about the application - query Query the application state - set_option Set an options on the application - - Flags: - --abci string socket or grpc (default "socket") - --address string address of application socket (default "tcp://127.0.0.1:26658") - -h, --help help for abci-cli - -v, --verbose print the command and results as if it were a console session - - Use "abci-cli [command] --help" for more information about a command. - -## KVStore - First Example - -The `abci-cli` tool lets us send ABCI messages to our application, to -help build and debug them. - -The most important messages are `deliver_tx`, `check_tx`, and `commit`, -but there are others for convenience, configuration, and information -purposes. - -We'll start a kvstore application, which was installed at the same time -as `abci-cli` above. The kvstore just stores transactions in a merkle -tree. 
- -Its code can be found -[here](https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go) -and looks like: - - func cmdKVStore(cmd *cobra.Command, args []string) error { - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - - // Create the application - in memory or persisted to disk - var app types.Application - if flagPersist == "" { - app = kvstore.NewKVStoreApplication() - } else { - app = kvstore.NewPersistentKVStoreApplication(flagPersist) - app.(*kvstore.PersistentKVStoreApplication).SetLogger(logger.With("module", "kvstore")) - } - - // Start the listener - srv, err := server.NewServer(flagAddrD, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } - - // Wait forever - cmn.TrapSignal(func() { - // Cleanup - srv.Stop() - }) - return nil - } - -Start by running: - - abci-cli kvstore - -And in another terminal, run - - abci-cli echo hello - abci-cli info - -You'll see something like: - - -> data: hello - -> data.hex: 68656C6C6F - -and: - - -> data: {"size":0} - -> data.hex: 7B2273697A65223A307D - -An ABCI application must provide two things: - -- a socket server -- a handler for ABCI messages - -When we run the `abci-cli` tool we open a new connection to the -application's socket server, send the given ABCI message, and wait for a -response. - -The server may be generic for a particular language, and we provide a -[reference implementation in -Golang](https://github.com/tendermint/abci/tree/master/server). See the -[list of other ABCI implementations](./ecosystem.html) for servers in -other languages. - -The handler is specific to the application, and may be arbitrary, so -long as it is deterministic and conforms to the ABCI interface -specification. - -So when we run `abci-cli info`, we open a new connection to the ABCI -server, which calls the `Info()` method on the application, which tells -us the number of transactions in our Merkle tree. - -Now, since every command opens a new connection, we provide the -`abci-cli console` and `abci-cli batch` commands, to allow multiple ABCI -messages to be sent over a single connection. - -Running `abci-cli console` should drop you in an interactive console for -speaking ABCI messages to your application. - -Try running these commands: - - > echo hello - -> code: OK - -> data: hello - -> data.hex: 0x68656C6C6F - - > info - -> code: OK - -> data: {"size":0} - -> data.hex: 0x7B2273697A65223A307D - - > commit - -> code: OK - -> data.hex: 0x0000000000000000 - - > deliver_tx "abc" - -> code: OK - - > info - -> code: OK - -> data: {"size":1} - -> data.hex: 0x7B2273697A65223A317D - - > commit - -> code: OK - -> data.hex: 0x0200000000000000 - - > query "abc" - -> code: OK - -> log: exists - -> height: 0 - -> value: abc - -> value.hex: 616263 - - > deliver_tx "def=xyz" - -> code: OK - - > commit - -> code: OK - -> data.hex: 0x0400000000000000 - - > query "def" - -> code: OK - -> log: exists - -> height: 0 - -> value: xyz - -> value.hex: 78797A - -Note that if we do `deliver_tx "abc"` it will store `(abc, abc)`, but if -we do `deliver_tx "abc=efg"` it will store `(abc, efg)`. - -Similarly, you could put the commands in a file and run -`abci-cli --verbose batch < myfile`. - -## Counter - Another Example - -Now that we've got the hang of it, let's try another application, the -"counter" app. 
- -Like the kvstore app, its code can be found -[here](https://github.com/tendermint/abci/blob/master/cmd/abci-cli/abci-cli.go) -and looks like: - - func cmdCounter(cmd *cobra.Command, args []string) error { - - app := counter.NewCounterApplication(flagSerial) - - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - - // Start the listener - srv, err := server.NewServer(flagAddrC, flagAbci, app) - if err != nil { - return err - } - srv.SetLogger(logger.With("module", "abci-server")) - if err := srv.Start(); err != nil { - return err - } - - // Wait forever - cmn.TrapSignal(func() { - // Cleanup - srv.Stop() - }) - return nil - } - -The counter app doesn't use a Merkle tree; it just counts how many times -we've sent a transaction, asked for a hash, or committed the state. The -result of `commit` is just the number of transactions sent. - -This application has two modes: `serial=off` and `serial=on`. - -When `serial=on`, transactions must be a big-endian encoded incrementing -integer, starting at 0. - -If `serial=off`, there are no restrictions on transactions. - -We can toggle the value of `serial` using the `set_option` ABCI message. - -When `serial=on`, some transactions are invalid. In a live blockchain, -transactions collect in memory before they are committed into blocks. To -avoid wasting resources on invalid transactions, ABCI provides the -`check_tx` message, which application developers can use to accept or -reject transactions, before they are stored in memory or gossiped to -other peers. - -In this instance of the counter app, `check_tx` only allows transactions -whose integer is greater than the last committed one. - -Let's kill the console and the kvstore application, and start the -counter app: - - abci-cli counter - -In another window, start the `abci-cli console`: - - > set_option serial on - -> code: OK - -> log: OK (SetOption doesn't return anything.) - - > check_tx 0x00 - -> code: OK - - > check_tx 0xff - -> code: OK - - > deliver_tx 0x00 - -> code: OK - - > check_tx 0x00 - -> code: BadNonce - -> log: Invalid nonce. Expected >= 1, got 0 - - > deliver_tx 0x01 - -> code: OK - - > deliver_tx 0x04 - -> code: BadNonce - -> log: Invalid nonce. Expected 2, got 4 - - > info - -> code: OK - -> data: {"hashes":0,"txs":2} - -> data.hex: 0x7B22686173686573223A302C22747873223A327D - -This is a very simple application, but between `counter` and `kvstore`, -it's easy to see how you can build out arbitrary application states on -top of the ABCI. [Hyperledger's -Burrow](https://github.com/hyperledger/burrow) also runs atop ABCI, -bringing with it Ethereum-like accounts, the Ethereum virtual-machine, -Monax's permissioning scheme, and native contract extensions. - -But the ultimate flexibility comes from being able to write the -application easily in any language. - -We have implemented the counter in a number of languages [see the -example directory](https://github.com/tendermint/abci/tree/master/example). - -To run the Node JS version, `cd` to `example/js` and run - - node app.js - -(you'll have to kill the other counter application process). In another -window, run the console and those previous ABCI commands. You should get -the same results as for the Go version. - -## Bounties - -Want to write the counter app in your favorite language?! We'd be happy -to add you to our [ecosystem](https://tendermint.com/ecosystem)! We're -also offering [bounties](https://hackerone.com/tendermint/) for -implementations in new languages! 
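-
-One practical note on the `serial=on` mode above: a client has to produce
-the big-endian transaction bytes itself. Here is a minimal sketch in Go,
-using `encoding/binary` (the `serialTx` helper is ours for illustration,
-not part of the abci repo, and it assumes the app accepts an 8-byte
-big-endian value, of which bytes like `0x00` and `0x01` in the console
-session are just short forms):
-
-    // serialTx encodes a nonce as the big-endian bytes the counter
-    // app expects when serial=on.
-    func serialTx(nonce uint64) []byte {
-        buf := make([]byte, 8)
-        binary.BigEndian.PutUint64(buf, nonce)
-        return buf
-    }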
- -The `abci-cli` is designed strictly for testing and debugging. In a real -deployment, the role of sending messages is taken by Tendermint, which -connects to the app using three separate connections, each with its own -pattern of messages. - -For more information, see the [application developers -guide](./app-development.html). For examples of running an ABCI app with -Tendermint, see the [getting started guide](./getting-started.html). -Next is the ABCI specification. diff --git a/docs/app-architecture.md b/docs/app-architecture.md deleted file mode 100644 index 64b1a379..00000000 --- a/docs/app-architecture.md +++ /dev/null @@ -1,50 +0,0 @@ -# Application Architecture Guide - -Here we provide a brief guide on the recommended architecture of a -Tendermint blockchain application. - -The following diagram provides a superb example: - - - -The end-user application here is the Cosmos Voyager, at the bottom left. -Voyager communicates with a REST API exposed by a local Light-Client -Daemon. The Light-Client Daemon is an application specific program that -communicates with Tendermint nodes and verifies Tendermint light-client -proofs through the Tendermint Core RPC. The Tendermint Core process -communicates with a local ABCI application, where the user query or -transaction is actually processed. - -The ABCI application must be a deterministic result of the Tendermint -consensus - any external influence on the application state that didn't -come through Tendermint could cause a consensus failure. Thus *nothing* -should communicate with the application except Tendermint via ABCI. - -If the application is written in Go, it can be compiled into the -Tendermint binary. Otherwise, it should use a unix socket to communicate -with Tendermint. If it's necessary to use TCP, extra care must be taken -to encrypt and authenticate the connection. - -All reads from the app happen through the Tendermint `/abci_query` -endpoint. All writes to the app happen through the Tendermint -`/broadcast_tx_*` endpoints. - -The Light-Client Daemon is what provides light clients (end users) with -nearly all the security of a full node. It formats and broadcasts -transactions, and verifies proofs of queries and transaction results. -Note that it need not be a daemon - the Light-Client logic could instead -be implemented in the same process as the end-user application. - -Note for those ABCI applications with weaker security requirements, the -functionality of the Light-Client Daemon can be moved into the ABCI -application process itself. That said, exposing the application process -to anything besides Tendermint over ABCI requires extreme caution, as -all transactions, and possibly all queries, should still pass through -Tendermint. 
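-
-To make that read/write split concrete, here is a minimal Go sketch of a
-client reading application state through a local node's RPC (the
-`/abci_query` and `/broadcast_tx_*` endpoints come from this guide; the
-default port 26657 and the exact parameter encoding are our assumptions,
-so check the RPC docs linked below):
-
-    // readKey queries the application state by key; a write would hit
-    // /broadcast_tx_commit (or _sync/_async) in the same way.
-    func readKey(key string) (*http.Response, error) {
-        url := fmt.Sprintf(`http://localhost:26657/abci_query?data=%q`, key)
-        return http.Get(url)
-    }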
- -See the following for more extensive documentation: -- [Interchain Standard for the Light-Client REST API](https://github.com/cosmos/cosmos-sdk/pull/1028) -- [Tendermint RPC Docs](https://tendermint.github.io/slate/) -- [Tendermint in Production](https://github.com/tendermint/tendermint/pull/1618) -- [Tendermint Basics](https://tendermint.readthedocs.io/en/master/using-tendermint.html) -- [ABCI spec](https://github.com/tendermint/abci/blob/develop/specification.md) diff --git a/docs/app-development.md b/docs/app-development.md deleted file mode 100644 index 48865767..00000000 --- a/docs/app-development.md +++ /dev/null @@ -1,527 +0,0 @@ -# Application Development Guide - -## ABCI Design - -The purpose of ABCI is to provide a clean interface between state -transition machines on one computer and the mechanics of their -replication across multiple computers. The former we call 'application -logic' and the latter the 'consensus engine'. Application logic -validates transactions and optionally executes transactions against some -persistent state. A consensus engine ensures all transactions are -replicated in the same order on every machine. We call each machine in a -consensus engine a 'validator', and each validator runs the same -transactions through the same application logic. In particular, we are -interested in blockchain-style consensus engines, where transactions are -committed in hash-linked blocks. - -The ABCI design has a few distinct components: - -- message protocol - - pairs of request and response messages - - consensus makes requests, application responds - - defined using protobuf -- server/client - - consensus engine runs the client - - application runs the server - - two implementations: - - async raw bytes - - grpc -- blockchain protocol - - abci is connection oriented - - Tendermint Core maintains three connections: - - [mempool connection](#mempool-connection): for checking if - transactions should be relayed before they are committed; - only uses `CheckTx` - - [consensus connection](#consensus-connection): for executing - transactions that have been committed. Message sequence is - -for every block - -`BeginBlock, [DeliverTx, ...], EndBlock, Commit` - - [query connection](#query-connection): for querying the - application state; only uses Query and Info - -The mempool and consensus logic act as clients, and each maintains an -open ABCI connection with the application, which hosts an ABCI server. -Shown are the request and response types sent on each connection. - -## Message Protocol - -The message protocol consists of pairs of requests and responses. Some -messages have no fields, while others may include byte-arrays, strings, -or integers. See the `message Request` and `message Response` -definitions in [the protobuf definition -file](https://github.com/tendermint/abci/blob/master/types/types.proto), -and the [protobuf -documentation](https://developers.google.com/protocol-buffers/docs/overview) -for more details. - -For each request, a server should respond with the corresponding -response, where order of requests is preserved in the order of -responses. - -## Server - -To use ABCI in your programming language of choice, there must be an ABCI -server in that language. Tendermint supports two kinds of implementation -of the server: - -- Asynchronous, raw socket server (Tendermint Socket Protocol, also - known as TSP or Teaspoon) -- GRPC - -Both can be tested using the `abci-cli` by setting the `--abci` flag -appropriately (ie. to `socket` or `grpc`). 
- -See examples, in various stages of maintenance, in -[Go](https://github.com/tendermint/abci/tree/master/server), -[JavaScript](https://github.com/tendermint/js-abci), -[Python](https://github.com/tendermint/abci/tree/master/example/python3/abci), -[C++](https://github.com/mdyring/cpp-tmsp), and -[Java](https://github.com/jTendermint/jabci). - -### GRPC - -If GRPC is available in your language, this is the easiest approach, -though it will have significant performance overhead. - -To get started with GRPC, copy in the [protobuf -file](https://github.com/tendermint/abci/blob/master/types/types.proto) -and compile it using the GRPC plugin for your language. For instance, -for golang, the command is `protoc --go_out=plugins=grpc:. types.proto`. -See the [grpc documentation for more details](http://www.grpc.io/docs/). -`protoc` will autogenerate all the necessary code for ABCI client and -server in your language, including whatever interface your application -must satisfy to be used by the ABCI server for handling requests. - -### TSP - -If GRPC is not available in your language, or you require higher -performance, or otherwise enjoy programming, you may implement your own -ABCI server using the Tendermint Socket Protocol, known affectionately -as Teaspoon. The first step is still to auto-generate the relevant data -types and codec in your language using `protoc`. Messages coming over -the socket are Protobuf3 encoded, but additionally length-prefixed to -facilitate use as a streaming protocol. Protobuf3 doesn't have an -official length-prefix standard, so we use our own. The first byte in -the prefix represents the length of the Big Endian encoded length. The -remaining bytes in the prefix are the Big Endian encoded length. - -For example, if the Protobuf3 encoded ABCI message is 0xDEADBEEF (4 -bytes), the length-prefixed message is 0x0104DEADBEEF. If the Protobuf3 -encoded ABCI message is 65535 bytes long, the length-prefixed message -would be like 0x02FFFF.... - -Note this prefixing does not apply for grpc. - -An ABCI server must also be able to support multiple connections, as -Tendermint uses three connections. - -## Client - -There are currently two use-cases for an ABCI client. One is a testing -tool, as in the `abci-cli`, which allows ABCI requests to be sent via -command line. The other is a consensus engine, such as Tendermint Core, -which makes requests to the application every time a new transaction is -received or a block is committed. - -It is unlikely that you will need to implement a client. For details of -our client, see -[here](https://github.com/tendermint/abci/tree/master/client). - -Most of the examples below are from [kvstore -application](https://github.com/tendermint/abci/blob/master/example/kvstore/kvstore.go), -which is a part of the abci repo. [persistent_kvstore -application](https://github.com/tendermint/abci/blob/master/example/kvstore/persistent_kvstore.go) -is used to show `BeginBlock`, `EndBlock` and `InitChain` example -implementations. - -## Blockchain Protocol - -In ABCI, a transaction is simply an arbitrary length byte-array. It is -the application's responsibility to define the transaction codec as they -please, and to use it for both CheckTx and DeliverTx. - -Note that there are two distinct means for running transactions, -corresponding to stages of 'awareness' of the transaction in the -network. 
The first stage is when a transaction is received by a -validator from a client into the so-called mempool or transaction pool --this is where we use CheckTx. The second is when the transaction is -successfully committed on more than 2/3 of validators - where we use -DeliverTx. In the former case, it may not be necessary to run all the -state transitions associated with the transaction, as the transaction -may not ultimately be committed until some much later time, when the -result of its execution will be different. For instance, an Ethereum -ABCI app would check signatures and amounts in CheckTx, but would not -actually execute any contract code until the DeliverTx, so as to avoid -executing state transitions that have not been finalized. - -To formalize the distinction further, two explicit ABCI connections are -made between Tendermint Core and the application: the mempool connection -and the consensus connection. We also make a third connection, the query -connection, to query the local state of the app. - -### Mempool Connection - -The mempool connection is used *only* for CheckTx requests. Transactions -are run using CheckTx in the same order they were received by the -validator. If the CheckTx returns `OK`, the transaction is kept in -memory and relayed to other peers in the same order it was received. -Otherwise, it is discarded. - -CheckTx requests run concurrently with block processing, so they should -run against a copy of the main application state which is reset after -every block. This copy is necessary to track transitions made by a -sequence of CheckTx requests before they are included in a block. When a -block is committed, the application must reset the mempool -state to the latest committed state. Tendermint Core will then filter -through all transactions in the mempool, removing any that were included -in the block, and re-run the rest using CheckTx against the post-Commit -mempool state (this behaviour can be turned off with -`[mempool] recheck = false`). - -In go: - - func (app *KVStoreApplication) CheckTx(tx []byte) types.Result { - return types.OK - } - -In Java: - - ResponseCheckTx requestCheckTx(RequestCheckTx req) { - byte[] transaction = req.getTx().toByteArray(); - - // validate transaction - - if (notValid) { - return ResponseCheckTx.newBuilder().setCode(CodeType.BadNonce).setLog("invalid tx").build(); - } else { - return ResponseCheckTx.newBuilder().setCode(CodeType.OK).build(); - } - } - -### Replay Protection - -To prevent old transactions from being replayed, CheckTx must implement -replay protection. - -Tendermint provides the first defence layer by keeping a lightweight -in-memory cache of the last 100k transactions (`[mempool] cache_size`) in -the mempool. If Tendermint has just started or the clients have sent more than -100k transactions, old transactions may be sent to the application. So -it is important that CheckTx implements some logic to handle them. - -There are cases where a transaction will (or may) become valid in some -future state, in which case you probably want to disable Tendermint's -cache. You can do that by setting `[mempool] cache_size = 0` in the -config. - -### Consensus Connection - -The consensus connection is used only when a new block is committed, and -communicates all information from the block in a series of requests: -`BeginBlock, [DeliverTx, ...], EndBlock, Commit`. 
That is, when a block -is committed in the consensus, we send a list of DeliverTx requests (one -for each transaction) sandwiched by BeginBlock and EndBlock requests, -and followed by a Commit. - -### DeliverTx - -DeliverTx is the workhorse of the blockchain. Tendermint sends the -DeliverTx requests asynchronously but in order, and relies on the -underlying socket protocol (ie. TCP) to ensure they are received by the -app in order. They have already been ordered in the global consensus by -the Tendermint protocol. - -DeliverTx returns an abci.Result, which includes a Code, Data, and Log. -The code may be non-zero (non-OK), meaning the corresponding transaction -should have been rejected by the mempool, but may have been included in -a block by a Byzantine proposer. - -The block header will be updated (TODO) to include some commitment to -the results of DeliverTx, be it a bitarray of non-OK transactions, or a -merkle root of the data returned by the DeliverTx requests, or both. - -In go: - - // tx is either "key=value" or just arbitrary bytes - func (app *KVStoreApplication) DeliverTx(tx []byte) types.Result { - parts := strings.Split(string(tx), "=") - if len(parts) == 2 { - app.state.Set([]byte(parts[0]), []byte(parts[1])) - } else { - app.state.Set(tx, tx) - } - return types.OK - } - -In Java: - - /** - * Using Protobuf types from the protoc compiler, we always start with a byte[] - */ - ResponseDeliverTx deliverTx(RequestDeliverTx request) { - byte[] transaction = request.getTx().toByteArray(); - - // validate your transaction - - if (notValid) { - return ResponseDeliverTx.newBuilder().setCode(CodeType.BadNonce).setLog("transaction was invalid").build(); - } else { - return ResponseDeliverTx.newBuilder().setCode(CodeType.OK).build(); - } - - } - -### Commit - -Once all processing of the block is complete, Tendermint sends the -Commit request and blocks waiting for a response. While the mempool may -run concurrently with block processing (the BeginBlock, DeliverTxs, and -EndBlock), it is locked for the Commit request so that its state can be -safely reset during Commit. This means the app *MUST NOT* do any -blocking communication with the mempool (ie. broadcast\_tx) during -Commit, or there will be deadlock. Note also that all remaining -transactions in the mempool are replayed on the mempool connection -(CheckTx) following a commit. - -The app should respond to the Commit request with a byte array, which is -the deterministic state root of the application. It is included in the -header of the next block. It can be used to provide easily verified -Merkle-proofs of the state of the application. - -It is expected that the app will persist state to disk on Commit. The -option to have all transactions replayed from some previous block is the -job of the [Handshake](#handshake). - -In go: - - func (app *KVStoreApplication) Commit() types.Result { - hash := app.state.Hash() - return types.NewResultOK(hash, "") - } - -In Java: - - ResponseCommit requestCommit(RequestCommit requestCommit) { - - // update the internal app-state - byte[] newAppState = calculateAppState(); - - // and return it to the node - return ResponseCommit.newBuilder().setCode(CodeType.OK).setData(ByteString.copyFrom(newAppState)).build(); - } - -### BeginBlock - -The BeginBlock request can be used to run some code at the beginning of -every block. It also allows Tendermint to send the current block hash -and header to the application, before it sends any of the transactions. 
-
-The app should remember the latest height and header (ie. from which it
-has run a successful Commit) so that it can tell Tendermint where to
-pick up from when it restarts. See information on the Handshake, below.
-
-In go:
-
-    // Track the block hash and header information
-    func (app *PersistentKVStoreApplication) BeginBlock(params types.RequestBeginBlock) {
-      // update latest block info
-      app.blockHeader = params.Header
-
-      // reset valset changes
-      app.changes = make([]*types.Validator, 0)
-    }
-
-In Java:
-
-    /*
-     * all types come from protobuf definition
-     */
-    ResponseBeginBlock requestBeginBlock(RequestBeginBlock req) {
-
-        Header header = req.getHeader();
-        byte[] prevAppHash = header.getAppHash().toByteArray();
-        long prevHeight = header.getHeight();
-        long numTxs = header.getNumTxs();
-
-        // run your pre-block logic. Maybe prepare a state snapshot, message components, etc
-
-        return ResponseBeginBlock.newBuilder().build();
-    }
-
-### EndBlock
-
-The EndBlock request can be used to run some code at the end of every
-block. Additionally, the response may contain a list of validators,
-which can be used to update the validator set. To add a new validator or
-update an existing one, simply include it in the list returned in the
-EndBlock response. To remove one, include it in the list with a `power`
-equal to `0`. Tendermint Core will take care of updating the validator
-set. Note the change in voting power must be strictly less than 1/3 per
-block if you want a light client to be able to prove the transition
-externally. See the [light client
-docs](https://godoc.org/github.com/tendermint/tendermint/lite#hdr-How_We_Track_Validators)
-for details on how it tracks validators.
-
-In go:
-
-    // Update the validator set
-    func (app *PersistentKVStoreApplication) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
-      return types.ResponseEndBlock{ValidatorUpdates: app.ValUpdates}
-    }
-
-In Java:
-
-    /*
-     * Assume that one validator changes. The new validator has a power of 10
-     */
-    ResponseEndBlock requestEndBlock(RequestEndBlock req) {
-        final long currentHeight = req.getHeight();
-        final byte[] validatorPubKey = getValPubKey();
-
-        ResponseEndBlock.Builder builder = ResponseEndBlock.newBuilder();
-        builder.addDiffs(1, Types.Validator.newBuilder().setPower(10L).setPubKey(ByteString.copyFrom(validatorPubKey)).build());
-
-        return builder.build();
-    }
-
-### Query Connection
-
-This connection is used to query the application without engaging
-consensus. It's exposed over the Tendermint Core RPC, so clients can
-query the app without exposing a server on the app itself, but they must
-serialize each query as a single byte array. Additionally, certain
-"standardized" queries may be used to inform local decisions, for
-instance about which peers to connect to.
-
-Tendermint Core currently uses the Query connection to filter peers upon
-connecting, according to IP address or public key. For instance,
-returning a non-OK ABCI response to either of the following queries will
-cause Tendermint to not connect to the corresponding peer:
-
-- `p2p/filter/addr/<IP>`, where `<IP>` is an IP address.
-- `p2p/filter/pubkey/<PUBKEY>`, where `<PUBKEY>` is the hex-encoded
-  ED25519 key of the node (not its validator key)
-
-Note: these query formats are subject to change!
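-
-For illustration, here is a minimal sketch of how an app might answer
-these filter queries inside its Query handler (the `bannedIPs` set and
-the return codes are hypothetical; only the path layout comes from the
-list above):
-
-    // handleFilterQuery is a sketch: return code 0 (OK) to accept the
-    // peer, and any non-zero code to make Tendermint refuse it.
-    func (app *KVStoreApplication) handleFilterQuery(path string) uint32 {
-      if strings.HasPrefix(path, "p2p/filter/addr/") {
-        ip := strings.TrimPrefix(path, "p2p/filter/addr/")
-        if app.bannedIPs[ip] {
-          return 1 // non-OK: do not connect to this peer
-        }
-      }
-      // accept all keys in this sketch (p2p/filter/pubkey/<PUBKEY>)
-      return 0 // OK
-    }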
-
-In go:
-
-    func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
-      if reqQuery.Prove {
-        value, proof, exists := app.state.Proof(reqQuery.Data)
-        resQuery.Index = -1 // TODO make Proof return index
-        resQuery.Key = reqQuery.Data
-        resQuery.Value = value
-        resQuery.Proof = proof
-        if exists {
-          resQuery.Log = "exists"
-        } else {
-          resQuery.Log = "does not exist"
-        }
-        return
-      } else {
-        index, value, exists := app.state.Get(reqQuery.Data)
-        resQuery.Index = int64(index)
-        resQuery.Value = value
-        if exists {
-          resQuery.Log = "exists"
-        } else {
-          resQuery.Log = "does not exist"
-        }
-        return
-      }
-    }
-
-In Java:
-
-    ResponseQuery requestQuery(RequestQuery req) {
-        final boolean isProveQuery = req.getProve();
-        final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder();
-
-        if (isProveQuery) {
-            com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray());
-            final byte[] proofAsByteArray = proofResult.getAsByteArray();
-
-            responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray));
-            responseBuilder.setKey(req.getData());
-            responseBuilder.setValue(ByteString.copyFrom(proofResult.getData()));
-            responseBuilder.setLog(proofResult.getLogValue());
-        } else {
-            byte[] queryData = req.getData().toByteArray();
-
-            final com.app.example.QueryResult result = generateQueryResult(queryData);
-
-            responseBuilder.setIndex(result.getIndex());
-            responseBuilder.setValue(ByteString.copyFrom(result.getValue()));
-            responseBuilder.setLog(result.getLogValue());
-        }
-
-        return responseBuilder.build();
-    }
-
-### Handshake
-
-When the app or Tendermint restarts, they need to sync to a common
-height. When an ABCI connection is first established, Tendermint will
-call `Info` on the Query connection. The response should contain the
-LastBlockHeight and LastBlockAppHash - the former is the last block for
-which the app ran Commit successfully, the latter is the response from
-that Commit.
-
-Using this information, Tendermint will determine what needs to be
-replayed, if anything, against the app, to ensure both Tendermint and
-the app are synced to the latest block height.
-
-If the app returns a LastBlockHeight of 0, Tendermint will just replay
-all blocks.
-
-In go:
-
-    func (app *KVStoreApplication) Info(req types.RequestInfo) (resInfo types.ResponseInfo) {
-      return types.ResponseInfo{Data: cmn.Fmt("{\"size\":%v}", app.state.Size())}
-    }
-
-In Java:
-
-    ResponseInfo requestInfo(RequestInfo req) {
-        final byte[] lastAppHash = getLastAppHash();
-        final long lastHeight = getLastHeight();
-        return ResponseInfo.newBuilder().setLastBlockAppHash(ByteString.copyFrom(lastAppHash)).setLastBlockHeight(lastHeight).build();
-    }
-
-### Genesis
-
-`InitChain` will be called once, at genesis. `params` includes the
-initial validator set. Later on, it may be extended to take parts of the
-consensus params.
-
-In go:
-
-    // Save the validators in the merkle tree
-    func (app *PersistentKVStoreApplication) InitChain(params types.RequestInitChain) {
-      for _, v := range params.Validators {
-        r := app.updateValidator(v)
-        if r.IsErr() {
-          app.logger.Error("Error updating validators", "r", r)
-        }
-      }
-    }
-
-In Java:
-
-    /*
-     * all types come from protobuf definition
-     */
-    ResponseInitChain requestInitChain(RequestInitChain req) {
-        final int validatorsCount = req.getValidatorsCount();
-        final List<Types.Validator> validatorsList = req.getValidatorsList();
-
-        validatorsList.forEach((validator) -> {
-            long power = validator.getPower();
-            byte[] validatorPubKey = validator.getPubKey().toByteArray();
-
-            // do something for validator setup in app
-        });
-
-        return ResponseInitChain.newBuilder().build();
-    }
diff --git a/docs/architecture/README.md b/docs/architecture/README.md
deleted file mode 100644
index 9e41d306..00000000
--- a/docs/architecture/README.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# Architecture Decision Records
-
-This is a location to record all high-level architecture decisions in the tendermint project. Not the implementation details, but the reasoning that happened. This should be referred to for guidance on the "right way" to extend the application. And if we notice that the original decisions were lacking, we should have another open discussion, record the new decisions here, and then modify the code to match.
-
-Read up on the concept in this [blog post](https://product.reverb.com/documenting-architecture-decisions-the-reverb-way-a3563bb24bd0#.78xhdix6t).
diff --git a/docs/architecture/adr-001-logging.md b/docs/architecture/adr-001-logging.md
deleted file mode 100644
index a11a49e1..00000000
--- a/docs/architecture/adr-001-logging.md
+++ /dev/null
@@ -1,216 +0,0 @@
-# ADR 1: Logging
-
-## Context
-
-The current logging system in Tendermint is very static and not flexible enough.
-
-Issues: [358](https://github.com/tendermint/tendermint/issues/358), [375](https://github.com/tendermint/tendermint/issues/375).
-
-What we want from the new system:
-
-- per package dynamic log levels
-- dynamic logger setting (logger tied to the processing struct)
-- conventions
-- be more visually appealing
-
-"dynamic" here means the ability to set something at runtime.
-
-## Decision
-
-### 1) An interface
-
-First, we will need an interface for all of our libraries (`tmlibs`, Tendermint, etc.). My personal preference is the go-kit `Logger` interface (see Appendix A.), but that would be too big a change. Plus, we will still need levels.
-
-```go
-// log.go
-type Logger interface {
-    Debug(msg string, keyvals ...interface{}) error
-    Info(msg string, keyvals ...interface{}) error
-    Error(msg string, keyvals ...interface{}) error
-
-    With(keyvals ...interface{}) Logger
-}
-```
-
-On a side note: the difference between `Info` and `Notice` is subtle. We probably
-could do without `Notice`. I don't think we need `Panic` or `Fatal` as a part of
-the interface. These funcs could be implemented as helpers. In fact, we already
-have some in `tmlibs/common`.
-
-- `Debug` - extended output for devs
-- `Info` - all that is useful for a user
-- `Error` - errors
-
-`Notice` should become `Info`, `Warn` either `Error` or `Debug` depending on the message, and `Crit` -> `Error`.
-
-This interface should go into `tmlibs/log`. All libraries which are part of the core (tendermint/tendermint) should obey it.
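-
-For illustration, a core library would consume this interface rather than
-constructing a logger itself (a sketch; the `Mempool` type and method
-names here are hypothetical):
-
-```go
-// mempool.go
-import "github.com/tendermint/tmlibs/log"
-
-// Mempool is a sketch of a core component that obeys the interface:
-// it accepts a Logger from the caller instead of creating its own.
-type Mempool struct {
-    logger log.Logger
-}
-
-// SetLogger lets the caller decide where and how this component logs.
-func (mem *Mempool) SetLogger(l log.Logger) {
-    mem.logger = l
-}
-
-func (mem *Mempool) CheckTx(tx []byte) {
-    mem.logger.Debug("mempool received tx", "size", len(tx))
-}
-```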
-
-### 2) Logger with our current formatting
-
-On top of this interface, we will need to implement a stdout logger, which will be used when Tendermint is configured to output logs to STDOUT.
-
-Many people say that they like the current output, so let's stick with it.
-
-```
-NOTE[04-25|14:45:08] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
-```
-
-A couple of minor changes:
-
-```
-I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0
-```
-
-Notice the level is now encoded with a single character, and the timestamp includes milliseconds.
-
-Note: there are many other formats out there, like [logfmt](https://brandur.org/logfmt).
-
-This logger could be implemented using any logger - [logrus](https://github.com/sirupsen/logrus), [go-kit/log](https://github.com/go-kit/kit/tree/master/log), [zap](https://github.com/uber-go/zap), log15 - as long as it
-
-a) supports coloring output
-b) is moderately fast (buffering)
-c) conforms to the new interface, or an adapter could be written for it (see the sketch after this list)
-d) is somewhat configurable
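-
-As a sketch of point c), an adapter wrapping logrus into the proposed
-interface might look like this (assuming logrus's standard API; the
-names are illustrative):
-
-```go
-// logrus_adapter.go
-import (
-    "fmt"
-
-    "github.com/sirupsen/logrus"
-)
-
-// logrusLogger adapts a logrus entry to the proposed Logger interface.
-type logrusLogger struct {
-    entry *logrus.Entry
-}
-
-func NewLogrusLogger(src *logrus.Logger) Logger {
-    return logrusLogger{entry: logrus.NewEntry(src)}
-}
-
-// fields converts variadic keyvals into logrus structured fields.
-func fields(keyvals []interface{}) logrus.Fields {
-    f := logrus.Fields{}
-    for i := 0; i+1 < len(keyvals); i += 2 {
-        f[fmt.Sprint(keyvals[i])] = keyvals[i+1]
-    }
-    return f
-}
-
-func (l logrusLogger) Debug(msg string, keyvals ...interface{}) error {
-    l.entry.WithFields(fields(keyvals)).Debug(msg)
-    return nil
-}
-
-func (l logrusLogger) Info(msg string, keyvals ...interface{}) error {
-    l.entry.WithFields(fields(keyvals)).Info(msg)
-    return nil
-}
-
-func (l logrusLogger) Error(msg string, keyvals ...interface{}) error {
-    l.entry.WithFields(fields(keyvals)).Error(msg)
-    return nil
-}
-
-func (l logrusLogger) With(keyvals ...interface{}) Logger {
-    return logrusLogger{entry: l.entry.WithFields(fields(keyvals))}
-}
-```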
-
-go-kit is my favorite so far. Check out how easy it is to color errors in red https://github.com/go-kit/kit/blob/master/log/term/example_test.go#L12. Although coloring can only be applied to the whole string :(
-
-```
-go-kit +: flexible, modular
-go-kit “-”: logfmt format https://brandur.org/logfmt
-
-logrus +: popular, feature rich (hooks), API and output is more like what we want
-logrus -: not so flexible
-```
-
-```go
-// tm_logger.go
-// NewTmLogger returns a logger that encodes keyvals to the Writer in
-// tm format.
-func NewTmLogger(w io.Writer) Logger {
-    return &tmLogger{kitlog.NewLogfmtLogger(w)}
-}
-
-func (l tmLogger) SetLevel(lvl string) {
-    switch lvl {
-    case "debug":
-        l.sourceLogger = level.NewFilter(l.sourceLogger, level.AllowDebug())
-    }
-}
-
-func (l tmLogger) Info(msg string, keyvals ...interface{}) error {
-    return l.sourceLogger.Log(append([]interface{}{"msg", msg}, keyvals...)...)
-}
-
-// log.go
-func With(logger Logger, keyvals ...interface{}) Logger {
-    return kitlog.With(logger.sourceLogger, keyvals...)
-}
-```
-
-Usage:
-
-```go
-logger := log.NewTmLogger(os.Stdout)
-logger.SetLevel(config.GetString("log_level"))
-node.SetLogger(log.With(logger, "node", Name))
-```
-
-**Other log formatters**
-
-In the future, we may want other formatters like JSONFormatter.
-
-```
-{ "level": "notice", "time": "2017-04-25 14:45:08.562471297 -0400 EDT", "module": "consensus", "msg": "ABCI Replay Blocks", "appHeight": 0, "storeHeight": 0, "stateHeight": 0 }
-```
-
-### 3) Dynamic logger setting
-
-https://dave.cheney.net/2017/01/23/the-package-level-logger-anti-pattern
-
-This is the hardest part and where the most work will be done. The logger should be tied to the processing struct, or to the context if it adds some fields to the logger.
-
-```go
-type BaseService struct {
-    log     log15.Logger
-    name    string
-    started uint32 // atomic
-    stopped uint32 // atomic
-...
-}
-```
-
-BaseService already contains a `log` field, so most of the structs embedding it should be fine. We should rename it to `logger`.
-
-The only thing missing is the ability to set the logger:
-
-```
-func (bs *BaseService) SetLogger(l log.Logger) {
-    bs.logger = l
-}
-```
-
-### 4) Conventions
-
-Important keyvals should go first. Example:
-
-```
-correct
-I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus instance=1 appHeight=0 storeHeight=0 stateHeight=0
-```
-
-not
-
-```
-wrong
-I[04-25|14:45:08.322] ABCI Replay Blocks module=consensus appHeight=0 storeHeight=0 stateHeight=0 instance=1
-```
-
-For that, in most cases, you'll need to add the `instance` field to a logger upon creation, not when you log a particular message:
-
-```go
-colorFn := func(keyvals ...interface{}) term.FgBgColor {
-    // keys sit at even indices, values at the following odd indices
-    for i := 0; i < len(keyvals)-1; i += 2 {
-        if keyvals[i] == "instance" && keyvals[i+1] == "1" {
-            return term.FgBgColor{Fg: term.Blue}
-        } else if keyvals[i] == "instance" && keyvals[i+1] == "2" {
-            return term.FgBgColor{Fg: term.Red}
-        }
-    }
-    return term.FgBgColor{}
-}
-logger := term.NewLogger(os.Stdout, log.NewTmLogger, colorFn)
-
-c1 := NewConsensusReactor(...)
-c1.SetLogger(log.With(logger, "instance", "1"))
-
-c2 := NewConsensusReactor(...)
-c2.SetLogger(log.With(logger, "instance", "2"))
-```
-
-## Status
-
-proposed
-
-## Consequences
-
-### Positive
-
-Dynamic logger, which could be turned off for some modules at runtime. Public interface for other projects using Tendermint libraries.
-
-### Negative
-
-We may lose the ability to color keys in keyvalue pairs.
go-kit allows you to easily change the foreground / background colors of the whole string, but not its parts.
-
-### Neutral
-
-## Appendix A.
-
-I really like the minimalistic approach go-kit took with its logger https://github.com/go-kit/kit/tree/master/log:
-
-```
-type Logger interface {
-    Log(keyvals ...interface{}) error
-}
-```
-
-See [The Hunt for a Logger Interface](https://go-talks.appspot.com/github.com/ChrisHines/talks/structured-logging/structured-logging.slide). The advantage is greater composability (check out how go-kit defines colored logging or log-leveled logging on top of this interface https://github.com/go-kit/kit/tree/master/log).
diff --git a/docs/architecture/adr-002-event-subscription.md b/docs/architecture/adr-002-event-subscription.md
deleted file mode 100644
index cc207c4a..00000000
--- a/docs/architecture/adr-002-event-subscription.md
+++ /dev/null
@@ -1,90 +0,0 @@
-# ADR 2: Event Subscription
-
-## Context
-
-In the light client (or any other client), the user may want to **subscribe to
-a subset of transactions** (rather than all of them) using `/subscribe?event=X`. For
-example, I want to subscribe to all transactions associated with a particular
-account. The same goes for fetching. The user may want to **fetch transactions based on
-some filter** (rather than fetching all the blocks). For example, I want to get
-all transactions for a particular account in the last two weeks (`tx's block
-time >= '2017-06-05'`).
-
-Currently, you can't even subscribe to "all txs" in Tendermint.
-
-The goal is a simple and easy to use API for doing that.
-
-![Tx Send Flow Diagram](img/tags1.png)
-
-## Decision
-
-The ABCI app returns tags with a `DeliverTx` response inside the `data` field (_for
-now; later we may create a separate field_). Tags are a list of key-value pairs,
-protobuf encoded.
-
-Example data:
-
-```json
-{
-  "abci.account.name": "Igor",
-  "abci.account.address": "0xdeadbeef",
-  "tx.gas": 7
-}
-```
-
-### Subscribing to transaction events
-
-If the user wants to receive only a subset of transactions, the ABCI app must
-return a list of tags with a `DeliverTx` response. These tags will be parsed and
-matched with the current queries (subscribers). If the query matches the tags,
-the subscriber will get the transaction event.
-
-```
-/subscribe?query="tm.event = Tx AND tx.hash = AB0023433CF0334223212243BDD AND abci.account.invoice.number = 22"
-```
-
-A new package must be developed to replace the current `events` package. It
-will allow clients to subscribe to different types of events in the future:
-
-```
-/subscribe?query="abci.account.invoice.number = 22"
-/subscribe?query="abci.account.invoice.owner CONTAINS Igor"
-```
-
-### Fetching transactions
-
-This is a bit tricky because a) we want to support a number of indexers, all of
-which have a different API, and b) we don't know whether tags will be sufficient
-for most apps (I guess we'll see).
-
-```
-/txs/search?query="tx.hash = AB0023433CF0334223212243BDD AND abci.account.owner CONTAINS Igor"
-/txs/search?query="abci.account.owner = Igor"
-```
-
-For historic queries we will need an indexing storage (Postgres, SQLite, ...).
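-
-To make the matching side concrete, here is a sketch of how a parsed
-subscriber query could be matched against a transaction's tags (the
-`Condition` and `Query` types are illustrative, not a final API; only
-equality conditions are shown):
-
-```go
-// Tags are the key-value pairs the ABCI app returns with DeliverTx,
-// e.g. {"abci.account.name": "Igor", "tx.gas": "7"}.
-type Tags map[string]string
-
-// Condition is one "key = value" clause of a parsed query.
-type Condition struct {
-    Key   string
-    Value string
-}
-
-// Query is a parsed subscription query; all conditions must hold (AND).
-type Query struct {
-    Conditions []Condition
-}
-
-// Matches reports whether a transaction's tags satisfy the query. Every
-// active query is checked against every transaction's tags, which is why
-// the performance of this function matters (see Negative consequences).
-func (q Query) Matches(tags Tags) bool {
-    for _, c := range q.Conditions {
-        if tags[c.Key] != c.Value {
-            return false
-        }
-    }
-    return true
-}
-```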
-
-### Issues
-
-- https://github.com/tendermint/basecoin/issues/91
-- https://github.com/tendermint/tendermint/issues/376
-- https://github.com/tendermint/tendermint/issues/287
-- https://github.com/tendermint/tendermint/issues/525 (related)
-
-## Status
-
-proposed
-
-## Consequences
-
-### Positive
-
-- same format for event notifications and search APIs
-- powerful enough query
-
-### Negative
-
-- performance of the `match` function (where we have too many queries / subscribers)
-- there is an issue where there are too many txs in the DB
-
-### Neutral
diff --git a/docs/architecture/adr-003-abci-app-rpc.md b/docs/architecture/adr-003-abci-app-rpc.md
deleted file mode 100644
index 2775db07..00000000
--- a/docs/architecture/adr-003-abci-app-rpc.md
+++ /dev/null
@@ -1,34 +0,0 @@
-# ADR 3: Must an ABCI-app have an RPC server?
-
-## Context
-
-An ABCI server could expose its own RPC server and act as a proxy to Tendermint.
-
-The idea was for the Tendermint RPC to just be a transparent proxy to the app.
-Clients need to talk to Tendermint for proofs, unless we burden all app devs
-with exposing Tendermint proof stuff. It also seems less complex to lock down one
-server than two, though granted it makes querying a bit more kludgy since it needs
-to be passed as a `Query`. Also, **having a very standard rpc interface means
-the light-client can work with all apps and handle proofs**. The only
-app-specific logic is decoding the binary data to a more readable form (eg.
-json). This is a huge advantage for code-reuse and standardization.
-
-## Decision
-
-We don't expose an RPC server on any of our ABCI-apps.
-
-## Status
-
-accepted
-
-## Consequences
-
-### Positive
-
-- Unified interface for all apps
-
-### Negative
-
-- `Query` interface
-
-### Neutral
diff --git a/docs/architecture/adr-004-historical-validators.md b/docs/architecture/adr-004-historical-validators.md
deleted file mode 100644
index be0de22c..00000000
--- a/docs/architecture/adr-004-historical-validators.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# ADR 004: Historical Validators
-
-## Context
-
-Right now, we can query the present validator set, but there is no history.
-If you were offline for a long time, there is no way to reconstruct past validators. This is needed for the light client, and we agreed it needs an enhancement of the API.
-
-## Decision
-
-For every block, store a new structure that contains either the latest validator set,
-or the height of the last block for which the validator set changed. Note this is not
-the height of the block which returned the validator set change itself, but the next block,
-ie. the first block it comes into effect for.
-
-Storing the validators will be handled by the `state` package.
-
-At some point in the future, we may consider more efficient storage in the case where the validators
-are updated frequently - for instance, by only saving the diffs rather than the whole set.
-
-An alternative approach suggested keeping the validator set, or diffs of it, in a merkle IAVL tree.
-While it might afford cheaper proofs that a validator set has not changed, it would be more complex,
-and likely less efficient.
-
-## Status
-
-Accepted.
-
-## Consequences
-
-### Positive
-
-- Can query old validator sets, with proof.
-
-### Negative
-
-- Writes an extra structure to disk with every block.
-
-### Neutral
diff --git a/docs/architecture/adr-005-consensus-params.md b/docs/architecture/adr-005-consensus-params.md
deleted file mode 100644
index 6656d35b..00000000
--- a/docs/architecture/adr-005-consensus-params.md
+++ /dev/null
@@ -1,86 +0,0 @@
-# ADR 005: Consensus Params
-
-## Context
-
-Consensus critical parameters controlling blockchain capacity have until now been hard coded, loaded from a local config, or neglected.
-Since they may need to be different in different networks, and potentially to evolve over time within
-networks, we seek to initialize them in a genesis file, and expose them through the ABCI.
-
-While we have some specific parameters now, like maximum block and transaction size, we expect to have more in the future,
-such as a period over which evidence is valid, or the frequency of checkpoints.
-
-## Decision
-
-### ConsensusParams
-
-No consensus critical parameters should ever be found in the `config.toml`.
-
-A new `ConsensusParams` is optionally included in the `genesis.json` file,
-and loaded into the `State`. Any items not included are set to their default value.
-A value of 0 is undefined (see ABCI, below). A value of -1 is used to indicate the parameter does not apply.
-The parameters are used to determine the validity of a block (and tx) via the union of all relevant parameters.
-
-```
-type ConsensusParams struct {
-    BlockSize
-    TxSize
-    BlockGossip
-}
-
-type BlockSize struct {
-    MaxBytes int
-    MaxTxs   int
-    MaxGas   int
-}
-
-type TxSize struct {
-    MaxBytes int
-    MaxGas   int
-}
-
-type BlockGossip struct {
-    BlockPartSizeBytes int
-}
-```
-
-The `ConsensusParams` can evolve over time by adding new structs that cover different aspects of the consensus rules.
-
-The `BlockPartSizeBytes` and the `BlockSize.MaxBytes` are enforced to be greater than 0.
-The former because we need a part size, the latter so that we always have at least some sanity check over the size of blocks.
-
-### ABCI
-
-#### InitChain
-
-InitChain currently takes the initial validator set. It should be extended to also take parts of the ConsensusParams.
-There is some case to be made for it to take the entire Genesis, except there may be things in the genesis,
-like the BlockPartSize, that the app shouldn't really know about.
-
-#### EndBlock
-
-The EndBlock response includes a `ConsensusParams`, which includes BlockSize and TxSize, but not BlockGossip.
-Other param structs can be added to `ConsensusParams` in the future.
-The `0` value is used to denote no change.
-Any other value will update that parameter in the `State.ConsensusParams`, to be applied for the next block.
-Tendermint should have hard-coded upper limits as sanity checks.
-
-## Status
-
-Proposed.
-
-## Consequences
-
-### Positive
-
-- Alternative capacity limits and consensus parameters can be specified without re-compiling the software.
-- They can also change over time under the control of the application.
-
-### Negative
-
-- More exposed parameters means more complexity
-- Different rules at different heights in the blockchain complicate fast sync
-
-### Neutral
-
-- The TxSize, which checks validity, may be in conflict with the config's `max_block_size_tx`, which determines proposal sizes
-
diff --git a/docs/architecture/adr-006-trust-metric.md b/docs/architecture/adr-006-trust-metric.md
deleted file mode 100644
index ec8a0cce..00000000
--- a/docs/architecture/adr-006-trust-metric.md
+++ /dev/null
@@ -1,238 +0,0 @@
-# ADR 006: Trust Metric Design
-
-## Context
-
-The proposed trust metric will allow Tendermint to maintain local trust rankings for peers it has directly interacted with, which can then be used to implement soft security controls. The calculations were obtained from the [TrustGuard](https://dl.acm.org/citation.cfm?id=1060808) project.
-
-### Background
-
-The Tendermint Core project developers would like to improve Tendermint security and reliability by keeping track of the level of trustworthiness peers have demonstrated within the peer-to-peer network. This way, undesirable outcomes from peers will not immediately result in them being dropped from the network (potentially causing drastic changes to take place). Instead, a peer's behavior can be monitored with appropriate metrics, and the peer removed from the network once Tendermint Core is certain it is a threat. For example, when the PEXReactor makes a request for peers' network addresses from an already known peer, and the returned network addresses are unreachable, this untrustworthy behavior should be tracked. Returning a few bad network addresses probably shouldn't cause a peer to be dropped, while excessive amounts of this behavior do qualify the peer for being dropped.
-
-Trust metrics can be circumvented by malicious nodes through the use of strategic oscillation techniques, which adapt the malicious node's behavior pattern in order to maximize its goals. For instance, if the malicious node learns that the time interval of the Tendermint trust metric is *X* hours, then it could wait *X* hours in-between malicious activities. We could try to combat this issue by increasing the interval length, yet this will make the system less adaptive to recent events.
-
-Instead, having shorter intervals, but keeping a history of interval values, will give our metric the flexibility needed in order to keep the network stable, while also making it resilient against a strategic malicious node in the Tendermint peer-to-peer network. Also, the metric can access trust data over a rather long period of time while not greatly increasing its history size by aggregating older history values over a larger number of intervals, and at the same time, maintain great precision for the recent intervals. This approach is referred to as fading memories, and closely resembles the way human beings remember their experiences. The trade-off to using history data is that the interval values should be preserved in-between executions of the node.
-
-### References
-
-S. Mudhakar, L. Xiong, and L. Liu, “TrustGuard: Countering Vulnerabilities in Reputation Management for Decentralized Overlay Networks,” in *Proceedings of the 14th international conference on World Wide Web, pp. 422-431*, May 2005.
-
-## Decision
-
-The proposed trust metric will allow a developer to inform the trust metric store of all good and bad events relevant to a peer's behavior, and at any time, the metric can be queried for a peer's current trust ranking.
-
-The three subsections below will cover the process being considered for calculating the trust ranking, the concept of the trust metric store, and the interface for the trust metric.
-
-### Proposed Process
-
-The proposed trust metric will count good and bad events relevant to the object, and calculate the percent of counters that are good over an interval with a predefined duration. This is the procedure that will continue for the life of the trust metric. When the trust metric is queried for the current **trust value**, a resilient equation will be utilized to perform the calculation.
-
-The equation being proposed resembles a Proportional-Integral-Derivative (PID) controller used in control systems. The proportional component allows us to be sensitive to the value of the most recent interval, while the integral component allows us to incorporate trust values stored in the history data, and the derivative component allows us to give weight to sudden changes in the behavior of a peer. We compute the trust value of a peer in interval *i* based on its current trust ranking, its trust rating history prior to interval *i* (over the past *maxH* number of intervals), and its trust ranking fluctuation. We will break up the equation into the three components.
-
-```math
-(1) Proportional Value = a * R[i]
-```
-
-where *R*[*i*] denotes the raw trust value at time interval *i* (with *i* == 0 being the current time) and *a* is the weight applied to the contribution of the current reports. The next component of our equation uses a weighted sum over the last *maxH* intervals to calculate the history value for time *i*:
-
-
-`H[i] = ` ![formula1](img/formula1.png "Weighted Sum Formula")
-
-
-The weights can be chosen either optimistically or pessimistically. An optimistic weight creates larger weights for newer history data values, while the pessimistic weight creates larger weights for time intervals with lower scores. The default weights used during the calculation of the history value are optimistic and calculated as *Wk* = 0.8^*k*, for time interval *k*. With the history value available, we can now finish calculating the integral value:
-
-```math
-(2) Integral Value = b * H[i]
-```
-
-Where *H*[*i*] denotes the history value at time interval *i* and *b* is the weight applied to the contribution of past performance for the object being measured. The derivative component will be calculated as follows:
-
-```math
-D[i] = R[i] – H[i]
-
-(3) Derivative Value = c(D[i]) * D[i]
-```
-
-Where the value of *c* is selected based on the *D*[*i*] value relative to zero. The default selection process makes *c* equal to 0 unless *D*[*i*] is a negative value, in which case *c* is equal to 1. The result is that the maximum penalty is applied when current behavior is lower than previously experienced behavior. If the current behavior is better than the previously experienced behavior, then the Derivative Value has no impact on the trust value.
With the three components brought together, our trust value equation is calculated as follows:
-
-```math
-TrustValue[i] = a * R[i] + b * H[i] + c(D[i]) * D[i]
-```
-
-As a performance optimization that will keep the amount of raw interval data being saved to a reasonable size of *m*, while allowing us to represent 2^*m* - 1 history intervals, we can employ the fading memories technique, which will trade space and time complexity for the precision of the history data values by summarizing larger quantities of less recent values. While our equation above attempts to access up to *maxH* (which can be 2^*m* - 1), we will map those requests down to *m* values using equation 4 below:
-
-```math
-(4) j = index, where index > 0
-```
-
-Where *j* is one of the *(0, 1, 2, … , m – 1)* indices used to access history interval data. Now we can access the raw intervals using the following calculations:
-
-```math
-R[0] = raw data for current time interval
-```
-
-`R[j] = ` ![formula2](img/formula2.png "Fading Memories Formula")
-
-### Trust Metric Store
-
-Similar to the P2P subsystem AddrBook, the trust metric store will maintain information relevant to Tendermint peers. Additionally, the trust metric store will ensure that trust metrics will only be active for peers that a node is currently and directly engaged with.
-
-Reactors will provide a peer key to the trust metric store in order to retrieve the associated trust metric. The trust metric can then record new positive and negative events experienced by the reactor, as well as provide the current trust score calculated by the metric.
-
-When the node is shutting down, the trust metric store will save history data for trust metrics associated with all known peers. This saved information allows experiences with a peer to be preserved across node executions, which can span a tracking window of days or weeks. The trust history data is loaded automatically during OnStart.
-
-### Interface Detailed Design
-
-Each trust metric allows for the recording of positive/negative events, querying the current trust value/score, and the stopping/pausing of tracking over time intervals. This can be seen below:
-
-
-```go
-
-// TrustMetric - keeps track of peer reliability
-type TrustMetric struct {
-    // Private elements.
-}
-
-// Pause tells the metric to pause recording data over time intervals.
-// All method calls that indicate events will unpause the metric
-func (tm *TrustMetric) Pause() {}
-
-// Stop tells the metric to stop recording data over time intervals
-func (tm *TrustMetric) Stop() {}
-
-// BadEvents indicates that an undesirable event(s) took place
-func (tm *TrustMetric) BadEvents(num int) {}
-
-// GoodEvents indicates that a desirable event(s) took place
-func (tm *TrustMetric) GoodEvents(num int) {}
-
-// TrustValue gets the dependable trust value; always between 0 and 1
-func (tm *TrustMetric) TrustValue() float64 {}
-
-// TrustScore gets a score based on the trust value; always between 0 and 100
-func (tm *TrustMetric) TrustScore() int {}
-
-// NewMetric returns a trust metric with the default configuration
-func NewMetric() *TrustMetric {}
-
-//------------------------------------------------------------------------------------------------
-// For example
-
-tm := NewMetric()
-
-tm.BadEvents(1)
-score := tm.TrustScore()
-
-tm.Stop()
-
-```
-
-Some of the trust metric parameters can be configured.
The weight values should probably be left alone in most cases, yet the time durations for the tracking window and individual time interval should be considered.
-
-```go
-
-// TrustMetricConfig - Configures the weight functions and time intervals for the metric
-type TrustMetricConfig struct {
-    // Determines the percentage given to current behavior
-    ProportionalWeight float64
-
-    // Determines the percentage given to prior behavior
-    IntegralWeight float64
-
-    // The window of time that the trust metric will track events across.
-    // This can be set to cover many days without issue
-    TrackingWindow time.Duration
-
-    // Each interval should be short for adaptability.
-    // Less than 30 seconds is too sensitive,
-    // and greater than 5 minutes will make the metric numb
-    IntervalLength time.Duration
-}
-
-// DefaultConfig returns a config with values that have been tested and produce desirable results
-func DefaultConfig() TrustMetricConfig {}
-
-// NewMetricWithConfig returns a trust metric with a custom configuration
-func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric {}
-
-//------------------------------------------------------------------------------------------------
-// For example
-
-config := TrustMetricConfig{
-    TrackingWindow: time.Minute * 60 * 24, // one day
-    IntervalLength: time.Minute * 2,
-}
-
-tm := NewMetricWithConfig(config)
-
-tm.BadEvents(10)
-tm.Pause()
-tm.GoodEvents(1) // becomes active again
-
-```
-
-A trust metric store should be created with a DB that has persistent storage so it can save history data across node executions. All trust metrics instantiated by the store will be created with the provided TrustMetricConfig configuration.
-
-When you attempt to fetch the trust metric for a peer, and an entry does not exist in the trust metric store, a new metric is automatically created and the entry is made within the store.
-
-In addition to the fetching method, GetPeerTrustMetric, the trust metric store provides a method to call when a peer has disconnected from the node. This is so the metric can be paused (history data will not be saved) for periods of time when the node is not having direct experiences with the peer.
-
-```go
-
-// TrustMetricStore - Manages all trust metrics for peers
-type TrustMetricStore struct {
-    cmn.BaseService
-
-    // Private elements
-}
-
-// OnStart implements Service
-func (tms *TrustMetricStore) OnStart() error {}
-
-// OnStop implements Service
-func (tms *TrustMetricStore) OnStop() {}
-
-// NewTrustMetricStore returns a store that saves data to the DB
-// and uses the config when creating new trust metrics
-func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {}
-
-// Size returns the number of entries in the trust metric store
-func (tms *TrustMetricStore) Size() int {}
-
-// GetPeerTrustMetric returns a trust metric by peer key
-func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {}
-
-// PeerDisconnected pauses the trust metric associated with the peer identified by the key
-func (tms *TrustMetricStore) PeerDisconnected(key string) {}
-
-//------------------------------------------------------------------------------------------------
-// For example
-
-db := dbm.NewDB("trusthistory", "goleveldb", dirPathStr)
-tms := NewTrustMetricStore(db, DefaultConfig())
-
-tm := tms.GetPeerTrustMetric(key)
-tm.BadEvents(1)
-
-tms.PeerDisconnected(key)
-
-```
-
-## Status
-
-Approved.
-
-## Consequences
-
-### Positive
-
-- The trust metric will allow Tendermint to make non-binary security and reliability decisions
-- Will help Tendermint implement deterrents that provide soft security controls, yet avoid disruption on the network
-- Will provide useful profiling information when analyzing performance over time related to peer interaction
-
-### Negative
-
-- Requires saving the trust metric history data across node executions
-
-### Neutral
-
-- Keep in mind that good events need to be recorded just as bad events do with this implementation
diff --git a/docs/architecture/adr-007-trust-metric-usage.md b/docs/architecture/adr-007-trust-metric-usage.md
deleted file mode 100644
index 4d833a69..00000000
--- a/docs/architecture/adr-007-trust-metric-usage.md
+++ /dev/null
@@ -1,103 +0,0 @@
-# ADR 007: Trust Metric Usage Guide
-
-## Context
-
-Tendermint is required to monitor peer quality in order to inform its peer dialing and peer exchange strategies.
-
-When a node first connects to the network, it is important that it can quickly find good peers.
-Thus, while a node has fewer connections, it should prioritize connecting to higher quality peers.
-As the node becomes well connected to the rest of the network, it can dial lesser known or lesser
-quality peers and help assess their quality. Similarly, when queried for peers, a node should make
-sure it doesn't return low quality peers.
-
-Peer quality can be tracked using a trust metric that flags certain behaviours as good or bad. When enough
-bad behaviour accumulates, we can mark the peer as bad and disconnect.
-For example, when the PEXReactor makes a request for peers' network addresses from an already known peer, and the returned network addresses are unreachable, this undesirable behavior should be tracked. Returning a few bad network addresses probably shouldn't cause a peer to be dropped, while excessive amounts of this behavior do qualify the peer for removal. The originally proposed approach and design document for the trust metric can be found in the [ADR 006](adr-006-trust-metric.md) document.
-
-The trust metric implementation allows a developer to obtain a peer's trust metric from a trust metric store and track good and bad events relevant to the peer's behavior, and at any time, the peer's metric can be queried for a current trust value. The current trust value is calculated with a formula that utilizes current behavior, previous behavior, and the change between the two (a sketch of this calculation follows the list below). Current behavior is calculated as the percentage of good behavior within a time interval. The time interval is short; probably set between 30 seconds and 5 minutes. On the other hand, the historic data can estimate a peer's behavior over days' worth of tracking. At the end of a time interval, the current behavior becomes part of the historic data, and a new time interval begins with the good and bad counters reset to zero.
-
-These are some important things to keep in mind regarding how the trust metrics handle time intervals and scoring:
-- Each new time interval begins with a perfect score
-- Bad events quickly bring the score down and good events cause the score to slowly rise
-- When the time interval is over, the percentage of good events becomes historic data.
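-
-As a sketch of the calculation referenced above (the weighting rule comes
-from ADR 006; the constants here are illustrative defaults, not the
-shipped values):
-
-```go
-// trustValue computes TrustValue[i] = a*R[i] + b*H[i] + c(D)*D from
-// ADR 006. r is the current interval's raw value (fraction of good
-// events) and h is the history value; both fall in [0, 1].
-func trustValue(r, h float64) float64 {
-    const (
-        a = 0.4 // weight of current behavior (illustrative)
-        b = 0.6 // weight of historic behavior (illustrative)
-    )
-    d := r - h // derivative: change between current and past behavior
-    c := 0.0
-    if d < 0 {
-        c = 1.0 // maximum penalty only when behavior worsens
-    }
-    return a*r + b*h + c*d
-}
-```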
-
-Some useful information about the inner workings of the trust metric:
-- When a trust metric is first instantiated, a timer (ticker) periodically fires in order to handle transitions between trust metric time intervals
-- If a peer is disconnected from a node, the timer should be paused, since the node is no longer connected to that peer
-- The ability to pause the metric is supported with the store **PeerDisconnected** method and the metric **Pause** method
-- After a pause, if a good or bad event method is called on a metric, it automatically becomes unpaused and begins a new time interval.
-
-## Decision
-
-The trust metric capability is now available, yet it still leaves the question of how it should be applied throughout Tendermint in order to properly track the quality of peers.
-
-### Proposed Process
-
-Peers are managed using an address book and a trust metric:
-
-- The address book keeps a record of peers and provides selection methods
-- The trust metric tracks the quality of the peers
-
-#### Presence in Address Book
-
-Outbound peers are added to the address book before they are dialed,
-and inbound peers are added once the peer connection is set up.
-Peers are also added to the address book when they are received in response to
-a pexRequestMessage.
-
-While a node has fewer than `needAddressThreshold` addresses, it will periodically request more,
-via pexRequestMessage, from randomly selected peers and from newly dialed outbound peers.
-
-When a new address is added to an address book that has more than `0.5*needAddressThreshold` addresses,
-then with some low probability, a randomly chosen low quality peer is removed.
-
-#### Outbound Peers
-
-Peers attempt to maintain a minimum number of outbound connections by
-repeatedly querying the address book for peers to connect to.
-While a node has few to no outbound connections, the address book is biased to return
-higher quality peers. As the node increases the number of outbound connections,
-the address book is biased to return less-vetted or lower-quality peers.
-
-#### Inbound Peers
-
-Peers also maintain a maximum number of total connections, MaxNumPeers.
-If a peer has MaxNumPeers, new incoming connections will be accepted with low probability.
-When such a new connection is accepted, the peer disconnects from a probabilistically chosen low ranking peer
-so it does not exceed MaxNumPeers.
-
-#### Peer Exchange
-
-When a peer receives a pexRequestMessage, it returns a random sample of high quality peers from the address book. Peers with no score or a low score should not be included in a response to pexRequestMessage.
-
-#### Peer Quality
-
-Peer quality is tracked in the connection and across the reactors by storing the TrustMetric in the peer's
-thread safe Data store.
-
-Peer behaviour is then defined as one of the following:
-- Fatal - something outright malicious that causes us to disconnect the peer and ban it from the address book for some amount of time
-- Bad - any kind of timeout, messages that don't unmarshal, fail other validity checks, or messages we didn't ask for or aren't expecting (usually worth one bad event)
-- Neutral - unknown channels/message types/version upgrades (no good or bad events recorded)
-- Correct - normal correct behavior (worth one good event)
-- Good - some random majority of peers per reactor sending us useful messages (worth more than one good event).
-
-Note that Fatal behaviour causes us to remove the peer, and neutral behaviour does not affect the score.
-
-## Status
-
-Proposed.
-
-## Consequences
-
-### Positive
-
-- Bringing the address book and trust metric store together will cause the network to be built in a way that encourages greater security and reliability.
-
-### Negative
-
-- TBD
-
-### Neutral
-
-- Keep in mind that good events need to be recorded just as bad events do with this implementation.
diff --git a/docs/architecture/adr-008-priv-validator.md b/docs/architecture/adr-008-priv-validator.md
deleted file mode 100644
index 4c1d87be..00000000
--- a/docs/architecture/adr-008-priv-validator.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# ADR 008: SocketPV
-
-Tendermint nodes should support only two in-process PrivValidator
-implementations:
-
-- FilePV uses an unencrypted private key in a "priv_validator.json" file - no
-  configuration required (just `tendermint init`).
-- SocketPV uses a socket to send signing requests to another process - the user is
-  responsible for starting that process themselves.
-
-The SocketPV address can be provided via flags at the command line - doing so
-will cause Tendermint to ignore any "priv_validator.json" file and to listen on
-the given address for incoming connections from an external priv_validator
-process. It will halt any operation until at least one external process
-successfully connects.
-
-The external priv_validator process will dial the address to connect to
-Tendermint, and then Tendermint will send requests on the ensuing connection to
-sign votes and proposals. Thus the external process initiates the connection,
-but the Tendermint process makes all requests. In a later stage we're going to
-support multiple validators for fault tolerance. To prevent double signing, they
-need to be synced, which is deferred to an external solution (see #1185).
-
-In addition, Tendermint will provide implementations that can be run in that
-external process. These include:
-
-- FilePV will encrypt the private key, and the user must enter a password to
-  decrypt the key when the process is started.
-- LedgerPV uses a Ledger Nano S to handle all signing.
diff --git a/docs/architecture/adr-011-monitoring.md b/docs/architecture/adr-011-monitoring.md
deleted file mode 100644
index ca16a9a1..00000000
--- a/docs/architecture/adr-011-monitoring.md
+++ /dev/null
@@ -1,116 +0,0 @@
-# ADR 011: Monitoring
-
-## Changelog
-
-08-06-2018: Initial draft
-11-06-2018: Reorg after @xla comments
-13-06-2018: Clarification about usage of labels
-
-## Context
-
-In order to bring more visibility into Tendermint, we would like it to report
-metrics and, maybe later, traces of transactions and RPC queries. See
-https://github.com/tendermint/tendermint/issues/986.
-
-A few solutions were considered:
-
-1. [Prometheus](https://prometheus.io)
-   a) Prometheus API
-   b) [go-kit metrics package](https://github.com/go-kit/kit/tree/master/metrics) as an interface plus Prometheus
-   c) [telegraf](https://github.com/influxdata/telegraf)
-   d) new service, which will listen to events emitted by pubsub and report metrics
-2. [OpenCensus](https://opencensus.io/go/index.html)
-
-### 1. Prometheus
-
-Prometheus seems to be the most popular product out there for monitoring. It has
-a Go client library, powerful queries, and alerts.
-
-**a) Prometheus API**
-
-We can commit to using Prometheus in Tendermint, but I think Tendermint users
-should be free to choose whatever monitoring tool they feel will better suit
-their needs (if they don't have an existing one already). So we should try to
-abstract the interface enough so people can switch between Prometheus and other
-similar tools.
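-
-For example, the abstraction could be as small as this (a sketch using
-the Prometheus Go client; the `Gauge` interface and `Metrics` struct are
-hypothetical, not an agreed-upon API):
-
-```go
-import "github.com/prometheus/client_golang/prometheus"
-
-// Gauge is the minimal interface business code would depend on, so the
-// Prometheus client never leaks into, say, the consensus reactor.
-type Gauge interface {
-    Set(float64)
-}
-
-// Metrics holds the instruments used by one module.
-type Metrics struct {
-    Height Gauge
-}
-
-// PrometheusMetrics is one possible provider; a no-op provider (or one
-// for another backend) could be swapped in without touching the module.
-func PrometheusMetrics() *Metrics {
-    g := prometheus.NewGauge(prometheus.GaugeOpts{
-        Namespace: "tendermint",
-        Subsystem: "consensus",
-        Name:      "height",
-        Help:      "Height of the chain.",
-    })
-    prometheus.MustRegister(g)
-    return &Metrics{Height: g}
-}
-```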
-
-**b) go-kit metrics package as an interface**
-
-The metrics package provides a set of uniform interfaces for service
-instrumentation and offers adapters to popular metrics packages:
-
-https://godoc.org/github.com/go-kit/kit/metrics#pkg-subdirectories
-
-Compared to the Prometheus API, we're losing customisability and control, but gaining
-freedom in choosing any instrument from the above list, given we will extract
-metrics creation into a separate function (see "providers" in node/node.go).
-
-**c) telegraf**
-
-Unlike the already discussed options, telegraf does not require modifying Tendermint
-source code. You create something called an input plugin, which polls
-Tendermint RPC every second and calculates the metrics itself.
-
-While it may sound good, some metrics we want to report are not exposed via
-RPC or pubsub, and therefore can't be accessed externally.
-
-### 2. opencensus
-
-opencensus provides both metrics and tracing, which may be important in the
-future. Its API looks different from go-kit and Prometheus, but it looks like it
-covers everything we need.
-
-Unfortunately, the OpenCensus Go client does not define any
-interfaces, so if we want to abstract away metrics, we
-will need to write the interfaces ourselves.
-
-### List of metrics
-
-| | Name | Type | Description |
-| - | --------------------------------------- | ------- | ----------------------------------------------------------------------------- |
-| A | consensus_height | Gauge | |
-| A | consensus_validators | Gauge | Number of validators who signed |
-| A | consensus_validators_power | Gauge | Total voting power of all validators |
-| A | consensus_missing_validators | Gauge | Number of validators who did not sign |
-| A | consensus_missing_validators_power | Gauge | Total voting power of the missing validators |
-| A | consensus_byzantine_validators | Gauge | Number of validators who tried to double sign |
-| A | consensus_byzantine_validators_power | Gauge | Total voting power of the byzantine validators |
-| A | consensus_block_interval | Timing | Time between this and the last block (Block.Header.Time) |
-| | consensus_block_time | Timing | Time to create a block (from creating a proposal to commit) |
-| | consensus_time_between_blocks | Timing | Time between committing the last block and receiving (or creating) the next proposal |
-| A | consensus_rounds | Gauge | Number of rounds |
-| | consensus_prevotes | Gauge | |
-| | consensus_precommits | Gauge | |
-| | consensus_prevotes_total_power | Gauge | |
-| | consensus_precommits_total_power | Gauge | |
-| A | consensus_num_txs | Gauge | |
-| A | mempool_size | Gauge | |
-| A | consensus_total_txs | Gauge | |
-| A | consensus_block_size | Gauge | In bytes |
-| A | p2p_peers | Gauge | Number of peers the node is connected to |
-
-`A` - will be implemented in the first place.
-
-**Proposed solution**
-
-## Status
-
-Proposed.
-
-## Consequences
-
-### Positive
-
-Better visibility, support for a variety of monitoring backends
-
-### Negative
-
-One more library to audit; metrics reporting code mixed with the business domain.
- -### Neutral - -- diff --git a/docs/architecture/adr-template.md b/docs/architecture/adr-template.md deleted file mode 100644 index 2303490a..00000000 --- a/docs/architecture/adr-template.md +++ /dev/null @@ -1,16 +0,0 @@ -# ADR 000: Template for an ADR - -## Context - -## Decision - -## Status - - -## Consequences - -### Positive - -### Negative - -### Neutral diff --git a/docs/architecture/img/formula1.png b/docs/architecture/img/formula1.png deleted file mode 100644 index 447ee30f..00000000 Binary files a/docs/architecture/img/formula1.png and /dev/null differ diff --git a/docs/architecture/img/formula2.png b/docs/architecture/img/formula2.png deleted file mode 100644 index 081a1576..00000000 Binary files a/docs/architecture/img/formula2.png and /dev/null differ diff --git a/docs/architecture/img/tags1.png b/docs/architecture/img/tags1.png deleted file mode 100644 index a6bc64e8..00000000 Binary files a/docs/architecture/img/tags1.png and /dev/null differ diff --git a/docs/assets/a_plus_t.png b/docs/assets/a_plus_t.png deleted file mode 100644 index 8f5bc5e9..00000000 Binary files a/docs/assets/a_plus_t.png and /dev/null differ diff --git a/docs/assets/abci.png b/docs/assets/abci.png deleted file mode 100644 index 73111caf..00000000 Binary files a/docs/assets/abci.png and /dev/null differ diff --git a/docs/assets/consensus_logic.png b/docs/assets/consensus_logic.png deleted file mode 100644 index 22b70b26..00000000 Binary files a/docs/assets/consensus_logic.png and /dev/null differ diff --git a/docs/assets/tm-application-example.png b/docs/assets/tm-application-example.png deleted file mode 100644 index 47d4e928..00000000 Binary files a/docs/assets/tm-application-example.png and /dev/null differ diff --git a/docs/assets/tm-transaction-flow.png b/docs/assets/tm-transaction-flow.png deleted file mode 100644 index ea490800..00000000 Binary files a/docs/assets/tm-transaction-flow.png and /dev/null differ diff --git a/docs/assets/tmint-logo-blue.png b/docs/assets/tmint-logo-blue.png deleted file mode 100644 index cc4c8fb8..00000000 Binary files a/docs/assets/tmint-logo-blue.png and /dev/null differ diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 7d52a493..00000000 --- a/docs/conf.py +++ /dev/null @@ -1,206 +0,0 @@ -# -*- coding: utf-8 -*- -# -# Tendermint documentation build configuration file, created by -# sphinx-quickstart on Mon Aug 7 04:55:09 2017. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -# -import os -# import sys -# sys.path.insert(0, os.path.abspath('.')) -import urllib - -import sphinx_rtd_theme - - -# -- General configuration ------------------------------------------------ - -# If your documentation needs a minimal Sphinx version, state it here. -# -# needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = [] - -# Add any paths that contain templates here, relative to this directory. 
-templates_path = ['_templates'] - -# The suffix(es) of source filenames. -# You can specify multiple suffix as a list of string: -# - -from recommonmark.parser import CommonMarkParser - -source_parsers = { - '.md': CommonMarkParser, -} - -source_suffix = ['.rst', '.md'] -#source_suffix = '.rst' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'Tendermint' -copyright = u'2018, The Authors' -author = u'Tendermint' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = u'' -# The full version, including alpha/beta/rc tags. -release = u'' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -# -# This is also used if you do content translation via gettext catalogs. -# Usually you set "language" from the command line for these cases. -language = None - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. -# This patterns also effect to html_static_path and html_extra_path -exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'architecture', 'spec', 'examples'] - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' - -# If true, `todo` and `todoList` produce output, else they produce nothing. -todo_include_todos = False - - -# -- Options for HTML output ---------------------------------------------- - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -# -html_theme = 'sphinx_rtd_theme' -# html_theme = 'alabaster' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -# -# html_theme_options = {} - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Custom sidebar templates, must be a dictionary that maps document names -# to template names. -# -# This is required for the alabaster theme -# refs: http://alabaster.readthedocs.io/en/latest/installation.html#sidebars -html_sidebars = { - '**': [ - 'about.html', - 'navigation.html', - 'relations.html', # needs 'show_related': True theme option to display - 'searchbox.html', - 'donate.html', - ] -} - - -# -- Options for HTMLHelp output ------------------------------------------ - -# Output file base name for HTML help builder. -htmlhelp_basename = 'Tendermintdoc' - - -# -- Options for LaTeX output --------------------------------------------- - -latex_elements = { - # The paper size ('letterpaper' or 'a4paper'). - # - # 'papersize': 'letterpaper', - - # The font size ('10pt', '11pt' or '12pt'). - # - # 'pointsize': '10pt', - - # Additional stuff for the LaTeX preamble. - # - # 'preamble': '', - - # Latex figure (float) alignment - # - # 'figure_align': 'htbp', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). 
-latex_documents = [
-    (master_doc, 'Tendermint.tex', u'Tendermint Documentation',
-     u'The Authors', 'manual'),
-]
-
-
-# -- Options for manual page output ---------------------------------------
-
-# One entry per manual page. List of tuples
-# (source start file, name, description, authors, manual section).
-man_pages = [
-    (master_doc, 'Tendermint', u'Tendermint Documentation',
-     [author], 1)
-]
-
-
-# -- Options for Texinfo output -------------------------------------------
-
-# Grouping the document tree into Texinfo files. List of tuples
-# (source start file, target name, title, author,
-#  dir menu entry, description, category)
-texinfo_documents = [
-    (master_doc, 'Tendermint', u'Tendermint Documentation',
-     author, 'Tendermint', 'Byzantine Fault Tolerant Consensus.',
-     'Database'),
-]
-
-# ---------------- customizations ----------------------
-
-# for Docker README, below
-from shutil import copyfile
-
-# tm-bench and tm-monitor
-tools_repo = "https://raw.githubusercontent.com/tendermint/tools/"
-tools_branch = "master"
-
-tools_dir = "./tools"
-
-
-if not os.path.isdir(tools_dir):
-    os.mkdir(tools_dir)
-
-copyfile('../DOCKER/README.md', tools_dir+'/docker.md')
-
-urllib.urlretrieve(tools_repo+tools_branch+'/tm-bench/README.md', filename=tools_dir+'/benchmarking.md')
-urllib.urlretrieve(tools_repo+tools_branch+'/tm-monitor/README.md', filename=tools_dir+'/monitoring.md')
-
-#### abci spec #################################
-
-abci_repo = "https://raw.githubusercontent.com/tendermint/abci/"
-abci_branch = "develop"
-
-urllib.urlretrieve(abci_repo+abci_branch+'/specification.md', filename='abci-spec.md')
diff --git a/docs/deploy-testnets.md b/docs/deploy-testnets.md
deleted file mode 100644
index 0c74b2c5..00000000
--- a/docs/deploy-testnets.md
+++ /dev/null
@@ -1,68 +0,0 @@
-# Deploy a Testnet
-
-Now that we've seen how ABCI works, and even played with a few
-applications on a single validator node, it's time to deploy a test
-network to four validator nodes.
-
-## Manual Deployments
-
-It's relatively easy to set up a Tendermint cluster manually. The only
-requirements for a particular Tendermint node are a private key for the
-validator, stored as `priv_validator.json`, a node key, stored as
-`node_key.json`, and a list of the public keys of all validators, stored
-as `genesis.json`. These files should be stored in
-`~/.tendermint/config`, or wherever `$TMHOME` points.
-
-Here are the steps for setting up a testnet manually:
-
-1) Provision nodes on your cloud provider of choice
-2) Install Tendermint and the application of interest on all nodes
-3) Generate a private key and a node key for each validator using
-   `tendermint init`
-4) Compile a list of public keys for each validator into a
-   `genesis.json` file and replace the existing file with it.
-5) Run
-   `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >`
-   on each node, where `< peer addresses >` is a comma-separated list
-   of `ID@IP:PORT` entries, one per node. The default port for
-   Tendermint is `26656`.
-   Thus, if the IP addresses of your nodes were
-   `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command
-   would look like:
-
-       tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656,429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656,f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656
-
-After a few seconds, all the nodes should connect to each other and
-start making blocks! For more information, see the Tendermint Networks
-section of [the guide to using Tendermint](using-tendermint.html).
-
-But wait! Steps 3 and 4 are quite manual. Instead, use [this
-script](https://github.com/tendermint/tendermint/blob/develop/docs/examples/init_testnet.sh),
-which does the heavy lifting for you. And it gets better.
-
-Instead of the previously linked script, you can initialize the files
-required for a testnet with the `tendermint testnet` command. By
-default, running `tendermint testnet` will create all the required
-files, just like the script. Of course, you'll still need to manually
-edit some fields in the `config.toml`. Alternatively, use the available
-flags to auto-populate the `config.toml` with the fields that would
-otherwise be passed in via flags when running `tendermint node`. As you
-might imagine, this command is useful for manual or automated
-deployments.
-
-## Automated Deployments
-
-Automated deployments are the easiest and fastest way to get a testnet
-up, in less than 5 minutes.
-
-### Local
-
-With `docker` and `docker-compose` installed, run the command:
-
-    make localnet-start
-
-from the root of the tendermint repository. This will spin up a 4-node
-local testnet. Review the target in the Makefile to debug any problems.
-
-### Cloud
-
-See the [next section](./terraform-and-ansible.html) for details.
diff --git a/docs/determinism.md b/docs/determinism.md
deleted file mode 100644
index 95958bb1..00000000
--- a/docs/determinism.md
+++ /dev/null
@@ -1,5 +0,0 @@
-# On Determinism
-
-Arguably, the most difficult part of blockchain programming is determinism - that is, ensuring that sources of indeterminism do not creep into the design of such systems.
-
-See [this issue](https://github.com/tendermint/abci/issues/56) for more information on the potential sources of indeterminism.
diff --git a/docs/ecosystem.md b/docs/ecosystem.md
deleted file mode 100644
index 6b7f833a..00000000
--- a/docs/ecosystem.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Ecosystem
-
-The growing list of applications built using various pieces of the
-Tendermint stack can be found at:
-
-- https://tendermint.com/ecosystem
-
-We thank the community for their contributions thus far and welcome the
-addition of new projects. A pull request can be submitted to [this
-file](https://github.com/tendermint/aib-data/blob/master/json/ecosystem.json)
-to include your project.
-
-## Other Tools
-
-See [deploy testnets](./deploy-testnets) for information about all
-the tools built by Tendermint. We have Kubernetes, Ansible, and
-Terraform integrations.
-
-For upgrading from older to newer versions of Tendermint and to migrate
-your chain data, see [tm-migrator](https://github.com/hxzqlh/tm-tools)
-written by @hxzqlh.
diff --git a/docs/examples/getting-started.md b/docs/examples/getting-started.md
deleted file mode 100644
index ed61f999..00000000
--- a/docs/examples/getting-started.md
+++ /dev/null
@@ -1,149 +0,0 @@
-# Tendermint
-
-## Overview
-
-This is a quick start guide.
-If you have a vague idea about how Tendermint
-works and want to get started right away, continue. Otherwise, [review the
-documentation](http://tendermint.readthedocs.io/en/master/).
-
-## Install
-
-### Quick Install
-
-On a fresh Ubuntu 16.04 machine, installation can be done with [this script](https://git.io/vpgEI), like so:
-
-```
-curl -L https://git.io/vpgEI | bash
-source ~/.profile
-```
-
-WARNING: do not run the above on your local machine.
-
-The script is also used to facilitate cluster deployment below.
-
-### Manual Install
-
-Requires:
-- `go` minimum version 1.10
-- `$GOPATH` environment variable must be set
-- `$GOPATH/bin` must be on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
-
-To install Tendermint, run:
-
-```
-go get github.com/tendermint/tendermint
-cd $GOPATH/src/github.com/tendermint/tendermint
-make get_tools && make get_vendor_deps
-make install
-```
-
-Note that `go get` may return an error, but it can be ignored.
-
-Confirm installation:
-
-```
-$ tendermint version
-0.18.0-XXXXXXX
-```
-
-## Initialization
-
-Running:
-
-```
-tendermint init
-```
-
-will create the required files for a single, local node.
-
-These files are found in `$HOME/.tendermint`:
-
-```
-$ ls $HOME/.tendermint
-
-config.toml  data  genesis.json  priv_validator.json
-```
-
-For a single, local node, no further configuration is required.
-Configuring a cluster is covered further below.
-
-## Local Node
-
-Start tendermint with a simple in-process application:
-
-```
-tendermint node --proxy_app=kvstore
-```
-
-and blocks will start to stream in:
-
-```
-I[01-06|01:45:15.592] Executed block    module=state height=1 validTxs=0 invalidTxs=0
-I[01-06|01:45:15.624] Committed state   module=state height=1 txs=0 appHash=
-```
-
-Check the status with:
-
-```
-curl -s localhost:26657/status
-```
-
-### Sending Transactions
-
-With the kvstore app running, we can send transactions:
-
-```
-curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"'
-```
-
-and check that it worked with:
-
-```
-curl -s 'localhost:26657/abci_query?data="abcd"'
-```
-
-We can send transactions with a key and value too:
-
-```
-curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"'
-```
-
-and query the key:
-
-```
-curl -s 'localhost:26657/abci_query?data="name"'
-```
-
-where the value is returned in hex.
-
-## Cluster of Nodes
-
-First create four Ubuntu cloud machines. The following was tested on Digital
-Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP
-addresses below as IP1, IP2, IP3, IP4.
-
-Then, `ssh` into each machine, and execute [this script](https://git.io/vNLfY):
-
-```
-curl -L https://git.io/vpgEI | bash
-source ~/.profile
-```
-
-This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary.
-
-Next, `cd` into `docs/examples`.
-Each command below should be run from each node, in sequence:
-
-```
-tendermint node --home ./node0 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-tendermint node --home ./node1 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-tendermint node --home ./node2 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-tendermint node --home ./node3 --proxy_app=kvstore --p2p.persistent_peers="167b80242c300bf0ccfb3ced3dec60dc2a81776e@IP1:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@IP2:26656,303a1a4312c30525c99ba66522dd81cca56a361a@IP3:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@IP4:26656"
-```
-
-Note that after the third node is started, blocks will start to stream in
-because >2/3 of validators (defined in the `genesis.json`) have come online.
-Seeds can also be specified in the `config.toml`. See [this
-PR](https://github.com/tendermint/tendermint/pull/792) for more information
-about configuration options.
-
-Transactions can then be sent as covered in the single, local node example above.
diff --git a/docs/examples/init_testnet.sh b/docs/examples/init_testnet.sh
deleted file mode 100644
index cd83751e..00000000
--- a/docs/examples/init_testnet.sh
+++ /dev/null
@@ -1,69 +0,0 @@
-#!/usr/bin/env bash
-
-# make all the files
-tendermint init --home ./tester/node0
-tendermint init --home ./tester/node1
-tendermint init --home ./tester/node2
-tendermint init --home ./tester/node3
-
-file0=./tester/node0/config/genesis.json
-file1=./tester/node1/config/genesis.json
-file2=./tester/node2/config/genesis.json
-file3=./tester/node3/config/genesis.json
-
-genesis_time=`cat $file0 | jq '.genesis_time'`
-chain_id=`cat $file0 | jq '.chain_id'`
-
-value0=`cat $file0 | jq '.validators[0].pub_key.value'`
-value1=`cat $file1 | jq '.validators[0].pub_key.value'`
-value2=`cat $file2 | jq '.validators[0].pub_key.value'`
-value3=`cat $file3 | jq '.validators[0].pub_key.value'`
-
-rm $file0
-rm $file1
-rm $file2
-rm $file3
-
-echo "{
-  \"genesis_time\": $genesis_time,
-  \"chain_id\": $chain_id,
-  \"validators\": [
-    {
-      \"pub_key\": {
-        \"type\": \"AC26791624DE60\",
-        \"value\": $value0
-      },
-      \"power\": 10,
-      \"name\": \"\"
-    },
-    {
-      \"pub_key\": {
-        \"type\": \"AC26791624DE60\",
-        \"value\": $value1
-      },
-      \"power\": 10,
-      \"name\": \"\"
-    },
-    {
-      \"pub_key\": {
-        \"type\": \"AC26791624DE60\",
-        \"value\": $value2
-      },
-      \"power\": 10,
-      \"name\": \"\"
-    },
-    {
-      \"pub_key\": {
-        \"type\": \"AC26791624DE60\",
-        \"value\": $value3
-      },
-      \"power\": 10,
-      \"name\": \"\"
-    }
-  ],
-  \"app_hash\": \"\"
-}" >> $file0
-
-cp $file0 $file1
-cp $file0 $file2
-cp $file2 $file3
\ No newline at end of file
diff --git a/docs/examples/install_tendermint.sh b/docs/examples/install_tendermint.sh
deleted file mode 100644
index 5a9c49d7..00000000
--- a/docs/examples/install_tendermint.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/usr/bin/env bash
-
-# XXX: this script is meant to be used only on a fresh Ubuntu 16.04 instance
-# and has
only been tested on Digital Ocean - -# get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz -tar -xvf go1.10.linux-amd64.tar.gz - -apt install make - -## move go and add binary to path -mv go /usr/local -echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile - -## create the GOPATH directory, set GOPATH and put on PATH -mkdir goApps -echo "export GOPATH=/root/goApps" >> ~/.profile -echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile - -source ~/.profile - -## get the code and move into it -REPO=github.com/tendermint/tendermint -go get $REPO -cd $GOPATH/src/$REPO - -## build -git checkout master -make get_tools -make get_vendor_deps -make install diff --git a/docs/examples/node0/config/config.toml b/docs/examples/node0/config/config.toml deleted file mode 100644 index d1ecf238..00000000 --- a/docs/examples/node0/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. -# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "alpha" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to 
connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. -seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node0/config/genesis.json b/docs/examples/node0/config/genesis.json deleted file mode 100644 index b9c12e31..00000000 --- a/docs/examples/node0/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": 10, - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node0/config/node_key.json b/docs/examples/node0/config/node_key.json deleted file mode 100644 index f4cdd093..00000000 --- a/docs/examples/node0/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"954568A3288910","value":"7lY+k6EDllG8Q9gVbF5313t/ag2YGkBVKdVa0YHJ9xO5k0w3Q/hke0Z7UFT1KgVDGRUEKzwAwwjwFQUvgF0ZWg=="}} \ No newline at end of file diff --git a/docs/examples/node0/config/priv_validator.json b/docs/examples/node0/config/priv_validator.json deleted file mode 100644 index e758b75b..00000000 --- a/docs/examples/node0/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "122A9414774A2FCAD026201DA477EF3F41970EF0", - "pub_key": { - "type": "AC26791624DE60", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "YLxp3ho+kySgAnzjBptbxDzSGw2ntGZLsIHQsaVxY/cP6TgB2Odg9ZsH3CZp3XfsF2mj+QC6U6hNFCsvL9BziQ==" - } -} \ No newline at end of file diff --git a/docs/examples/node1/config/config.toml b/docs/examples/node1/config/config.toml deleted file mode 100644 index bc5a5bde..00000000 --- a/docs/examples/node1/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "bravo" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node1/config/genesis.json b/docs/examples/node1/config/genesis.json deleted file mode 100644 index b9c12e31..00000000 --- a/docs/examples/node1/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": 10, - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node1/config/node_key.json b/docs/examples/node1/config/node_key.json deleted file mode 100644 index 374efe63..00000000 --- a/docs/examples/node1/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"954568A3288910","value":"H71dc/TIG7nTselfa9nG0WRArXLKYnm7P5eFCk2lk8ASKQ3sIHpbdxCSHQD/RcdHe7TiabJeuOssNPvPWiyQEQ=="}} \ No newline at end of file diff --git a/docs/examples/node1/config/priv_validator.json b/docs/examples/node1/config/priv_validator.json deleted file mode 100644 index caf3dbc5..00000000 --- a/docs/examples/node1/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "BEA1B57F5806CF9AC4D54C8CF806DED5C0F102E1", - "pub_key": { - "type": "AC26791624DE60", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "o0IqrHSPtd5YqGefodWxpJuRzvuVBjgbH785vbMgk7Vvno3kYJHVp1xVG4Q2N8rD+aubZ2SFPvA1ldX9IOwqxQ==" - } -} \ No newline at end of file diff --git a/docs/examples/node2/config/config.toml b/docs/examples/node2/config/config.toml deleted file mode 100644 index 1bf06286..00000000 --- a/docs/examples/node2/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "charlie" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node2/config/genesis.json b/docs/examples/node2/config/genesis.json deleted file mode 100644 index b9c12e31..00000000 --- a/docs/examples/node2/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": 10, - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node2/config/node_key.json b/docs/examples/node2/config/node_key.json deleted file mode 100644 index 52a978bb..00000000 --- a/docs/examples/node2/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"954568A3288910","value":"COHZ/Y2cWGWxJNkRwtpQBt5sYvOnb6Gpz0lO46XERRJFBIdSWD5x1UMGRSTmnvW1ec5G4bMdg6zUZKOZD+vVPg=="}} \ No newline at end of file diff --git a/docs/examples/node2/config/priv_validator.json b/docs/examples/node2/config/priv_validator.json deleted file mode 100644 index 65fa3048..00000000 --- a/docs/examples/node2/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "F0AA266949FB29ADA0B679C27889ED930BD1BDA1", - "pub_key": { - "type": "AC26791624DE60", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "khADeZ5K/8u/L99DFaZNRq8V5g+EHWbwfqFjhCrppaAiBkOkm8YDRMBqaJwDyKtzL5Ff8GRSWPoNfAzv3XLAhQ==" - } -} \ No newline at end of file diff --git a/docs/examples/node3/config/config.toml b/docs/examples/node3/config/config.toml deleted file mode 100644 index 8c23f7d3..00000000 --- a/docs/examples/node3/config/config.toml +++ /dev/null @@ -1,166 +0,0 @@ -# This is a TOML config file. 
-# For more information, see https://github.com/toml-lang/toml - -##### main base config options ##### - -# TCP or UNIX socket address of the ABCI application, -# or the name of an ABCI application compiled in with the Tendermint binary -proxy_app = "tcp://127.0.0.1:26658" - -# A custom human readable name for this node -moniker = "delta" - -# If this node is many blocks behind the tip of the chain, FastSync -# allows them to catchup quickly by downloading blocks in parallel -# and verifying their commits -fast_sync = true - -# Database backend: leveldb | memdb -db_backend = "leveldb" - -# Database directory -db_path = "data" - -# Output level for logging, including package level options -log_level = "main:info,state:info,*:error" - -##### additional base config options ##### - -# Path to the JSON file containing the initial validator set and other meta data -genesis_file = "config/genesis.json" - -# Path to the JSON file containing the private key to use as a validator in the consensus protocol -priv_validator_file = "config/priv_validator.json" - -# Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "config/node_key.json" - -# Mechanism to connect to the ABCI application: socket | grpc -abci = "socket" - -# TCP or UNIX socket address for the profiling server to listen on -prof_laddr = "" - -# If true, query the ABCI app on connecting to a new peer -# so the app can decide if we should keep the connection or not -filter_peers = false - -##### advanced configuration options ##### - -##### rpc server configuration options ##### -[rpc] - -# TCP or UNIX socket address for the RPC server to listen on -laddr = "tcp://0.0.0.0:26657" - -# TCP or UNIX socket address for the gRPC server to listen on -# NOTE: This server only supports /broadcast_tx_commit -grpc_laddr = "" - -# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool -unsafe = false - -##### peer to peer configuration options ##### -[p2p] - -# Address to listen for incoming connections -laddr = "tcp://0.0.0.0:26656" - -# Comma separated list of seed nodes to connect to -seeds = "" - -# Comma separated list of nodes to keep persistent connections to -# Do not add private peers to this list if you don't want them advertised -persistent_peers = "" - -# Path to address book -addr_book_file = "config/addrbook.json" - -# Set true for strict address routability rules -addr_book_strict = true - -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - -# Maximum number of peers to connect to -max_num_peers = 50 - -# Maximum size of a message packet payload, in bytes -max_packet_msg_payload_size = 1024 - -# Rate at which packets can be sent, in bytes/second -send_rate = 512000 - -# Rate at which packets can be received, in bytes/second -recv_rate = 512000 - -# Set true to enable the peer-exchange reactor -pex = true - -# Seed mode, in which node constantly crawls the network and looks for -# peers. If another node asks it for addresses, it responds and disconnects. -# -# Does not work if the peer-exchange reactor is disabled. 
-seed_mode = false - -# Comma separated list of peer IDs to keep private (will not be gossiped to other peers) -private_peer_ids = "" - -##### mempool configuration options ##### -[mempool] - -recheck = true -recheck_empty = true -broadcast = true -wal_dir = "data/mempool.wal" - -##### consensus configuration options ##### -[consensus] - -wal_file = "data/cs.wal/wal" - -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 - -# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) -skip_timeout_commit = false - -# BlockSize -max_block_size_txs = 10000 -max_block_size_bytes = 1 - -# EmptyBlocks mode and possible interval between empty blocks in seconds -create_empty_blocks = true -create_empty_blocks_interval = 0 - -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 -peer_query_maj23_sleep_duration = 2000 - -##### transactions indexer configuration options ##### -[tx_index] - -# What indexer to use for transactions -# -# Options: -# 1) "null" (default) -# 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). -indexer = "kv" - -# Comma-separated list of tags to index (by default the only tag is tx hash) -# -# It's recommended to index only a subset of tags due to possible memory -# bloat. This is, of course, depends on the indexer's DB and the volume of -# transactions. -index_tags = "" - -# When set to true, tells indexer to index all tags. Note this may be not -# desirable (see the comment above). IndexTags has a precedence over -# IndexAllTags (i.e. when given both, IndexTags will be indexed). 
-index_all_tags = false diff --git a/docs/examples/node3/config/genesis.json b/docs/examples/node3/config/genesis.json deleted file mode 100644 index b9c12e31..00000000 --- a/docs/examples/node3/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "0001-01-01T00:00:00Z", - "chain_id": "test-chain-A2i3OZ", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "D+k4AdjnYPWbB9wmad137Bdpo/kAulOoTRQrLy/Qc4k=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "b56N5GCR1adcVRuENjfKw/mrm2dkhT7wNZXV/SDsKsU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "IgZDpJvGA0TAamicA8ircy+RX/BkUlj6DXwM791ywIU=" - }, - "power": 10, - "name": "" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "power": 10, - "name": "" - } - ], - "app_hash": "" -} diff --git a/docs/examples/node3/config/node_key.json b/docs/examples/node3/config/node_key.json deleted file mode 100644 index bde4e0ed..00000000 --- a/docs/examples/node3/config/node_key.json +++ /dev/null @@ -1 +0,0 @@ -{"priv_key":{"type":"954568A3288910","value":"9Y9xp/tUJJ6pHTF5SUV0bGKYSdVbFtMHu+Lr8S0JBSZAwneaejnfOEU1LMKOnQ07skrDUaJcj5di3jAyjxJzqg=="}} \ No newline at end of file diff --git a/docs/examples/node3/config/priv_validator.json b/docs/examples/node3/config/priv_validator.json deleted file mode 100644 index 1d985a00..00000000 --- a/docs/examples/node3/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "9A1A6914EB5F4FF0269C7EEEE627C27310CC64F9", - "pub_key": { - "type": "AC26791624DE60", - "value": "KGAZfxZvIZ7abbeIQ85U1ECG6+I62KSdaH8ulc0+OiU=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "jb52LZ5gp+eQ8nJlFK1z06nBMp1gD8ICmyzdM1icGOgoYBl/Fm8hntptt4hDzlTUQIbr4jrYpJ1ofy6VzT46JQ==" - } -} \ No newline at end of file diff --git a/docs/getting-started.md b/docs/getting-started.md deleted file mode 100644 index 9767dae5..00000000 --- a/docs/getting-started.md +++ /dev/null @@ -1,265 +0,0 @@ -# Getting Started - -## First Tendermint App - -As a general purpose blockchain engine, Tendermint is agnostic to the -application you want to run. So, to run a complete blockchain that does -something useful, you must start two programs: one is Tendermint Core, -the other is your application, which can be written in any programming -language. Recall from [the intro to -ABCI](introduction.html#ABCI-Overview) that Tendermint Core handles all -the p2p and consensus stuff, and just forwards transactions to the -application when they need to be validated, or when they're ready to be -committed to a block. - -In this guide, we show you some examples of how to run an application -using Tendermint. - -### Install - -The first apps we will work with are written in Go. To install them, you -need to [install Go](https://golang.org/doc/install) and put -`$GOPATH/bin` in your `$PATH`; see -[here](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH) for -more info. 
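For concreteness, here is a minimal sketch of that environment setup, assuming bash, a Go toolchain unpacked under `/usr/local/go`, and a workspace at `$HOME/goApps` (the same layout the install script elsewhere in these docs uses; adjust the paths to your system):

```
# hypothetical additions to ~/.profile; run `source ~/.profile` afterwards
export PATH="$PATH:/usr/local/go/bin"   # the go binary itself
export GOPATH="$HOME/goApps"            # workspace for sources and builds
export PATH="$PATH:$GOPATH/bin"         # binaries installed by `go get` / `make install`
```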
- -Then run - - go get -u github.com/tendermint/abci/cmd/abci-cli - -If there is an error, install and run the -[dep](https://github.com/golang/dep) tool to pin the dependencies: - - cd $GOPATH/src/github.com/tendermint/abci - make get_tools - make get_vendor_deps - make install - -Now you should have the `abci-cli` installed; you'll see a couple of -commands (`counter` and `kvstore`) that are example applications written -in Go. See below for an application written in JavaScript. - -Now, let's run some apps! - -## KVStore - A First Example - -The kvstore app is a [Merkle -tree](https://en.wikipedia.org/wiki/Merkle_tree) that just stores all -transactions. If the transaction contains an `=`, e.g. `key=value`, then -the `value` is stored under the `key` in the Merkle tree. Otherwise, the -full transaction bytes are stored as the key and the value. - -Let's start a kvstore application. - - abci-cli kvstore - -In another terminal, we can start Tendermint. If you have never run -Tendermint before, use: - - tendermint init - tendermint node - -If you have used Tendermint, you may want to reset the data for a new -blockchain by running `tendermint unsafe_reset_all`. Then you can run -`tendermint node` to start Tendermint, and connect to the app. For more -details, see [the guide on using Tendermint](./using-tendermint.html). - -You should see Tendermint making blocks! We can get the status of our -Tendermint node as follows: - - curl -s localhost:26657/status - -The `-s` just silences `curl`. For nicer output, pipe the result into a -tool like [jq](https://stedolan.github.io/jq/) or `json_pp`. - -Now let's send some transactions to the kvstore. - - curl -s 'localhost:26657/broadcast_tx_commit?tx="abcd"' - -Note the single quote (`'`) around the url, which ensures that the -double quotes (`"`) are not escaped by bash. This command sent a -transaction with bytes `abcd`, so `abcd` will be stored as both the key -and the value in the Merkle tree. The response should look something -like: - - { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "fee": {} - }, - "deliver_tx": { - "tags": [ - { - "key": "YXBwLmNyZWF0b3I=", - "value": "amFl" - }, - { - "key": "YXBwLmtleQ==", - "value": "YWJjZA==" - } - ], - "fee": {} - }, - "hash": "9DF66553F98DE3C26E3C3317A3E4CED54F714E39", - "height": 14 - } - } - -We can confirm that our transaction worked and the value got stored by -querying the app: - - curl -s 'localhost:26657/abci_query?data="abcd"' - -The result should look like: - - { - "jsonrpc": "2.0", - "id": "", - "result": { - "response": { - "log": "exists", - "index": "-1", - "key": "YWJjZA==", - "value": "YWJjZA==" - } - } - } - -Note the `value` in the result (`YWJjZA==`); this is the base64-encoding -of the ASCII of `abcd`. You can verify this in a python 2 shell by -running `"61626364".decode('base64')` or in python 3 shell by running -`import codecs; codecs.decode("61626364", 'base64').decode('ascii')`. -Stay tuned for a future release that [makes this output more -human-readable](https://github.com/tendermint/abci/issues/32). - -Now let's try setting a different key and value: - - curl -s 'localhost:26657/broadcast_tx_commit?tx="name=satoshi"' - -Now if we query for `name`, we should get `satoshi`, or `c2F0b3NoaQ==` -in base64: - - curl -s 'localhost:26657/abci_query?data="name"' - -Try some other transactions and queries to make sure everything is -working! - -## Counter - Another Example - -Now that we've got the hang of it, let's try another application, the -`counter` app. 
- -The counter app doesn't use a Merkle tree, it just counts how many times -we've sent a transaction, or committed the state. - -This application has two modes: `serial=off` and `serial=on`. - -When `serial=on`, transactions must be a big-endian encoded incrementing -integer, starting at 0. - -If `serial=off`, there are no restrictions on transactions. - -In a live blockchain, transactions collect in memory before they are -committed into blocks. To avoid wasting resources on invalid -transactions, ABCI provides the `CheckTx` message, which application -developers can use to accept or reject transactions, before they are -stored in memory or gossipped to other peers. - -In this instance of the counter app, with `serial=on`, `CheckTx` only -allows transactions whose integer is greater than the last committed -one. - -Let's kill the previous instance of `tendermint` and the `kvstore` -application, and start the counter app. We can enable `serial=on` with a -flag: - - abci-cli counter --serial - -In another window, reset then start Tendermint: - - tendermint unsafe_reset_all - tendermint node - -Once again, you can see the blocks streaming by. Let's send some -transactions. Since we have set `serial=on`, the first transaction must -be the number `0`: - - curl localhost:26657/broadcast_tx_commit?tx=0x00 - -Note the empty (hence successful) response. The next transaction must be -the number `1`. If instead, we try to send a `5`, we get an error: - - > curl localhost:26657/broadcast_tx_commit?tx=0x05 - { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "fee": {} - }, - "deliver_tx": { - "code": 2, - "log": "Invalid nonce. Expected 1, got 5", - "fee": {} - }, - "hash": "33B93DFF98749B0D6996A70F64071347060DC19C", - "height": 34 - } - } - -But if we send a `1`, it works again: - - > curl localhost:26657/broadcast_tx_commit?tx=0x01 - { - "jsonrpc": "2.0", - "id": "", - "result": { - "check_tx": { - "fee": {} - }, - "deliver_tx": { - "fee": {} - }, - "hash": "F17854A977F6FA7EEA1BD758E296710B86F72F3D", - "height": 60 - } - } - -For more details on the `broadcast_tx` API, see [the guide on using -Tendermint](./using-tendermint.html). - -## CounterJS - Example in Another Language - -We also want to run applications in another language - in this case, -we'll run a Javascript version of the `counter`. To run it, you'll need -to [install node](https://nodejs.org/en/download/). - -You'll also need to fetch the relevant repository, from -[here](https://github.com/tendermint/js-abci) then install it. As go -devs, we keep all our code under the `$GOPATH`, so run: - - go get github.com/tendermint/js-abci &> /dev/null - cd $GOPATH/src/github.com/tendermint/js-abci/example - npm install - cd .. - -Kill the previous `counter` and `tendermint` processes. Now run the app: - - node example/app.js - -In another window, reset and start `tendermint`: - - tendermint unsafe_reset_all - tendermint node - -Once again, you should see blocks streaming by - but now, our -application is written in javascript! Try sending some transactions, and -like before - the results should be the same: - - curl localhost:26657/broadcast_tx_commit?tx=0x00 # ok - curl localhost:26657/broadcast_tx_commit?tx=0x05 # invalid nonce - curl localhost:26657/broadcast_tx_commit?tx=0x01 # ok - -Neat, eh? 
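One detail from the counter examples above is worth spelling out: with `serial=on`, the transaction bytes are interpreted as a big-endian integer, so nonces above 255 occupy more than one byte. A short sketch, assuming bash and the same endpoint used throughout this guide:

```
# single-byte nonces: 0x00, 0x01, 0x02, ...
curl localhost:26657/broadcast_tx_commit?tx=0x00
curl localhost:26657/broadcast_tx_commit?tx=0x01
# once the count passes 255, the big-endian encoding grows to two bytes;
# printf can build the hex argument, e.g. for nonce 256:
printf 'tx=0x%04x\n' 256   # -> tx=0x0100
```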
diff --git a/docs/how-to-read-logs.md b/docs/how-to-read-logs.md deleted file mode 100644 index 92f563cf..00000000 --- a/docs/how-to-read-logs.md +++ /dev/null @@ -1,130 +0,0 @@ -# How to read logs - -## Walkabout example - -We first create three connections (mempool, consensus and query) to the -application (running `kvstore` locally in this case). - - I[10-04|13:54:27.364] Starting multiAppConn module=proxy impl=multiAppConn - I[10-04|13:54:27.366] Starting localClient module=abci-client connection=query impl=localClient - I[10-04|13:54:27.366] Starting localClient module=abci-client connection=mempool impl=localClient - I[10-04|13:54:27.367] Starting localClient module=abci-client connection=consensus impl=localClient - -Then Tendermint Core and the application perform a handshake. - - I[10-04|13:54:27.367] ABCI Handshake module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - I[10-04|13:54:27.368] ABCI Replay Blocks module=consensus appHeight=90 storeHeight=90 stateHeight=90 - I[10-04|13:54:27.368] Completed ABCI Handshake - Tendermint and App are synced module=consensus appHeight=90 appHash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - -After that, we start a few more things like the event switch, reactors, -and perform UPNP discover in order to detect the IP address. - - I[10-04|13:54:27.374] Starting EventSwitch module=types impl=EventSwitch - I[10-04|13:54:27.375] This node is a validator module=consensus - I[10-04|13:54:27.379] Starting Node module=main impl=Node - I[10-04|13:54:27.381] Local listener module=p2p ip=:: port=26656 - I[10-04|13:54:27.382] Getting UPNP external address module=p2p - I[10-04|13:54:30.386] Could not perform UPNP discover module=p2p err="write udp4 0.0.0.0:38238->239.255.255.250:1900: i/o timeout" - I[10-04|13:54:30.386] Starting DefaultListener module=p2p impl=Listener(@10.0.2.15:26656) - I[10-04|13:54:30.387] Starting P2P Switch module=p2p impl="P2P Switch" - I[10-04|13:54:30.387] Starting MempoolReactor module=mempool impl=MempoolReactor - I[10-04|13:54:30.387] Starting BlockchainReactor module=blockchain impl=BlockchainReactor - I[10-04|13:54:30.387] Starting ConsensusReactor module=consensus impl=ConsensusReactor - I[10-04|13:54:30.387] ConsensusReactor module=consensus fastSync=false - I[10-04|13:54:30.387] Starting ConsensusState module=consensus impl=ConsensusState - I[10-04|13:54:30.387] Starting WAL module=consensus wal=/home/vagrant/.tendermint/data/cs.wal/wal impl=WAL - I[10-04|13:54:30.388] Starting TimeoutTicker module=consensus impl=TimeoutTicker - -Notice the second row where Tendermint Core reports that "This node is a -validator". It also could be just an observer (regular node). - -Next we replay all the messages from the WAL. - - I[10-04|13:54:30.390] Catchup by replaying consensus messages module=consensus height=91 - I[10-04|13:54:30.390] Replay: New Step module=consensus height=91 round=0 step=RoundStepNewHeight - I[10-04|13:54:30.390] Replay: Done module=consensus - -"Started node" message signals that everything is ready for work. 
- - I[10-04|13:54:30.391] Starting RPC HTTP server on tcp socket 0.0.0.0:26657 module=rpc-server - I[10-04|13:54:30.392] Started node module=main nodeInfo="NodeInfo{id: DF22D7C92C91082324A1312F092AA1DA197FA598DBBFB6526E, moniker: anonymous, network: test-chain-3MNw2N [remote , listen 10.0.2.15:26656], version: 0.11.0-10f361fc ([wire_version=0.6.2 p2p_version=0.5.0 consensus_version=v1/0.2.2 rpc_version=0.7.0/3 tx_index=on rpc_addr=tcp://0.0.0.0:26657])}" - -Next follows a standard block creation cycle, where we enter a new -round, propose a block, receive more than 2/3 of prevotes, then -precommits and finally have a chance to commit a block. For details, -please refer to [Consensus -Overview](introduction.html#consensus-overview) or [Byzantine Consensus -Algorithm](specification.html). - - I[10-04|13:54:30.393] enterNewRound(91/0). Current: 91/0/RoundStepNewHeight module=consensus - I[10-04|13:54:30.393] enterPropose(91/0). Current: 91/0/RoundStepNewRound module=consensus - I[10-04|13:54:30.393] enterPropose: Our turn to propose module=consensus proposer=125B0E3C5512F5C2B0E1109E31885C4511570C42 privValidator="PrivValidator{125B0E3C5512F5C2B0E1109E31885C4511570C42 LH:90, LR:0, LS:3}" - I[10-04|13:54:30.394] Signed proposal module=consensus height=91 round=0 proposal="Proposal{91/0 1:21B79872514F (-1,:0:000000000000) {/10EDEDD7C84E.../}}" - I[10-04|13:54:30.397] Received complete proposal block module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E - I[10-04|13:54:30.397] enterPrevote(91/0). Current: 91/0/RoundStepPropose module=consensus - I[10-04|13:54:30.397] enterPrevote: ProposalBlock is valid module=consensus height=91 round=0 - I[10-04|13:54:30.398] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" err=null - I[10-04|13:54:30.401] Added to prevote module=consensus vote="Vote{0:125B0E3C5512 91/00/1(Prevote) F671D562C7B9 {/89047FFC21D8.../}}" prevotes="VoteSet{H:91 R:0 T:1 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" - I[10-04|13:54:30.401] enterPrecommit(91/0). Current: 91/0/RoundStepPrevote module=consensus - I[10-04|13:54:30.401] enterPrecommit: +2/3 prevoted proposal block. Locking module=consensus hash=F671D562C7B9242900A286E1882EE64E5556FE9E - I[10-04|13:54:30.402] Signed and pushed vote module=consensus height=91 round=0 vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" err=null - I[10-04|13:54:30.404] Added to precommit module=consensus vote="Vote{0:125B0E3C5512 91/00/2(Precommit) F671D562C7B9 {/80533478E41A.../}}" precommits="VoteSet{H:91 R:0 T:2 +2/3:F671D562C7B9242900A286E1882EE64E5556FE9E:1:21B79872514F BA{1:X} map[]}" - I[10-04|13:54:30.404] enterCommit(91/0). 
Current: 91/0/RoundStepPrecommit module=consensus - I[10-04|13:54:30.405] Finalizing commit of block with 0 txs module=consensus height=91 hash=F671D562C7B9242900A286E1882EE64E5556FE9E root=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - I[10-04|13:54:30.405] Block{ - Header{ - ChainID: test-chain-3MNw2N - Height: 91 - Time: 2017-10-04 13:54:30.393 +0000 UTC - NumTxs: 0 - LastBlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 - LastCommit: 56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D - Data: - Validators: CE25FBFF2E10C0D51AA1A07C064A96931BC8B297 - App: E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - }#F671D562C7B9242900A286E1882EE64E5556FE9E - Data{ - - }# - Commit{ - BlockID: F15AB8BEF9A6AAB07E457A6E16BC410546AA4DC6:1:D505DA273544 - Precommits: Vote{0:125B0E3C5512 90/00/2(Precommit) F15AB8BEF9A6 {/FE98E2B956F0.../}} - }#56FEF2EFDB8B37E9C6E6D635749DF3169D5F005D - }#F671D562C7B9242900A286E1882EE64E5556FE9E module=consensus - I[10-04|13:54:30.408] Executed block module=state height=91 validTxs=0 invalidTxs=0 - I[10-04|13:54:30.410] Committed state module=state height=91 txs=0 hash=E0FBAFBF6FCED8B9786DDFEB1A0D4FA2501BADAD - I[10-04|13:54:30.410] Recheck txs module=mempool numtxs=0 height=91 - -## List of modules - -Here is the list of modules you may encounter in Tendermint's log and a -little overview what they do. - -- `abci-client` As mentioned in [Application Development Guide](app-development.md#abci-design), Tendermint acts as an ABCI - client with respect to the application and maintains 3 connections: - mempool, consensus and query. The code used by Tendermint Core can - be found [here](https://github.com/tendermint/abci/tree/master/client). -- `blockchain` Provides storage, pool (a group of peers), and reactor - for both storing and exchanging blocks between peers. -- `consensus` The heart of Tendermint core, which is the - implementation of the consensus algorithm. Includes two - "submodules": `wal` (write-ahead logging) for ensuring data - integrity and `replay` to replay blocks and messages on recovery - from a crash. -- `events` Simple event notification system. The list of events can be - found - [here](https://github.com/tendermint/tendermint/blob/master/types/events.go). - You can subscribe to them by calling `subscribe` RPC method. Refer - to [RPC docs](specification/rpc.html) for additional information. -- `mempool` Mempool module handles all incoming transactions, whenever - they are coming from peers or the application. -- `p2p` Provides an abstraction around peer-to-peer communication. For - more details, please check out the - [README](https://github.com/tendermint/tendermint/blob/master/p2p/README.md). -- `rpc` [Tendermint's RPC](specification/rpc.html). -- `rpc-server` RPC server. For implementation details, please read the - [README](https://github.com/tendermint/tendermint/blob/master/rpc/lib/README.md). -- `state` Represents the latest state and execution submodule, which - executes blocks against the application. -- `types` A collection of the publicly exposed types and methods to - work with them. diff --git a/docs/images/tmint-logo-blue.png b/docs/images/tmint-logo-blue.png deleted file mode 100644 index cc4c8fb8..00000000 Binary files a/docs/images/tmint-logo-blue.png and /dev/null differ diff --git a/docs/index.rst b/docs/index.rst deleted file mode 100644 index e7d86bc2..00000000 --- a/docs/index.rst +++ /dev/null @@ -1,73 +0,0 @@ -.. Tendermint documentation master file, created by - sphinx-quickstart on Mon Aug 7 04:55:09 2017. 
- You can adapt this file completely to your liking, but it should at least - contain the root `toctree` directive. - -Welcome to Tendermint! -====================== - - -.. image:: assets/tmint-logo-blue.png - :height: 200px - :width: 200px - :align: center - -Introduction ------------- - -.. toctree:: - :maxdepth: 1 - - introduction.md - install.md - getting-started.md - using-tendermint.md - deploy-testnets.md - ecosystem.md - -Tendermint Tools ----------------- - -.. the tools/ files are pulled in from the tools repo -.. see the bottom of conf.py -.. toctree:: - :maxdepth: 1 - - tools/docker.md - terraform-and-ansible.md - tools/benchmarking.md - tools/monitoring.md - -ABCI, Apps, Logging, Etc ------------------------- - -.. toctree:: - :maxdepth: 1 - - abci-cli.md - abci-spec.md - app-architecture.md - app-development.md - subscribing-to-events-via-websocket.md - indexing-transactions.md - how-to-read-logs.md - running-in-production.md - metrics.md - -Research & Specification ------------------------- - -.. toctree:: - :maxdepth: 1 - - determinism.md - transactional-semantics.md - -.. specification.md ## keep this file for legacy purpose. needs to be fixed though - -* For a deeper dive, see `this thesis `__. -* There is also the `original whitepaper `__, though it is now quite outdated. -* Readers might also be interested in the `Cosmos Whitepaper `__ which describes Tendermint, ABCI, and how to build a scalable, heterogeneous, cryptocurrency network. -* For example applications and related software built by the Tendermint team and others, see the `software ecosystem `__. - -Join the `community `__ to ask questions and discuss projects. diff --git a/docs/indexing-transactions.md b/docs/indexing-transactions.md deleted file mode 100644 index 93a61fe6..00000000 --- a/docs/indexing-transactions.md +++ /dev/null @@ -1,89 +0,0 @@ -# Indexing Transactions - -Tendermint allows you to index transactions and later query or subscribe -to their results. - -Let's take a look at the `[tx_index]` config section: - - ##### transactions indexer configuration options ##### - [tx_index] - - # What indexer to use for transactions - # - # Options: - # 1) "null" (default) - # 2) "kv" - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend). - indexer = "kv" - - # Comma-separated list of tags to index (by default the only tag is tx hash) - # - # It's recommended to index only a subset of tags due to possible memory - # bloat. This, of course, depends on the indexer's DB and the volume of - # transactions. - index_tags = "" - - # When set to true, tells indexer to index all tags. Note this may not be - # desirable (see the comment above). IndexTags takes precedence over - # IndexAllTags (i.e. when both are given, only IndexTags will be indexed). - index_all_tags = false - -By default, Tendermint will index all transactions by their respective -hashes using an embedded simple indexer. Note, we are planning to add -more options in the future (e.g., PostgreSQL indexer). - -## Adding tags - -In your application's `DeliverTx` method, add the `Tags` field with -pairs of UTF-8 encoded strings (e.g. "account.owner": "Bob", "balance": -"100.0", "date": "2018-01-02"). - -Example: - - func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { - ...
- tags := []cmn.KVPair{ - {[]byte("account.name"), []byte("igor")}, - {[]byte("account.address"), []byte("0xdeadbeef")}, - {[]byte("tx.amount"), []byte("7")}, - } - return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} - } - -If you want Tendermint to only index transactions by the "account.name" tag, -in the config set `tx_index.index_tags="account.name"`. If you want to index -all tags, set `index_all_tags=true`. - -Note, there are a few predefined tags: - -- `tm.event` (event type) -- `tx.hash` (transaction's hash) -- `tx.height` (height of the block transaction was committed in) - -Tendermint will throw a warning if you try to use any of the above keys. - -## Querying transactions - -You can query the transaction results by calling `/tx_search` RPC -endpoint: - - curl "localhost:26657/tx_search?query=\"account.name='igor'\"&prove=true" - -Check out [API docs](https://tendermint.github.io/slate/?shell#txsearch) -for more information on query syntax and other options. - -## Subscribing to transactions - -Clients can subscribe to transactions with the given tags via Websocket -by providing a query to `/subscribe` RPC endpoint. - - { - "jsonrpc": "2.0", - "method": "subscribe", - "id": "0", - "params": { - "query": "account.name='igor'" - } - } - -Check out [API docs](https://tendermint.github.io/slate/#subscribe) for -more information on query syntax and other options. diff --git a/docs/install.md b/docs/install.md deleted file mode 100644 index ff101715..00000000 --- a/docs/install.md +++ /dev/null @@ -1,76 +0,0 @@ -# Install Tendermint - -The fastest and easiest way to install the `tendermint` binary -is to run [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_ubuntu.sh) on -a fresh Ubuntu instance, -or [this script](https://github.com/tendermint/tendermint/blob/develop/scripts/install/install_tendermint_bsd.sh) -on a fresh FreeBSD instance. Read the comments / instructions carefully (i.e., reset your terminal after running the script, -make sure you're okay with the network connections being made). - -## From Binary - -To download pre-built binaries, see the [releases page](https://github.com/tendermint/tendermint/releases). - -## From Source - -You'll need `go` [installed](https://golang.org/doc/install) and the required -[environment variables set](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH). - -### Get Source Code - -``` -mkdir -p $GOPATH/src/github.com/tendermint -cd $GOPATH/src/github.com/tendermint -git clone https://github.com/tendermint/tendermint.git -cd tendermint -``` - -### Get Tools & Dependencies - -``` -make get_tools -make get_vendor_deps -``` - -### Compile - -``` -make install -``` - -to put the binary in `$GOPATH/bin` or use: - -``` -make build -``` - -to put the binary in `./build`. - -The latest `tendermint version` is now installed. - -## Reinstall - -If you already have Tendermint installed, and you make updates, simply - -``` -cd $GOPATH/src/github.com/tendermint/tendermint -make install -``` - -To upgrade, run - -``` -cd $GOPATH/src/github.com/tendermint/tendermint -git pull origin master -make get_vendor_deps -make install -``` - -## Run - -To start a one-node blockchain with a simple in-process application: - -``` -tendermint init -tendermint node --proxy_app=kvstore -``` diff --git a/docs/introduction.md b/docs/introduction.md deleted file mode 100644 index 419071dc..00000000 --- a/docs/introduction.md +++ /dev/null @@ -1,331 +0,0 @@ -# What is Tendermint?
- -Tendermint is software for securely and consistently replicating an -application on many machines. By securely, we mean that Tendermint works -even if up to 1/3 of machines fail in arbitrary ways. By consistently, -we mean that every non-faulty machine sees the same transaction log and -computes the same state. Secure and consistent replication is a -fundamental problem in distributed systems; it plays a critical role in -the fault tolerance of a broad range of applications, from currencies, -to elections, to infrastructure orchestration, and beyond. - -The ability to tolerate machines failing in arbitrary ways, including -becoming malicious, is known as Byzantine fault tolerance (BFT). The -theory of BFT is decades old, but software implementations have only -become popular recently, due largely to the success of "blockchain -technology" like Bitcoin and Ethereum. Blockchain technology is just a -reformalization of BFT in a more modern setting, with emphasis on -peer-to-peer networking and cryptographic authentication. The name -derives from the way transactions are batched in blocks, where each -block contains a cryptographic hash of the previous one, forming a -chain. In practice, the blockchain data structure actually optimizes BFT -design. - -Tendermint consists of two chief technical components: a blockchain -consensus engine and a generic application interface. The consensus -engine, called Tendermint Core, ensures that the same transactions are -recorded on every machine in the same order. The application interface, -called the Application BlockChain Interface (ABCI), enables the -transactions to be processed in any programming language. Unlike other -blockchain and consensus solutions, which come pre-packaged with built-in -state machines (like a fancy key-value store, or a quirky scripting -language), developers can use Tendermint for BFT state machine -replication of applications written in whatever programming language and -development environment is right for them. - -Tendermint is designed to be easy-to-use, simple-to-understand, highly -performant, and useful for a wide variety of distributed applications. - -## Tendermint vs. X - -Tendermint is broadly similar to two classes of software. The first -class consists of distributed key-value stores, like Zookeeper, etcd, -and consul, which use non-BFT consensus. The second class is known as -"blockchain technology", and consists of both cryptocurrencies like -Bitcoin and Ethereum, and alternative distributed ledger designs like -Hyperledger's Burrow. - -### Zookeeper, etcd, consul - -Zookeeper, etcd, and consul are all implementations of a key-value store -atop a classical, non-BFT consensus algorithm. Zookeeper uses a version -of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use -the Raft consensus algorithm, which is much younger and simpler. A -typical cluster contains 3-5 machines, and can tolerate crash failures -in up to 1/2 of the machines, but even a single Byzantine fault can -destroy the system. - -Each offering provides a slightly different implementation of a -featureful key-value store, but all are generally focused around -providing basic services to distributed systems, such as dynamic -configuration, service discovery, locking, leader-election, and so on.
- -Tendermint is in essence similar software, but with two key differences: - -- It is Byzantine Fault Tolerant, meaning it can only tolerate up to 1/3 - of failures, but those failures can include arbitrary behaviour, - including hacking and malicious attacks. -- It does not specify a particular application, like a fancy key-value - store. Instead, it focuses on arbitrary state machine replication, so - developers can build the application logic that's right for them, from - key-value store to cryptocurrency to e-voting platform and beyond. - -The layout of this Tendermint website content is also ripped directly -and without shame from [consul.io](https://www.consul.io/) and the other -[Hashicorp sites](https://www.hashicorp.com/#tools). - -### Bitcoin, Ethereum, etc. - -Tendermint emerged in the tradition of cryptocurrencies like Bitcoin, -Ethereum, etc. with the goal of providing a more efficient and secure -consensus algorithm than Bitcoin's Proof of Work. In the early days, -Tendermint had a simple currency built in, and to participate in -consensus, users had to "bond" units of the currency into a security -deposit which could be revoked if they misbehaved; this is what made -Tendermint a Proof-of-Stake algorithm. - -Since then, Tendermint has evolved to be a general purpose blockchain -consensus engine that can host arbitrary application states. That means -it can be used as a plug-and-play replacement for the consensus engines -of other blockchain software. So one can take the current Ethereum code -base, whether in Rust, or Go, or Haskell, and run it as an ABCI -application using Tendermint consensus. Indeed, [we did that with -Ethereum](https://github.com/tendermint/ethermint). And we plan to do -the same for Bitcoin, ZCash, and various other deterministic -applications as well. - -Another example of a cryptocurrency application built on Tendermint is -[the Cosmos network](http://cosmos.network). - -### Other Blockchain Projects - -[Fabric](https://github.com/hyperledger/fabric) takes a similar approach -to Tendermint, but is more opinionated about how the state is managed, -and requires that all application behaviour runs in potentially many -docker containers, modules it calls "chaincode". It uses an -implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf) -from a team at IBM that is [augmented to handle potentially -non-deterministic -chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is -possible to implement this docker-based behaviour as an ABCI app in -Tendermint, though extending Tendermint to handle non-determinism -remains for future work. - -[Burrow](https://github.com/hyperledger/burrow) is an implementation of -the Ethereum Virtual Machine and Ethereum transaction mechanics, with -additional features for a name-registry, permissions, and native -contracts, and an alternative blockchain API. It uses Tendermint as its -consensus engine, and provides a particular application state. - -## ABCI Overview - -The [Application BlockChain Interface -(ABCI)](https://github.com/tendermint/abci) allows for Byzantine Fault -Tolerant replication of applications written in any programming -language. - -### Motivation - -Thus far, all blockchain "stacks" (such as -[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic -design.
That is, each blockchain stack is a single program that handles -all the concerns of a decentralized ledger; this includes P2P -connectivity, the "mempool" broadcasting of transactions, consensus on -the most recent block, account balances, Turing-complete contracts, -user-level permissions, etc. - -Using a monolithic architecture is typically bad practice in computer -science. It makes it difficult to reuse components of the code, and -attempts to do so result in complex maintenance procedures for forks of -the codebase. This is especially true when the codebase is not modular -in design and suffers from "spaghetti code". - -Another problem with monolithic design is that it limits you to the -language of the blockchain stack (or vice versa). In the case of -Ethereum, which supports a Turing-complete bytecode virtual-machine, it -limits you to languages that compile down to that bytecode; today, those -are Serpent and Solidity. - -In contrast, our approach is to decouple the consensus engine and P2P -layers from the details of the application state of the particular -blockchain application. We do this by abstracting away the details of -the application to an interface, which is implemented as a socket -protocol. - -Thus we have an interface, the Application BlockChain Interface (ABCI), -and its primary implementation, the Tendermint Socket Protocol (TSP, or -Teaspoon). - -### Intro to ABCI - -[Tendermint Core](https://github.com/tendermint/tendermint) (the -"consensus engine") communicates with the application via a socket -protocol that satisfies the [ABCI](https://github.com/tendermint/abci). - -To draw an analogy, let's talk about a well-known cryptocurrency, -Bitcoin. Bitcoin is a cryptocurrency blockchain where each node -maintains a fully audited Unspent Transaction Output (UTXO) database. If -one wanted to create a Bitcoin-like system on top of ABCI, Tendermint -Core would be responsible for - -- Sharing blocks and transactions between nodes -- Establishing a canonical/immutable order of transactions - (the blockchain) - -The application would be responsible for - -- Maintaining the UTXO database -- Validating cryptographic signatures of transactions -- Preventing transactions from spending non-existent transactions -- Allowing clients to query the UTXO database. - -Tendermint is able to decompose the blockchain design by offering a very -simple API (ie. the ABCI) between the application process and consensus -process. - -The ABCI consists of 3 primary message types that get delivered from the -core to the application. The application replies with corresponding -response messages. - -The messages are specified here: [ABCI Message -Types](https://github.com/tendermint/abci#message-types). - -The **DeliverTx** message is the work horse of the application. Each -transaction in the blockchain is delivered with this message. The -application needs to validate each transaction received with the -**DeliverTx** message against the current state, application protocol, -and the cryptographic credentials of the transaction. A validated -transaction then needs to update the application state — by binding a -value into a key-value store, or by updating the UTXO database, for -instance. - -The **CheckTx** message is similar to **DeliverTx**, but it's only for -validating transactions. Tendermint Core's mempool first checks the -validity of a transaction with **CheckTx**, and only relays valid -transactions to its peers. For instance, an application may check an -incrementing sequence number in the transaction and return an error upon -**CheckTx** if the sequence number is old. Alternatively, it might use -a capabilities-based system that requires capabilities to be renewed -with every transaction.
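-As a rough illustration, here is a minimal, self-contained sketch of such a
-sequence-number check. The types are simplified stand-ins rather than the real
-ABCI ones, and persisting state inside the check is itself a simplification:
-
-```go
-package main
-
-import "fmt"
-
-// tx and app are illustrative stand-ins for the real ABCI types.
-type tx struct {
-    Sender   string
-    Sequence uint64
-}
-
-type app struct {
-    seen map[string]uint64 // last sequence number accepted per sender
-}
-
-// checkTx mirrors the CheckTx idea described above: reject any transaction
-// whose sequence number is not strictly greater than the last accepted one.
-func (a *app) checkTx(t tx) error {
-    if t.Sequence <= a.seen[t.Sender] {
-        return fmt.Errorf("stale sequence %d from %s", t.Sequence, t.Sender)
-    }
-    a.seen[t.Sender] = t.Sequence
-    return nil
-}
-
-func main() {
-    a := &app{seen: map[string]uint64{}}
-    fmt.Println(a.checkTx(tx{"alice", 1})) // <nil>
-    fmt.Println(a.checkTx(tx{"alice", 1})) // stale sequence 1 from alice
-}
-```
-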
The **Commit** message is used to compute a cryptographic commitment to -the current application state, to be placed into the next block header. -This has some handy properties. Inconsistencies in updating that state -will now appear as blockchain forks, which catches a whole class of -programming errors. This also simplifies the development of secure -lightweight clients, as Merkle-hash proofs can be verified by checking -against the block hash, and that the block hash is signed by a quorum. - -There can be multiple ABCI socket connections to an application. -Tendermint Core creates three ABCI connections to the application; one -for the validation of transactions when broadcasting in the mempool, one -for the consensus engine to run block proposals, and one more for -querying the application state. - -It's probably evident that application designers need to design their -message handlers very carefully to create a blockchain that does anything -useful, but this architecture provides a place to start. The diagram -below illustrates the flow of messages via ABCI. - -![](assets/abci.png) - -## A Note on Determinism - -The logic for blockchain transaction processing must be deterministic. -If the application logic weren't deterministic, consensus would not be -reached among the Tendermint Core replica nodes. - -Solidity on Ethereum is a great choice of language for blockchain -applications because, among other reasons, it is a completely -deterministic programming language. However, it's also possible to -create deterministic applications using existing popular languages like -Java, C++, Python, or Go. Game programmers and blockchain developers are -already familiar with creating deterministic programs by avoiding -sources of non-determinism such as: - -- random number generators (without deterministic seeding) -- race conditions on threads (or avoiding threads altogether) -- the system clock -- uninitialized memory (in unsafe programming languages like C - or C++) -- [floating point - arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) -- language features that are randomized (e.g. map iteration order in Go) - -While programmers can avoid non-determinism by being careful, it is also -possible to create a special linter or static analyzer for each language -to check for determinism. In the future we may work with partners to -create such tools. - -## Consensus Overview - -Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus -protocol. The protocol follows a simple state machine that looks like -this: - -![](assets/consensus_logic.png) - -Participants in the protocol are called **validators**; they take turns -proposing blocks of transactions and voting on them. Blocks are -committed in a chain, with one block at each **height**. A block may -fail to be committed, in which case the protocol moves to the next -**round**, and a new validator gets to propose a block for that height. -Two stages of voting are required to successfully commit a block; we -call them **pre-vote** and **pre-commit**. A block is committed when -more than 2/3 of validators pre-commit for the same block in the same -round.
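-Because "more than 2/3" comes up repeatedly below, note that the check is
-strict. A hedged one-liner in integer arithmetic (illustrative, not the actual
-implementation):
-
-```go
-package main
-
-import "fmt"
-
-// moreThanTwoThirds reports whether votedPower is strictly greater than 2/3
-// of totalPower, using integer math to avoid rounding.
-func moreThanTwoThirds(votedPower, totalPower int64) bool {
-    return votedPower*3 > totalPower*2
-}
-
-func main() {
-    fmt.Println(moreThanTwoThirds(67, 99)) // true
-    fmt.Println(moreThanTwoThirds(66, 99)) // false: exactly 2/3 is not enough
-}
-```
-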
- -There is a picture of a couple doing the polka because validators are -doing something like a polka dance. When more than two-thirds of the -validators pre-vote for the same block, we call that a **polka**. Every -pre-commit must be justified by a polka in the same round. - -Validators may fail to commit a block for a number of reasons; the -current proposer may be offline, or the network may be slow. Tendermint -allows them to establish that a validator should be skipped. Validators -wait a small amount of time to receive a complete proposal block from -the proposer before voting to move to the next round. This reliance on a -timeout is what makes Tendermint a weakly synchronous protocol, rather -than an asynchronous one. However, the rest of the protocol is -asynchronous, and validators only make progress after hearing from more -than two-thirds of the validator set. A simplifying element of -Tendermint is that it uses the same mechanism to commit a block as it -does to skip to the next round. - -Assuming less than one-third of the validators are Byzantine, Tendermint -guarantees that safety will never be violated - that is, validators will -never commit conflicting blocks at the same height. To do this, it -introduces a few **locking** rules which modulate which paths can be -followed in the flow diagram. Once a validator precommits a block, it is -locked on that block. Then, - -1) it must prevote for the block it is locked on -2) it can only unlock, and precommit for a new block, if there is a - polka for that block in a later round - -## Stake - -In many systems, not all validators will have the same "weight" in the -consensus protocol. Thus, we are not so much interested in one-third or -two-thirds of the validators, but in those proportions of the total -voting power, which may not be uniformly distributed across individual -validators. - -Since Tendermint can replicate arbitrary applications, it is possible to -define a currency, and denominate the voting power in that currency. -When voting power is denominated in a native currency, the system is -often referred to as Proof-of-Stake. Validators can be forced, by logic -in the application, to "bond" their currency holdings in a security -deposit that can be destroyed if they're found to misbehave in the -consensus protocol. This adds an economic element to the security of the -protocol, allowing one to quantify the cost of violating the assumption -that less than one-third of voting power is Byzantine. - -The [Cosmos Network](http://cosmos.network) is designed to use this -Proof-of-Stake mechanism across an array of cryptocurrencies implemented -as ABCI applications. - -The following diagram is Tendermint in a (technical) nutshell. [See here -for high resolution -version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). - -![](assets/tm-transaction-flow.png) diff --git a/docs/metrics.md b/docs/metrics.md deleted file mode 100644 index 1cd758b9..00000000 --- a/docs/metrics.md +++ /dev/null @@ -1,40 +0,0 @@ -# Metrics - -Tendermint can report and serve Prometheus metrics, which can in turn -be consumed by Prometheus collector(s). - -This functionality is disabled by default. - -To enable the Prometheus metrics, set `instrumentation.prometheus=true` in your -config file. Metrics will be served under `/metrics` on port 26660 by default. -The listen address can be changed in the config file (see -`prometheus_listen_addr`).
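-For example, the relevant config section might look like this (a sketch; the
-key names are the ones mentioned above, and the values shown are the defaults
-just described):
-
-    [instrumentation]
-    prometheus = true
-    prometheus_listen_addr = ":26660"
-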
- -## List of available metrics - -The following metrics are available: - -| Name | Type | Since | Description | -| --------------------------------------- | ------- | --------- | ----------------------------------------------------------------------------- | -| consensus_height | Gauge | 0.20.1 | Height of the chain | -| consensus_validators | Gauge | 0.20.1 | Number of validators | -| consensus_validators_power | Gauge | 0.20.1 | Total voting power of all validators | -| consensus_missing_validators | Gauge | 0.20.1 | Number of validators who did not sign | -| consensus_missing_validators_power | Gauge | 0.20.1 | Total voting power of the missing validators | -| consensus_byzantine_validators | Gauge | 0.20.1 | Number of validators who tried to double sign | -| consensus_byzantine_validators_power | Gauge | 0.20.1 | Total voting power of the byzantine validators | -| consensus_block_interval_seconds | Histogram | 0.20.1 | Time between this and last block (Block.Header.Time) in seconds | -| consensus_rounds | Gauge | 0.20.1 | Number of rounds | -| consensus_num_txs | Gauge | 0.20.1 | Number of transactions | -| mempool_size | Gauge | 0.20.1 | Number of uncommitted transactions | -| consensus_total_txs | Gauge | 0.20.1 | Total number of transactions committed | -| consensus_block_size_bytes | Gauge | 0.20.1 | Block size in bytes | -| p2p_peers | Gauge | 0.20.1 | Number of peers the node is connected to | - -## Useful queries - -Percentage of missing + byzantine validators: - -``` -((consensus_byzantine_validators_power + consensus_missing_validators_power) / consensus_validators_power) * 100 -``` diff --git a/docs/requirements.txt b/docs/requirements.txt deleted file mode 100644 index 85e42ba8..00000000 --- a/docs/requirements.txt +++ /dev/null @@ -1,4 +0,0 @@ -sphinx -sphinx-autobuild -recommonmark -sphinx_rtd_theme diff --git a/docs/running-in-production.md b/docs/running-in-production.md deleted file mode 100644 index 7e6068d4..00000000 --- a/docs/running-in-production.md +++ /dev/null @@ -1,206 +0,0 @@ -# Running in production - -## Logging - -The default logging level (`main:info,state:info,*:`) should suffice for -normal operation. Read [this -post](https://blog.cosmos.network/one-of-the-exciting-new-features-in-0-10-0-release-is-smart-log-level-flag-e2506b4ab756) -for details on how to configure the `log_level` config variable. The list of -modules can be found [here](./how-to-read-logs.md#list-of-modules). If -you're trying to debug Tendermint or are asked to provide logs at the debug -logging level, you can do so by running tendermint with -`--log_level="*:debug"`. - -## DOS Exposure and Mitigation - -Validators are supposed to set up a [Sentry Node -Architecture](https://blog.cosmos.network/tendermint-explained-bringing-bft-based-pos-to-the-public-blockchain-domain-f22e274a0fdb) -to prevent denial-of-service attacks. You can read more about it -[here](https://github.com/tendermint/aib-data/blob/develop/medium/TendermintBFT.md). - -### P2P - -The core of the Tendermint peer-to-peer system is `MConnection`. Each -connection has `MaxPacketMsgPayloadSize`, which is the maximum packet -size, and bounded send & receive queues. One can impose restrictions on -send & receive rate per connection (`SendRate`, `RecvRate`). - -### RPC - -Endpoints returning multiple entries are limited by default to return 30 -elements (100 max). - -Rate-limiting and authentication are other key aspects to help protect -against DOS attacks.
While in the future we may implement these -features, for now, validators are supposed to use external tools like -[NGINX](https://www.nginx.com/blog/rate-limiting-nginx/) or -[traefik](https://docs.traefik.io/configuration/commons/#rate-limiting) -to achieve the same things. - -## Debugging Tendermint - -If you ever have to debug Tendermint, the first thing you should -probably do is to check out the logs. See ["How to read -logs"](./how-to-read-logs.md), where we explain what certain log -statements mean. - -If, after skimming through the logs, things are still not clear, the -next step is to query the `/status` RPC endpoint. It provides the -necessary info: whether the node is syncing or not, what height it is -on, etc. - - $ curl http(s)://{ip}:{rpcPort}/status - -`dump_consensus_state` will give you a detailed overview of the -consensus state (proposer, latest validators, peer states). From it, -you should be able to figure out why, for example, the network had -halted. - - $ curl http(s)://{ip}:{rpcPort}/dump_consensus_state - -There is a reduced version of this endpoint - `consensus_state`, which -returns just the votes seen at the current height. - -If you still cannot figure out what is going on, ask a question on one of -the following: - -- [Github Issues](https://github.com/tendermint/tendermint/issues) -- [StackOverflow - questions](https://stackoverflow.com/questions/tagged/tendermint) - -## Monitoring Tendermint - -Each Tendermint instance has a standard `/health` RPC endpoint, which -responds with 200 (OK) if everything is fine and 500 (or no response) -if something is wrong. - -Other useful endpoints include the previously mentioned `/status`, as well as -`/net_info` and `/validators`. - -We have a small tool, called `tm-monitor`, which outputs information from -the endpoints above plus some statistics. The tool can be found -[here](https://github.com/tendermint/tools/tree/master/tm-monitor). - -## What happens when my app dies? - -You are supposed to run Tendermint under a [process -supervisor](https://en.wikipedia.org/wiki/Process_supervision) (like -systemd or runit). It will ensure Tendermint is always running (despite -possible errors). - -Getting back to the original question, if your application dies, -Tendermint will panic. After a process supervisor restarts your -application, Tendermint should be able to reconnect successfully. The -order in which the processes are restarted does not matter. - -## Signal handling - -We catch SIGINT and SIGTERM and try to clean up nicely. For other -signals we use the default behaviour in Go: [Default behavior of signals -in Go -programs](https://golang.org/pkg/os/signal/#hdr-Default_behavior_of_signals_in_Go_programs). - -## Hardware - -### Processor and Memory - -While actual specs vary depending on the load and the number of validators, -the minimal requirements are: - -- 1GB RAM -- 25GB of disk space -- 1.4 GHz CPU - -SSD disks are preferable for applications with high transaction -throughput. - -Recommended: - -- 2GB RAM -- 100GB SSD -- x64 2.0 GHz 2v CPU - -For now, Tendermint stores all the history, which may require -significant disk space over time; we are planning to implement state -syncing (see -[this issue](https://github.com/tendermint/tendermint/issues/828)), after -which storing all the past blocks will not be necessary. - -### Operating Systems - -Tendermint can be compiled for a wide range of operating systems thanks -to the Go language (the list of \$OS/\$ARCH pairs can be found -[here](https://golang.org/doc/install/source#environment)).
- -While we do not favor any operating system, more secure and stable Linux -server distributions (like CentOS) should be preferred over desktop -operating systems (like Mac OS). - -### Miscellaneous - -NOTE: if you are going to use Tendermint in a public domain, make sure -you read [hardware recommendations (see "4. -Hardware")](https://cosmos.network/validators) for a validator in the -Cosmos network. - -## Configuration parameters - -- `p2p.flush_throttle_timeout`, `p2p.max_packet_msg_payload_size`, - `p2p.send_rate`, `p2p.recv_rate` - -If you are going to use Tendermint in a private domain and you have a -private high-speed network among your peers, it makes sense to lower -the flush throttle timeout and increase the other params. - - [p2p] - - send_rate=20000000 # 20MB/s - recv_rate=20000000 # 20MB/s - flush_throttle_timeout=10 - max_packet_msg_payload_size=10240 # 10KB - -- `mempool.recheck` - -After every block, Tendermint rechecks every transaction left in the -mempool to see if transactions committed in that block affected the -application state, so some of the transactions left may become invalid. -If that does not apply to your application, you can disable it by -setting `mempool.recheck=false`. - -- `mempool.broadcast` - -Setting this to false will stop the mempool from relaying transactions -to other peers until they are included in a block. It means only the -peer you send the tx to will see it until it is included in a block. - -- `consensus.skip_timeout_commit` - -We want `skip_timeout_commit=false` when there is economics on the line -because proposers should wait to hear more votes. But if you don't -care about that and want the fastest consensus, you can skip it. It will -be kept false by default for public deployments (e.g. [Cosmos -Hub](https://cosmos.network/intro/hub)), while for enterprise -applications setting it to true is not a problem. - -- `consensus.peer_gossip_sleep_duration` - -You can try to reduce the time your node sleeps before checking if -there's something to send its peers. - -- `consensus.timeout_commit` - -You can also try lowering `timeout_commit` (time we sleep before -proposing the next block). - -- `consensus.max_block_size_txs` - -By default, the maximum number of transactions per block is 10_000. -Feel free to change it to suit your needs. - -- `p2p.addr_book_strict` - -By default, Tendermint checks whether a peer's address is routable before -saving it to the address book. The address is considered routable if the IP -is [valid and within allowed -ranges](https://github.com/tendermint/tendermint/blob/27bd1deabe4ba6a2d9b463b8f3e3f1e31b993e61/p2p/netaddress.go#L209). - -This may not be the case for private networks, where your IP range is usually -strictly limited and private. In that case, you need to set `addr_book_strict` -to `false` (i.e. turn it off). diff --git a/docs/spec/README.md b/docs/spec/README.md deleted file mode 100644 index ab689d9d..00000000 --- a/docs/spec/README.md +++ /dev/null @@ -1,80 +0,0 @@ -# Tendermint Specification - -This is a markdown specification of the Tendermint blockchain. -It defines the base data structures, how they are validated, -and how they are communicated over the network. - -If you find discrepancies between the spec and the code that -do not have an associated issue or pull request on GitHub, -please submit them to our [bug bounty](https://tendermint.com/security)!
- -## Contents - -- [Overview](#overview) - -### Data Structures - -- [Encoding and Digests](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/encoding.md) -- [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md) -- [State](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) - -### Consensus Protocol - -- [Consensus Algorithm](/docs/spec/consensus/consensus.md) -- [Time](/docs/spec/consensus/bft-time.md) -- [Light-Client](/docs/spec/consensus/light-client.md) - -### P2P and Network Protocols - -- [The Base P2P Layer](https://github.com/tendermint/tendermint/tree/master/docs/spec/p2p): multiplex the protocols ("reactors") on authenticated and encrypted TCP connections -- [Peer Exchange (PEX)](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/pex): gossip known peer addresses so peers can find each other -- [Block Sync](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/block_sync): gossip blocks so peers can catch up quickly -- [Consensus](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/consensus): gossip votes and block parts so new blocks can be committed -- [Mempool](https://github.com/tendermint/tendermint/tree/master/docs/spec/reactors/mempool): gossip transactions so they get included in blocks -- Evidence: TODO - -### Software - -- [ABCI](/docs/spec/software/abci.md): Details about interactions between the - application and consensus engine over ABCI -- [Write-Ahead Log](/docs/spec/software/wal.md): Details about how the consensus - engine preserves data and recovers from crash failures - -## Overview - -Tendermint provides Byzantine Fault Tolerant State Machine Replication using -hash-linked batches of transactions. Such transaction batches are called "blocks". -Hence, Tendermint defines a "blockchain". - -Each block in Tendermint has a unique index - its Height. -Heights in the blockchain are monotonic. -Each block is committed by a known set of weighted Validators. -Membership and weighting within this validator set may change over time. -Tendermint guarantees the safety and liveness of the blockchain -so long as less than 1/3 of the total weight of the Validator set -is malicious or faulty. - -A commit in Tendermint is a set of signed messages from more than 2/3 of -the total weight of the current Validator set. Validators take turns proposing -blocks and voting on them. Once enough votes are received, the block is considered -committed. These votes are included in the *next* block as proof that the previous block -was committed - they cannot be included in the current block, as that block has already been -created. - -Once a block is committed, it can be executed against an application. -The application returns results for each of the transactions in the block. -The application can also return changes to be made to the validator set, -as well as a cryptographic digest of its latest state. - -Tendermint is designed to enable efficient verification and authentication -of the latest state of the blockchain. To achieve this, it embeds -cryptographic commitments to certain information in the block "header". -This information includes the contents of the block (eg. the transactions), -the validator set committing the block, as well as the various results returned by the application. -Note, however, that block execution only occurs *after* a block is committed.
-Thus, application results can only be included in the *next* block. - -Also note that information like the transaction results and the validator set are never -directly included in the block - only their cryptographic digests (Merkle roots) are. -Hence, verification of a block requires a separate data structure to store this information. -We call this the `State`. Block verification also requires access to the previous block. diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md deleted file mode 100644 index eb34f4c8..00000000 --- a/docs/spec/blockchain/blockchain.md +++ /dev/null @@ -1,431 +0,0 @@ -# Tendermint Blockchain - -Here we describe the data structures in the Tendermint blockchain and the rules for validating them. - -## Data Structures - -The Tendermint blockchain consists of a short list of basic data types: - -- `Block` -- `Header` -- `Vote` -- `BlockID` -- `Signature` -- `Evidence` - -## Block - -A block consists of a header, a list of transactions, a list of votes (the commit), -and a list of evidence of malfeasance (ie. signing conflicting votes). - -```go -type Block struct { - Header Header - Txs [][]byte - LastCommit []Vote - Evidence []Evidence -} -``` - -## Header - -A block header contains metadata about the block and about the consensus, as well as commitments to -the data in the current block, the previous block, and the results returned by the application: - -```go -type Header struct { - // block metadata - Version string // Version string - ChainID string // ID of the chain - Height int64 // Current block height - Time int64 // UNIX time, in milliseconds - - // current block - NumTxs int64 // Number of txs in this block - TxHash []byte // SimpleMerkle of the block.Txs - LastCommitHash []byte // SimpleMerkle of the block.LastCommit - - // previous block - TotalTxs int64 // prevBlock.TotalTxs + block.NumTxs - LastBlockID BlockID // BlockID of prevBlock - - // application - ResultsHash []byte // SimpleMerkle of []abci.Result from prevBlock - AppHash []byte // Arbitrary state digest - ValidatorsHash []byte // SimpleMerkle of the ValidatorSet - ConsensusParamsHash []byte // SimpleMerkle of the ConsensusParams - - // consensus - Proposer []byte // Address of the block proposer - EvidenceHash []byte // SimpleMerkle of []Evidence -} -``` - -Further details on each of these fields are given below. - -## BlockID - -The `BlockID` contains two distinct Merkle roots of the block. -The first, used as the block's main hash, is the Merkle root -of all the fields in the header. The second, used for secure gossiping of -the block during consensus, is the Merkle root of the complete serialized block -cut into parts. The `BlockID` includes these two hashes, as well as the number of -parts. - -```go -type BlockID struct { - Hash []byte - Parts PartsHeader -} - -type PartsHeader struct { - Hash []byte - Total int32 -} -``` - -## Vote - -A vote is a signed message from a validator for a particular block. -The vote includes information about the validator signing it. - -```go -type Vote struct { - Timestamp int64 - Address []byte - Index int - Height int64 - Round int - Type int8 - BlockID BlockID - Signature Signature -} -``` - -There are two types of votes: -a *prevote* has `vote.Type == 1` and -a *precommit* has `vote.Type == 2`. - -## Signature - -Tendermint allows for multiple signature schemes to be used by prepending a single type-byte -to the signature bytes. Different signatures may also come with fixed or variable lengths.
-Currently, Tendermint supports Ed25519 and Secp256k1. - -### ED25519 - -An ED25519 signature has `Type == 0x1`. It looks like: - -```go -// Implements Signature -type Ed25519Signature struct { - Type int8 = 0x1 - Signature [64]byte -} -``` - -where `Signature` is the 64-byte signature. - -### Secp256k1 - -A `Secp256k1` signature has `Type == 0x2`. It looks like: - -```go -// Implements Signature -type Secp256k1Signature struct { - Type int8 = 0x2 - Signature []byte -} -``` - -where `Signature` is the DER encoded signature, ie: - -```hex -0x30 <length of whole message> <0x02> <length of R> <R> 0x2 <length of S> <S> -``` - -## Evidence - -TODO - -## Validation - -Here we describe the validation rules for every element in a block. -Blocks which do not satisfy these rules are considered invalid. - -We abuse notation by using something that looks like Go, supplemented with English. -A statement such as `x == y` is an assertion - if it fails, the item is invalid. - -We refer to certain globally available objects: -`block` is the block under consideration, -`prevBlock` is the `block` at the previous height, -and `state` keeps track of the validator set, the consensus parameters -and other results from the application. -Elements of an object are accessed as expected, -ie. `block.Header`. See [here](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/state.md) for the definition of `state`. - -### Header - -A Header is valid if its corresponding fields are valid. - -### Version - -Arbitrary string. - -### ChainID - -Arbitrary constant string. - -### Height - -```go -block.Header.Height > 0 -block.Header.Height == prevBlock.Header.Height + 1 -``` - -The height is an incrementing integer. The first block has `block.Header.Height == 1`. - -### Time - -The median of the timestamps of the valid votes in the block.LastCommit. -Corresponds to the number of milliseconds since January 1, 1970. - -Note: the timestamp of a vote must be greater by at least one millisecond than that of the -block being voted on. - -### NumTxs - -```go -block.Header.NumTxs == len(block.Txs) -``` - -Number of transactions included in the block. - -### TxHash - -```go -block.Header.TxHash == SimpleMerkleRoot(block.Txs) -``` - -Simple Merkle root of the transactions in the block. - -### LastCommitHash - -```go -block.Header.LastCommitHash == SimpleMerkleRoot(block.LastCommit) -``` - -Simple Merkle root of the votes included in the block. -These are the votes that committed the previous block. - -The first block has `block.Header.LastCommitHash == []byte{}`. - -### TotalTxs - -```go -block.Header.TotalTxs == prevBlock.Header.TotalTxs + block.Header.NumTxs -``` - -The cumulative sum of all transactions included in this blockchain. - -The first block has `block.Header.TotalTxs = block.Header.NumTxs`. - -### LastBlockID - -LastBlockID is the previous block's BlockID: - -```go -prevBlockParts := MakeParts(prevBlock, state.LastConsensusParams.BlockGossip.BlockPartSize) -block.Header.LastBlockID == BlockID { - Hash: SimpleMerkleRoot(prevBlock.Header), - PartsHeader{ - Hash: SimpleMerkleRoot(prevBlockParts), - Total: len(prevBlockParts), - }, -} -``` - -Note: it depends on the ConsensusParams, -which are held in the `state` and may be updated by the application. - -The first block has `block.Header.LastBlockID == BlockID{}`. - -### ResultsHash - -```go -block.ResultsHash == SimpleMerkleRoot(state.LastResults) -``` - -Simple Merkle root of the results of the transactions in the previous block.
- -The first block has `block.Header.ResultsHash == []byte{}`. - -### AppHash - -```go -block.AppHash == state.AppHash -``` - -Arbitrary byte array returned by the application after executing and committing the previous block. - -The first block has `block.Header.AppHash == []byte{}`. - -### ValidatorsHash - -```go -block.ValidatorsHash == SimpleMerkleRoot(state.Validators) -``` - -Simple Merkle root of the current validator set that is committing the block. -This can be used to validate the `LastCommit` included in the next block. -May be updated by the application. - -### ConsensusParamsHash - -```go -block.ConsensusParamsHash == SimpleMerkleRoot(state.ConsensusParams) -``` - -Simple Merkle root of the consensus parameters. -May be updated by the application. - -### Proposer - -```go -block.Header.Proposer in state.Validators -``` - -Original proposer of the block. Must be a current validator. - -NOTE: we also need to track the round. - -## EvidenceHash - -```go -block.EvidenceHash == SimpleMerkleRoot(block.Evidence) -``` - -Simple Merkle root of the evidence of Byzantine behaviour included in this block. - -## Txs - -Arbitrary length array of arbitrary length byte-arrays. - -## LastCommit - -The first height is an exception - it requires the LastCommit to be empty: - -```go -if block.Header.Height == 1 { - len(block.LastCommit) == 0 -} -``` - -Otherwise, we require: - -```go -len(block.LastCommit) == len(state.LastValidators) -talliedVotingPower := 0 -for i, vote := range block.LastCommit{ - if vote == nil{ - continue - } - vote.Type == 2 - vote.Height == block.LastCommit.Height() - vote.Round == block.LastCommit.Round() - vote.BlockID == block.LastBlockID - - val := state.LastValidators[i] - vote.Verify(block.ChainID, val.PubKey) == true - - talliedVotingPower += val.VotingPower -} - -talliedVotingPower > (2/3) * TotalVotingPower(state.LastValidators) -``` - -Includes one (possibly nil) vote for every current validator. -Non-nil votes must be Precommits. -All votes must be for the same height and round. -All votes must be for the previous block. -All votes must have a valid signature from the corresponding validator. -The sum total of the voting power of the validators that voted -must be greater than 2/3 of the total voting power of the complete validator set. - -### Vote - -A vote is a signed message broadcast in the consensus for a particular block at a particular height and round. -When stored in the blockchain or propagated over the network, votes are encoded using Amino. -For signing, votes are encoded in JSON, and the ChainID is included, in the form of the `CanonicalSignBytes`. - -We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the CanonicalSignBytes -using the given ChainID: - -```go -func (v Vote) Verify(chainID string, pubKey PubKey) bool { - return pubKey.Verify(v.Signature, CanonicalSignBytes(chainID, v)) -} -``` - -where `pubKey.Verify` performs the appropriate digital signature verification of the `pubKey` -against the given signature and message bytes.
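-To make the mechanics concrete, here is a self-contained sketch of what
-`Verify` amounts to for Ed25519 keys, using Go's standard library. The
-`canonicalSignBytes` helper is a hypothetical stand-in for the real
-CanonicalSignBytes encoding:
-
-```go
-package main
-
-import (
-    "crypto/ed25519"
-    "fmt"
-)
-
-// canonicalSignBytes is a hypothetical stand-in for CanonicalSignBytes: the
-// real function produces the sorted Amino-JSON encoding of the vote, wrapped
-// in an outer object that includes the chain ID.
-func canonicalSignBytes(chainID, vote string) []byte {
-    return []byte(fmt.Sprintf(`{"chain_id":%q,"vote":%s}`, chainID, vote))
-}
-
-func main() {
-    pub, priv, _ := ed25519.GenerateKey(nil) // nil reader defaults to crypto/rand
-    msg := canonicalSignBytes("my-chain-id", `{"height":3,"round":2,"type":2}`)
-    sig := ed25519.Sign(priv, msg)
-    fmt.Println(ed25519.Verify(pub, msg, sig)) // true
-}
-```
-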
- -## Evidence - -There is currently only one kind of evidence: - -``` -// amino: "tendermint/DuplicateVoteEvidence" -type DuplicateVoteEvidence struct { - PubKey crypto.PubKey - VoteA *Vote - VoteB *Vote -} -``` - -DuplicateVoteEvidence `ev` is valid if - -- `ev.VoteA` and `ev.VoteB` can be verified with `ev.PubKey` -- `ev.VoteA` and `ev.VoteB` have the same `Height, Round, Address, Index, Type` -- `ev.VoteA.BlockID != ev.VoteB.BlockID` -- `(block.Height - ev.VoteA.Height) < MAX_EVIDENCE_AGE` - -# Execution - -Once a block is validated, it can be executed against the state. - -The state follows this recursive equation: - -```go -state(1) = InitialState -state(h+1) <- Execute(state(h), ABCIApp, block(h)) -``` - -where `InitialState` includes the initial consensus parameters and validator set, -and `ABCIApp` is an ABCI application that can return results and changes to the validator -set (TODO). Execute is defined as: - -```go -func Execute(s State, app ABCIApp, block Block) State { - // TODO: just spell out ApplyBlock here and remove ABCIResponses struct. - abciResponses := app.ApplyBlock(block) - - return State{ - LastResults: abciResponses.DeliverTxResults, - AppHash: abciResponses.AppHash, - Validators: UpdateValidators(s.Validators, abciResponses.ValidatorChanges), - LastValidators: s.Validators, - ConsensusParams: UpdateConsensusParams(s.ConsensusParams, abciResponses.ConsensusParamChanges), - } -} - -type ABCIResponses struct { - DeliverTxResults []Result - ValidatorChanges []Validator - ConsensusParamChanges ConsensusParams - AppHash []byte -} -``` - -
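-Pulling the evidence rules above together, here is a hedged Go sketch of the
-check. The types are simplified stand-ins (the real ones carry full votes and
-pubkeys), and `maxEvidenceAge` is illustrative rather than the real parameter:
-
-```go
-package main
-
-import (
-    "errors"
-    "fmt"
-)
-
-const maxEvidenceAge = 100000 // illustrative only
-
-// vote is a simplified stand-in; SigOK stands in for signature verification.
-type vote struct {
-    Height, Round int64
-    Type          int8
-    Address       string
-    Index         int
-    BlockID       string
-    SigOK         bool
-}
-
-type duplicateVoteEvidence struct{ VoteA, VoteB vote }
-
-// verify mirrors the four validity rules listed above.
-func (ev duplicateVoteEvidence) verify(blockHeight int64) error {
-    a, b := ev.VoteA, ev.VoteB
-    if !a.SigOK || !b.SigOK {
-        return errors.New("signature does not verify")
-    }
-    if a.Height != b.Height || a.Round != b.Round || a.Address != b.Address ||
-        a.Index != b.Index || a.Type != b.Type {
-        return errors.New("votes do not refer to the same height/round/validator")
-    }
-    if a.BlockID == b.BlockID {
-        return errors.New("votes are for the same block; no conflict")
-    }
-    if blockHeight-a.Height >= maxEvidenceAge {
-        return errors.New("evidence too old")
-    }
-    return nil
-}
-
-func main() {
-    ev := duplicateVoteEvidence{
-        VoteA: vote{Height: 10, Type: 2, Address: "val1", BlockID: "AAAA", SigOK: true},
-        VoteB: vote{Height: 10, Type: 2, Address: "val1", BlockID: "BBBB", SigOK: true},
-    }
-    fmt.Println(ev.verify(12)) // <nil>: valid evidence
-}
-```
-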
diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md deleted file mode 100644 index aa184210..00000000 --- a/docs/spec/blockchain/encoding.md +++ /dev/null @@ -1,274 +0,0 @@ -# Tendermint Encoding - -## Amino - -Tendermint uses the Protobuf3 derivative [Amino](https://github.com/tendermint/go-amino) for all data structures. -Think of Amino as an object-oriented Protobuf3 with native JSON support. -The goal of the Amino encoding protocol is to bring parity between application -logic objects and persistence objects. - -Please see the [Amino -specification](https://github.com/tendermint/go-amino#amino-encoding-for-go) for -more details. - -Notably, every object that satisfies an interface (eg. a particular kind of p2p message, -or a particular kind of pubkey) is registered with a global name, the hash of -which is included in the object's encoding as the so-called "prefix bytes". - -We define the `func AminoEncode(obj interface{}) []byte` function to take an -arbitrary object and return the Amino encoded bytes. - -## Byte Arrays - -The encoding of a byte array is simply the raw-bytes prefixed with the length of -the array as a `UVarint` (what Protobuf calls a `Varint`). - -For details on varints, see the [protobuf -spec](https://developers.google.com/protocol-buffers/docs/encoding#varints). - -For example, the byte-array `[0xA, 0xB]` would be encoded as `0x020A0B`, -while a byte-array containing 300 entries beginning with `[0xA, 0xB, ...]` would -be encoded as `0xAC020A0B...` where `0xAC02` is the UVarint encoding of 300. - -## Public Key Cryptography - -Tendermint uses Amino to distinguish between different types of private keys, -public keys, and signatures. Additionally, for each public key, Tendermint -defines an Address function that can be used as a more compact identifier in -place of the public key. Here we list the concrete types, their names, -and prefix bytes for public keys and signatures, as well as the address schemes -for each PubKey. Note for brevity we don't -include details of the private keys beyond their type and name, as they can be -derived the same way as the others using Amino. - -All registered objects are encoded by Amino using a 4-byte PrefixBytes that -uniquely identifies the object and includes information about its underlying -type. For details on how PrefixBytes are computed, see the [Amino -spec](https://github.com/tendermint/go-amino#computing-the-prefix-and-disambiguation-bytes). - -In what follows, we provide the type names and prefix bytes directly. -Notice that when encoding byte-arrays, the length of the byte-array is appended -to the PrefixBytes. Thus the encoding of a byte array becomes `<PrefixBytes> <Length> <ByteArray>`. In other words, to encode any type listed below you do not need to be -familiar with amino encoding. -You can simply use the table below and concatenate Prefix || Length (of raw bytes) || raw bytes -(where || stands for byte concatenation). - -| Type | Name | Prefix | Length | -| ---- | ---- | ------ | ----- | -| PubKeyEd25519 | tendermint/PubKeyEd25519 | 0x1624DE62 | 0x20 | -| PubKeyLedgerEd25519 | tendermint/PubKeyLedgerEd25519 | 0x5C3453B2 | 0x20 | -| PubKeySecp256k1 | tendermint/PubKeySecp256k1 | 0xEB5AE982 | 0x21 | -| PrivKeyEd25519 | tendermint/PrivKeyEd25519 | 0xA3288912 | 0x40 | -| PrivKeySecp256k1 | tendermint/PrivKeySecp256k1 | 0xE1B0F79A | 0x20 | -| PrivKeyLedgerSecp256k1 | tendermint/PrivKeyLedgerSecp256k1 | 0x10CAB393 | variable | -| PrivKeyLedgerEd25519 | tendermint/PrivKeyLedgerEd25519 | 0x0CFEEF9B | variable | -| SignatureEd25519 | tendermint/SignatureKeyEd25519 | 0x3DA1DB2A | 0x40 | -| SignatureSecp256k1 | tendermint/SignatureKeySecp256k1 | 0x16E1FEEA | variable | - -### Examples - -1. For example, the 33-byte (or 0x21-byte in hex) Secp256k1 pubkey -`020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` -would be encoded as -`EB5AE98221020BD40F225A57ED383B440CF073BC5539D0341F5767D2BF2D78406D00475A2EE9` - -2. For example, the variable-size Secp256k1 signature (in this particular example 70 or 0x46 bytes) -`304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` -would be encoded as -`16E1FEEA46304402201CD4B8C764D2FD8AF23ECFE6666CA8A53886D47754D951295D2D311E1FEA33BF02201E0F906BB1CF2C30EAACFFB032A7129358AFF96B9F79B06ACFFB18AC90C2ADD7` - -### Addresses - -Addresses for each public key type are computed as follows: - -#### Ed25519 - -RIPEMD160 hash of the Amino encoded public key: - -``` -address = RIPEMD160(AMINO(pubkey)) -``` - -NOTE: this will soon change to the truncated 20-bytes of the SHA256 of the raw -public key. - -#### Secp256k1 - -RIPEMD160 hash of the SHA256 hash of the OpenSSL compressed public key: - -``` -address = RIPEMD160(SHA256(pubkey)) -``` - -This is the same as Bitcoin. - -## Other Common Types - -### BitArray - -The BitArray is used in block headers and some consensus messages to signal -whether or not something was done by each validator. BitArray is represented -with a struct containing the number of bits (`Bits`) and the bit-array itself -encoded in base64 (`Elems`). - -```go -type BitArray struct { - Bits int - Elems []uint64 -} -``` - -This type is easily encoded directly by Amino. - -Note BitArray receives a special JSON encoding in the form of `x` and `_` -representing `1` and `0`. Ie. the BitArray `10110` would be JSON encoded as -`"x_xx_"`.
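-A toy sketch of that JSON form (illustrative only, not the actual
-implementation):
-
-```go
-package main
-
-import "fmt"
-
-// bitArrayJSON renders a bit array in the special JSON form described above:
-// `x` for 1-bits and `_` for 0-bits.
-func bitArrayJSON(bits int, elems []uint64) string {
-    out := make([]byte, bits)
-    for i := 0; i < bits; i++ {
-        if elems[i/64]&(1<<uint(i%64)) != 0 {
-            out[i] = 'x'
-        } else {
-            out[i] = '_'
-        }
-    }
-    return `"` + string(out) + `"`
-}
-
-func main() {
-    // Bits 0, 2 and 3 set, ie. the BitArray 10110 from the example above.
-    fmt.Println(bitArrayJSON(5, []uint64{0b01101})) // "x_xx_"
-}
-```
-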
- -### Part - -Part is used to break up blocks into pieces that can be gossiped in parallel -and securely verified using a Merkle tree of the parts. - -Part contains the index of the part in the larger set (`Index`), the actual -underlying data of the part (`Bytes`), and a simple Merkle proof that the part is contained in -the larger set (`Proof`). - -```go -type Part struct { - Index int - Bytes []byte - Proof []byte -} -``` - -### MakeParts - -Encode an object using Amino and slice it into parts. - -```go -func MakeParts(obj interface{}, partSize int) []Part -``` - -## Merkle Trees - -Simple Merkle trees are used in numerous places in Tendermint to compute a cryptographic digest of a data structure. - -RIPEMD160 is always used as the hashing function. - -### Simple Merkle Root - -The function `SimpleMerkleRoot` is a simple recursive function defined as follows: - -```go -func SimpleMerkleRoot(hashes [][]byte) []byte{ - switch len(hashes) { - case 0: - return nil - case 1: - return hashes[0] - default: - left := SimpleMerkleRoot(hashes[:(len(hashes)+1)/2]) - right := SimpleMerkleRoot(hashes[(len(hashes)+1)/2:]) - return SimpleConcatHash(left, right) - } -} - -func SimpleConcatHash(left, right []byte) []byte{ - left = encodeByteSlice(left) - right = encodeByteSlice(right) - return RIPEMD160(append(left, right)) -} -``` - -Note that the leaves are Amino encoded as byte-arrays (ie. simple Uvarint length -prefix) before being concatenated together and hashed. - -Note: we will abuse notation and invoke `SimpleMerkleRoot` with arguments of type `struct` or type `[]struct`. -For `struct` arguments, we compute a `[][]byte` by sorting elements of the `struct` according to -field name and then hashing them. -For `[]struct` arguments, we compute a `[][]byte` by hashing the individual `struct` elements. - -### Simple Merkle Proof - -Proof that a leaf is in a Merkle tree consists of a simple structure: - - -``` -type SimpleProof struct { - Aunts [][]byte -} -``` - -Which is verified using the following: - -``` -func (proof SimpleProof) Verify(index, total int, leafHash, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, proof.Aunts) - return computedHash == rootHash -} - -func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte{ - assert(index < total && index >= 0 && total > 0) - - if total == 1{ - assert(len(innerHashes) == 0) - return leafHash - } - - assert(len(innerHashes) > 0) - - numLeft := (total + 1) / 2 - if index < numLeft { - leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - assert(leftHash != nil) - return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1]) - } - rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1]) - assert(rightHash != nil) - return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash) -} -```
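-The Merkle functions above are easy to run as written. Here is a
-self-contained sketch, assuming the `golang.org/x/crypto/ripemd160` package
-for the hash and Amino's uvarint length prefix for the leaves:
-
-```go
-package main
-
-import (
-    "encoding/binary"
-    "fmt"
-
-    "golang.org/x/crypto/ripemd160"
-)
-
-// encodeByteSlice length-prefixes bz with a uvarint, as Amino does for
-// byte arrays.
-func encodeByteSlice(bz []byte) []byte {
-    buf := make([]byte, binary.MaxVarintLen64)
-    n := binary.PutUvarint(buf, uint64(len(bz)))
-    return append(buf[:n], bz...)
-}
-
-func simpleConcatHash(left, right []byte) []byte {
-    h := ripemd160.New()
-    h.Write(encodeByteSlice(left))
-    h.Write(encodeByteSlice(right))
-    return h.Sum(nil)
-}
-
-func simpleMerkleRoot(hashes [][]byte) []byte {
-    switch len(hashes) {
-    case 0:
-        return nil
-    case 1:
-        return hashes[0]
-    default:
-        mid := (len(hashes) + 1) / 2
-        return simpleConcatHash(simpleMerkleRoot(hashes[:mid]), simpleMerkleRoot(hashes[mid:]))
-    }
-}
-
-func main() {
-    fmt.Printf("%X\n", simpleMerkleRoot([][]byte{{0x01}, {0x02}, {0x03}}))
-}
-```
-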
-
-### Simple Merkle Proof
-
-Proof that a leaf is in a Merkle tree consists of a simple structure:
-
-```
-type SimpleProof struct {
-    Aunts [][]byte
-}
-```
-
-Which is verified using the following:
-
-```
-func (proof SimpleProof) Verify(index, total int, leafHash, rootHash []byte) bool {
-    computedHash := computeHashFromAunts(index, total, leafHash, proof.Aunts)
-    return computedHash == rootHash
-}
-
-func computeHashFromAunts(index, total int, leafHash []byte, innerHashes [][]byte) []byte {
-    assert(index < total && index >= 0 && total > 0)
-
-    if total == 1 {
-        assert(len(innerHashes) == 0)
-        return leafHash
-    }
-
-    assert(len(innerHashes) > 0)
-
-    numLeft := (total + 1) / 2
-    if index < numLeft {
-        leftHash := computeHashFromAunts(index, numLeft, leafHash, innerHashes[:len(innerHashes)-1])
-        assert(leftHash != nil)
-        return SimpleHashFromTwoHashes(leftHash, innerHashes[len(innerHashes)-1])
-    }
-    rightHash := computeHashFromAunts(index-numLeft, total-numLeft, leafHash, innerHashes[:len(innerHashes)-1])
-    assert(rightHash != nil)
-    return SimpleHashFromTwoHashes(innerHashes[len(innerHashes)-1], rightHash)
-}
-```
-
-## JSON
-
-### Amino
-
-TODO: improve this
-
-Amino also supports JSON encoding - registered types are simply encoded as:
-
-```
-{
-  "type": "<DisfixBytes>",
-  "value": <JSON>
-}
-```
-
-For instance, an ED25519 PubKey would look like:
-
-```
-{
-  "type": "AC26791624DE60",
-  "value": "uZ4h63OFWuQ36ZZ4Bd6NF+/w9fWUwrOncrQsackrsTk="
-}
-```
-
-Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the
-`"type"` is the full disfix bytes for Ed25519 pubkeys.
-
-### Signed Messages
-
-Signed messages (e.g. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format.
-
-When signing, the elements of a message are sorted by key and the sorted message is embedded in an
-outer JSON that includes a `chain_id` field.
-We call this encoding the CanonicalSignBytes. For instance, CanonicalSignBytes for a vote would look
-like:
-
-```json
-{"chain_id":"my-chain-id","vote":{"block_id":{"hash":"DEADBEEF","parts":{"hash":"BEEFDEAD","total":3}},"height":3,"round":2,"timestamp":1234567890,"type":2}}
-```
-
-Note how the fields within each level are sorted.
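-
-A minimal Go sketch of producing such canonical sign bytes (our own helper, not
-the Tendermint API), relying on the fact that `encoding/json` emits map keys in
-sorted order:
-
-```go
-package main
-
-import (
-    "encoding/json"
-    "fmt"
-)
-
-// canonicalSignBytes embeds a key-sorted message in an outer JSON object with
-// a chain_id field, as described above. encoding/json sorts map keys, which
-// yields the canonical ordering. Illustrative sketch only.
-func canonicalSignBytes(chainID, msgName string, msg map[string]interface{}) ([]byte, error) {
-    return json.Marshal(map[string]interface{}{
-        "chain_id": chainID,
-        msgName:    msg,
-    })
-}
-
-func main() {
-    vote := map[string]interface{}{
-        "height": 3, "round": 2, "timestamp": 1234567890, "type": 2,
-    }
-    bz, _ := canonicalSignBytes("my-chain-id", "vote", vote)
-    fmt.Println(string(bz)) // {"chain_id":"my-chain-id","vote":{"height":3,...}}
-}
-```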
diff --git a/docs/spec/blockchain/state.md b/docs/spec/blockchain/state.md
deleted file mode 100644
index 3b374f70..00000000
--- a/docs/spec/blockchain/state.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Tendermint State
-
-## State
-
-The state contains information whose cryptographic digest is included in block headers, and thus is
-necessary for validating new blocks. For instance, the set of validators and the results of
-transactions are never included in blocks, but their Merkle roots are - the state keeps track of them.
-
-Note that the `State` object itself is an implementation detail, since it is never
-included in a block or gossiped over the network, and we never compute
-its hash. However, the types it contains are part of the specification, since
-their Merkle roots are included in blocks.
-
-For details on an implementation of `State` with persistence, see TODO
-
-```go
-type State struct {
-    LastResults []Result
-    AppHash     []byte
-
-    Validators     []Validator
-    LastValidators []Validator
-
-    ConsensusParams ConsensusParams
-}
-```
-
-### Result
-
-```go
-type Result struct {
-    Code uint32
-    Data []byte
-    Tags []KVPair
-}
-
-type KVPair struct {
-    Key   []byte
-    Value []byte
-}
-```
-
-`Result` is the result of executing a transaction against the application.
-It returns a result code, an arbitrary byte array (i.e. a return value),
-and a list of key-value pairs ordered by key. The key-value pairs, or tags,
-can be used to index transactions according to their "effects".
-
-### Validator
-
-A validator is an active participant in the consensus with a public key and a voting power.
-Validators also contain an address which is derived from the PubKey:
-
-```go
-type Validator struct {
-    Address     []byte
-    PubKey      PubKey
-    VotingPower int64
-}
-```
-
-The `state.Validators` and `state.LastValidators` must always be sorted by validator address,
-so that there is a canonical order for computing the SimpleMerkleRoot.
-
-We also define a `TotalVotingPower` function, to return the total voting power:
-
-```go
-func TotalVotingPower(vals []Validator) int64 {
-    var sum int64
-    for _, v := range vals {
-        sum += v.VotingPower
-    }
-    return sum
-}
-```
-
-### ConsensusParams
-
-TODO
diff --git a/docs/spec/consensus/abci.md b/docs/spec/consensus/abci.md
deleted file mode 100644
index 82b88161..00000000
--- a/docs/spec/consensus/abci.md
+++ /dev/null
@@ -1 +0,0 @@
-[Moved](/docs/spec/software/abci.md)
diff --git a/docs/spec/consensus/bft-time.md b/docs/spec/consensus/bft-time.md
deleted file mode 100644
index a005e904..00000000
--- a/docs/spec/consensus/bft-time.md
+++ /dev/null
@@ -1,56 +0,0 @@
-# BFT time in Tendermint
-
-Tendermint provides a deterministic, Byzantine fault-tolerant source of time.
-Time in Tendermint is defined with the Time field of the block header.
-
-It satisfies the following properties:
-
-- Time Monotonicity: Time is monotonically increasing, i.e., given
-  a header H1 for height h1 and a header H2 for height `h2 = h1 + 1`, `H1.Time < H2.Time`.
-- Time Validity: Given a set of Commit votes that forms the `block.LastCommit` field, the range of
-  valid values for the Time field of the block header is defined only by
-  Precommit messages (from the LastCommit field) sent by correct processes, i.e.,
-  a faulty process cannot arbitrarily increase the Time value.
-
-In the context of Tendermint, time is of type int64 and denotes UNIX time in milliseconds, i.e.,
-it corresponds to the number of milliseconds since January 1, 1970. Before defining the rules that need to be enforced by the
-Tendermint consensus protocol so that the properties above hold, we introduce the following definition:
-
-- the median of a set of `Vote` messages is equal to the median of the `Vote.Time` fields of the corresponding `Vote` messages,
-  where the value of `Vote.Time` is counted a number of times proportional to the voting power of the process. As voting power
-  in Tendermint is not uniform (one process one vote), a vote message is effectively an aggregate of as many identical votes
-  as the voting power of the process that cast the corresponding vote message.
-
-Let's consider the following example:
-
-- we have four processes p1, p2, p3 and p4, with the following voting power distribution (p1, 23), (p2, 27), (p3, 10)
-  and (p4, 10). The total voting power is 70 (`N = 3f+1`, where `N` is the total voting power, and `f` is the maximum voting
-  power of the faulty processes), so we assume that the faulty processes have at most 23 of the voting power.
-  Furthermore, we have the following vote messages in some LastCommit field (we ignore all fields except the Time field):
-  (p1, 100), (p2, 98), (p3, 1000), (p4, 500). We assume that p3 and p4 are faulty processes. Let's assume that the
-  `block.LastCommit` message contains the votes of processes p2, p3 and p4. The median is then chosen the following way:
-  the value 98 is counted 27 times, the value 1000 is counted 10 times and the value 500 is also counted 10 times.
-  So the median value will be the value 98. No matter what set of messages with at least `2f+1` voting power we
-  choose, the median value will always be between the values sent by correct processes.
-
-We ensure the Time Monotonicity and Time Validity properties by the following rules:
-
-- let rs denote the `RoundState` (consensus internal state) of some process. Then
-  `rs.ProposalBlock.Header.Time == median(rs.LastCommit) &&
-  rs.Proposal.Timestamp == rs.ProposalBlock.Header.Time`.
-
-- Furthermore, when creating the `vote` message, the following rules for determining the `vote.Time` field should hold:
-
-    - if `rs.Proposal` is defined then
-      `vote.Time = max(rs.Proposal.Timestamp + 1, time.Now())`, where `time.Now()`
-      denotes local Unix time in milliseconds.
-
-    - if `rs.Proposal` is not defined and `rs.Votes` contains +2/3 of the corresponding vote messages (votes for the
-      current height and round, and with the corresponding type (`Prevote` or `Precommit`)), then
-
-      `vote.Time = max(median(getVotes(rs.Votes, vote.Height, vote.Round, vote.Type)), time.Now())`,
-
-      where the `getVotes` function returns the votes for a particular `Height`, `Round` and `Type`.
-      The second rule is relevant for the case when a process jumps to a higher round upon receiving +2/3 votes for a higher
-      round, but the corresponding `Proposal` message for the higher round hasn't been received yet.
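-
-A hedged Go sketch of the weighted median described above (our own helper, not
-Tendermint code). It reproduces the example: with votes (power 27, time 98),
-(10, 1000) and (10, 500), it returns 98.
-
-```go
-package main
-
-import (
-    "fmt"
-    "sort"
-)
-
-type weightedTime struct {
-    Time  int64 // Vote.Time, Unix milliseconds
-    Power int64 // voting power of the voter
-}
-
-// weightedMedian counts each Time a number of times proportional to the
-// voter's power and returns the median. Illustrative sketch only.
-func weightedMedian(votes []weightedTime) int64 {
-    sort.Slice(votes, func(i, j int) bool { return votes[i].Time < votes[j].Time })
-    var total int64
-    for _, v := range votes {
-        total += v.Power
-    }
-    half := (total + 1) / 2
-    var seen int64
-    for _, v := range votes {
-        seen += v.Power
-        if seen >= half {
-            return v.Time
-        }
-    }
-    return 0 // unreachable for non-empty input
-}
-
-func main() {
-    votes := []weightedTime{{98, 27}, {1000, 10}, {500, 10}}
-    fmt.Println(weightedMedian(votes)) // 98
-}
-```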
diff --git a/docs/spec/consensus/consensus.md b/docs/spec/consensus/consensus.md
deleted file mode 100644
index 1bf07577..00000000
--- a/docs/spec/consensus/consensus.md
+++ /dev/null
@@ -1,9 +0,0 @@
-We are working to finalize an updated Tendermint specification with formal
-proofs of safety and liveness.
-
-In the meantime, see the [description in the
-docs](http://tendermint.readthedocs.io/en/master/specification/byzantine-consensus-algorithm.html).
-
-There are also relevant but somewhat outdated descriptions in Jae Kwon's [original
-whitepaper](https://tendermint.com/static/docs/tendermint.pdf) and Ethan Buchman's [master's
-thesis](https://atrium.lib.uoguelph.ca/xmlui/handle/10214/9769).
diff --git a/docs/spec/consensus/light-client.md b/docs/spec/consensus/light-client.md
deleted file mode 100644
index 0ed9d36d..00000000
--- a/docs/spec/consensus/light-client.md
+++ /dev/null
@@ -1,114 +0,0 @@
-# Light client
-
-A light client is a process that connects to Tendermint Full Node(s) and then tries to verify Merkle proofs
-about the blockchain application. In this document we describe the mechanisms that ensure that the Tendermint light client
-has the same level of security as Full Node processes (without itself being a Full Node).
-
-To be able to validate a Merkle proof, a light client needs to validate the blockchain header that contains the root app hash.
-Validating a blockchain header in Tendermint consists of verifying that the header is committed (signed) by >2/3 of the
-voting power of the corresponding validator set. As the validator set is dynamic (it changes over time), one of the
-core functions of the light client is updating the current validator set, which is then used to verify the
-blockchain header, and further the corresponding Merkle proofs.
-
-For the purpose of this light client specification, we assume that the Tendermint Full Node exposes the following functions over
-Tendermint RPC:
-
-```golang
-Header(height int64) (SignedHeader, error) // returns signed header for the given height
-Validators(height int64) (ResultValidators, error) // returns validator set for the given height
-LastHeader(valSetNumber int64) (SignedHeader, error) // returns last header signed by the validator set with the given validator set number
-
-type SignedHeader struct {
-    Header Header
-    Commit Commit
-    ValSetNumber int64
-}
-
-type ResultValidators struct {
-    BlockHeight int64
-    Validators []Validator
-    // time the current validator set is initialised, i.e., time of the last validator change before header BlockHeight
-    ValSetTime int64
-}
-```
-
-We assume that Tendermint keeps track of validator set changes and that each time the validator set is changed it is
-assigned the next sequence number. We call this number the validator set sequence number. Tendermint also remembers
-the Time from the header when the next validator set is initialised (starts to be in power), and we refer to this time
-as the validator set init time.
-Furthermore, we assume that each validator set change is signed (committed) by the current validator set. More precisely,
-given a block `H` that contains transactions that modify the current validator set, the Merkle root hash of the next
-validator set (modified based on transactions from block H) will be in block `H+1` (and signed by the current validator
-set), and then starting from block `H+2`, it will be signed by the next validator set.
-
-Note that the real Tendermint RPC API is slightly different (for example, response messages contain more data and function
-names are slightly different); we shortened (and modified) it for the purpose of this document to make the spec
-clearer and simpler. Furthermore, note that in the case of the third function, the returned header has `ValSetNumber` equal to
-`valSetNumber+1`.
-
-Locally, the light client manages the following state:
-
-```golang
-valSet []Validator // current validator set (last known and verified validator set)
-valSetNumber int64 // sequence number of the current validator set
-valSetHash []byte // hash of the current validator set
-valSetTime int64 // time when the current validator set is initialised
-```
-
-The light client is initialised with a trusted validator set, for example based on the known validator set hash,
-validator set sequence number and validator set init time.
-The core of the light client logic is captured by the VerifyAndUpdate function, which is used to 1) verify whether the given header is valid,
-and 2) update the validator set (when the given header is valid and more recent than the headers seen so far).
-
-```golang
-VerifyAndUpdate(signedHeader SignedHeader):
-    assertThat signedHeader.valSetNumber >= valSetNumber
-    if isValid(signedHeader) and signedHeader.Header.Time <= valSetTime + UNBONDING_PERIOD then
-        setValidatorSet(signedHeader)
-        return true
-    else
-        updateValidatorSet(signedHeader.ValSetNumber)
-        return VerifyAndUpdate(signedHeader)
-
-isValid(signedHeader SignedHeader):
-    valSetOfTheHeader = Validators(signedHeader.Header.Height)
-    assertThat Hash(valSetOfTheHeader) == signedHeader.Header.ValidatorsHash
-    assertThat signedHeader is passing basic validation
-    if votingPower(signedHeader.Commit) > 2/3 * votingPower(valSetOfTheHeader) then return true
-    else
-        return false
-
-setValidatorSet(signedHeader SignedHeader):
-    nextValSet = Validators(signedHeader.Header.Height)
-    assertThat Hash(nextValSet) == signedHeader.Header.ValidatorsHash
-    valSet = nextValSet.Validators
-    valSetHash = signedHeader.Header.ValidatorsHash
-    valSetNumber = signedHeader.ValSetNumber
-    valSetTime = nextValSet.ValSetTime
-
-votingPower(commit Commit):
-    votingPower = 0
-    for each precommit in commit.Precommits do:
-        if precommit.ValidatorAddress is in valSet and the signature of the precommit verifies then
-            votingPower += valSet[precommit.ValidatorAddress].VotingPower
-    return votingPower
-
-votingPower(validatorSet []Validator):
-    votingPower = 0
-    for each validator in validatorSet do:
-        votingPower += validator.VotingPower
-    return votingPower
-
-updateValidatorSet(valSetNumberOfTheHeader):
-    while valSetNumber != valSetNumberOfTheHeader do
-        signedHeader = LastHeader(valSetNumber)
-        if isValid(signedHeader) then
-            setValidatorSet(signedHeader)
-        else return error
-    return
-```
-
-Note that in the logic above we assume that the light client will always go upward with respect to header verifications,
-i.e., that it will always be used to verify more recent headers. In case a light client needs to be used to verify older
-headers (to go backward), the same mechanisms and similar logic can be used. In case a call to the Full Node or a subsequent
-check fails, the light client needs to implement some recovery strategy, for example connecting to another Full Node.
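-
-A hedged Go sketch of the `votingPower(commit)` check above, with minimal
-illustrative types of our own (the real Tendermint types are richer, and real
-signature verification replaces the boolean placeholder):
-
-```go
-package main
-
-import "fmt"
-
-type Precommit struct {
-    ValidatorAddress string
-    SignatureOK      bool // stands in for actual signature verification
-}
-
-type Commit struct{ Precommits []Precommit }
-
-// commitVotingPower sums the power of validators in our trusted set whose
-// precommit signatures verify, as in the pseudocode above.
-func commitVotingPower(c Commit, valSet map[string]int64) int64 {
-    var power int64
-    for _, pc := range c.Precommits {
-        if vp, ok := valSet[pc.ValidatorAddress]; ok && pc.SignatureOK {
-            power += vp
-        }
-    }
-    return power
-}
-
-func main() {
-    valSet := map[string]int64{"A": 10, "B": 5, "C": 1}
-    c := Commit{Precommits: []Precommit{{"A", true}, {"B", true}, {"X", true}}}
-    fmt.Println(commitVotingPower(c, valSet)) // 15; "X" is not in the set
-}
-```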
diff --git a/docs/spec/consensus/wal.md b/docs/spec/consensus/wal.md
deleted file mode 100644
index 589680f9..00000000
--- a/docs/spec/consensus/wal.md
+++ /dev/null
@@ -1 +0,0 @@
-[Moved](/docs/spec/software/wal.md)
diff --git a/docs/spec/p2p/config.md b/docs/spec/p2p/config.md
deleted file mode 100644
index b31a3673..00000000
--- a/docs/spec/p2p/config.md
+++ /dev/null
@@ -1,38 +0,0 @@
-# P2P Config
-
-Here we describe configuration options around the Peer Exchange.
-These can be set using flags or via the `$TMHOME/config/config.toml` file.
-
-## Seed Mode
-
-`--p2p.seed_mode`
-
-The node operates in seed mode. In seed mode, a node continuously crawls the network for peers,
-and upon incoming connection shares some peers and disconnects.
-
-## Seeds
-
-`--p2p.seeds "1.2.3.4:26656,2.3.4.5:4444"`
-
-Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
-If we already have enough peers in the address book, we may never need to dial them.
-
-## Persistent Peers
-
-`--p2p.persistent_peers "1.2.3.4:26656,2.3.4.5:26656"`
-
-Dial these peers and auto-redial them if the connection fails.
-These are intended to be trusted persistent peers that can help
-anchor us in the p2p network. The auto-redial uses exponential
-backoff and will give up after a day of trying to connect.
-
-**Note:** If `seeds` and `persistent_peers` intersect,
-the user will be warned that seeds may auto-close connections
-and that the node may not be able to keep the connection persistent.
-
-## Private Persistent Peers
-
-`--p2p.private_persistent_peers "1.2.3.4:26656,2.3.4.5:26656"`
-
-These are persistent peers that we do not add to the address book or
-gossip to other peers. They stay private to us.
diff --git a/docs/spec/p2p/connection.md b/docs/spec/p2p/connection.md
deleted file mode 100644
index 9b5e4967..00000000
--- a/docs/spec/p2p/connection.md
+++ /dev/null
@@ -1,110 +0,0 @@
-# P2P Multiplex Connection
-
-## MConnection
-
-`MConnection` is a multiplex connection that supports multiple independent streams
-with distinct quality of service guarantees atop a single TCP connection.
-Each stream is known as a `Channel` and each `Channel` has a globally unique *byte id*.
-Each `Channel` also has a relative priority that determines the quality of service
-of the `Channel` compared to other `Channel`s.
-The *byte id* and the relative priorities of each `Channel` are configured upon
-initialization of the connection.
-
-The `MConnection` supports three packet types:
-
-- Ping
-- Pong
-- Msg
-
-### Ping and Pong
-
-The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively.
-
-When we haven't received any messages on an `MConnection` in time `pingTimeout`, we send a ping message.
-When a ping is received on the `MConnection`, a pong is sent in response only if there are no other messages
-to send and the peer has not sent us too many pings (TODO).
-
-If a pong or message is not received in sufficient time after a ping, the peer is disconnected.
-
-### Msg
-
-Messages in channels are chopped into smaller `msgPacket`s for multiplexing.
-
-```
-type msgPacket struct {
-    ChannelID byte
-    EOF       byte // 1 means message ends here.
-    Bytes     []byte
-}
-```
-
-The `msgPacket` is serialized using [go-wire](https://github.com/tendermint/go-wire) and prefixed with 0x3.
-The received `Bytes` of a sequential set of packets are appended together
-until a packet with `EOF=1` is received, at which point the complete serialized message
-is returned for processing by the `onReceive` function of the corresponding channel.
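-
-A hedged Go sketch of the reassembly logic just described (illustrative types
-and names of our own, not the go-wire implementation):
-
-```go
-package main
-
-import "fmt"
-
-type msgPacket struct {
-    ChannelID byte
-    EOF       byte // 1 means message ends here.
-    Bytes     []byte
-}
-
-// recvBuffers accumulates packet payloads per channel until a packet with
-// EOF=1 arrives, then yields the complete serialized message.
-type recvBuffers map[byte][]byte
-
-func (r recvBuffers) receive(p msgPacket) ([]byte, bool) {
-    r[p.ChannelID] = append(r[p.ChannelID], p.Bytes...)
-    if p.EOF != 1 {
-        return nil, false // message not complete yet
-    }
-    msg := r[p.ChannelID]
-    r[p.ChannelID] = nil
-    return msg, true
-}
-
-func main() {
-    buf := recvBuffers{}
-    buf.receive(msgPacket{ChannelID: 0x20, EOF: 0, Bytes: []byte("hel")})
-    if msg, done := buf.receive(msgPacket{ChannelID: 0x20, EOF: 1, Bytes: []byte("lo")}); done {
-        fmt.Println(string(msg)) // "hello"
-    }
-}
-```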
-
-### Multiplexing
-
-Messages are sent from a single `sendRoutine`, which loops over a select statement and results in the sending
-of a ping, a pong, or a batch of data messages. The batch of data messages may include messages from multiple channels.
-Message bytes are queued for sending in their respective channel, with each channel holding one unsent message at a time.
-Messages are chosen for a batch one at a time from the channel with the lowest ratio of recently sent bytes to channel priority.
-
-## Sending Messages
-
-There are two methods for sending messages:
-
-```go
-func (m MConnection) Send(chID byte, msg interface{}) bool {}
-func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
-```
-
-`Send(chID, msg)` is a blocking call that waits until `msg` is successfully queued
-for the channel with the given id byte `chID`. The message `msg` is serialized
-using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
-
-`TrySend(chID, msg)` is a nonblocking call that queues the message `msg` in the channel
-with the given id byte `chID` if the queue is not full; otherwise it returns false immediately.
-
-`Send()` and `TrySend()` are also exposed for each `Peer`.
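-
-A short usage sketch contrasting the two calls (`Peer` here is a minimal
-interface of our own, standing in for the real peer type):
-
-```go
-// Peer is the minimal surface we rely on in this sketch; the real Peer
-// exposes more methods.
-type Peer interface {
-    Send(chID byte, msg interface{}) bool
-    TrySend(chID byte, msg interface{}) bool
-}
-
-// sendBestEffort prefers TrySend for low-priority gossip: when the peer's
-// queue is full it drops the message instead of blocking the caller.
-func sendBestEffort(p Peer, chID byte, msg interface{}) bool {
-    return p.TrySend(chID, msg)
-}
-
-// sendOrWait uses the blocking Send for messages that must be queued;
-// it only returns false if the connection is gone.
-func sendOrWait(p Peer, chID byte, msg interface{}) bool {
-    return p.Send(chID, msg)
-}
-```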
-
-## Peer
-
-Each peer has one `MConnection` instance, and includes other information such as whether the connection
-was outbound, whether the connection should be recreated if it closes, various identity information about the node,
-and other higher level thread-safe data used by the reactors.
-
-## Switch/Reactor
-
-The `Switch` handles peer connections and exposes an API to receive incoming messages
-on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one
-or more `Channels`. So while sending outgoing messages is typically performed on the peer,
-incoming messages are received on the reactor.
-
-```go
-// Declare a MyReactor reactor that handles messages on MyChannelID.
-type MyReactor struct{}
-
-func (reactor MyReactor) GetChannels() []*ChannelDescriptor {
-    return []*ChannelDescriptor{&ChannelDescriptor{ID: MyChannelID, Priority: 1}}
-}
-
-func (reactor MyReactor) Receive(chID byte, peer *Peer, msgBytes []byte) {
-    r, n, err := bytes.NewBuffer(msgBytes), new(int64), new(error)
-    msgString := ReadString(r, n, err)
-    fmt.Println(msgString)
-}
-
-// Other Reactor methods omitted for brevity
-...
-
-// Note: `switch` is a Go keyword, so the variable is named `sw`.
-sw := NewSwitch([]Reactor{MyReactor{}})
-
-...
-
-// Send a random message to all outbound connections
-for _, peer := range sw.Peers().List() {
-    if peer.IsOutbound() {
-        peer.Send(MyChannelID, "Here's a random message")
-    }
-}
-```
diff --git a/docs/spec/p2p/node.md b/docs/spec/p2p/node.md
deleted file mode 100644
index 366b27dd..00000000
--- a/docs/spec/p2p/node.md
+++ /dev/null
@@ -1,65 +0,0 @@
-# Tendermint Peer Discovery
-
-A Tendermint P2P network has different kinds of nodes with different requirements for connectivity to one another.
-This document describes what kinds of nodes Tendermint should enable and how they should work.
-
-## Seeds
-
-Seeds are the first point of contact for a new node.
-They return a list of known active peers and then disconnect.
-
-Seeds should operate full nodes with the PEX reactor in a "crawler" mode
-that continuously explores to validate the availability of peers.
-
-Seeds should only respond with some top percentile of the best peers they know about.
-See [the peer-exchange docs](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md) for details on peer quality.
-
-## New Full Node
-
-A new node needs a few things to connect to the network:
-
-- a list of seeds, which can be provided to Tendermint via config file or flags,
-  or hardcoded into the software by in-process apps
-- a `ChainID`, also called `Network` at the p2p layer
-- a recent block height `H` and hash `HASH` for the blockchain
-
-The values `H` and `HASH` must be received and corroborated by means external to Tendermint, and specific to the user - i.e. via the user's trusted social consensus.
-This requirement to validate `H` and `HASH` out-of-band and via social consensus
-is the essential difference in security models between Proof-of-Work and Proof-of-Stake blockchains.
-
-With the above, the node then queries some seeds for peers for its chain,
-dials those peers, and runs the Tendermint protocols with those it successfully connects to.
-
-When the peer catches up to height H, it ensures the block hash matches HASH.
-If not, Tendermint will exit, and the user must try again - either they are connected
-to bad peers or their social consensus is invalid.
-
-## Restarted Full Node
-
-A node checks its address book on startup and attempts to connect to peers from there.
-If it can't connect to any peers after some time, it falls back to the seeds to find more.
-
-Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up
-to the latest state of the blockchain from wherever they were last.
-In a Proof-of-Stake context, if they are sufficiently far behind (greater than the length
-of the unbonding period), they will need to validate a recent `H` and `HASH` out-of-band again
-so they know they have synced the correct chain.
-
-## Validator Node
-
-A validator node is a node that interfaces with a validator signing key.
-These nodes require the highest security, and should not accept incoming connections.
-They should maintain outgoing connections to a controlled set of "Sentry Nodes" that serve
-as their proxy shield to the rest of the network.
-
-Validators that know and trust each other can accept incoming connections from one another and maintain direct private connectivity via VPN.
-
-## Sentry Node
-
-Sentry nodes are guardians of a validator node and provide it access to the rest of the network.
-They should be well connected to other full nodes on the network.
-Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other.
-They should always expect to have direct incoming connections from the validator node and its backup(s).
-They do not report the validator node's address in the PEX and
-they may be more strict about the quality of peers they keep.
-
-Sentry nodes belonging to validators that trust each other may wish to maintain persistent connections via VPN with one another, but only report each other sparingly in the PEX.
diff --git a/docs/spec/p2p/peer.md b/docs/spec/p2p/peer.md
deleted file mode 100644
index 3cda0c8a..00000000
--- a/docs/spec/p2p/peer.md
+++ /dev/null
@@ -1,112 +0,0 @@
-# Tendermint Peers
-
-This document explains how Tendermint Peers are identified and how they connect to one another.
-
-For details on peer discovery, see the [peer exchange (PEX) reactor doc](https://github.com/tendermint/tendermint/blob/master/docs/spec/reactors/pex/pex.md).
-
-## Peer Identity
-
-Tendermint peers are expected to maintain long-term persistent identities in the form of a public key.
-Each peer has an ID defined as `peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme defined in go-crypto.
-
-A single peer ID can have multiple IP addresses associated with it, but a node
-will only ever connect to one at a time.
-
-When attempting to connect to a peer, we use the PeerURL: `<ID>@<IP>:<PORT>`.
-We will attempt to connect to the peer at IP:PORT, and verify,
-via authenticated encryption, that it is in possession of the private key
-corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer layer.
-
-## Connections
-
-All p2p connections use TCP.
-Upon establishing a successful TCP connection with a peer,
-two handshakes are performed: one for authenticated encryption, and one for Tendermint versioning.
-Both handshakes have configurable timeouts (they should complete quickly).
-
-### Authenticated Encryption Handshake
-
-Tendermint implements the Station-to-Station protocol
-using ED25519 keys for Diffie-Hellman key-exchange and NaCl SecretBox for encryption.
-It goes as follows:
-
-- generate an ephemeral ED25519 keypair
-- send the ephemeral public key to the peer
-- wait to receive the peer's ephemeral public key
-- compute the Diffie-Hellman shared secret using the peer's ephemeral public key and our ephemeral private key
-- generate two nonces to use for encryption (sending and receiving) as follows (sketched in code at the end of this section):
-    - sort the ephemeral public keys in ascending order and concatenate them
-    - RIPEMD160 the result
-    - append 4 empty bytes (extending the hash to 24-bytes)
-    - the result is nonce1
-    - flip the last bit of nonce1 to get nonce2
-    - if we had the smaller ephemeral pubkey, use nonce1 for receiving and nonce2 for sending;
-      else the opposite
-- all communications from now on are encrypted using the shared secret and the nonces, where each nonce
-  increments by 2 every time it is used
-- we now have an encrypted channel, but still need to authenticate
-- generate a common challenge to sign:
-    - SHA256 of the sorted (lowest first) and concatenated ephemeral pub keys
-- sign the common challenge with our persistent private key
-- send the go-wire encoded persistent pubkey and signature to the peer
-- wait to receive the persistent public key and signature from the peer
-- verify the signature on the challenge using the peer's persistent public key
-
-If this is an outgoing connection (we dialed the peer) and we used a peer ID,
-then finally verify that the peer's persistent public key corresponds to the peer ID we dialed,
-i.e. `peer.PubKey.Address() == <ID>`.
-
-The connection has now been authenticated. All traffic is encrypted.
-
-Note: only the dialer can authenticate the identity of the peer,
-but this is what we care about, since when we join the network we wish to
-ensure we have reached the intended peer (and are not being MITMd).
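-
-A hedged Go sketch of the nonce derivation steps above (our own helper; we read
-"flip the last bit" as toggling the lowest bit of the final byte, which is an
-assumption). It requires the `golang.org/x/crypto` module.
-
-```go
-package main
-
-import (
-    "bytes"
-    "fmt"
-
-    "golang.org/x/crypto/ripemd160"
-)
-
-// deriveNonces sketches the nonce derivation: RIPEMD160 over the sorted,
-// concatenated ephemeral pubkeys, padded with 4 zero bytes to 24 bytes;
-// nonce2 is nonce1 with its last bit flipped.
-func deriveNonces(ourEphPub, theirEphPub []byte) (recvNonce, sendNonce [24]byte) {
-    lo, hi := ourEphPub, theirEphPub
-    if bytes.Compare(lo, hi) > 0 {
-        lo, hi = hi, lo
-    }
-    h := ripemd160.New()
-    h.Write(lo)
-    h.Write(hi)
-    var nonce1 [24]byte
-    copy(nonce1[:], h.Sum(nil)) // 20 hash bytes + 4 zero bytes
-    nonce2 := nonce1
-    nonce2[23] ^= 0x01 // assumption: "last bit" = low bit of the final byte
-    if bytes.Equal(lo, ourEphPub) {
-        return nonce1, nonce2 // smaller pubkey: receive on nonce1, send on nonce2
-    }
-    return nonce2, nonce1
-}
-
-func main() {
-    r, s := deriveNonces([]byte{0x01}, []byte{0x02})
-    fmt.Printf("%x\n%x\n", r, s)
-}
-```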
-
-### Peer Filter
-
-Before continuing, we check if the new peer has the same ID as ourselves or
-an existing peer. If so, we disconnect.
-
-We also check the peer's address and public key against
-an optional whitelist which can be managed through the ABCI app -
-if the whitelist is enabled and the peer does not qualify, the connection is
-terminated.
-
-### Tendermint Version Handshake
-
-The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
-
-```golang
-type NodeInfo struct {
-    ID         p2p.ID
-    ListenAddr string
-
-    Network  string
-    Version  string
-    Channels []int8
-
-    Moniker string
-    Other   []string
-}
-```
-
-The connection is disconnected if:
-
-- `peer.NodeInfo.ID` is not equal to `peerConn.ID`
-- `peer.NodeInfo.Version` is not formatted as `X.X.X`, where the X are integers known as Major, Minor, and Revision
-- `peer.NodeInfo.Version` Major is not the same as ours
-- `peer.NodeInfo.Network` is not the same as ours
-- `peer.Channels` does not intersect with our known Channels
-- `peer.NodeInfo.ListenAddr` is malformed or is a DNS host that cannot be
-  resolved
-
-At this point, if we have not disconnected, the peer is valid.
-It is added to the switch and hence to all reactors via the `AddPeer` method.
-Note that each reactor may handle multiple channels.
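-
-A hedged Go sketch of the version check from the list above (the helper name
-and error handling are ours, not the Tendermint implementation):
-
-```go
-package main
-
-import (
-    "fmt"
-    "strconv"
-    "strings"
-)
-
-// compatibleVersions checks that both version strings are formatted as
-// Major.Minor.Revision with integer components, and that the peer's Major
-// version matches ours, per the disconnect rules above.
-func compatibleVersions(ours, theirs string) bool {
-    parse := func(v string) ([3]int, bool) {
-        var out [3]int
-        parts := strings.Split(v, ".")
-        if len(parts) != 3 {
-            return out, false
-        }
-        for i, p := range parts {
-            n, err := strconv.Atoi(p)
-            if err != nil {
-                return out, false
-            }
-            out[i] = n
-        }
-        return out, true
-    }
-    a, okA := parse(ours)
-    b, okB := parse(theirs)
-    return okA && okB && a[0] == b[0] // Major versions must match
-}
-
-func main() {
-    fmt.Println(compatibleVersions("0.19.7", "0.20.0")) // true (same Major)
-    fmt.Println(compatibleVersions("0.19.7", "1.0.0"))  // false
-}
-```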
-
-## Connection Activity
-
-Once a peer is added, incoming messages for a given reactor are handled through
-that reactor's `Receive` method, and output messages are sent directly by the Reactors
-on each peer. A typical reactor maintains per-peer go-routine(s) that handle this.
diff --git a/docs/spec/reactors/block_sync/img/bc-reactor.png b/docs/spec/reactors/block_sync/img/bc-reactor.png
deleted file mode 100644
index f7fe0f81..00000000
Binary files a/docs/spec/reactors/block_sync/img/bc-reactor.png and /dev/null differ
diff --git a/docs/spec/reactors/block_sync/impl.md b/docs/spec/reactors/block_sync/impl.md
deleted file mode 100644
index a96f83b3..00000000
--- a/docs/spec/reactors/block_sync/impl.md
+++ /dev/null
@@ -1,46 +0,0 @@
-## Blockchain Reactor
-
-* coordinates the pool for syncing
-* coordinates the store for persistence
-* coordinates the playing of blocks towards the app using a sm.BlockExecutor
-* handles switching between fastsync and consensus
-* it is a p2p.BaseReactor
-* starts the pool.Start() and its poolRoutine()
-* registers all the concrete types and interfaces for serialisation
-
-### poolRoutine
-
-* listens to these channels:
-  * pool requests blocks from a specific peer by posting to requestsCh; the block reactor then sends
-    a &bcBlockRequestMessage for a specific height
-  * pool signals timeout of a specific peer by posting to timeoutsCh
-  * switchToConsensusTicker to periodically try and switch to consensus
-  * trySyncTicker to periodically check if we have fallen behind and then catch-up sync
-    * if there aren't any new blocks available on the pool it skips syncing
-* tries to sync the app by taking downloaded blocks from the pool, giving them to the app and storing
-  them on disk
-* implements Receive which is called by the switch/peer
-  * calls AddBlock on the pool when it receives a new block from a peer
-
-## Block Pool
-
-* responsible for downloading blocks from peers
-* makeRequestersRoutine()
-  * removes timeout peers
-  * starts new requesters by calling makeNextRequester()
-* requestRoutine():
-  * picks a peer and sends the request, then blocks until:
-    * pool is stopped by listening to pool.Quit
-    * requester is stopped by listening to Quit
-    * request is redone
-    * we receive a block
-  * gotBlockCh is strange
-
-## Block Store
-
-* persists blocks to disk
-
-# TODO
-
-* How does the switch from bcR to conR happen? Does conR persist blocks to disk too?
-* What is the interaction between the consensus and blockchain reactors?
diff --git a/docs/spec/reactors/block_sync/reactor.md b/docs/spec/reactors/block_sync/reactor.md
deleted file mode 100644
index 97104eee..00000000
--- a/docs/spec/reactors/block_sync/reactor.md
+++ /dev/null
@@ -1,307 +0,0 @@
-# Blockchain Reactor
-
-The Blockchain Reactor's high level responsibility is to enable peers who are
-far behind the current state of the consensus to quickly catch up by downloading
-many blocks in parallel, verifying their commits, and executing them against the
-ABCI application.
-
-Tendermint full nodes run the Blockchain Reactor as a service to provide blocks
-to new nodes. New nodes run the Blockchain Reactor in "fast_sync" mode,
-where they actively make requests for more blocks until they sync up.
-Once caught up, "fast_sync" mode is disabled and the node switches to
-using (and turns on) the Consensus Reactor.
-
-## Message Types
-
-```go
-const (
-    msgTypeBlockRequest    = byte(0x10)
-    msgTypeBlockResponse   = byte(0x11)
-    msgTypeNoBlockResponse = byte(0x12)
-    msgTypeStatusResponse  = byte(0x20)
-    msgTypeStatusRequest   = byte(0x21)
-)
-```
-
-```go
-type bcBlockRequestMessage struct {
-    Height int64
-}
-
-type bcNoBlockResponseMessage struct {
-    Height int64
-}
-
-type bcBlockResponseMessage struct {
-    Block Block
-}
-
-type bcStatusRequestMessage struct {
-    Height int64
-}
-
-type bcStatusResponseMessage struct {
-    Height int64
-}
-```
-
-## Architecture and algorithm
-
-The Blockchain reactor is organised as a set of concurrent tasks:
-
-- Receive routine of Blockchain Reactor
-- Task for creating Requesters
-- Set of Requester tasks and
-- Controller task.
-
-![Blockchain Reactor Architecture Diagram](img/bc-reactor.png)
-
-### Data structures
-
-These are the core data structures necessary to provide the Blockchain Reactor logic.
-
-The Requester data structure is used to track the assignment of a request for a `block` at position `height` to a
-peer with id equal to `peerID`.
-
-```go
-type Requester {
-    mtx         Mutex
-    block       Block
-    height      int64
-    peerID      p2p.ID
-    redoChannel chan struct{}
-}
-```
-
-Pool is the core data structure that stores the last executed block (`height`), the assignment of requests to peers
-(`requesters`), the current height and number of pending requests for each peer (`peers`), the maximum peer height, etc.
-
-```go
-type Pool {
-    mtx             Mutex
-    requesters      map[int64]*Requester
-    height          int64
-    peers           map[p2p.ID]*Peer
-    maxPeerHeight   int64
-    numPending      int32
-    store           BlockStore
-    requestsChannel chan<- BlockRequest
-    errorsChannel   chan<- peerError
-}
-```
-
-The Peer data structure stores for each peer its current `height`, the number of pending requests sent to
-the peer (`numPending`), etc.
-
-```go
-type Peer struct {
-    id         p2p.ID
-    height     int64
-    numPending int32
-    timeout    *time.Timer
-    didTimeout bool
-}
-```
-
-BlockRequest is an internal data structure used to denote the current mapping of a request for a block at some `height` to
-a peer (`PeerID`).
-
-```go
-type BlockRequest {
-    Height int64
-    PeerID p2p.ID
-}
-```
-
-### Receive routine of Blockchain Reactor
-
-It is executed upon message reception on the BlockchainChannel inside the p2p receive routine. There is a separate p2p
-receive routine (and therefore a receive routine of the Blockchain Reactor) executed for each peer. Note that
-"try to send" does not block (it returns immediately) if the outgoing buffer is full.
-
-```go
-handleMsg(pool, m):
-    upon receiving bcBlockRequestMessage m from peer p:
-        block = load block for height m.Height from pool.store
-        if block != nil then
-            try to send BlockResponseMessage(block) to p
-        else
-            try to send bcNoBlockResponseMessage(m.Height) to p
-
-    upon receiving bcBlockResponseMessage m from peer p:
-        pool.mtx.Lock()
-        requester = pool.requesters[m.Height]
-        if requester == nil then
-            error("peer sent us a block we didn't expect")
-            continue
-
-        if requester.block == nil and requester.peerID == p then
-            requester.block = m.Block
-            pool.numPending -= 1 // atomic decrement
-            peer = pool.peers[p]
-            if peer != nil then
-                peer.numPending--
-                if peer.numPending == 0 then
-                    peer.timeout.Stop()
-                    // NOTE: we don't send Quit signal to the corresponding requester task!
-                else
-                    trigger peer timeout to expire after peerTimeout
-        pool.mtx.Unlock()
-
-    upon receiving bcStatusRequestMessage m from peer p:
-        try to send bcStatusResponseMessage(pool.store.Height) to p
-
-    upon receiving bcStatusResponseMessage m from peer p:
-        pool.mtx.Lock()
-        peer = pool.peers[p]
-        if peer != nil then
-            peer.height = m.Height
-        else
-            peer = create new Peer data structure with id = p and height = m.Height
-            pool.peers[p] = peer
-
-        if m.Height > pool.maxPeerHeight then
-            pool.maxPeerHeight = m.Height
-        pool.mtx.Unlock()
-
-onTimeout(p):
-    send error message to pool error channel
-    peer = pool.peers[p]
-    peer.didTimeout = true
-```
-
-### Requester tasks
-
-A Requester task is responsible for fetching a single block at position `height`.
-
-```go
-fetchBlock(height, pool):
-    while true do
-        peerID = nil
-        block = nil
-        peer = pickAvailablePeer(height)
-        peerID = peer.id
-
-        enqueue BlockRequest(height, peerID) to pool.requestsChannel
-        redo = false
-        while !redo do
-            select {
-                upon receiving Quit message do
-                    return
-                upon receiving message on redoChannel do
-                    mtx.Lock()
-                    pool.numPending++
-                    redo = true
-                    mtx.Unlock()
-            }
-
-pickAvailablePeer(height):
-    selectedPeer = nil
-    while selectedPeer == nil do
-        pool.mtx.Lock()
-        for each peer in pool.peers do
-            if !peer.didTimeout and peer.numPending < maxPendingRequestsPerPeer and peer.height >= height then
-                peer.numPending++
-                selectedPeer = peer
-                break
-        pool.mtx.Unlock()
-
-        if selectedPeer == nil then
-            sleep for requestIntervalMS
-
-    return selectedPeer
-```
-
-### Task for creating Requesters
-
-This task is responsible for continuously creating and starting Requester tasks.
-
-```go
-createRequesters(pool):
-    while true do
-        if !pool.isRunning then break
-        if pool.numPending < maxPendingRequests or size(pool.requesters) < maxTotalRequesters then
-            pool.mtx.Lock()
-            nextHeight = pool.height + size(pool.requesters)
-            requester = create new requester for height nextHeight
-            pool.requesters[nextHeight] = requester
-            pool.numPending += 1 // atomic increment
-            start requester task
-            pool.mtx.Unlock()
-        else
-            sleep for requestIntervalMS
-            pool.mtx.Lock()
-            for each peer in pool.peers do
-                if !peer.didTimeout && peer.numPending > 0 && peer.curRate < minRecvRate then
-                    send error on pool error channel
-                    peer.didTimeout = true
-                if peer.didTimeout then
-                    for each requester in pool.requesters do
-                        if requester.getPeerID() == peer.id then
-                            enqueue msg on requester's redoChannel
-                    delete(pool.peers, peer.id)
-            pool.mtx.Unlock()
-```
-
-### Main blockchain reactor controller task
-
-```go
-main(pool):
-    create trySyncTicker with interval trySyncIntervalMS
-    create statusUpdateTicker with interval statusUpdateIntervalSeconds
-    create switchToConsensusTicker with interval switchToConsensusIntervalSeconds
-
-    while true do
-        select {
-            upon receiving BlockRequest(Height, Peer) on pool.requestsChannel:
-                try to send bcBlockRequestMessage(Height) to Peer
-
-            upon receiving error(peer) on errorsChannel:
-                stop peer for error
-
-            upon receiving message on statusUpdateTickerChannel:
-                broadcast bcStatusRequestMessage(bcR.store.Height) // message sent in a separate routine
-
-            upon receiving message on switchToConsensusTickerChannel:
-                pool.mtx.Lock()
-                receivedBlockOrTimedOut = pool.height > 0 || (time.Now() - pool.startTime) > 5 Seconds
-                ourChainIsLongestAmongPeers = pool.maxPeerHeight == 0 || pool.height >= pool.maxPeerHeight
-                haveSomePeers = size of pool.peers > 0
-                pool.mtx.Unlock()
-                if haveSomePeers && receivedBlockOrTimedOut && ourChainIsLongestAmongPeers then
-                    switch to consensus mode
-
-            upon receiving message on trySyncTickerChannel:
-                for i = 0; i < 10; i++ do
-                    pool.mtx.Lock()
-                    firstBlock = pool.requesters[pool.height].block
-                    secondBlock = pool.requesters[pool.height+1].block
-                    if firstBlock == nil or secondBlock == nil then continue
-                    pool.mtx.Unlock()
-                    verify firstBlock using LastCommit from secondBlock
-                    if verification failed
-                        pool.mtx.Lock()
-                        peerID = pool.requesters[pool.height].peerID
-                        redoRequestsForPeer(peerID)
-                        delete(pool.peers, peerID)
-                        stop peer peerID for error
-                        pool.mtx.Unlock()
-                    else
-                        delete(pool.requesters, pool.height)
-                        save firstBlock to store
-                        pool.height++
-                        execute firstBlock
-        }
-
-redoRequestsForPeer(pool, peerID):
-    for each requester in pool.requesters do
-        if requester.getPeerID() == peerID
-            enqueue msg on redoChannel for requester
-```
-
-## Channels
-
-Defines `maxMsgSize` for the maximum size of incoming messages, and
-`SendQueueCapacity` and `RecvBufferCapacity` for the maximum sending and
-receiving buffers, respectively. These are supposed to prevent amplification
-attacks by setting an upper limit on how much data we can receive from and send to
-a peer.
-
-Sending incorrectly encoded data will result in stopping the peer.
diff --git a/docs/spec/reactors/consensus/consensus-reactor.md b/docs/spec/reactors/consensus/consensus-reactor.md
deleted file mode 100644
index 0f03b44b..00000000
--- a/docs/spec/reactors/consensus/consensus-reactor.md
+++ /dev/null
@@ -1,352 +0,0 @@
-# Consensus Reactor
-
-The Consensus Reactor defines a reactor for the consensus service. It contains the ConsensusState service that
-manages the state of the Tendermint consensus internal state machine.
-When the Consensus Reactor is started, it starts the Broadcast Routine, which starts the ConsensusState service.
-Furthermore, for each peer that is added to the Consensus Reactor, it creates (and manages) the known peer state
-(that is used extensively in the gossip routines) and starts the following three routines for the peer p:
-Gossip Data Routine, Gossip Votes Routine and QueryMaj23Routine. Finally, the Consensus Reactor is responsible
-for decoding messages received from a peer and for adequate processing of the message depending on its type and content.
-The processing normally consists of updating the known peer state and, for some messages
-(`ProposalMessage`, `BlockPartMessage` and `VoteMessage`), also forwarding the message to the ConsensusState module
-for further processing. In the following text we specify the core functionality of those separate units of execution
-that are part of the Consensus Reactor.
-
-## ConsensusState service
-
-Consensus State handles the execution of the Tendermint BFT consensus algorithm. It processes votes and proposals,
-and upon reaching agreement, commits blocks to the chain and executes them against the application.
-The internal state machine receives input from peers, the internal validator and from a timer.
-
-Inside Consensus State we have the following units of execution: Timeout Ticker and Receive Routine.
-Timeout Ticker is a timer that schedules timeouts conditional on the height/round/step that are processed
-by the Receive Routine.
-
-### Receive Routine of the ConsensusState service
-
-The Receive Routine of the ConsensusState handles messages which may cause internal consensus state transitions.
-It is the only routine that updates RoundState, which contains the internal consensus state.
-Updates (state transitions) happen on timeouts, complete proposals, and 2/3 majorities.
-It receives messages from peers, internal validators and from the Timeout Ticker,
-and invokes the corresponding handlers, potentially updating the RoundState.
-The details of the protocol (together with formal proofs of correctness) implemented by the Receive Routine are
-discussed in a separate document. For understanding this document
-it is sufficient to know that the Receive Routine manages and updates the RoundState data structure that is
-then extensively used by the gossip routines to determine what information should be sent to peer processes.
-
-## Round State
-
-RoundState defines the internal consensus state. It contains the height, round, round step, current validator set,
-a proposal and proposal block for the current round, the locked round and block (if some block is locked), the set of
-received votes, and the last commit and last validators set.
-
-```golang
-type RoundState struct {
-    Height             int64
-    Round              int
-    Step               RoundStepType
-    Validators         ValidatorSet
-    Proposal           Proposal
-    ProposalBlock      Block
-    ProposalBlockParts PartSet
-    LockedRound        int
-    LockedBlock        Block
-    LockedBlockParts   PartSet
-    Votes              HeightVoteSet
-    LastCommit         VoteSet
-    LastValidators     ValidatorSet
-}
-```
-
-Internally, consensus will run as a state machine with the following states (sketched as a Go enum below):
-
-- RoundStepNewHeight
-- RoundStepNewRound
-- RoundStepPropose
-- RoundStepProposeWait
-- RoundStepPrevote
-- RoundStepPrevoteWait
-- RoundStepPrecommit
-- RoundStepPrecommitWait
-- RoundStepCommit
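-
-A hedged Go sketch of these states as an enum; only the names come from the
-list above, and the concrete numeric values are our assumption, not the
-implementation's:
-
-```go
-// RoundStepType enumerates the consensus state machine steps listed above.
-type RoundStepType uint8
-
-const (
-    RoundStepNewHeight RoundStepType = iota + 1
-    RoundStepNewRound
-    RoundStepPropose
-    RoundStepProposeWait
-    RoundStepPrevote
-    RoundStepPrevoteWait
-    RoundStepPrecommit
-    RoundStepPrecommitWait
-    RoundStepCommit
-)
-```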
-
-## Peer Round State
-
-Peer round state contains the known state of a peer. It is updated by the Receive routine of the
-Consensus Reactor and by the gossip routines upon sending a message to the peer.
-
-```golang
-type PeerRoundState struct {
-    Height                   int64         // Height peer is at
-    Round                    int           // Round peer is at, -1 if unknown.
-    Step                     RoundStepType // Step peer is at
-    Proposal                 bool          // True if peer has proposal for this round
-    ProposalBlockPartsHeader PartSetHeader
-    ProposalBlockParts       BitArray
-    ProposalPOLRound         int      // Proposal's POL round. -1 if none.
-    ProposalPOL              BitArray // nil until ProposalPOLMessage received.
-    Prevotes                 BitArray // All votes peer has for this round
-    Precommits               BitArray // All precommits peer has for this round
-    LastCommitRound          int      // Round of commit for last height. -1 if none.
-    LastCommit               BitArray // All commit precommits of commit for last height.
-    CatchupCommitRound       int      // Round that we have commit for. Not necessarily unique. -1 if none.
-    CatchupCommit            BitArray // All commit precommits peer has for this height & CatchupCommitRound
-}
-```
-
-## Receive method of Consensus reactor
-
-The entry point of the Consensus reactor is the receive method. When a message is received from a peer p,
-normally the peer round state is updated correspondingly, and some messages
-are passed on for further processing, for example to the ConsensusState service. We now specify the processing of messages
-in the receive method of the Consensus reactor for each message type. In the following message handlers, `rs` and `prs` denote
-`RoundState` and `PeerRoundState`, respectively.
-
-### NewRoundStepMessage handler
-
-```
-handleMessage(msg):
-    if msg is from smaller height/round/step then return
-    // Just remember these values.
-    prsHeight = prs.Height
-    prsRound = prs.Round
-    prsCatchupCommitRound = prs.CatchupCommitRound
-    prsCatchupCommit = prs.CatchupCommit
-
-    Update prs with values from msg
-    if prs.Height or prs.Round has been updated then
-        reset Proposal related fields of the peer state
-    if prs.Round has been updated and msg.Round == prsCatchupCommitRound then
-        prs.Precommits = prsCatchupCommit
-    if prs.Height has been updated then
-        if prsHeight+1 == msg.Height && prsRound == msg.LastCommitRound then
-            prs.LastCommitRound = msg.LastCommitRound
-            prs.LastCommit = prs.Precommits
-        else
-            prs.LastCommitRound = msg.LastCommitRound
-            prs.LastCommit = nil
-        Reset prs.CatchupCommitRound and prs.CatchupCommit
-```
-
-### CommitStepMessage handler
-
-```
-handleMessage(msg):
-    if prs.Height == msg.Height then
-        prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
-        prs.ProposalBlockParts = msg.BlockParts
-```
-
-### HasVoteMessage handler
-
-```
-handleMessage(msg):
-    if prs.Height == msg.Height then
-        prs.setHasVote(msg.Height, msg.Round, msg.Type, msg.Index)
-```
-
-### VoteSetMaj23Message handler
-
-```
-handleMessage(msg):
-    if prs.Height == msg.Height then
-        Record in rs that the peer claims to have a ⅔ majority for msg.BlockID
-        Send VoteSetBitsMessage showing the votes this node has for that BlockID
-```
-
-### ProposalMessage handler
-
-```
-handleMessage(msg):
-    if prs.Height != msg.Height || prs.Round != msg.Round || prs.Proposal then return
-    prs.Proposal = true
-    prs.ProposalBlockPartsHeader = msg.BlockPartsHeader
-    prs.ProposalBlockParts = empty set
-    prs.ProposalPOLRound = msg.POLRound
-    prs.ProposalPOL = nil
-    Send msg through internal peerMsgQueue to ConsensusState service
-```
-
-### ProposalPOLMessage handler
-
-```
-handleMessage(msg):
-    if prs.Height != msg.Height or prs.ProposalPOLRound != msg.ProposalPOLRound then return
-    prs.ProposalPOL = msg.ProposalPOL
-```
-
-### BlockPartMessage handler
-
-```
-handleMessage(msg):
-    if prs.Height != msg.Height || prs.Round != msg.Round then return
-    Record in prs that peer has block part msg.Part.Index
-    Send msg through internal peerMsgQueue to ConsensusState service
-```
-
-### VoteMessage handler
-
-```
-handleMessage(msg):
-    Record in prs that a peer knows vote with index msg.vote.ValidatorIndex for particular height and round
-    Send msg through internal peerMsgQueue to ConsensusState service
-```
-
-### VoteSetBitsMessage handler
-
-```
-handleMessage(msg):
-    Update prs for the bit-array of votes peer claims to have for the msg.BlockID
-```
-
-## Gossip Data Routine
-
-It is used to send the following messages to the peer: `BlockPartMessage`, `ProposalMessage` and
-`ProposalPOLMessage` on the DataChannel. The gossip data routine is based on the local RoundState (`rs`)
-and the known PeerRoundState (`prs`). The routine repeats forever the logic shown below:
-
-```
-1a) if rs.ProposalBlockPartsHeader == prs.ProposalBlockPartsHeader and the peer does not have all the proposal parts then
-        Part = pick a random proposal block part the peer does not have
-        Send BlockPartMessage(rs.Height, rs.Round, Part) to the peer on the DataChannel
-        if send returns true, record that the peer knows the corresponding block Part
-        Continue
-
-1b) if (0 < prs.Height) and (prs.Height < rs.Height) then
-        help peer catch up using gossipDataForCatchup function
-        Continue
-
-1c) if (rs.Height != prs.Height) or (rs.Round != prs.Round) then
-        Sleep PeerGossipSleepDuration
-        Continue
-
-// at this point rs.Height == prs.Height and rs.Round == prs.Round
-1d) if (rs.Proposal != nil and !prs.Proposal) then
-        Send ProposalMessage(rs.Proposal) to the peer
-        if send returns true, record that the peer knows Proposal
-        if 0 <= rs.Proposal.POLRound then
-            polRound = rs.Proposal.POLRound
-            prevotesBitArray = rs.Votes.Prevotes(polRound).BitArray()
-            Send ProposalPOLMessage(rs.Height, polRound, prevotesBitArray)
-        Continue
-
-2)  Sleep PeerGossipSleepDuration
-```
-
-### Gossip Data For Catchup
-
-This function is responsible for helping the peer catch up if it is at a smaller height (prs.Height < rs.Height).
-The function executes the following logic:
-
-```
-    if peer does not have all block parts for prs.ProposalBlockPart then
-        blockMeta = Load Block Metadata for height prs.Height from blockStore
-        if blockMeta.BlockID.PartsHeader != prs.ProposalBlockPartsHeader then
-            Sleep PeerGossipSleepDuration
-            return
-        Part = pick a random proposal block part the peer does not have
-        Send BlockPartMessage(prs.Height, prs.Round, Part) to the peer on the DataChannel
-        if send returns true, record that the peer knows the corresponding block Part
-        return
-    else Sleep PeerGossipSleepDuration
-```
-
-## Gossip Votes Routine
-
-It is used to send the following message: `VoteMessage` on the VoteChannel.
-The gossip votes routine is based on the local RoundState (`rs`)
-and the known PeerRoundState (`prs`).
-The routine repeats forever the logic shown below:
-
-```
-1a) if rs.Height == prs.Height then
-        if prs.Step == RoundStepNewHeight then
-            vote = random vote from rs.LastCommit the peer does not have
-            Send VoteMessage(vote) to the peer
-            if send returns true, continue
-
-        if prs.Step <= RoundStepPrevote and prs.Round != -1 and prs.Round <= rs.Round then
-            Prevotes = rs.Votes.Prevotes(prs.Round)
-            vote = random vote from Prevotes the peer does not have
-            Send VoteMessage(vote) to the peer
-            if send returns true, continue
-
-        if prs.Step <= RoundStepPrecommit and prs.Round != -1 and prs.Round <= rs.Round then
-            Precommits = rs.Votes.Precommits(prs.Round)
-            vote = random vote from Precommits the peer does not have
-            Send VoteMessage(vote) to the peer
-            if send returns true, continue
-
-        if prs.ProposalPOLRound != -1 then
-            PolPrevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
-            vote = random vote from PolPrevotes the peer does not have
-            Send VoteMessage(vote) to the peer
-            if send returns true, continue
-
-1b) if prs.Height != 0 and rs.Height == prs.Height+1 then
-        vote = random vote from rs.LastCommit the peer does not have
-        Send VoteMessage(vote) to the peer
-        if send returns true, continue
-
-1c) if prs.Height != 0 and rs.Height >= prs.Height+2 then
-        Commit = get commit from BlockStore for prs.Height
-        vote = random vote from Commit the peer does not have
-        Send VoteMessage(vote) to the peer
-        if send returns true, continue
-
-2)  Sleep PeerGossipSleepDuration
-```
-
-## QueryMaj23Routine
-
-It is used to send the following message: `VoteSetMaj23Message`. `VoteSetMaj23Message` is sent to indicate that a given
-BlockID has seen +2/3 votes. This routine is based on the local RoundState (`rs`) and the known PeerRoundState
-(`prs`). The routine repeats forever the logic shown below:
-
-```
-1a) if rs.Height == prs.Height then
-        Prevotes = rs.Votes.Prevotes(prs.Round)
-        if there is a ⅔ majority for some blockId in Prevotes then
-            m = VoteSetMaj23Message(prs.Height, prs.Round, Prevote, blockId)
-            Send m to peer
-        Sleep PeerQueryMaj23SleepDuration
-
-1b) if rs.Height == prs.Height then
-        Precommits = rs.Votes.Precommits(prs.Round)
-        if there is a ⅔ majority for some blockId in Precommits then
-            m = VoteSetMaj23Message(prs.Height, prs.Round, Precommit, blockId)
-            Send m to peer
-        Sleep PeerQueryMaj23SleepDuration
-
-1c) if rs.Height == prs.Height and prs.ProposalPOLRound >= 0 then
-        Prevotes = rs.Votes.Prevotes(prs.ProposalPOLRound)
-        if there is a ⅔ majority for some blockId in Prevotes then
-            m = VoteSetMaj23Message(prs.Height, prs.ProposalPOLRound, Prevote, blockId)
-            Send m to peer
-        Sleep PeerQueryMaj23SleepDuration
-
-1d) if prs.CatchupCommitRound != -1 and 0 < prs.Height and
-       prs.Height <= blockStore.Height() then
-        Commit = LoadCommit(prs.Height)
-        m = VoteSetMaj23Message(prs.Height, Commit.Round, Precommit, Commit.blockId)
-        Send m to peer
-        Sleep PeerQueryMaj23SleepDuration
-
-2)  Sleep PeerQueryMaj23SleepDuration
-```
-
-## Broadcast routine
-
-The Broadcast routine subscribes to an internal event bus to receive new round steps, vote messages and proposal
-heartbeat messages, and broadcasts messages to peers upon receiving those events.
-It broadcasts `NewRoundStepMessage` or `CommitStepMessage` upon a new round state event. Note that
-broadcasting these messages does not depend on the PeerRoundState; they are sent on the StateChannel.
-Upon receiving a VoteMessage it broadcasts a `HasVoteMessage` to its peers on the StateChannel.
-`ProposalHeartbeatMessage` is sent the same way on the StateChannel.
-
-## Channels
-
-Defines 4 channels: state, data, vote and vote_set_bits. Each channel
-has `SendQueueCapacity` and `RecvBufferCapacity`, and
-`RecvMessageCapacity` set to `maxMsgSize`.
-
-Sending incorrectly encoded data will result in stopping the peer.
diff --git a/docs/spec/reactors/consensus/consensus.md b/docs/spec/reactors/consensus/consensus.md
deleted file mode 100644
index 4ea619b5..00000000
--- a/docs/spec/reactors/consensus/consensus.md
+++ /dev/null
@@ -1,212 +0,0 @@
-# Tendermint Consensus Reactor
-
-Tendermint Consensus is a distributed protocol executed by validator processes to agree on
-the next block to be added to the Tendermint blockchain. The protocol proceeds in rounds, where
-each round is an attempt to reach agreement on the next block. A round starts by having a dedicated
-process (called the proposer) suggest to the other processes what the next block should be, with
-the `ProposalMessage`.
-The processes respond by voting for a block with `VoteMessage` (there are two kinds of vote
-messages, prevote and precommit votes). Note that a proposal message is just a suggestion of what the
-next block should be; a validator might vote with a `VoteMessage` for a different block. If in some
-round a sufficient number of processes vote for the same block, then this block is committed and later
-added to the blockchain. `ProposalMessage` and `VoteMessage` are signed by the private key of the
-validator. The internals of the protocol and how it ensures safety and liveness properties are
-explained in a forthcoming document.
-
-For efficiency reasons, validators in the Tendermint consensus protocol do not agree directly on the
-block, as blocks are large, i.e., they don't embed the block inside `Proposal` and
-`VoteMessage`. Instead, they reach agreement on the `BlockID` (see the `BlockID` definition in the
-[Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section) that uniquely identifies each block. The block itself is
-disseminated to validator processes using a peer-to-peer gossiping protocol. It starts by having the
-proposer first split a block into a number of block parts, which are then gossiped between
-processes using `BlockPartMessage`.
-
-Validators in Tendermint communicate by a peer-to-peer gossiping protocol. Each validator is connected
-only to a subset of processes called peers. Through the gossiping protocol, a validator sends to its peers
-all the information (`ProposalMessage`, `VoteMessage` and `BlockPartMessage`) they need to
-reach agreement on some block, and to obtain the content of the chosen block (block parts). As
-part of the gossiping protocol, processes also send auxiliary messages that inform peers about the
-executed steps of the core consensus algorithm (`NewRoundStepMessage` and `CommitStepMessage`), and
-also messages that inform peers what votes the process has seen (`HasVoteMessage`,
-`VoteSetMaj23Message` and `VoteSetBitsMessage`). These messages are then used in the gossiping
-protocol to determine what messages a process should send to its peers.
-
-We now describe the content of each message exchanged during the Tendermint consensus protocol.
-
-## ProposalMessage
-
-ProposalMessage is sent when a new block is proposed. It is a suggestion of what the
-next block in the blockchain should be.
-
-```go
-type ProposalMessage struct {
-    Proposal Proposal
-}
-```
-
-### Proposal
-
-Proposal contains the height and round for which this proposal is made, the BlockID as a unique identifier
-of the proposed block, a timestamp, and two fields (POLRound and POLBlockID) that are needed for
-termination of the consensus. The message is signed by the validator's private key.
-
-```go
-type Proposal struct {
-    Height     int64
-    Round      int
-    Timestamp  Time
-    BlockID    BlockID
-    POLRound   int
-    POLBlockID BlockID
-    Signature  Signature
-}
-```
-
-NOTE: In the current version of Tendermint, the consensus value in the proposal is represented with
-PartSetHeader, and with BlockID in the vote message. They should be aligned as suggested in this spec, as
-BlockID contains PartSetHeader.
-
-## VoteMessage
-
-VoteMessage is sent to vote for some block (or to inform others that a process does not vote in the
-current round). Vote is defined in the [Blockchain](https://github.com/tendermint/tendermint/blob/master/docs/spec/blockchain/blockchain.md#blockid) section and contains the validator's
-information (validator address and index), the height and round for which the vote is sent, the vote type,
-the blockID if the process votes for some block (`nil` otherwise) and a timestamp when the vote is sent. The
-message is signed by the validator's private key.
-
-```go
-type VoteMessage struct {
-    Vote Vote
-}
-```
-
-## BlockPartMessage
-
-BlockPartMessage is sent when gossiping a piece of the proposed block. It contains height, round
-and the block part.
-
-```go
-type BlockPartMessage struct {
-    Height int64
-    Round  int
-    Part   Part
-}
-```
-
-## ProposalHeartbeatMessage
-
-ProposalHeartbeatMessage is sent to signal that a node is alive and waiting for transactions
-to be able to create the next block proposal.
-
-```go
-type ProposalHeartbeatMessage struct {
-    Heartbeat Heartbeat
-}
-```
-
-### Heartbeat
-
-Heartbeat contains validator information (address and index),
-height, round and sequence number. It is signed by the private key of the validator.
-
-```go
-type Heartbeat struct {
-    ValidatorAddress []byte
-    ValidatorIndex   int
-    Height           int64
-    Round            int
-    Sequence         int
-    Signature        Signature
-}
-```
-
-## NewRoundStepMessage
-
-NewRoundStepMessage is sent for every step transition during the core consensus algorithm execution.
-It is used in the gossip part of the Tendermint protocol to inform peers about the current
-height/round/step a process is in.
-
-```go
-type NewRoundStepMessage struct {
-    Height                int64
-    Round                 int
-    Step                  RoundStepType
-    SecondsSinceStartTime int
-    LastCommitRound       int
-}
-```
-
-## CommitStepMessage
-
-CommitStepMessage is sent when an agreement on some block is reached. It contains the height for which
-agreement is reached, the block parts header that describes the decided block and is used to obtain all
-block parts, and a bit array of the block parts the process currently has, so its peers know which
-parts it is missing and can send them.
-
-```go
-type CommitStepMessage struct {
-    Height     int64
-    BlockID    BlockID
-    BlockParts BitArray
-}
-```
-
-TODO: We use BlockID instead of BlockPartsHeader (in the current implementation) for symmetry.
-
-## ProposalPOLMessage
-
-ProposalPOLMessage is sent when a previous block is re-proposed.
-It is used to inform peers in what round the process learned of this block (ProposalPOLRound),
-and what prevotes for the re-proposed block the process has.
-
-```go
-type ProposalPOLMessage struct {
-    Height           int64
-    ProposalPOLRound int
-    ProposalPOL      BitArray
-}
-```
-
-## HasVoteMessage
-
-HasVoteMessage is sent to indicate that a particular vote has been received. It contains height,
-round, vote type and the index of the validator that is the originator of the corresponding vote.
-
-```go
-type HasVoteMessage struct {
-    Height int64
-    Round  int
-    Type   byte
-    Index  int
-}
-```
-
-## VoteSetMaj23Message
-
-VoteSetMaj23Message is sent to indicate that a process has seen +2/3 votes for some BlockID.
-It contains height, round, vote type and the BlockID.
-
-```go
-type VoteSetMaj23Message struct {
-    Height  int64
-    Round   int
-    Type    byte
-    BlockID BlockID
-}
-```
-
-## VoteSetBitsMessage
-
-VoteSetBitsMessage is sent to communicate the bit-array of votes a process has seen for a given
-BlockID. It contains height, round, vote type, BlockID and a bit array of
-the votes a process has.
-
-```go
-type VoteSetBitsMessage struct {
-    Height  int64
-    Round   int
-    Type    byte
-    BlockID BlockID
-    Votes   BitArray
-}
-```
diff --git a/docs/spec/reactors/consensus/proposer-selection.md b/docs/spec/reactors/consensus/proposer-selection.md
deleted file mode 100644
index 649d3dd2..00000000
--- a/docs/spec/reactors/consensus/proposer-selection.md
+++ /dev/null
@@ -1,46 +0,0 @@
-# Proposer selection procedure in Tendermint
-
-This document specifies the Proposer Selection Procedure that is used in Tendermint to choose a round proposer.
-As Tendermint is a "leader-based" protocol, the proposer selection is critical for its correct functioning.
-Let `proposer_p(h,r)` denote the process returned by the Proposer Selection Procedure at process p, at height h
-and round r. Then the Proposer Selection Procedure should fulfill the following properties:
-
-`Agreement`: Given a validator set V, and two honest validators,
-p and q, for each height h, and each round r,
-proposer_p(h,r) = proposer_q(h,r)
-
-`Liveness`: In every consecutive sequence of rounds of size K (K is a system parameter), at least a
-single round has an honest proposer.
-
-`Fairness`: The proposer selection is proportional to the validator voting power, i.e., a validator with more
-voting power is selected more frequently, proportional to its power. More precisely, given a set of processes
-with total voting power N, during a sequence of rounds of size N, every process is proposer in a number of rounds
-equal to its voting power.
-
-We now look at a few particular cases to better understand how fairness should be implemented.
-If we have 4 processes with the following voting power distribution (p0,4), (p1, 2), (p2, 2), (p3, 2) at some round r,
-we have the following sequence of proposer selections in the subsequent rounds:
-
-`p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, p0, p1, p2, p3, p0, etc`
-
-Let us now consider the following scenario, where the total voting power of faulty processes is aggregated in a single process
-p0: (p0,3), (p1, 1), (p2, 1), (p3, 1), (p4, 1), (p5, 1), (p6, 1), (p7, 1).
-In this case the sequence of proposer selections looks like this:
-
-`p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, p0, p1, p2, p3, p0, p4, p5, p6, p7, p0, etc`
-
-In this case, we see that the number of rounds coordinated by a faulty process is proportional to its voting power.
-We also consider the case where voting power is uniformly distributed among processes, i.e., we have 10 processes,
-each with voting power of 1.
-Let us also suppose that there are 3 faulty processes with consecutive addresses;
-for example, the first 3 processes are faulty. Then the sequence looks like this:
-
-`p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, etc`
-
-In this case, we have 3 consecutive rounds with a faulty proposer.
-One special case we consider is the case where a single honest process p0 has most of the voting power, for example:
-(p0,100), (p1, 2), (p2, 3), (p3, 4). Then the sequence of proposer selections looks like this:
-
-`p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p0, p1, p0, p0, p0, p0, p0, etc`
-
-This basically means that almost all rounds have the same proposer. But in this case, process p0 in any case has enough
-voting power to decide whatever it wants, so the fact that it coordinates almost all rounds seems correct.
diff --git a/docs/spec/reactors/evidence/reactor.md b/docs/spec/reactors/evidence/reactor.md
deleted file mode 100644
index efa63aa4..00000000
--- a/docs/spec/reactors/evidence/reactor.md
+++ /dev/null
@@ -1,10 +0,0 @@
-# Evidence Reactor
-
-## Channels
-
-[#1503](https://github.com/tendermint/tendermint/issues/1503)
-
-Sending invalid evidence will result in stopping the peer.
-
-Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
-in stopping the peer.
diff --git a/docs/spec/reactors/mempool/concurrency.md b/docs/spec/reactors/mempool/concurrency.md
deleted file mode 100644
index 991113e6..00000000
--- a/docs/spec/reactors/mempool/concurrency.md
+++ /dev/null
@@ -1,8 +0,0 @@
-# Mempool Concurrency
-
-Look at the concurrency model this uses...
-
-* Receiving CheckTx
-* Broadcasting new tx
-* Interfaces with consensus engine, reap/update while checking
-* Calling the ABCI app (ordering. callbacks. how proxy works alongside the blockchain proxy which actually writes blocks)
diff --git a/docs/spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md
deleted file mode 100644
index 776149ba..00000000
--- a/docs/spec/reactors/mempool/config.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Mempool Configuration
-
-Here we describe configuration options around the mempool.
-For the purposes of this document, they are described
-as command-line flags, but they can also be passed in as
-environment variables or in the config.toml file. The
-following are all equivalent:
-
-Flag: `--mempool.recheck_empty=false`
-
-Environment: `TM_MEMPOOL_RECHECK_EMPTY=false`
-
-Config:
-```
-[mempool]
-recheck_empty = false
-```
-
-
-## Recheck
-
-`--mempool.recheck=false` (default: true)
-
-`--mempool.recheck_empty=false` (default: true)
-
-Recheck determines if the mempool rechecks all pending
-transactions after a block is committed. Once a block
-is committed, the mempool removes all valid transactions
-that were successfully included in the block.
-
-If `recheck` is true, then it will rerun CheckTx on
-all remaining transactions with the new block state.
-
-If the block contained no transactions, it will skip the
-recheck unless `recheck_empty` is true.
-
-## Broadcast
-
-`--mempool.broadcast=false` (default: true)
-
-Determines whether this node gossips any valid transactions
-that arrive in the mempool. The default is to gossip anything that
-passes CheckTx. If this is disabled, transactions are not
-gossiped, but instead stored locally and added to the next
-block for which this node is the proposer.
-
-## WalDir
-
-`--mempool.wal_dir=/tmp/gaia/mempool.wal` (default: $TM_HOME/data/mempool.wal)
-
-This defines the directory where the mempool writes the write-ahead
-logs.
-These files can be used to reload unbroadcasted
-transactions if the node crashes.
-
-If the directory passed in is an absolute path, the wal file is
-created there. If the directory is a relative path, the path is
-appended to the home directory of the tendermint process to
-generate an absolute path to the wal directory
-(default `$HOME/.tendermint` or set via `TM_HOME` or `--home`)
diff --git a/docs/spec/reactors/mempool/functionality.md b/docs/spec/reactors/mempool/functionality.md
deleted file mode 100644
index 85c3dc58..00000000
--- a/docs/spec/reactors/mempool/functionality.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Mempool Functionality
-
-The mempool maintains a list of potentially valid transactions,
-both to broadcast to other nodes and to provide to the
-consensus reactor when the node is selected as the block proposer.
-
-There are two sides to the mempool state:
-
-* External: get, check, and broadcast new transactions
-* Internal: return valid transactions, update the list after a block commit
-
-
-## External functionality
-
-External functionality is exposed via network interfaces
-to potentially untrusted actors.
-
-* CheckTx - triggered via RPC or P2P
-* Broadcast - gossip messages after a successful check
-
-## Internal functionality
-
-Internal functionality is exposed via method calls to other
-code compiled into the tendermint binary.
-
-* Reap - get txs to propose in the next block
-* Update - remove txs that were included in the last block
-* ABCI.CheckTx - call the ABCI app to validate the tx
-
-What does it provide the consensus reactor?
-What guarantees does it need from the ABCI app?
-(talk about interleaving processes in concurrency)
-
-## Optimizations
-
-Talk about the LRU cache to make sure we don't process any
-tx that we have seen before
diff --git a/docs/spec/reactors/mempool/messages.md b/docs/spec/reactors/mempool/messages.md
deleted file mode 100644
index 9a624dff..00000000
--- a/docs/spec/reactors/mempool/messages.md
+++ /dev/null
@@ -1,61 +0,0 @@
-# Mempool Messages
-
-## P2P Messages
-
-There is currently only one message that the Mempool broadcasts
-and receives over the p2p gossip network (via the reactor):
-`TxMessage`
-
-```go
-// TxMessage is a MempoolMessage containing a transaction.
-type TxMessage struct {
-    Tx types.Tx
-}
-```
-
-TxMessage is go-wire encoded and prepended with `0x1` as a
-"type byte". This is followed by a go-wire encoded byte-slice.
-The prefix for a 40 = 0x28 byte tx is `0x010128...`, followed by
-the actual 40-byte tx. The prefix for a 350 = 0x015e byte tx is
-`0x0102015e...`, followed by the actual 350-byte tx.
-
-(Please see the [go-wire repo](https://github.com/tendermint/go-wire#an-interface-example) for more information)
-
-## RPC Messages
-
-Mempool exposes `CheckTx([]byte)` over the RPC interface.
-
-It can be posted via `broadcast_tx_commit`, `broadcast_tx_sync` or
-`broadcast_tx_async`. They all parse a message with one argument,
-`"tx": "HEX_ENCODED_BINARY"`, and differ only in how long they
-wait before returning (sync makes sure CheckTx passes, commit
-makes sure it was included in a signed block).
-
-Request (`POST http://gaia.zone:26657/`):
-
-```json
-{
-  "id": "",
-  "jsonrpc": "2.0",
-  "method": "broadcast_tx_sync",
-  "params": {
-    "tx": "F012A4BC68..."
-  }
-}
-```
-
-Response:
-
-```json
-{
-  "error": "",
-  "result": {
-    "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52",
-    "log": "",
-    "data": "",
-    "code": 0
-  },
-  "id": "",
-  "jsonrpc": "2.0"
-}
-```
diff --git a/docs/spec/reactors/mempool/reactor.md b/docs/spec/reactors/mempool/reactor.md
deleted file mode 100644
index 2bdbd895..00000000
--- a/docs/spec/reactors/mempool/reactor.md
+++ /dev/null
@@ -1,14 +0,0 @@
-# Mempool Reactor
-
-## Channels
-
-[#1503](https://github.com/tendermint/tendermint/issues/1503)
-
-The mempool maintains a cache of the last 10000 transactions to prevent
-replaying old transactions (plus transactions coming from other
-validators, who are continually exchanging transactions). Read [Replay
-Protection](https://tendermint.readthedocs.io/projects/tools/en/master/app-development.html?#replay-protection)
-for details.
-
-Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
-in stopping the peer.
diff --git a/docs/spec/reactors/pex/pex.md b/docs/spec/reactors/pex/pex.md
deleted file mode 100644
index 317803b8..00000000
--- a/docs/spec/reactors/pex/pex.md
+++ /dev/null
@@ -1,123 +0,0 @@
-# Peer Strategy and Exchange
-
-Here we outline the design of the AddressBook
-and how it is used by the Peer Exchange Reactor (PEX) to ensure we are connected
-to good peers and to gossip peers to others.
-
-## Peer Types
-
-Certain peers are special in that they are specified by the user as `persistent`,
-which means we auto-redial them if the connection fails, or if we fail to dial
-them.
-Some peers can be marked as `private`, which means
-we will not put them in the address book or gossip them to others.
-
-All peers except private peers are tracked using the address book.
-
-## Discovery
-
-Peer discovery begins with a list of seeds.
-When we have no peers, or have been unable to find enough peers from existing ones,
-we dial a randomly selected seed to get a list of peers to dial.
-
-On startup, we will also immediately dial the given list of `persistent_peers`,
-and will attempt to maintain persistent connections with them. If the connections die, or we fail to dial,
-we will redial every 5s for a few minutes, then switch to an exponential backoff schedule,
-and after about a day of trying, stop dialing the peer.
-
-So long as we have fewer than `MinNumOutboundPeers`, we periodically request additional peers
-from each of our peers. If sufficient time goes by and we still can't find enough peers,
-we try the seeds again.
-
-## Listening
-
-Peers listen on a configurable ListenAddr that they self-report in their
-NodeInfo during handshakes with other peers. Peers accept up to (MaxNumPeers -
-MinNumOutboundPeers) incoming peers.
-
-## Address Book
-
-Peers are tracked via their ID (their PubKey.Address()).
-Peers are added to the address book from the PEX when they first connect to us or
-when we hear about them from other peers.
-
-The address book is arranged in sets of buckets, and distinguishes between
-vetted (old) and unvetted (new) peers. It keeps different sets of buckets for vetted and
-unvetted peers. Buckets provide randomization over peer selection. Peers are put
-in buckets according to their IP groups.
-
-A vetted peer can only be in one bucket. An unvetted peer can be in multiple buckets, and
-each instance of the peer can have a different IP:PORT.
-
-If we're trying to add a new peer but there's no space in its bucket, we'll
-remove the worst peer from that bucket to make room.
-
-## Vetting
-
-When a peer is first added, it is unvetted.
-Marking a peer as vetted is outside the scope of the `p2p` package.
-For Tendermint, a peer becomes vetted once it has contributed sufficiently
-at the consensus layer; i.e., once it has sent us valid and not-yet-known
-votes and/or block parts for `NumBlocksForVetted` blocks.
-Other users of the p2p package can determine their own conditions for when a peer is marked vetted.
-
-If a peer becomes vetted but there are already too many vetted peers,
-a randomly selected one of the vetted peers becomes unvetted.
-
-If a peer becomes unvetted (either a new peer, or one that was previously vetted),
-a randomly selected one of the unvetted peers is removed from the address book.
-
-More fine-grained tracking of peer behaviour can be done using
-a trust metric (see below), but it's best to start with something simple.
-
-## Select Peers to Dial
-
-When we need more peers, we pick them randomly from the addrbook with some
-configurable bias for unvetted peers. The bias should be lower when we have fewer peers
-and can increase as we obtain more, ensuring that our first peers are more trustworthy,
-but always giving us the chance to discover new good peers.
-
-We track the last time we dialed a peer and the number of unsuccessful attempts
-we've made. If too many attempts are made, we mark the peer as bad.
-
-Connection attempts are made with exponential backoff (plus jitter). Because
-the selection process happens every `ensurePeersPeriod`, we might not end up
-dialing a peer for much longer than the backoff duration.
-
-If we fail to connect to the peer after 16 tries (with exponential backoff), we remove it from the address book completely.
-
-## Select Peers to Exchange
-
-When we're asked for peers, we select them as follows:
-- select at most `maxGetSelection` peers
-- try to select at least `minGetSelection` peers - if we have fewer than that, select them all.
-- select a random, unbiased `getSelectionPercent` of the peers
-
-Send the selected peers. Note we select peers for sending without bias for vetted/unvetted.
-
-## Preventing Spam
-
-There are various cases where we decide a peer has misbehaved and we disconnect from it.
-When this happens, the peer is removed from the address book and blacklisted for
-some amount of time. We call this "Disconnect and Mark".
-Note that the bad behaviour may be detected outside the PEX reactor itself
-(for instance, in the mconnection, or another reactor), but it must be communicated to the PEX reactor
-so it can remove and mark the peer.
-
-In the PEX, if a peer sends us an unsolicited list of peers,
-or if the peer sends a request too soon after another one,
-we Disconnect and MarkBad.
-
-## Trust Metric
-
-The quality of peers can be tracked in more fine-grained detail using a
-Proportional-Integral-Derivative (PID) controller that incorporates
-current, past, and rate-of-change data to inform peer quality.
-
-While a PID trust metric has been implemented, it remains for future work
-to use it in the PEX.
-
-See the [trust metric](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-006-trust-metric.md)
-and [trust metric usage](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-007-trust-metric-usage.md)
-architecture docs for more details.
-
diff --git a/docs/spec/reactors/pex/reactor.md b/docs/spec/reactors/pex/reactor.md
deleted file mode 100644
index 468f182c..00000000
--- a/docs/spec/reactors/pex/reactor.md
+++ /dev/null
@@ -1,12 +0,0 @@
-# PEX Reactor
-
-## Channels
-
-Defines only `SendQueueCapacity`.
-[#1503](https://github.com/tendermint/tendermint/issues/1503)
-
-Implements rate-limiting by enforcing a minimal time between two consecutive
-`pexRequestMessage` requests. If the peer sends us addresses we did not ask for,
-it is stopped.
-
-Sending incorrectly encoded data or data exceeding `maxMsgSize` will result
-in stopping the peer.
diff --git a/docs/spec/scripts/crypto.go b/docs/spec/scripts/crypto.go
deleted file mode 100644
index e4dbd8a2..00000000
--- a/docs/spec/scripts/crypto.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package main
-
-import (
-	"fmt"
-
-	crypto "github.com/tendermint/go-crypto"
-)
-
-// SECRET
-var SECRET = []byte("some secret")
-
-func printEd() {
-	priv := crypto.GenPrivKeyEd25519FromSecret(SECRET)
-	pub := priv.PubKey().(crypto.PubKeyEd25519)
-	sig := priv.Sign([]byte("hello")).(crypto.SignatureEd25519)
-
-	name := "tendermint/PubKeyEd25519"
-	length := len(pub[:])
-
-	fmt.Println("### PubKeyEd25519")
-	fmt.Println("")
-	fmt.Println("```")
-	fmt.Printf("// Name: %s\n", name)
-	fmt.Printf("// PrefixBytes: 0x%X \n", pub.Bytes()[:4])
-	fmt.Printf("// Length: 0x%X \n", length)
-	fmt.Println("// Notes: raw 32-byte Ed25519 pubkey")
-	fmt.Println("type PubKeyEd25519 [32]byte")
-	fmt.Println("")
-	fmt.Println(`func (pubkey PubKeyEd25519) Address() []byte {
-	// NOTE: hash of the Amino encoded bytes!
-	return RIPEMD160(AminoEncode(pubkey))
-}`)
-	fmt.Println("```")
-	fmt.Println("")
-	fmt.Printf("For example, the 32-byte Ed25519 pubkey `%X` would be encoded as `%X`.\n\n", pub[:], pub.Bytes())
-	fmt.Printf("The address would then be `RIPEMD160(0x%X)` or `%X`\n", pub.Bytes(), pub.Address())
-	fmt.Println("")
-
-	name = "tendermint/SignatureKeyEd25519"
-	length = len(sig[:])
-
-	fmt.Println("### SignatureEd25519")
-	fmt.Println("")
-	fmt.Println("```")
-	fmt.Printf("// Name: %s\n", name)
-	fmt.Printf("// PrefixBytes: 0x%X \n", sig.Bytes()[:4])
-	fmt.Printf("// Length: 0x%X \n", length)
-	fmt.Println("// Notes: raw 64-byte Ed25519 signature")
-	fmt.Println("type SignatureEd25519 [64]byte")
-	fmt.Println("```")
-	fmt.Println("")
-	fmt.Printf("For example, the 64-byte Ed25519 signature `%X` would be encoded as `%X`\n", sig[:], sig.Bytes())
-	fmt.Println("")
-
-	name = "tendermint/PrivKeyEd25519"
-
-	fmt.Println("### PrivKeyEd25519")
-	fmt.Println("")
-	fmt.Println("```")
-	fmt.Println("// Name:", name)
-	fmt.Println("// Notes: raw 32-byte priv key concatenated to raw 32-byte pub key")
-	fmt.Println("type PrivKeyEd25519 [64]byte")
-	fmt.Println("```")
-}
-
-func printSecp() {
-	priv := crypto.GenPrivKeySecp256k1FromSecret(SECRET)
-	pub := priv.PubKey().(crypto.PubKeySecp256k1)
-	sig := priv.Sign([]byte("hello")).(crypto.SignatureSecp256k1)
-
-	name := "tendermint/PubKeySecp256k1"
-	length := len(pub[:])
-
-	fmt.Println("### PubKeySecp256k1")
-	fmt.Println("")
-	fmt.Println("```")
-	fmt.Printf("// Name: %s\n", name)
-	fmt.Printf("// PrefixBytes: 0x%X \n", pub.Bytes()[:4])
-	fmt.Printf("// Length: 0x%X \n", length)
-	fmt.Println("// Notes: OpenSSL compressed pubkey prefixed with 0x02 or 0x03")
-	fmt.Println("type PubKeySecp256k1 [33]byte")
-	fmt.Println("")
-	fmt.Println(`func (pubkey PubKeySecp256k1) Address() []byte {
-	// NOTE: hash of the raw pubkey bytes (not Amino encoded!).
-	// Compatible with Bitcoin addresses.
-	return RIPEMD160(SHA256(pubkey[:]))
-}`)
-	fmt.Println("```")
-	fmt.Println("")
-	fmt.Printf("For example, the 33-byte Secp256k1 pubkey `%X` would be encoded as `%X`\n\n", pub[:], pub.Bytes())
-	fmt.Printf("The address would then be `RIPEMD160(SHA256(0x%X))` or `%X`\n", pub[:], pub.Address())
-	fmt.Println("")
-
-	name = "tendermint/SignatureKeySecp256k1"
-
-	fmt.Println("### SignatureSecp256k1")
-	fmt.Println("")
-	fmt.Println("```")
-	fmt.Printf("// Name: %s\n", name)
-	fmt.Printf("// PrefixBytes: 0x%X \n", sig.Bytes()[:4])
-	fmt.Printf("// Length: Variable\n")
-	fmt.Printf("// Encoding prefix: Variable\n")
-	fmt.Println("// Notes: raw bytes of the Secp256k1 signature")
-	fmt.Println("type SignatureSecp256k1 []byte")
-	fmt.Println("```")
-	fmt.Println("")
-	fmt.Printf("For example, the Secp256k1 signature `%X` would be encoded as `%X`\n", []byte(sig[:]), sig.Bytes())
-	fmt.Println("")
-
-	name = "tendermint/PrivKeySecp256k1"
-
-	fmt.Println("### PrivKeySecp256k1")
-	fmt.Println("")
-	fmt.Println("```")
-	fmt.Println("// Name:", name)
-	fmt.Println("// Notes: raw 32-byte priv key")
-	fmt.Println("type PrivKeySecp256k1 [32]byte")
-	fmt.Println("```")
-}
-
-func main() {
-	printEd()
-	fmt.Println("")
-	printSecp()
-}
diff --git a/docs/spec/software/abci.md b/docs/spec/software/abci.md
deleted file mode 100644
index 9c9e6a58..00000000
--- a/docs/spec/software/abci.md
+++ /dev/null
@@ -1,192 +0,0 @@
-# Application Blockchain Interface (ABCI)
-
-ABCI is the interface between Tendermint (a state-machine replication engine)
-and an application (the actual state machine).
-
-The ABCI message types are defined in a [protobuf
-file](https://github.com/tendermint/abci/blob/master/types/types.proto).
-
-For full details on the ABCI message types and protocol, see the [ABCI
-specification](https://github.com/tendermint/abci/blob/master/specification.rst).
-Be sure to read the specification if you're trying to build an ABCI app!
-
-For additional details on server implementation, see the [ABCI
-readme](https://github.com/tendermint/abci#implementation).
-
-Here we provide some more details around the use of ABCI by Tendermint and
-clarify common "gotchas".
-
-## ABCI connections
-
-Tendermint opens 3 ABCI connections to the app: one for Consensus, one for
-Mempool, one for Queries.
-
-## Async vs Sync
-
-The main ABCI server (i.e., non-gRPC) provides ordered asynchronous messages.
-This is useful for DeliverTx and CheckTx, since it allows Tendermint to forward
-transactions to the app before it's finished processing previous ones.
-
-Thus, DeliverTx and CheckTx messages are sent asynchronously, while all other
-messages are sent synchronously.
-
-## CheckTx and Commit
-
-It is typical to hold three distinct states in an ABCI app: CheckTxState, DeliverTxState,
-QueryState. The QueryState contains the latest committed state for a block.
-The CheckTxState and DeliverTxState may be updated concurrently with one another.
-Before Commit is called, Tendermint locks and flushes the mempool so that no new changes will happen
-to CheckTxState. When Commit completes, it unlocks the mempool.
-
-Thus, during Commit, it is safe to reset the QueryState and the CheckTxState to the latest DeliverTxState
-(i.e., the new state from executing all the txs in the block).
-
-Note, however, that it is not possible to send transactions to Tendermint during Commit - if your app
-tries to send a `/broadcast_tx` to Tendermint during Commit, it will deadlock.
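-
-As an illustration, here is a minimal sketch of the three-state pattern described above. The types and method names are assumptions for the example, not the ABCI interface itself:
-
-```go
-package example
-
-import "sync"
-
-// State is a stand-in for application state, e.g. a key-value store.
-type State struct {
-	Data map[string]string
-}
-
-// Copy returns a deep copy of the state.
-func (s *State) Copy() *State {
-	data := make(map[string]string, len(s.Data))
-	for k, v := range s.Data {
-		data[k] = v
-	}
-	return &State{Data: data}
-}
-
-type App struct {
-	mtx            sync.Mutex
-	checkTxState   *State // mutated by CheckTx
-	deliverTxState *State // mutated by DeliverTx
-	queryState     *State // latest committed state, read by Query
-}
-
-// Commit is called once per block. Tendermint has already locked and
-// flushed the mempool, so resetting checkTxState here is safe.
-func (app *App) Commit() {
-	app.mtx.Lock()
-	defer app.mtx.Unlock()
-	committed := app.deliverTxState.Copy()
-	app.queryState = committed          // queries now see the new block
-	app.checkTxState = committed.Copy() // rechecks run against fresh state
-}
-```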
-
-
-## EndBlock Validator Updates
-
-Updates to the Tendermint validator set can be made by returning `Validator`
-objects in the `ResponseEndBlock`:
-
-```
-message Validator {
-  bytes address = 1;
-  PubKey pub_key = 2;
-  int64 power = 3;
-}
-
-message PubKey {
-  string type = 1;
-  bytes data = 2;
-}
-
-```
-
-The `pub_key` currently supports two types:
-  - `type = "ed25519"` and `data = <32-byte raw ed25519 public key>`
-  - `type = "secp256k1"` and `data = <33-byte OpenSSL compressed public key>`
-
-If the address is provided, it must match the address of the pubkey, as
-specified [here](/docs/spec/blockchain/encoding.md#Addresses)
-
-(Note: In the v0.19 series, the `pub_key` is the [Amino encoded public
-key](/docs/spec/blockchain/encoding.md#public-key-cryptography).
-For Ed25519 pubkeys, the Amino prefix is always "1624DE6220". For example, the 32-byte Ed25519 pubkey
-`76852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85` would be
-Amino encoded as
-`1624DE622076852933A4686A721442E931A8415F62F5F1AEDF4910F1F252FB393F74C40C85`)
-
-(Note: In old versions of Tendermint (pre-v0.19.0), the pubkey is just prefixed with a
-single type byte, so for ED25519 we'd have `pub_key = 0x1 | pub`)
-
-The `power` is the new voting power for the validator, with the
-following rules:
-
-- power must be non-negative
-- if power is 0, the validator must already exist, and will be removed from the
-  validator set
-- if power is non-0:
-  - if the validator does not already exist, it will be added to the validator
-    set with the given power
-  - if the validator does already exist, its power will be adjusted to the given power
-
-## InitChain Validator Updates
-
-ResponseInitChain has the option to return a list of validators.
-If the list is not empty, Tendermint will adopt it for the validator set.
-This way the application can determine the initial validator set for the
-blockchain.
-
-Note that if addresses are included in the returned validators, they must match
-the address of the public key.
-
-ResponseInitChain also includes ConsensusParams, but these are presently
-ignored.
-
-## Query
-
-Query is a generic message type with lots of flexibility to enable diverse sets
-of queries from applications. Tendermint has no requirements from the Query
-message for normal operation - that is, the ABCI app developer need not implement Query functionality if they do not wish to.
-That said, Tendermint makes a number of queries to support some optional
-features. These are:
-
-### Peer Filtering
-
-When Tendermint connects to a peer, it sends two queries to the ABCI application
-using the following paths, with no additional data:
-
-  - `/p2p/filter/addr/<IP:PORT>`, where `<IP:PORT>` denotes the IP address and
-    the port of the connection
-  - `p2p/filter/id/<ID>`, where `<ID>` is the peer node ID (i.e., the
-    pubkey.Address() for the peer's PubKey)
-
-If either of these queries returns a non-zero ABCI code, Tendermint will refuse
-to connect to the peer.
-
-## Info and the Handshake/Replay
-
-On startup, Tendermint calls Info on the Query connection to get the latest
-committed state of the app. The app MUST return information consistent with the
-last block for which it successfully completed Commit.
-
-If the app successfully committed block H but not H+1, then `last_block_height = H`
-and `last_block_app_hash = <hash returned by Commit for block H>`. If the app
-failed during the Commit of block H, then `last_block_height = H-1` and
-`last_block_app_hash = <hash returned by Commit for block H-1>`.
-
-We now distinguish three heights, and describe how Tendermint syncs itself with
-the app.
-
-```
-storeBlockHeight = height of the last block Tendermint saw a commit for
-stateBlockHeight = height of the last block for which Tendermint completed all
-    block processing and saved all ABCI results to disk
-appBlockHeight = height of the last block for which the ABCI app successfully
-    completed Commit
-```
-
-Note we always have `storeBlockHeight >= stateBlockHeight` and `storeBlockHeight >= appBlockHeight`.
-Note also we never call Commit on an ABCI app twice for the same height.
-
-The procedure is as follows.
-
-First, some simple start conditions:
-
-If `appBlockHeight == 0`, then call InitChain.
-
-If `storeBlockHeight == 0`, we're done.
-
-Now, some sanity checks:
-
-If `storeBlockHeight < appBlockHeight`, error
-If `storeBlockHeight < stateBlockHeight`, panic
-If `storeBlockHeight > stateBlockHeight+1`, panic
-
-Now, the meat:
-
-If `storeBlockHeight == stateBlockHeight && appBlockHeight < storeBlockHeight`,
-  replay all blocks in full from `appBlockHeight` to `storeBlockHeight`.
-  This happens if we completed processing the block, but the app forgot its height.
-
-If `storeBlockHeight == stateBlockHeight && appBlockHeight == storeBlockHeight`, we're done.
-  This happens if we crashed at an opportune spot.
-
-If `storeBlockHeight == stateBlockHeight+1`,
-  this happens if we started processing the block but didn't finish.
-
-  If `appBlockHeight < stateBlockHeight`,
-    replay all blocks in full from `appBlockHeight` to `storeBlockHeight-1`,
-    and replay the block at `storeBlockHeight` using the WAL.
-    This happens if the app forgot the last block it committed.
-
-  If `appBlockHeight == stateBlockHeight`,
-    replay the last block (storeBlockHeight) in full.
-    This happens if we crashed before the app finished Commit.
-
-  If `appBlockHeight == storeBlockHeight`,
-    update the state using the saved ABCI responses but don't run the block against the real app.
-    This happens if we crashed after the app finished Commit but before Tendermint saved the state.
diff --git a/docs/spec/software/wal.md b/docs/spec/software/wal.md
deleted file mode 100644
index a2e03137..00000000
--- a/docs/spec/software/wal.md
+++ /dev/null
@@ -1,33 +0,0 @@
-# WAL
-
-The consensus module writes every message to the WAL (write-ahead log).
-
-It also issues the fsync syscall through
-[File#Sync](https://golang.org/pkg/os/#File.Sync) for messages signed by this
-node (to prevent double signing).
-
-Under the hood, it uses
-[autofile.Group](https://godoc.org/github.com/tendermint/tmlibs/autofile#Group),
-which rotates files when those get too big (> 10MB).
-
-The total maximum size is 1GB. We only need the latest block and the block before it,
-but if the former is dragging on across many rounds, we want all those rounds.
-
-## Replay
-
-The consensus module will replay all the messages of the last height written to the WAL
-before a crash (if one occurs).
-
-The private validator may try to sign messages during replay because it runs
-somewhat autonomously and does not know about the replay process.
-
-For example, if we got all the way to precommit in the WAL and then crash,
-after we replay the proposal message, the private validator will try to sign a
-prevote. But it will fail. That's ok because we'll see the prevote later in the
-WAL. Then it will go to precommit, and this time it will work because the
-private validator contains the `LastSignBytes` and we'll replay the
-precommit from the WAL.
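-
-As a minimal sketch of the write-then-fsync discipline described at the top of this file (a plain `os.File` stands in for `autofile.Group`, and the `Msg` type is hypothetical):
-
-```go
-package example
-
-import "os"
-
-// Msg is a stand-in for a consensus WAL message.
-type Msg struct {
-	Bytes []byte
-	Ours  bool // true if this node signed the message (vote/proposal)
-}
-
-type WAL struct {
-	f *os.File
-}
-
-// Write appends a message to the WAL. Messages signed by this node are
-// fsynced before returning, so after a crash we always have a record of
-// everything we signed (double-sign protection during replay).
-func (w *WAL) Write(m Msg) error {
-	if _, err := w.f.Write(m.Bytes); err != nil {
-		return err
-	}
-	if m.Ours {
-		return w.f.Sync()
-	}
-	return nil
-}
-```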
-
-Make sure to read about [WAL
-corruption](https://tendermint.readthedocs.io/projects/tools/en/master/specification/corruption.html#wal-corruption)
-and recovery strategies.
diff --git a/docs/specification.rst b/docs/specification.rst
deleted file mode 100644
index 70ebf633..00000000
--- a/docs/specification.rst
+++ /dev/null
@@ -1,21 +0,0 @@
-#############
-Specification
-#############
-
-Here you'll find details of the Tendermint specification. Tendermint's types are produced by `godoc `__.
-
-.. toctree::
-   :maxdepth: 2
-
-   specification/block-structure.rst
-   specification/byzantine-consensus-algorithm.rst
-   specification/configuration.rst
-   specification/corruption.rst
-   specification/fast-sync.rst
-   specification/genesis.rst
-   specification/light-client-protocol.rst
-   specification/merkle.rst
-   specification/rpc.rst
-   specification/secure-p2p.rst
-   specification/validators.rst
-   specification/wire-protocol.rst
diff --git a/docs/specification/block-structure.rst b/docs/specification/block-structure.rst
deleted file mode 100644
index 1569f6d9..00000000
--- a/docs/specification/block-structure.rst
+++ /dev/null
@@ -1,220 +0,0 @@
-Block Structure
-===============
-
-The Tendermint consensus engine records all agreements by a
-supermajority of nodes into a blockchain, which is replicated among all
-nodes. This blockchain is accessible via various rpc endpoints, mainly
-``/block?height=`` to get the full block, as well as
-``/blockchain?minHeight=_&maxHeight=_`` to get a list of headers. But
-what exactly is stored in these blocks?
-
-Block
-~~~~~
-
-A
-`Block `__
-contains:
-
-- a `Header <#header>`__ containing merkle hashes for various chain
-  states
-- the
-  `Data `__,
-  which is all transactions to be processed
-- the `LastCommit <#commit>`__: +2/3 signatures for the last block
-
-The signatures returned along with block ``H`` are those validating
-block ``H-1``. This can be a little confusing, but consider
-that the ``Header`` also contains the ``LastCommitHash``. It would be
-impossible for a Header to include the commits that sign it, as that
-would create a circular dependency. But when we get block ``H``, we find
-``Header.LastCommitHash``, which must match the hash of ``LastCommit``.
-
-Header
-~~~~~~
-
-The
-`Header `__
-contains lots of information (follow link for up-to-date info). Notably,
-it maintains the ``Height``, the ``LastBlockID`` (to make it a chain),
-and hashes of the data, the app state, and the validator set. This is
-important as the only item that is signed by the validators is the
-``Header``, and all other data must be validated against one of the
-merkle hashes in the ``Header``.
-
-The ``DataHash`` can provide a nice check on the
-`Data `__
-returned in this same block. If you are subscribed to new blocks, via
-tendermint RPC, in order to display or process the new transactions you
-should at least verify that the ``DataHash`` is valid. If it is
-important to verify authenticity, you must wait for the ``LastCommit``
-from the next block to make sure the block header (including
-``DataHash``) was properly signed.
-
-The ``ValidatorHash`` contains a hash of the current
-`Validators `__.
-Tracking all changes in the validator set is complex, but a client can
-quickly compare this hash with the `hash of the currently known
-validators `__
-to see if there have been changes.
-
-The ``AppHash`` serves as the basis for validating any merkle proofs
-that come from the `ABCI application `__.
-It represents the state of the actual application, rather than the state
-of the blockchain itself. This means it's necessary in order to perform
-any business logic, such as verifying an account balance.
-
-**Note** After the transactions are committed to a block, they still
-need to be processed in a separate step, which happens between the
-blocks. If you find a given transaction in the block at height ``H``,
-the effects of running that transaction will be first visible in the
-``AppHash`` from the block header at height ``H+1``.
-
-Like the ``LastCommit`` issue, this is a requirement of the immutability
-of the blockchain, as the application only applies transactions *after*
-they are committed to the chain.
-
-Commit
-~~~~~~
-
-The
-`Commit `__
-contains a set of
-`Votes `__
-that were made by the validator set to reach consensus on this block.
-This is the key to the security in any PoS system: no data
-that cannot be traced back to a block header with a valid set of Votes
-can be trusted. Thus, getting the Commit data and verifying the votes is
-extremely important.
-
-As mentioned above, in order to find the ``precommit votes`` for block
-header ``H``, we need to query block ``H+1``. Then we need to check the
-votes, make sure they really are for that block, and that they are
-properly formatted.
-Much of this code is implemented in Go in the
-`light-client `__ package.
-If you look at the code, you will notice that we need to provide the
-``chainID`` of the blockchain in order to properly calculate the votes.
-This is to prevent anyone from swapping votes between chains to fake (or
-frame) a validator. Also note that this ``chainID`` is in the
-``genesis.json`` from *Tendermint*, not the ``genesis.json`` from the
-basecoin app (`that is a different
-chainID... `__).
-
-Once we have those votes, and we have calculated the proper `sign
-bytes `__
-using the chainID and a `nice helper
-function `__,
-we can verify them. The light client is responsible for maintaining a
-set of validators that we trust. Each vote only stores the validator's
-``Address``, as well as the ``Signature``. Assuming we have a local copy
-of the trusted validator set, we can look up the ``Public Key`` of the
-validator given its ``Address``, then verify that the ``Signature``
-matches the ``SignBytes`` and ``Public Key``. Then we sum up the total
-voting power of all validators whose votes fulfilled all these
-stringent requirements. If the total voting power for a single
-block is greater than 2/3 of all voting power, then we can finally trust
-the block header, the AppHash, and the proof we got from the ABCI
-application.
-
-Vote Sign Bytes
-^^^^^^^^^^^^^^^
-
-The ``sign-bytes`` of a vote is produced by taking a
-`stable-json `__-like
-deterministic JSON `wire <./wire-protocol.html>`__ encoding of
-the vote (excluding the ``Signature`` field), and wrapping it with
-``{"chain_id":"my_chain","vote":...}``.
-
-For example, a precommit vote might have the following ``sign-bytes``:
-
-.. code:: json
-
-    {"chain_id":"my_chain","vote":{"block_hash":"611801F57B4CE378DF1A3FFF1216656E89209A99","block_parts_header":{"hash":"B46697379DBE0774CC2C3B656083F07CA7E0F9CE","total":123},"height":1234,"round":1,"type":2}}
-
-Block Hash
-~~~~~~~~~~
-
-The `block
-hash `__
-is the `Simple Tree hash <./merkle.html#simple-tree-with-dictionaries>`__
-of the fields of the block ``Header`` encoded as a list of
-``KVPair``\ s.
-
-Transaction
-~~~~~~~~~~~
-
-A transaction is any sequence of bytes.
-It is up to your
-`ABCI `__ application to accept or
-reject transactions.
-
-BlockID
-~~~~~~~
-
-Many of these data structures refer to the
-`BlockID `__,
-which is the ``BlockHash`` (hash of the block header, also referred to
-by the next block) along with the ``PartSetHeader``. The
-``PartSetHeader`` is explained below and is used internally to
-orchestrate the p2p propagation. For clients, it is basically opaque
-bytes, but they must match for all votes.
-
-PartSetHeader
-~~~~~~~~~~~~~
-
-The
-`PartSetHeader `__
-contains the total number of pieces in a
-`PartSet `__,
-and the Merkle root hash of those pieces.
-
-PartSet
-~~~~~~~
-
-PartSet is used to split a byteslice of data into parts (pieces) for
-transmission. By splitting data into smaller parts and computing a
-Merkle root hash on the list, you can verify that a part is legitimately
-part of the complete data, and the part can be forwarded to other peers
-before all the parts are known. In short, it's a fast way to securely
-propagate a large chunk of data (like a block) over a gossip network.
-
-PartSet was inspired by the LibSwift project.
-
-Usage:
-
-.. code:: go
-
-    data := RandBytes(2 << 20) // Something large
-
-    partSet := NewPartSetFromData(data)
-    partSet.Total()     // Total number of 4KB parts
-    partSet.Count()     // Equal to the Total, since we already have all the parts
-    partSet.Hash()      // The Merkle root hash
-    partSet.BitArray()  // A BitArray of partSet.Total() 1's
-
-    header := partSet.Header() // Send this to the peer
-    header.Total        // Total number of parts
-    header.Hash         // The merkle root hash
-
-    // Now we'll reconstruct the data from the parts
-    partSet2 := NewPartSetFromHeader(header)
-    partSet2.Total()    // Same total as partSet.Total()
-    partSet2.Count()    // Zero, since this PartSet doesn't have any parts yet.
-    partSet2.Hash()     // Same hash as in partSet.Hash()
-    partSet2.BitArray() // A BitArray of partSet.Total() 0's
-
-    // In a gossip network the parts would arrive in arbitrary order, perhaps
-    // in response to explicit requests for parts, or optimistically in response
-    // to the receiving peer's partSet.BitArray().
-    for !partSet2.IsComplete() {
-        part := receivePartFromGossipNetwork()
-        added, err := partSet2.AddPart(part)
-        if err != nil {
-            // A wrong part,
-            // the merkle trail does not hash to partSet2.Hash()
-        } else if !added {
-            // A duplicate part already received
-        }
-    }
-
-    data2, _ := ioutil.ReadAll(partSet2.GetReader())
-    bytes.Equal(data, data2) // true
diff --git a/docs/specification/byzantine-consensus-algorithm.rst b/docs/specification/byzantine-consensus-algorithm.rst
deleted file mode 100644
index 15eab32d..00000000
--- a/docs/specification/byzantine-consensus-algorithm.rst
+++ /dev/null
@@ -1,349 +0,0 @@
-Byzantine Consensus Algorithm
-=============================
-
-Terms
------
-
-- The network is composed of optionally connected *nodes*. Nodes
-  directly connected to a particular node are called *peers*.
-- The consensus process in deciding the next block (at some *height*
-  ``H``) is composed of one or many *rounds*.
-- ``NewHeight``, ``Propose``, ``Prevote``, ``Precommit``, and
-  ``Commit`` represent state machine states of a round. (aka
-  ``RoundStep`` or just "step").
-- A node is said to be *at* a given height, round, and step, or at
-  ``(H,R,S)``, or at ``(H,R)`` in short to omit the step.
-- To *prevote* or *precommit* something means to broadcast a `prevote
-  vote `__
-  or `first precommit
-  vote `__
-  for something.
-- A vote *at* ``(H,R)`` is a vote signed with the bytes for ``H`` and
-  ``R`` included in its
-  `sign-bytes `__.
-- *+2/3* is short for "more than 2/3"
-- *1/3+* is short for "1/3 or more"
-- A set of +2/3 of prevotes for a particular block or ``<nil>`` at
-  ``(H,R)`` is called a *proof-of-lock-change* or *PoLC* for short.
-
-State Machine Overview
-----------------------
-
-At each height of the blockchain a round-based protocol is run to
-determine the next block. Each round is composed of three *steps*
-(``Propose``, ``Prevote``, and ``Precommit``), along with two special
-steps ``Commit`` and ``NewHeight``.
-
-In the optimal scenario, the order of steps is:
-
-::
-
-    NewHeight -> (Propose -> Prevote -> Precommit)+ -> Commit -> NewHeight ->...
-
-The sequence ``(Propose -> Prevote -> Precommit)`` is called a *round*.
-There may be more than one round required to commit a block at a given
-height. Examples for why more rounds may be required include:
-
-- The designated proposer was not online.
-- The block proposed by the designated proposer was not valid.
-- The block proposed by the designated proposer did not propagate in
-  time.
-- The block proposed was valid, but +2/3 of prevotes for the proposed
-  block were not received in time for enough validator nodes by the
-  time they reached the ``Precommit`` step. Even though +2/3 of
-  prevotes are necessary to progress to the next step, at least one
-  validator may have voted ``<nil>`` or maliciously voted for something
-  else.
-- The block proposed was valid, and +2/3 of prevotes were received for
-  enough nodes, but +2/3 of precommits for the proposed block were not
-  received for enough validator nodes.
-
-Some of these problems are resolved by moving on to the next round and
-proposer. Others are resolved by increasing certain round timeout
-parameters over each successive round.
-
-State Machine Diagram
----------------------
-
-::
-
-                             +-------------------------------------+
-                             v                                     |(Wait til `CommitTime+timeoutCommit`)
-                       +-----------+                         +-----+-----+
-          +----------> |  Propose  +--------------+          | NewHeight |
-          |            +-----------+              |          +-----------+
-          |                                       |                ^
-          |(Else, after timeoutPrecommit)         v                |
-    +-----+-----+                           +-----------+          |
-    | Precommit |  <------------------------+  Prevote  |          |
-    +-----+-----+                           +-----------+          |
-          |(When +2/3 Precommits for block found)                  |
-          v                                                        |
-    +--------------------------------------------------------------------+
-    |  Commit                                                            |
-    |                                                                    |
-    |  * Set CommitTime = now;                                           |
-    |  * Wait for block, then stage/save/commit block;                   |
-    +--------------------------------------------------------------------+
-
-Background Gossip
------------------
-
-A node may not have a corresponding validator private key, but it
-nevertheless plays an active role in the consensus process by relaying
-relevant meta-data, proposals, blocks, and votes to its peers. A node
-that has the private keys of an active validator and is engaged in
-signing votes is called a *validator-node*. All nodes (not just
-validator-nodes) have an associated state (the current height, round,
-and step) and work to make progress.
-
-Between two nodes there exists a ``Connection``, and multiplexed on top
-of this connection are fairly throttled ``Channel``\ s of information.
-An epidemic gossip protocol is implemented among some of these channels
-to bring peers up to speed on the most recent state of consensus. For
-example,
-
-- Nodes gossip ``PartSet`` parts of the current round's proposer's
-  proposed block.
-  A LibSwift inspired algorithm is used to quickly
-  broadcast blocks across the gossip network.
-- Nodes gossip prevote/precommit votes. A node NODE\_A that is ahead of
-  NODE\_B can send NODE\_B prevotes or precommits for NODE\_B's current
-  (or future) round to enable it to progress forward.
-- Nodes gossip prevotes for the proposed PoLC (proof-of-lock-change)
-  round if one is proposed.
-- Nodes gossip to nodes lagging in blockchain height with block
-  `commits `__
-  for older blocks.
-- Nodes opportunistically gossip ``HasVote`` messages to hint to peers
-  what votes they already have.
-- Nodes broadcast their current state to all neighboring peers (but this
-  is not gossiped further).
-
-There's more, but let's not get ahead of ourselves here.
-
-Proposals
----------
-
-A proposal is signed and published by the designated proposer at each
-round. The proposer is chosen by a deterministic and non-choking round
-robin selection algorithm that selects proposers in proportion to their
-voting power. (see
-`implementation `__)
-
-A proposal at ``(H,R)`` is composed of a block and an optional latest
-``PoLC-Round < R`` which is included iff the proposer knows of one. This
-hints to the network to allow nodes to unlock (when safe) to ensure the
-liveness property.
-
-State Machine Spec
-------------------
-
-Propose Step (height:H,round:R)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Upon entering ``Propose``:
-
-- The designated proposer proposes a block at ``(H,R)``.
-
-The ``Propose`` step ends:
-
-- After ``timeoutProposeR`` after entering ``Propose``. --> goto
-  ``Prevote(H,R)``
-- After receiving proposal block and all prevotes at ``PoLC-Round``.
-  --> goto ``Prevote(H,R)``
-- After `common exit conditions <#common-exit-conditions>`__
-
-Prevote Step (height:H,round:R)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Upon entering ``Prevote``, each validator broadcasts its prevote vote.
-
-- First, if the validator is locked on a block since ``LastLockRound``
-  but now has a PoLC for something else at round ``PoLC-Round`` where
-  ``LastLockRound < PoLC-Round < R``, then it unlocks.
-- If the validator is still locked on a block, it prevotes that.
-- Else, if the proposed block from ``Propose(H,R)`` is good, it
-  prevotes that.
-- Else, if the proposal is invalid or wasn't received on time, it
-  prevotes ``<nil>``.
-
-The ``Prevote`` step ends:
-
-- After +2/3 prevotes for a particular block or ``<nil>``. --> goto
-  ``Precommit(H,R)``
-- After ``timeoutPrevote`` after receiving any +2/3 prevotes. --> goto
-  ``Precommit(H,R)``
-- After `common exit conditions <#common-exit-conditions>`__
-
-Precommit Step (height:H,round:R)
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Upon entering ``Precommit``, each validator broadcasts its precommit
-vote.
-
-- If the validator has a PoLC at ``(H,R)`` for a particular block
-  ``B``, it (re)locks (or changes lock to) and precommits ``B`` and sets
-  ``LastLockRound = R``.
-- Else, if the validator has a PoLC at ``(H,R)`` for ``<nil>``, it
-  unlocks and precommits ``<nil>``.
-- Else, it keeps the lock unchanged and precommits ``<nil>``.
-
-A precommit for ``<nil>`` means "I didn't see a PoLC for this round, but
-I did get +2/3 prevotes and waited a bit".
-
-The Precommit step ends:
-
-- After +2/3 precommits for ``<nil>``. --> goto ``Propose(H,R+1)``
-- After ``timeoutPrecommit`` after receiving any +2/3 precommits. -->
-  goto ``Propose(H,R+1)``
-- After `common exit conditions <#common-exit-conditions>`__
-
-common exit conditions
-^^^^^^^^^^^^^^^^^^^^^^
-
-- After +2/3 precommits for a particular block. --> goto ``Commit(H)``
-- After any +2/3 prevotes received at ``(H,R+x)``. --> goto
-  ``Prevote(H,R+x)``
-- After any +2/3 precommits received at ``(H,R+x)``. --> goto
-  ``Precommit(H,R+x)``
-
-Commit Step (height:H)
-~~~~~~~~~~~~~~~~~~~~~~
-
-- Set ``CommitTime = now()``
-- Wait until block is received. --> goto ``NewHeight(H+1)``
-
-NewHeight Step (height:H)
-~~~~~~~~~~~~~~~~~~~~~~~~~
-
-- Move ``Precommits`` to ``LastCommit`` and increment height.
-- Set ``StartTime = CommitTime+timeoutCommit``
-- Wait until ``StartTime`` to receive straggler commits. --> goto
-  ``Propose(H,0)``
-
-Proofs
-------
-
-Proof of Safety
-~~~~~~~~~~~~~~~
-
-Assume that at most -1/3 of the voting power of validators is byzantine.
-If a validator commits block ``B`` at round ``R``, it's because it saw
-+2/3 of precommits at round ``R``. This implies that 1/3+ of honest
-nodes are still locked at round ``R' > R``. These locked validators will
-remain locked until they see a PoLC at ``R' > R``, but this won't happen
-because 1/3+ are locked and honest, so at most -2/3 are available to
-vote for anything other than ``B``.
-
-Proof of Liveness
-~~~~~~~~~~~~~~~~~
-
-If 1/3+ honest validators are locked on two different blocks from
-different rounds, a proposer's ``PoLC-Round`` will eventually cause
-nodes locked from the earlier round to unlock. Eventually, the
-designated proposer will be one that is aware of a PoLC at the later
-round. Also, ``timeoutProposeR`` increments with round ``R``, while the
-size of a proposal is capped, so eventually the network is able to
-"fully gossip" the whole proposal (e.g. the block & PoLC).
-
-Proof of Fork Accountability
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-Define the JSet (justification-vote-set) at height ``H`` of a validator
-``V1`` to be all the votes signed by the validator at ``H`` along with
-justification PoLC prevotes for each lock change. For example, if ``V1``
-signed the following precommits: ``Precommit(B1 @ round 0)``,
-``Precommit(<nil> @ round 1)``, ``Precommit(B2 @ round 4)`` (note that
-no precommits were signed for rounds 2 and 3, and that's ok),
-``Precommit(B1 @ round 0)`` must be justified by a PoLC at round 0, and
-``Precommit(B2 @ round 4)`` must be justified by a PoLC at round 4; but
-the precommit for ``<nil>`` at round 1 is not a lock-change by
-definition so the JSet for ``V1`` need not include any prevotes at round
-1, 2, or 3 (unless ``V1`` happened to have prevoted for those rounds).
-
-Further, define the JSet at height ``H`` of a set of validators ``VSet``
-to be the union of the JSets for each validator in ``VSet``. For a given
-commit by honest validators at round ``R`` for block ``B`` we can
-construct a JSet to justify the commit for ``B`` at ``R``. We say that a
-JSet *justifies* a commit at ``(H,R)`` if all the committers (validators
-in the commit-set) are each justified in the JSet with no duplicitous
-vote signatures (by the committers).
-
-- **Lemma**: When a fork is detected by the existence of two
-  conflicting `commits <./validators.html#commiting-a-block>`__,
-  the union of the JSets for both commits (if they can be compiled)
-  must include double-signing by at least 1/3+ of the validator set.
-  **Proof**: The commit cannot be at the same round, because that would
-  immediately imply double-signing by 1/3+. Take the union of the JSets
-  of both commits. If there is no double-signing by at least 1/3+ of
-  the validator set in the union, then no honest validator could have
-  precommitted any different block after the first commit.
Yet, +2/3 - did. Reductio ad absurdum. - -As a corollary, when there is a fork, an external process can determine -the blame by requiring each validator to justify all of its round votes. -Either we will find 1/3+ who cannot justify at least one of their votes, -and/or, we will find 1/3+ who had double-signed. - -Alternative algorithm -~~~~~~~~~~~~~~~~~~~~~ - -Alternatively, we can take the JSet of a commit to be the "full commit". -That is, if light clients and validators do not consider a block to be -committed unless the JSet of the commit is also known, then we get the -desirable property that if there ever is a fork (e.g. there are two -conflicting "full commits"), then 1/3+ of the validators are immediately -punishable for double-signing. - -There are many ways to ensure that the gossip network efficiently share -the JSet of a commit. One solution is to add a new message type that -tells peers that this node has (or does not have) a +2/3 majority for B -(or ) at (H,R), and a bitarray of which votes contributed towards that -majority. Peers can react by responding with appropriate votes. - -We will implement such an algorithm for the next iteration of the -Tendermint consensus protocol. - -Other potential improvements include adding more data in votes such as -the last known PoLC round that caused a lock change, and the last voted -round/step (or, we may require that validators not skip any votes). This -may make JSet verification/gossip logic easier to implement. - -Censorship Attacks -~~~~~~~~~~~~~~~~~~ - -Due to the definition of a block -`commit `__, any 1/3+ -coalition of validators can halt the blockchain by not broadcasting -their votes. Such a coalition can also censor particular transactions by -rejecting blocks that include these transactions, though this would -result in a significant proportion of block proposals to be rejected, -which would slow down the rate of block commits of the blockchain, -reducing its utility and value. The malicious coalition might also -broadcast votes in a trickle so as to grind blockchain block commits to -a near halt, or engage in any combination of these attacks. - -If a global active adversary were also involved, it can partition the -network in such a way that it may appear that the wrong subset of -validators were responsible for the slowdown. This is not just a -limitation of Tendermint, but rather a limitation of all consensus -protocols whose network is potentially controlled by an active -adversary. - -Overcoming Forks and Censorship Attacks -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -For these types of attacks, a subset of the validators through external -means should coordinate to sign a reorg-proposal that chooses a fork -(and any evidence thereof) and the initial subset of validators with -their signatures. Validators who sign such a reorg-proposal forego its -collateral on all other forks. Clients should verify the signatures on -the reorg-proposal, verify any evidence, and make a judgement or prompt -the end-user for a decision. For example, a phone wallet app may prompt -the user with a security warning, while a refrigerator may accept any -reorg-proposal signed by +1/2 of the original validators. - -No non-synchronous Byzantine fault-tolerant algorithm can come to -consensus when 1/3+ of validators are dishonest, yet a fork assumes that -1/3+ of validators have already been dishonest by double-signing or -lock-changing without justification. 
-coordination problem that cannot be solved by any non-synchronous
-protocol (i.e. automatically, and without making assumptions about the
-reliability of the underlying network). It must be provided by means
-external to the weakly-synchronous Tendermint consensus algorithm. For
-now, we leave the problem of reorg-proposal coordination to human
-coordination via internet media. Validators must take care to ensure
-that there are no significant network partitions, to avoid situations
-where two conflicting reorg-proposals are signed.
-
-Assuming that the external coordination medium and protocol is robust,
-it follows that forks are less of a concern than `censorship
-attacks <#censorship-attacks>`__.
diff --git a/docs/specification/configuration.md b/docs/specification/configuration.md
deleted file mode 100644
index 08981c06..00000000
--- a/docs/specification/configuration.md
+++ /dev/null
@@ -1,200 +0,0 @@
-# Configuration
-
-Tendermint Core can be configured via a TOML file in
-`$TMHOME/config/config.toml`. Some of these parameters can be overridden by
-command-line flags. For most users, the options in the `##### main
-base configuration options #####` section are intended to be modified,
-while the config options further below are intended for advanced power
-users.
-
-## Options
-
-The default configuration file created by `tendermint init` has all
-the parameters set with their default values. It will look something
-like the file below; however, double-check it by inspecting the
-`config.toml` created with your version of `tendermint` installed:
-
-```
-# This is a TOML config file.
-# For more information, see https://github.com/toml-lang/toml
-
-##### main base config options #####
-
-# TCP or UNIX socket address of the ABCI application,
-# or the name of an ABCI application compiled in with the Tendermint binary
-proxy_app = "tcp://127.0.0.1:26658"
-
-# A custom human readable name for this node
-moniker = "anonymous"
-
-# If this node is many blocks behind the tip of the chain, FastSync
-# allows it to catch up quickly by downloading blocks in parallel
-# and verifying their commits
-fast_sync = true
-
-# Database backend: leveldb | memdb
-db_backend = "leveldb"
-
-# Database directory
-db_path = "data"
-
-# Output level for logging
-log_level = "state:info,*:error"
-
-##### additional base config options #####
-
-# The ID of the chain to join (should be signed with every transaction and vote)
-chain_id = ""
-
-# Path to the JSON file containing the initial validator set and other meta data
-genesis_file = "genesis.json"
-
-# Path to the JSON file containing the private key to use as a validator in the consensus protocol
-priv_validator_file = "priv_validator.json"
-
-# Mechanism to connect to the ABCI application: socket | grpc
-abci = "socket"
-
-# TCP or UNIX socket address for the profiling server to listen on
-prof_laddr = ""
-
-# If true, query the ABCI app on connecting to a new peer
-# so the app can decide if we should keep the connection or not
-filter_peers = false
-
-##### advanced configuration options #####
-
-##### rpc server configuration options #####
-[rpc]
-
-# TCP or UNIX socket address for the RPC server to listen on
-laddr = "tcp://0.0.0.0:26657"
-
-# TCP or UNIX socket address for the gRPC server to listen on
-# NOTE: This server only supports /broadcast_tx_commit
-grpc_laddr = ""
-
-# Activate unsafe RPC commands like /dial_seeds and /unsafe_flush_mempool
-unsafe = false
-
-##### peer to peer configuration options #####
-[p2p]
-
-# Address to listen for incoming connections
-laddr = "tcp://0.0.0.0:26656"
-
-# Comma separated list of seed nodes to connect to
-seeds = ""
-
-# Comma separated list of nodes to keep persistent connections to
-# Do not add private peers to this list if you don't want them advertised
-persistent_peers = ""
-
-# Path to address book
-addr_book_file = "addrbook.json"
-
-# Set true for strict address routability rules
-addr_book_strict = true
-
-# Time to wait before flushing messages out on the connection, in ms
-flush_throttle_timeout = 100
-
-# Maximum number of peers to connect to
-max_num_peers = 50
-
-# Maximum size of a message packet payload, in bytes
-max_msg_packet_payload_size = 1024
-
-# Rate at which packets can be sent, in bytes/second
-send_rate = 512000
-
-# Rate at which packets can be received, in bytes/second
-recv_rate = 512000
-
-# Set true to enable the peer-exchange reactor
-pex = true
-
-# Seed mode, in which the node constantly crawls the network and looks for
-# peers. If another node asks it for addresses, it responds and disconnects.
-#
-# Does not work if the peer-exchange reactor is disabled.
-seed_mode = false
-
-# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
-private_peer_ids = ""
-
-##### mempool configuration options #####
-[mempool]
-
-recheck = true
-recheck_empty = true
-broadcast = true
-wal_dir = "data/mempool.wal"
-
-# size of the mempool
-size = 100000
-
-# size of the cache (used to filter transactions we saw earlier)
-cache_size = 100000
-
-##### consensus configuration options #####
-[consensus]
-
-wal_file = "data/cs.wal/wal"
-
-# All timeouts are in milliseconds
-timeout_propose = 3000
-timeout_propose_delta = 500
-timeout_prevote = 1000
-timeout_prevote_delta = 500
-timeout_precommit = 1000
-timeout_precommit_delta = 500
-timeout_commit = 1000
-
-# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
-skip_timeout_commit = false
-
-# BlockSize
-max_block_size_txs = 10000
-max_block_size_bytes = 1
-
-# EmptyBlocks mode and possible interval between empty blocks in seconds
-create_empty_blocks = true
-create_empty_blocks_interval = 0
-
-# Reactor sleep duration parameters are in milliseconds
-peer_gossip_sleep_duration = 100
-peer_query_maj23_sleep_duration = 2000
-
-##### transactions indexer configuration options #####
-[tx_index]
-
-# What indexer to use for transactions
-#
-# Options:
-#   1) "null"
-#   2) "kv" (default) - the simplest possible indexer, backed by key-value storage (defaults to levelDB; see DBBackend).
-indexer = "kv"
-
-# Comma-separated list of tags to index (by default the only tag is tx hash)
-#
-# It's recommended to index only a subset of tags due to possible memory
-# bloat. This, of course, depends on the indexer's DB and the volume of
-# transactions.
-index_tags = ""
-
-# When set to true, tells indexer to index all tags. Note this may not be
-# desirable (see the comment above). IndexTags has precedence over
-# IndexAllTags (i.e. when given both, IndexTags will be indexed).
-index_all_tags = false
-
-##### instrumentation configuration options #####
-[instrumentation]
-
-# When true, Prometheus metrics are served under /metrics on
-# PrometheusListenAddr.
-# Check out the documentation for the list of available metrics.
-prometheus = false
-
-# Address to listen for Prometheus collector(s) connections
-prometheus_listen_addr = ":26660"
-```
diff --git a/docs/specification/corruption.rst b/docs/specification/corruption.rst
deleted file mode 100644
index 6ae19fb1..00000000
--- a/docs/specification/corruption.rst
+++ /dev/null
@@ -1,70 +0,0 @@
-Corruption
-==========
-
-Important step
---------------
-
-Make sure you have a backup of the Tendermint data directory.
-
-Possible causes
----------------
-
-Remember that most corruption is caused by hardware issues:
-
-- RAID controllers with faulty / worn out battery backup, and an unexpected power loss
-- Hard disk drives with write-back cache enabled, and an unexpected power loss
-- Cheap SSDs with insufficient power-loss protection, and an unexpected power loss
-- Defective RAM
-- Defective or overheating CPU(s)
-
-Other causes can be:
-
-- Database systems configured with fsync=off and an OS crash or power loss
-- Filesystems configured to use write barriers plus a storage layer that ignores write barriers. LVM is a particular culprit.
-- Tendermint bugs
-- Operating system bugs
-- Admin error
-
-  - directly modifying Tendermint data-directory contents
-
-(Source: https://wiki.postgresql.org/wiki/Corruption)
-
-WAL Corruption
---------------
-
-If the consensus WAL is corrupted at the latest height and you are trying to start
-Tendermint, replay will fail with a panic.
-
-Recovering from data corruption can be hard and time-consuming. Here are two approaches you can take:
-
-1) Delete the WAL file and restart Tendermint. It will attempt to sync with other peers.
-2) Try to repair the WAL file manually:
-
-   1. Create a backup of the corrupted WAL file:
-
-      .. code:: bash
-
-          cp "$TMHOME/data/cs.wal/wal" /tmp/corrupted_wal_backup
-
-   2. Use ./scripts/wal2json to create a human-readable version:
-
-      .. code:: bash
-
-          ./scripts/wal2json/wal2json "$TMHOME/data/cs.wal/wal" > /tmp/corrupted_wal
-
-   3. Search for a "CORRUPTED MESSAGE" line.
-   4. By looking at the previous message and the message after the corrupted one
-      and looking at the logs, try to rebuild the message. If the subsequent
-      messages are marked as corrupted too (this may happen if the length header
-      got corrupted or some writes did not make it to the WAL ~ truncation),
-      then remove all the lines starting from the corrupted one and restart
-      Tendermint.
-
-      .. code:: bash
-
-          $EDITOR /tmp/corrupted_wal
-
-   5. After editing, convert this file back into binary form by running:
-
-      .. code:: bash
-
-          ./scripts/json2wal/json2wal /tmp/corrupted_wal > "$TMHOME/data/cs.wal/wal"
diff --git a/docs/specification/fast-sync.rst b/docs/specification/fast-sync.rst
deleted file mode 100644
index c98ec43a..00000000
--- a/docs/specification/fast-sync.rst
+++ /dev/null
@@ -1,34 +0,0 @@
-Fast Sync
-=========
-
-Background
-----------
-
-In a proof of work blockchain, syncing with the chain is the same
-process as staying up-to-date with the consensus: download blocks, and
-look for the one with the most total work. In proof-of-stake, the
-consensus process is more complex, as it involves rounds of
-communication between the nodes to determine what block should be
-committed next. Using this process to sync up with the blockchain from
-scratch can take a very long time. It's much faster to just download
-blocks and check the merkle tree of validators than to run the real-time
-consensus gossip protocol.
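-
-As a sketch of that check: given a validator set we already trust, a
-fast-syncing node only needs the downloaded block's commit to carry
-+2/3 of that set's signatures. The snippet below is illustrative only
-(``ValidatorSet``, ``BlockID`` and ``Commit`` stand in for the real
-types in the ``types`` package; ``VerifyCommit`` is its commit check):
-
-.. code:: go
-
-    // verifyFastSyncedBlock is the per-block check a fast-syncing node
-    // performs instead of running the consensus gossip protocol.
-    func verifyFastSyncedBlock(vals *types.ValidatorSet, chainID string,
-        blockID types.BlockID, height int64, commit *types.Commit) error {
-        // Fails unless +2/3 of the trusted validator set signed this commit.
-        return vals.VerifyCommit(chainID, blockID, height, commit)
-    }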
-
-Fast Sync
----------
-
-To support faster syncing, tendermint offers a ``fast-sync`` mode, which
-is enabled by default, and can be toggled in the ``config.toml`` or via
-``--fast_sync=false``.
-
-In this mode, the tendermint daemon will sync hundreds of times faster
-than if it used the real-time consensus process. Once caught up, the
-daemon will switch out of fast sync and into the normal consensus mode.
-After running for some time, the node is considered ``caught up`` if it
-has at least one peer and its height is at least as high as the max
-reported peer height. See the ``IsCaughtUp`` method in the blockchain
-package for the exact check.
-
-If we're lagging sufficiently, we should go back to fast syncing, but
-this is an open issue:
-https://github.com/tendermint/tendermint/issues/129
diff --git a/docs/specification/genesis.rst b/docs/specification/genesis.rst
deleted file mode 100644
index 427c88bb..00000000
--- a/docs/specification/genesis.rst
+++ /dev/null
@@ -1,71 +0,0 @@
-Genesis
-=======
-
-The genesis.json file in ``$TMHOME/config`` defines the initial Tendermint
-Core state upon genesis of the blockchain.
-
-Fields
-~~~~~~
-
-- ``genesis_time``: Official time of blockchain start.
-- ``chain_id``: ID of the blockchain. This must be unique for every
-  blockchain. If your testnet blockchains do not have unique chain IDs,
-  you will have a bad time.
-- ``validators``: List of initial validators. Each entry contains:
-
-  - ``pub_key``: The first element specifies the pub\_key type. 1 ==
-    Ed25519. The second element contains the pubkey bytes.
-  - ``power``: The validator's voting power.
-  - ``name``: Name of the validator (optional).
-
-- ``app_hash``: The expected application hash (as returned by the
-  ``ResponseInfo`` ABCI message) upon genesis. If the app's hash does not
-  match, Tendermint will panic.
-- ``app_state``: The application state (e.g. initial distribution of tokens).
-
-Sample genesis.json
-~~~~~~~~~~~~~~~~~~~
-
-.. code:: json
-
-    {
-      "genesis_time": "2016-02-05T06:02:31.526Z",
-      "chain_id": "chain-tTH4mi",
-      "validators": [
-        {
-          "pub_key": [
-            1,
-            "9BC5112CB9614D91CE423FA8744885126CD9D08D9FC9D1F42E552D662BAA411E"
-          ],
-          "power": 1,
-          "name": "mach1"
-        },
-        {
-          "pub_key": [
-            1,
-            "F46A5543D51F31660D9F59653B4F96061A740FF7433E0DC1ECBC30BE8494DE06"
-          ],
-          "power": 1,
-          "name": "mach2"
-        },
-        {
-          "pub_key": [
-            1,
-            "0E7B423C1635FD07C0FC3603B736D5D27953C1C6CA865BB9392CD79DE1A682BB"
-          ],
-          "power": 1,
-          "name": "mach3"
-        },
-        {
-          "pub_key": [
-            1,
-            "4F49237B9A32EB50682EDD83C48CE9CDB1D02A7CFDADCFF6EC8C1FAADB358879"
-          ],
-          "power": 1,
-          "name": "mach4"
-        }
-      ],
-      "app_hash": "15005165891224E721CB664D15CB972240F5703F",
-      "app_state": {
-        "account": "Bob", "coins": 5000
-      }
-    }
diff --git a/docs/specification/light-client-protocol.rst b/docs/specification/light-client-protocol.rst
deleted file mode 100644
index 6c6083b4..00000000
--- a/docs/specification/light-client-protocol.rst
+++ /dev/null
@@ -1,33 +0,0 @@
-Light Client Protocol
-=====================
-
-Light clients are an important part of the complete blockchain system
-for most applications. Tendermint provides unique speed and security
-properties for light client applications.
-
-See our `lite package
-<https://godoc.org/github.com/tendermint/tendermint/lite>`__.
-
-Overview
---------
-
-The objective of the light client protocol is to get a
-`commit <./validators.html#committing-a-block>`__ for a recent
-`block hash <./block-structure.html#block-hash>`__ where the commit
-includes a majority of signatures from the last known validator set.
-From there, all the application state is verifiable with `merkle
-proofs <./merkle.html#iavl-tree>`__.
-
-Properties
-----------
-
-- You get the full collateralized security benefits of Tendermint; no
-  need to wait for confirmations.
-- You get the full speed benefits of Tendermint; transactions commit
-  instantly.
-- You can get the most recent version of the application state
-  non-interactively (without committing anything to the blockchain).
-  For example, this means that you can get the most recent value of a
-  name from the name-registry without worrying about fork censorship
-  attacks, without posting a commit and waiting for confirmations. It's
-  fast, secure, and free!
diff --git a/docs/specification/merkle.rst b/docs/specification/merkle.rst
deleted file mode 100644
index 588f24a9..00000000
--- a/docs/specification/merkle.rst
+++ /dev/null
@@ -1,88 +0,0 @@
-Merkle
-======
-
-For an overview of Merkle trees, see
-`wikipedia <https://en.wikipedia.org/wiki/Merkle_tree>`__.
-
-There are two types of Merkle trees used in Tendermint.
-
-- **IAVL+ Tree**: An immutable self-balancing binary
-  tree for persistent application state
-- **Simple Tree**: A simple compact binary tree for
-  a static list of items
-
-IAVL+ Tree
-----------
-
-The purpose of this data structure is to provide persistent storage for
-key-value pairs (e.g. account state, name-registrar data, and
-per-contract data) such that a deterministic merkle root hash can be
-computed. The tree is balanced using a variant of the `AVL
-algorithm <https://en.wikipedia.org/wiki/AVL_tree>`__ so all operations
-are O(log(n)).
-
-Nodes of this tree are immutable and indexed by their hash. Thus any node
-serves as an immutable snapshot which lets us stage uncommitted
-transactions from the mempool cheaply, and we can instantly roll back to
-the last committed state to process transactions of a newly committed
-block (which may not be the same set of transactions as those from the
-mempool).
-
-In an AVL tree, the heights of the two child subtrees of any node differ
-by at most one. Whenever this condition is violated upon an update, the
-tree is rebalanced by creating O(log(n)) new nodes that point to
-unmodified nodes of the old tree. In the original AVL algorithm, inner
-nodes can also hold key-value pairs. The AVL+ algorithm (note the plus)
-modifies the AVL algorithm to keep all values on leaf nodes, while only
-using branch-nodes to store keys. This simplifies the algorithm while
-minimizing the size of merkle proofs.
-
-In Ethereum, the analog is the `Patricia
-trie <https://en.wikipedia.org/wiki/Radix_tree>`__. There are tradeoffs.
-Keys do not need to be hashed prior to insertion in IAVL+ trees, so this
-provides faster iteration in the key space which may benefit some
-applications. The logic is simpler to implement, requiring only two
-types of nodes -- inner nodes and leaf nodes. The IAVL+ tree is a binary
-tree, so merkle proofs are much shorter than in the base-16 Patricia
-trie. On the other hand, while IAVL+ trees provide a deterministic
-merkle root hash, the hash depends on the order of updates. In practice
-this shouldn't be a problem, since you can efficiently encode the tree
-structure when serializing the tree contents.
-
-Simple Tree
------------
-
-For merkelizing smaller static lists, use the Simple Tree. The
-transactions and validation signatures of a block are hashed using this
-simple merkle tree logic.
-
-If the number of items is not a power of two, the tree will not be full
-and some leaf nodes will be at different levels. Simple Tree tries to
-keep both sides of the tree the same size, but the left side may be one
-greater.
-
-::
-
-    Simple Tree with 6 items       Simple Tree with 7 items
-
-              *                              *
-             / \                            / \
-            /   \                          /   \
-           /     \                        /     \
-          /       \                      /       \
-         *         *                    *         *
-        / \       / \                  / \       / \
-       /   \     /   \                /   \     /   \
-      *    h2   *    h5              *     *   *    h6
-     / \       / \                  / \   / \ / \
-    h0  h1    h3  h4               h0 h1 h2 h3 h4 h5
-
-Simple Tree with Dictionaries
-~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
-
-The Simple Tree is used to merkelize a list of items, so to merkelize a
-(short) dictionary of key-value pairs, encode the dictionary as an
-ordered list of ``KVPair`` structs. The block hash is one such hash,
-derived from all the fields of the block ``Header``. The state hash is
-similarly derived.
diff --git a/docs/specification/new-spec/README.md b/docs/specification/new-spec/README.md
deleted file mode 100644
index 907ddd94..00000000
--- a/docs/specification/new-spec/README.md
+++ /dev/null
@@ -1 +0,0 @@
-Spec moved to [docs/spec](https://github.com/tendermint/tendermint/tree/master/docs/spec).
diff --git a/docs/specification/rpc.md b/docs/specification/rpc.md
deleted file mode 100644
index 2f3a72c7..00000000
--- a/docs/specification/rpc.md
+++ /dev/null
@@ -1,3 +0,0 @@
-# RPC
-
-The RPC documentation is hosted [here](https://tendermint.github.io/slate) and is generated by the CI from our [Slate repo](https://github.com/tendermint/slate). To update the documentation, edit the relevant `godoc` comments in the [rpc/core directory](https://github.com/tendermint/tendermint/tree/develop/rpc/core).
diff --git a/docs/specification/secure-p2p.rst b/docs/specification/secure-p2p.rst
deleted file mode 100644
index de95f0cf..00000000
--- a/docs/specification/secure-p2p.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-Secure P2P
-==========
-
-The Tendermint p2p protocol uses an authenticated encryption scheme
-based on the `Station-to-Station
-Protocol <https://en.wikipedia.org/wiki/Station-to-Station_protocol>`__.
-The implementation uses golang's nacl box for the actual authenticated
-encryption algorithm.
-
-Each peer generates an ED25519 key-pair to use as a persistent
-(long-term) id.
-
-When two peers establish a TCP connection, they first each generate an
-ephemeral ED25519 key-pair to use for this session, and send each other
-their respective ephemeral public keys. This happens in the clear.
-
-They then each compute the shared secret. The shared secret is the
-multiplication of the peer's ephemeral private key by the other peer's
-ephemeral public key. The result is the same for both peers by the magic
-of elliptic curves. The shared secret is used as the symmetric key for
-the encryption algorithm.
-
-The two ephemeral public keys are sorted to establish a canonical order.
-Then a 24-byte nonce is generated by concatenating the public keys and
-hashing them with Ripemd160. Note Ripemd160 produces 20-byte hashes, so
-the nonce ends with four 0s.
-
-The nonce is used to seed the encryption - it is critical that the same
-nonce never be used twice with the same private key. For convenience,
-the last bit of the nonce is flipped, giving us two nonces: one for
-encrypting our own messages, one for decrypting our peer's. Whichever
-peer has the higher public key uses the "bit-flipped" nonce for
-encryption.
-
-Now, a challenge is generated by concatenating the ephemeral public keys
-and taking the SHA256 hash.
-
-Each peer signs the challenge with their persistent private key, and
-sends the other peer an AuthSigMsg, containing their persistent public
-key and the signature. On receiving an AuthSigMsg, the peer verifies the
-signature.
-
-The peers are now authenticated.
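-
-A condensed sketch of the derivations just described, assuming the
-``golang.org/x/crypto`` packages (the real handshake lives in the p2p
-package; the package, function and variable names here are invented for
-the example):
-
-.. code:: go
-
-    package p2psketch
-
-    import (
-        "bytes"
-        "crypto/sha256"
-
-        "golang.org/x/crypto/nacl/box"
-        "golang.org/x/crypto/ripemd160"
-    )
-
-    // handshakeMaterial derives the shared secret, the 24-byte nonce and
-    // the challenge from our ephemeral keys and the peer's ephemeral
-    // public key, following the steps described above.
-    func handshakeMaterial(locPub, locPriv, peerPub *[32]byte) {
-        // Shared secret: Diffie-Hellman over the ephemeral keys.
-        var shared [32]byte
-        box.Precompute(&shared, peerPub, locPriv)
-
-        // Sort the two ephemeral public keys into a canonical order.
-        lo, hi := locPub[:], peerPub[:]
-        if bytes.Compare(lo, hi) > 0 {
-            lo, hi = hi, lo
-        }
-
-        // Nonce: Ripemd160 (20 bytes) of the sorted keys; the last four
-        // of the 24 bytes stay zero.
-        h := ripemd160.New()
-        h.Write(lo)
-        h.Write(hi)
-        var nonce [24]byte
-        copy(nonce[:], h.Sum(nil))
-
-        // Challenge: SHA256 of the concatenated ephemeral public keys,
-        // to be signed by each peer's persistent key in the AuthSigMsg.
-        challenge := sha256.Sum256(append(append([]byte{}, lo...), hi...))
-        _, _ = nonce, challenge
-    }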
-
-All future communications can now be encrypted using the shared secret
-and the generated nonces, where each nonce is incremented by one each
-time it is used. The communications maintain Perfect Forward Secrecy, as
-the persistent key pair was not used for generating secrets - only for
-authenticating.
-
-Caveat
-------
-
-This system is still vulnerable to a Man-In-The-Middle attack if the
-persistent public key of the remote node is not known in advance. The
-only way to mitigate this is with a public key authentication system,
-such as the Web-of-Trust or Certificate Authorities. In our case, we can
-use the blockchain itself as a certificate authority to ensure that we
-are connected to at least one validator.
-
-Config
-------
-
-Authenticated encryption is enabled by default.
-
-Additional Reading
-------------------
-
-- The implementation in the Tendermint codebase
-- Original STS paper by Whitfield Diffie, Paul C. van Oorschot and
-  Michael J. Wiener
-- Further work on secret handshakes
diff --git a/docs/specification/validators.rst b/docs/specification/validators.rst
deleted file mode 100644
index fb6bbbac..00000000
--- a/docs/specification/validators.rst
+++ /dev/null
@@ -1,44 +0,0 @@
-Validators
-==========
-
-Validators are responsible for committing new blocks in the blockchain.
-These validators participate in the consensus protocol by broadcasting
-*votes* which contain cryptographic signatures signed by each
-validator's private key.
-
-Some Proof-of-Stake consensus algorithms aim to create a "completely"
-decentralized system where all stakeholders (even those who are not
-always available online) participate in the committing of blocks.
-Tendermint has a different approach to block creation. Validators are
-expected to be online, and the set of validators is permissioned/curated
-by some external process. Proof-of-stake is not required, but can be
-implemented on top of Tendermint consensus. That is, validators may be
-required to post collateral on-chain, off-chain, or may not be required
-to post any collateral at all.
-
-Validators have a cryptographic key-pair and an associated amount of
-"voting power". Voting power need not be the same for every validator.
-
-Becoming a Validator
---------------------
-
-There are two ways to become a validator.
-
-1. Validators can be pre-established in the `genesis
-   state <./genesis.html>`__.
-2. The ABCI app can respond to the EndBlock message with changes to the
-   existing validator set.
-
-Committing a Block
-------------------
-
-*+2/3 is short for "more than 2/3"*
-
-A block is committed when +2/3 of the validator set sign `precommit
-votes <./block-structure.html#vote>`__ for that block at the same
-``round``. The +2/3 set of precommit votes is
-called a `*commit* <./block-structure.html#commit>`__. While any
-+2/3 set of precommits for the same block at the same height&round can
-serve as validation, the canonical commit is included in the next block
-(see `LastCommit <./block-structure.html>`__).
diff --git a/docs/specification/wire-protocol.rst b/docs/specification/wire-protocol.rst
deleted file mode 100644
index c0bf3b0e..00000000
--- a/docs/specification/wire-protocol.rst
+++ /dev/null
@@ -1,172 +0,0 @@
-Wire Protocol
-=============
-
-The Tendermint wire protocol encodes data in `c-style binary <#binary>`__
-and `JSON <#json>`__ form.
-
-Supported types
----------------
-
-- Primitive types
-- ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``
-- ``int8``, ``int16``, ``int32``, ``int64``
-- ``uint``, ``int``: variable length (un)signed integers
-- ``string``, ``[]byte``
-- ``time``
-- Derived types
-- structs
-- var-length arrays of a particular type
-- fixed-length arrays of a particular type
-- interfaces: registered union types preceded by a ``type byte``
-- pointers
-
-Binary
-------
-
-**Fixed-length primitive types** are encoded with 1, 2, 4, or 8
-big-endian bytes.
-
-- ``uint8`` (aka ``byte``), ``uint16``, ``uint32``, ``uint64``: take
-  1, 2, 4, and 8 bytes respectively
-- ``int8``, ``int16``, ``int32``, ``int64``: take 1, 2, 4, and 8 bytes
-  respectively
-- ``time``: ``int64`` representation of nanoseconds since epoch
-
-**Variable-length integers** are encoded with a single leading byte
-representing the length of the following big-endian bytes. For signed
-negative integers, the most significant bit of the leading byte is a 1.
-
-- ``uint``: 1-byte length prefixed variable-size (0 ~ 255 bytes)
-  unsigned integers
-- ``int``: 1-byte length prefixed variable-size (0 ~ 127 bytes) signed
-  integers
-
-NOTE: While the number 0 (zero) is encoded with a single byte ``x00``,
-the number 1 (one) takes two bytes to represent: ``x0101``. This isn't
-the most efficient representation, but the rules are easier to remember.
-
-+---------------+----------------+----------------+
-| number        | binary ``uint``| binary ``int`` |
-+===============+================+================+
-| 0             | ``x00``        | ``x00``        |
-+---------------+----------------+----------------+
-| 1             | ``x0101``      | ``x0101``      |
-+---------------+----------------+----------------+
-| 2             | ``x0102``      | ``x0102``      |
-+---------------+----------------+----------------+
-| 256           | ``x020100``    | ``x020100``    |
-+---------------+----------------+----------------+
-| 2^(127*8)-1   | ``x7FFFFF...`` | ``x7FFFFF...`` |
-+---------------+----------------+----------------+
-| 2^(127*8)     | ``x800100...`` | overflow       |
-+---------------+----------------+----------------+
-| 2^(255*8)-1   | ``xFFFFFF...`` | overflow       |
-+---------------+----------------+----------------+
-| -1            | n/a            | ``x8101``      |
-+---------------+----------------+----------------+
-| -2            | n/a            | ``x8102``      |
-+---------------+----------------+----------------+
-| -256          | n/a            | ``x820100``    |
-+---------------+----------------+----------------+
-
-**Structures** are encoded by encoding the field values in order of
-declaration.
-
-.. code:: go
-
-    type Foo struct {
-        MyString string
-        MyUint32 uint32
-    }
-    var foo = Foo{"bar", math.MaxUint32}
-
-    /* The binary representation of foo:
-       0103626172FFFFFFFF
-       0103:     `int` encoded length of string, here 3
-       626172:   3 bytes of string "bar"
-       FFFFFFFF: 4 bytes of uint32 MaxUint32
-    */
-
-**Variable-length arrays** are encoded with a leading ``int`` denoting
-the length of the array followed by the binary representation of the
-items. **Fixed-length arrays** are similar but aren't preceded by the
-leading ``int``.
-
-.. code:: go
-
-    foos := []Foo{foo, foo}
-
-    /* The binary representation of foos:
-       01020103626172FFFFFFFF0103626172FFFFFFFF
-       0102: `int` encoded length of array, here 2
-       0103626172FFFFFFFF: the first `foo`
-       0103626172FFFFFFFF: the second `foo`
-    */
-
-    foos := [2]Foo{foo, foo} // fixed-length array
-
-    /* The binary representation of foos:
-       0103626172FFFFFFFF0103626172FFFFFFFF
-       0103626172FFFFFFFF: the first `foo`
-       0103626172FFFFFFFF: the second `foo`
-    */
-
-**Interfaces** can represent one of any number of concrete types. The
-concrete types of an interface must first be declared with their
-corresponding ``type byte``. An interface is then encoded with the
-leading ``type byte``, then the binary encoding of the underlying
-concrete type.
-
-NOTE: The byte ``x00`` is reserved for the ``nil`` interface value and
-``nil`` pointer values.
-
-.. code:: go
-
-    type Animal interface{}
-    type Dog uint32
-    type Cat string
-
-    RegisterInterface(
-        struct{ Animal }{},          // Convenience for referencing the 'Animal' interface
-        ConcreteType{Dog(0), 0x01},  // Register the byte 0x01 to denote a Dog
-        ConcreteType{Cat(""), 0x02}, // Register the byte 0x02 to denote a Cat
-    )
-
-    var animal Animal = Dog(02)
-
-    /* The binary representation of animal:
-       010102
-       01:   the type byte for a `Dog`
-       0102: the bytes of Dog(02)
-    */
-
-**Pointers** are encoded with a single leading byte ``x00`` for ``nil``
-pointers, otherwise encoded with a leading byte ``x01`` followed by the
-binary encoding of the value pointed to.
-
-NOTE: It's easy to convert pointer types into interface types, since the
-``type byte`` ``x00`` is always ``nil``.
-
-JSON
-----
-
-The JSON codec is compatible with the `binary <#binary>`__ codec,
-and is fairly intuitive if you're already familiar with golang's JSON
-encoding. Some quirks are noted below:
-
-- variable-length and fixed-length bytes are encoded as uppercase
-  hexadecimal strings
-- interface values are encoded as an array of two items:
-  ``[type_byte, concrete_value]``
-- times are encoded as rfc2822 strings
diff --git a/docs/subscribing-to-events-via-websocket.md b/docs/subscribing-to-events-via-websocket.md
deleted file mode 100644
index 43d3f776..00000000
--- a/docs/subscribing-to-events-via-websocket.md
+++ /dev/null
@@ -1,26 +0,0 @@
-# Subscribing to events via Websocket
-
-Tendermint emits different events, to which you can subscribe via
-[Websocket](https://en.wikipedia.org/wiki/WebSocket). This can be useful
-for third-party applications (for analysis) or for inspecting state.
-
-[List of events](https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants)
-
-You can subscribe to any of the events above by calling the `subscribe`
-RPC method via Websocket.
-
-    {
-        "jsonrpc": "2.0",
-        "method": "subscribe",
-        "id": "0",
-        "params": {
-            "query": "tm.event='NewBlock'"
-        }
-    }
-
-Check out [API docs](https://tendermint.github.io/slate/#subscribe) for
-more information on query syntax and other options.
-
-You can also use tags, provided you included them in the DeliverTx
-response, to query transaction results. See [Indexing
-transactions](./indexing-transactions.md) for details.
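-
-For a programmatic client, the same subscription can be sketched in Go.
-This is illustrative only: it assumes the `github.com/gorilla/websocket`
-package and the default RPC address; the websocket endpoint is served at
-`/websocket`:
-
-    package main
-
-    import (
-        "fmt"
-        "log"
-
-        "github.com/gorilla/websocket"
-    )
-
-    func main() {
-        conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:26657/websocket", nil)
-        if err != nil {
-            log.Fatal(err)
-        }
-        defer conn.Close()
-
-        // The same JSON-RPC frame as shown above.
-        sub := map[string]interface{}{
-            "jsonrpc": "2.0",
-            "method":  "subscribe",
-            "id":      "0",
-            "params":  map[string]string{"query": "tm.event='NewBlock'"},
-        }
-        if err := conn.WriteJSON(sub); err != nil {
-            log.Fatal(err)
-        }
-
-        // Print events as they arrive.
-        for {
-            _, msg, err := conn.ReadMessage()
-            if err != nil {
-                log.Fatal(err)
-            }
-            fmt.Println(string(msg))
-        }
-    }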
diff --git a/docs/terraform-and-ansible.md b/docs/terraform-and-ansible.md
deleted file mode 100644
index 55c38cef..00000000
--- a/docs/terraform-and-ansible.md
+++ /dev/null
@@ -1,168 +0,0 @@
-# Terraform & Ansible
-
-Automated deployments are done using
-[Terraform](https://www.terraform.io/) to create servers on Digital
-Ocean, then [Ansible](http://www.ansible.com/) to create and manage
-testnets on those servers.
-
-## Install
-
-NOTE: see the [integration bash
-script](https://github.com/tendermint/tendermint/blob/develop/networks/remote/integration.sh)
-that can be run on a fresh DO droplet and will automatically spin up a
-4-node testnet. The script more or less does everything described below.
-
-- Install [Terraform](https://www.terraform.io/downloads.html) and
-  [Ansible](http://docs.ansible.com/ansible/latest/installation_guide/intro_installation.html)
-  on a Linux machine.
-- Create a [DigitalOcean API
-  token](https://cloud.digitalocean.com/settings/api/tokens) with read
-  and write capability.
-- Install the python dopy package (`pip install dopy`)
-- Create SSH keys (`ssh-keygen`)
-- Set environment variables:
-
-```
-export DO_API_TOKEN="abcdef01234567890abcdef01234567890"
-export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"
-```
-
-These will be used by both `terraform` and `ansible`.
-
-### Terraform
-
-This step will create four Digital Ocean droplets. First, go to the
-correct directory:
-
-```
-cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform
-```
-
-then:
-
-```
-terraform init
-terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
-```
-
-and you will get a list of IP addresses that belong to your droplets.
-
-With the droplets created and running, let's set up Ansible.
-
-### Ansible
-
-The playbooks in [the ansible
-directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible)
-run ansible roles to configure the sentry node architecture. You must
-switch to this directory to run ansible
-(`cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible`).
-
-There are several roles, and they are mostly self-explanatory.
-
-First, we configure our droplets by specifying the paths for tendermint
-(`BINARY`) and the node files (`CONFIGDIR`). The latter expects any
-number of directories named `node0, node1, ...` and so on (equal to the
-number of droplets created). For this example, we use pre-created files
-from [this
-directory](https://github.com/tendermint/tendermint/tree/master/docs/examples).
-To create your own files, use either the `tendermint testnet` command or
-review [manual deployments](./deploy-testnets.md).
-
-Here's the command to run:
-
-```
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples
-```
-
-Voila! All your droplets now have the `tendermint` binary and required
-configuration files to run a testnet.
-
-Next, we run the install role:
-
-```
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
-```
-
-which, as you'll see below, executes
-`tendermint node --proxy_app=kvstore` on all droplets. Although we'll
-soon be modifying this role and running it again, this first execution
-allows us to get each `node_info.id` that corresponds to each
-`node_info.listen_addr`. (This part will be automated in the future.) In
-your browser (or using `curl`), for every droplet, go to IP:26657/status
-and note the two `node_info` fields just mentioned. Notice that blocks
-aren't being created (`latest_block_height` should be zero and not
-increasing).
-
-Next, open `roles/install/templates/systemd.service.j2` and look for the
-line `ExecStart` which should look something like:
-
-```
-ExecStart=/usr/bin/tendermint node --proxy_app=kvstore
-```
-
-and add the `--p2p.persistent_peers` flag with the relevant information
-for each node. The resulting file should look something like:
-
-```
-[Unit]
-Description={{service}}
-Requires=network-online.target
-After=network-online.target
-
-[Service]
-Restart=on-failure
-User={{service}}
-Group={{service}}
-PermissionsStartOnly=true
-ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=167b80242c300bf0ccfb3ced3dec60dc2a81776e@165.227.41.206:26656,3c7a5920811550c04bf7a0b2f1e02ab52317b5e6@165.227.43.146:26656,303a1a4312c30525c99ba66522dd81cca56a361a@159.89.115.32:26656,b686c2a7f4b1b46dca96af3a0f31a6a7beae0be4@159.89.119.125:26656
-ExecReload=/bin/kill -HUP $MAINPID
-KillSignal=SIGTERM
-
-[Install]
-WantedBy=multi-user.target
-```
-
-Then, stop the nodes:
-
-```
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet stop.yml
-```
-
-Finally, we run the install role again:
-
-```
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml
-```
-
-to re-run `tendermint node` with the new flag, on all droplets. The
-`latest_block_hash` should now be changing and `latest_block_height`
-increasing. Your testnet is now up and running :)
-
-Peek at the logs with the status role:
-
-```
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml
-```
-
-### Logging
-
-The crudest way to inspect logs is the status role described above. You
-can also ship logs to Logz.io, an Elastic stack (Elasticsearch, Logstash
-and Kibana) service provider. You can set up your nodes to log there
-automatically. Create an account and get your API key from the notes on
-[this page](https://app.logz.io/#/dashboard/data-sources/Filebeat), then:
-
-```
-yum install systemd-devel || echo "This will only work on RHEL-based systems."
-apt-get install libsystemd-dev || echo "This will only work on Debian-based systems."
-
-go get github.com/mheese/journalbeat
-ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
-```
-
-### Cleanup
-
-To remove your droplets, run:
-
-```
-terraform destroy -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE"
-```
diff --git a/docs/transactional-semantics.md b/docs/transactional-semantics.md
deleted file mode 100644
index bab1864e..00000000
--- a/docs/transactional-semantics.md
+++ /dev/null
@@ -1,25 +0,0 @@
-# Transactional Semantics
-
-In [Using Tendermint](./using-tendermint.md#broadcast-api) we
-discussed different API endpoints for sending transactions and the
-differences between them.
-
-What we have not yet covered is transactional semantics.
-
-When you send a transaction using one of the available methods, it first
-goes to the mempool. Currently, the mempool does not provide strong
-guarantees like "if the transaction were accepted, it would be
-eventually included in a block (given CheckTx passes)."
-
-For instance, a tx could enter the mempool, but the node could crash
-before the tx is sent to peers.
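-
-Until then, the simplest client-side hedge is to not treat a successful
-broadcast as durable. Below is a sketch of one mitigation discussed in
-the following paragraphs: submitting the same tx to several nodes so
-that a single crash cannot lose it (the node list and retry policy are
-invented for the example; the RPC endpoint is the real one):
-
-    package main
-
-    import (
-        "fmt"
-        "net/http"
-    )
-
-    // submitEverywhere sends the same tx to several nodes, so a single
-    // node crashing before gossiping the tx does not lose it.
-    func submitEverywhere(nodes []string, txHex string) {
-        for _, node := range nodes {
-            resp, err := http.Get(node + "/broadcast_tx_sync?tx=0x" + txHex)
-            if err != nil {
-                fmt.Println("node unreachable, relying on the others:", err)
-                continue
-            }
-            resp.Body.Close()
-        }
-    }
-
-    func main() {
-        submitEverywhere([]string{"http://localhost:26657"}, "01020304")
-    }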
-
-We are planning to provide such guarantees by using a WAL and replaying
-transactions (see
-[this issue](https://github.com/tendermint/tendermint/issues/248)), but
-it's non-trivial to do this all efficiently.
-
-The temporary solution is for clients to monitor the node and resubmit
-transaction(s) and/or send them to more nodes at once, so the
-probability of all of them crashing at the same time and losing the
-transaction decreases substantially.
diff --git a/docs/using-tendermint.md b/docs/using-tendermint.md
deleted file mode 100644
index 86ec2529..00000000
--- a/docs/using-tendermint.md
+++ /dev/null
@@ -1,420 +0,0 @@
-# Using Tendermint
-
-This is a guide to using the `tendermint` program from the command line.
-It assumes only that you have the `tendermint` binary installed and have
-some rudimentary idea of what Tendermint and ABCI are.
-
-You can see the help menu with `tendermint --help`, and the version
-number with `tendermint version`.
-
-## Directory Root
-
-The default directory for blockchain data is `~/.tendermint`. Override
-this by setting the `TMHOME` environment variable.
-
-## Initialize
-
-Initialize the root directory by running:
-
-    tendermint init
-
-This will create a new private key (`priv_validator.json`), and a
-genesis file (`genesis.json`) containing the associated public key, in
-`$TMHOME/config`. This is all that's necessary to run a local testnet
-with one validator.
-
-For more elaborate initialization, see the testnet command:
-
-    tendermint testnet --help
-
-## Run
-
-To run a Tendermint node, use
-
-    tendermint node
-
-By default, Tendermint will try to connect to an ABCI application on
-[127.0.0.1:26658](127.0.0.1:26658). If you have the `kvstore` ABCI app
-installed, run it in another window. If you don't, kill Tendermint and
-run an in-process version of the `kvstore` app:
-
-    tendermint node --proxy_app=kvstore
-
-After a few seconds you should see blocks start streaming in. Note that
-blocks are produced regularly, even if there are no transactions. See
-*No Empty Blocks*, below, to modify this setting.
-
-Tendermint supports in-process versions of the `counter`, `kvstore` and
-`nil` apps that ship as examples in the [ABCI
-repository](https://github.com/tendermint/abci). It's easy to compile
-your own app in-process with Tendermint if it's written in Go. If your
-app is not written in Go, simply run it in another process, and use the
-`--proxy_app` flag to specify the address of the socket it is listening
-on, for instance:
-
-    tendermint node --proxy_app=/var/run/abci.sock
-
-## Transactions
-
-To send a transaction, use `curl` to make requests to the Tendermint RPC
-server, for example:
-
-    curl http://localhost:26657/broadcast_tx_commit?tx=\"abcd\"
-
-We can see the chain's status at the `/status` end-point:
-
-    curl http://localhost:26657/status | json_pp
-
-and the `latest_app_hash` in particular:
-
-    curl http://localhost:26657/status | json_pp | grep latest_app_hash
-
-Visit http://localhost:26657 in your browser to see the list of other
-endpoints. Some take no arguments (like `/status`), while others specify
-the argument name and use `_` as a placeholder.
-
-### Formatting
-
-The following nuances when sending/formatting transactions should be
-taken into account:
-
-With `GET`:
-
-To send a UTF8 string byte array, quote the value of the tx parameter:
-
-    curl 'http://localhost:26657/broadcast_tx_commit?tx="hello"'
-
-which sends a 5 byte transaction: "h e l l o" \[68 65 6c 6c 6f\].
-
-Note the URL must be wrapped with single quotes, else bash will ignore
-the double quotes. To avoid the single quotes, escape the double quotes:
-
-    curl http://localhost:26657/broadcast_tx_commit?tx=\"hello\"
-
-Using a special character:
-
-    curl 'http://localhost:26657/broadcast_tx_commit?tx="€5"'
-
-sends a 4 byte transaction: "€5" (UTF8) \[e2 82 ac 35\].
-
-To send as raw hex, omit quotes AND prefix the hex string with `0x`:
-
-    curl http://localhost:26657/broadcast_tx_commit?tx=0x01020304
-
-which sends a 4 byte transaction: \[01 02 03 04\].
-
-With `POST` (using `json`), the raw hex must be `base64` encoded:
-
-    curl --data-binary '{"jsonrpc":"2.0","id":"anything","method":"broadcast_tx_commit","params": {"tx": "AQIDBA=="}}' -H 'content-type:text/plain;' http://localhost:26657
-
-which sends the same 4 byte transaction: \[01 02 03 04\].
-
-Note that raw hex cannot be used in `POST` transactions.
-
-## Reset
-
-**WARNING: UNSAFE** Only do this in development and only if you can
-afford to lose all blockchain data!
-
-To reset a blockchain, stop the node, remove the `~/.tendermint/data`
-directory and run
-
-    tendermint unsafe_reset_priv_validator
-
-This final step is necessary to reset the `priv_validator.json`, which
-otherwise prevents you from making conflicting votes in the consensus
-(something that could get you in trouble if you do it on a real
-blockchain). If you don't reset the `priv_validator.json`, your fresh
-new blockchain will not make any blocks.
-
-## Configuration
-
-Tendermint uses a `config.toml` for configuration. For details, see [the
-config specification](./specification/configuration.html).
-
-Notable options include the socket address of the application
-(`proxy_app`), the listening address of the Tendermint peer
-(`p2p.laddr`), and the listening address of the RPC server
-(`rpc.laddr`).
-
-Some fields from the config file can be overwritten with flags.
-
-## No Empty Blocks
-
-This much-requested feature was implemented in version 0.10.3. While the
-default behaviour of `tendermint` is still to create blocks
-approximately once per second, it is possible to disable empty blocks or
-set a block creation interval. In the former case, blocks will be
-created when there are new transactions or when the AppHash changes.
-
-To configure Tendermint to not produce empty blocks unless there are
-transactions or the app hash changes, run Tendermint with this
-additional flag:
-
-    tendermint node --consensus.create_empty_blocks=false
-
-or set the configuration via the `config.toml` file:
-
-    [consensus]
-    create_empty_blocks = false
-
-Remember: because the default is to *create empty blocks*, avoiding
-empty blocks requires the config option to be set to `false`.
-
-The block interval setting allows for a delay (in seconds) between the
-creation of each new empty block. It is set via the `config.toml`:
-
-    [consensus]
-    create_empty_blocks_interval = 5
-
-With this setting, empty blocks will be produced every 5s if no block
-has been produced otherwise, regardless of the value of
-`create_empty_blocks`.
-
-## Broadcast API
-
-Earlier, we used the `broadcast_tx_commit` endpoint to send a
-transaction. When a transaction is sent to a Tendermint node, it will
-run via `CheckTx` against the application. If it passes `CheckTx`, it
-will be included in the mempool, broadcast to other peers, and
-eventually included in a block.
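-
-In Go, the commit-level variant of this pipeline looks roughly like the
-sketch below, using the repository's RPC client (the import paths match
-this release series; treat the snippet as illustrative rather than
-definitive):
-
-    package main
-
-    import (
-        "fmt"
-
-        "github.com/tendermint/tendermint/rpc/client"
-        "github.com/tendermint/tendermint/types"
-    )
-
-    func main() {
-        // "/websocket" is the default websocket endpoint of the RPC server.
-        cli := client.NewHTTP("tcp://localhost:26657", "/websocket")
-
-        // Blocks until the tx is committed, or returns early if CheckTx fails.
-        res, err := cli.BroadcastTxCommit(types.Tx("hello"))
-        if err != nil {
-            panic(err)
-        }
-        // CheckTx ran in the mempool; DeliverTx ran when the block executed.
-        fmt.Printf("check=%d deliver=%d height=%d\n",
-            res.CheckTx.Code, res.DeliverTx.Code, res.Height)
-    }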
-
-Since there are multiple phases to processing a transaction, we offer
-multiple endpoints to broadcast a transaction:
-
-    /broadcast_tx_async
-    /broadcast_tx_sync
-    /broadcast_tx_commit
-
-These correspond to no-processing, processing through the mempool, and
-processing through a block, respectively. That is, `broadcast_tx_async`
-will return right away without waiting to hear if the transaction is
-even valid, while `broadcast_tx_sync` will return with the result of
-running the transaction through `CheckTx`. Using `broadcast_tx_commit`
-will wait until the transaction is committed in a block or until some
-timeout is reached, but will return right away if the transaction does
-not pass `CheckTx`. The return value for `broadcast_tx_commit` includes
-two fields, `check_tx` and `deliver_tx`, pertaining to the result of
-running the transaction through those ABCI messages.
-
-The benefit of using `broadcast_tx_commit` is that the request returns
-after the transaction is committed (i.e. included in a block), but that
-can take on the order of a second. For a quick result, use
-`broadcast_tx_sync`, but the transaction will not be committed until
-later, and by that point its effect on the state may change.
-
-## Tendermint Networks
-
-When `tendermint init` is run, both a `genesis.json` and
-`priv_validator.json` are created in `~/.tendermint/config`. The
-`genesis.json` might look like:
-
-    {
-        "validators" : [
-            {
-                "pub_key" : {
-                    "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
-                    "type" : "AC26791624DE60"
-                },
-                "power" : 10,
-                "name" : ""
-            }
-        ],
-        "app_hash" : "",
-        "chain_id" : "test-chain-rDlYSN",
-        "genesis_time" : "0001-01-01T00:00:00Z"
-    }
-
-And the `priv_validator.json`:
-
-    {
-        "last_step" : 0,
-        "last_round" : 0,
-        "address" : "B788DEDE4F50AD8BC9462DE76741CCAFF87D51E2",
-        "pub_key" : {
-            "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
-            "type" : "AC26791624DE60"
-        },
-        "last_height" : 0,
-        "priv_key" : {
-            "value" : "JPivl82x+LfVkp8i3ztoTjY6c6GJ4pBxQexErOCyhwqHeGT5ATxzpAtPJKnxNx/NyUnD8Ebv3OIYH+kgD4N88Q==",
-            "type" : "954568A3288910"
-        }
-    }
-
-The `priv_validator.json` actually contains a private key, and should
-thus be kept absolutely secret; for now we work with the plain text.
-Note the `last_` fields, which are used to prevent us from signing
-conflicting messages.
-
-Note also that the `pub_key` (the public key) in the
-`priv_validator.json` is also present in the `genesis.json`.
-
-The genesis file contains the list of public keys which may participate
-in the consensus, and their corresponding voting power. Greater than 2/3
-of the voting power must be active (i.e. the corresponding private keys
-must be producing signatures) for the consensus to make progress. In our
-case, the genesis file contains the public key of our
-`priv_validator.json`, so a Tendermint node started with the default
-root directory will be able to make progress. Voting power uses an int64
-but must be non-negative, thus the range is 0 through
-9223372036854775807. Because of how the current proposer selection
-algorithm works, we do not recommend having voting powers greater than
-10\^12 (ie. 1 trillion) (see [Proposals section of Byzantine Consensus
-Algorithm](./specification/byzantine-consensus-algorithm.html#proposals)
-for details).
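-
-A quick way to confirm that your node's key is in the genesis validator
-set is to compare the two `pub_key` values directly. The sketch below
-hard-codes the default paths and ignores errors for brevity; the struct
-fields mirror the JSON shown above:
-
-    package main
-
-    import (
-        "encoding/json"
-        "fmt"
-        "io/ioutil"
-        "os"
-    )
-
-    type keyInfo struct {
-        PubKey struct {
-            Value string `json:"value"`
-        } `json:"pub_key"`
-    }
-
-    type genesisDoc struct {
-        Validators []keyInfo `json:"validators"`
-    }
-
-    func main() {
-        home := os.Getenv("HOME") + "/.tendermint"
-        pvBytes, _ := ioutil.ReadFile(home + "/config/priv_validator.json")
-        genBytes, _ := ioutil.ReadFile(home + "/config/genesis.json")
-
-        var pv keyInfo
-        var gen genesisDoc
-        json.Unmarshal(pvBytes, &pv)
-        json.Unmarshal(genBytes, &gen)
-
-        // A node started with the default root directory can only make
-        // progress if its key belongs to a genesis validator.
-        for _, val := range gen.Validators {
-            if val.PubKey.Value == pv.PubKey.Value {
-                fmt.Println("this node is a genesis validator")
-                return
-            }
-        }
-        fmt.Println("this node is NOT in the genesis validator set")
-    }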
-
-If we want to add more nodes to the network, we have two choices: we can
-add a new validator node, which will also participate in the consensus by
-proposing blocks and voting on them, or we can add a new non-validator
-node, which will not participate directly, but will verify and keep up
-with the consensus protocol.
-
-### Peers
-
-#### Seed
-A seed node is a node that relays the addresses of other peers it knows
-of. These nodes constantly crawl the network to try to get more peers. The
-addresses which the seed node relays get saved into a local address book. Once
-these are in the address book, you will connect to those addresses directly.
-Basically, the seed node's job is just to relay everyone's addresses. You won't
-connect to seed nodes once you have received enough addresses, so typically you
-only need them on the first start. The seed node will immediately disconnect
-from you after sending you some addresses.
-
-#### Persistent Peer
-Persistent peers are peers you want to be constantly connected with. If you
-disconnect, you will try to connect directly back to them as opposed to using
-another address from the address book. On restarts, you will always try to
-connect to these peers regardless of the size of your address book.
-
-All peers relay peers they know of by default. This is called the peer exchange
-protocol (PeX). With PeX, peers will be gossiping about known peers and forming
-a network, storing peer addresses in the addrbook. Because of this, you don't
-have to use a seed node if you have a live persistent peer.
-
-#### Connecting to Peers
-
-To connect to peers on start-up, specify them in the
-`$TMHOME/config/config.toml` or on the command line. Use `seeds` to
-specify seed nodes, and
-`persistent_peers` to specify peers that your node will maintain
-persistent connections with.
-
-For example,
-
-    tendermint node --p2p.seeds "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"
-
-Alternatively, you can use the `/dial_seeds` endpoint of the RPC to
-specify seeds for a running node to connect to:
-
-    curl 'localhost:26657/dial_seeds?seeds=\["f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656","0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"\]'
-
-Note, with PeX enabled, you
-should not need seeds after the first start.
-
-If you want Tendermint to connect to a specific set of addresses and
-maintain a persistent connection with each, you can use the
-`--p2p.persistent_peers` flag or the corresponding setting in the
-`config.toml` or the `/dial_peers` RPC endpoint to do it without
-stopping the Tendermint instance.
-
-    tendermint node --p2p.persistent_peers "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"
-    curl 'localhost:26657/dial_peers?persistent=true&peers=\["429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656","96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"\]'
-
-### Adding a Non-Validator
-
-Adding a non-validator is simple. Just copy the original `genesis.json`
-to `~/.tendermint/config` on the new machine and start the node,
-specifying seeds or persistent peers as necessary. If no seeds or
-persistent peers are specified, the node won't make any blocks, because
-it's not a validator, and it won't hear about any blocks, because it's
-not connected to any other peers.
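-
-The flags above have config-file equivalents. The same peers can be set
-once in `$TMHOME/config/config.toml` (reusing the example addresses from
-this section) instead of being passed on every start:
-
-    [p2p]
-    seeds = "f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@1.2.3.4:26656,0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@5.6.7.8:26656"
-    persistent_peers = "429fcf25974313b95673f58d77eacdd434402665@10.11.12.13:26656,96663a3dd0d7b9d17d4c8211b191af259621c693@10.11.12.14:26656"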
-
-### Adding a Validator
-
-The easiest way to add new validators is to do it in the `genesis.json`,
-before starting the network. For instance, we could make a new
-`priv_validator.json`, and copy its `pub_key` into the above genesis.
-
-We can generate a new `priv_validator.json` with the command:
-
-    tendermint gen_validator
-
-Now we can update our genesis file. For instance, if the new
-`priv_validator.json` looks like:
-
-    {
-        "address" : "5AF49D2A2D4F5AD4C7C8C4CC2FB020131E9C4902",
-        "pub_key" : {
-            "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
-            "type" : "AC26791624DE60"
-        },
-        "priv_key" : {
-            "value" : "EDJY9W6zlAw+su6ITgTKg2nTZcHAH1NMTW5iwlgmNDuX1f35+OR4HMN88ZtQzsAwhETq4k3vzM3n6WTk5ii16Q==",
-            "type" : "954568A3288910"
-        },
-        "last_step" : 0,
-        "last_round" : 0,
-        "last_height" : 0
-    }
-
-then the new `genesis.json` will be:
-
-    {
-        "validators" : [
-            {
-                "pub_key" : {
-                    "value" : "h3hk+QE8c6QLTySp8TcfzclJw/BG79ziGB/pIA+DfPE=",
-                    "type" : "AC26791624DE60"
-                },
-                "power" : 10,
-                "name" : ""
-            },
-            {
-                "pub_key" : {
-                    "value" : "l9X9+fjkeBzDfPGbUM7AMIRE6uJN78zN5+lk5OYotek=",
-                    "type" : "AC26791624DE60"
-                },
-                "power" : 10,
-                "name" : ""
-            }
-        ],
-        "app_hash" : "",
-        "chain_id" : "test-chain-rDlYSN",
-        "genesis_time" : "0001-01-01T00:00:00Z"
-    }
-
-Update the `genesis.json` in `~/.tendermint/config`. Copy the genesis
-file and the new `priv_validator.json` to the `~/.tendermint/config` on
-a new machine.
-
-Now run `tendermint node` on both machines, and use either
-`--p2p.persistent_peers` or the `/dial_peers` RPC endpoint to get them to
-peer up. They should start making blocks, and will only continue to do so
-as long as both of them are online.
-
-To make a Tendermint network that can tolerate one of the validators
-failing, you need at least four validator nodes, so that the remaining
-three still hold more than 2/3 of the voting power.
-
-Updating validators in a live network is supported but must be
-explicitly programmed by the application developer. See the [application
-developers guide](./app-development.html) for more details.
-
-### Local Network
-
-To run a network locally, say on a single machine, you must change the
-`_laddr` fields in the `config.toml` (or via the command-line flags) so
-that the listening addresses of the various sockets don't conflict.
-Additionally, you must set `addrbook_strict=false` in the `config.toml`,
-otherwise Tendermint's p2p library will deny making connections to peers
-with the same IP address.
-
-### Upgrading
-
-The Tendermint development cycle currently includes a lot of breaking changes.
-Upgrading from an old version to a new version usually means throwing
-away the chain data. Try out the
-[tm-migrate](https://github.com/hxzqlh/tm-tools) tool written by
-[@hxzqlh](https://github.com/hxzqlh) if you are keen to preserve the
-state of your chain when upgrading to newer versions.
diff --git a/evidence/pool.go b/evidence/pool.go
deleted file mode 100644
index 4bad355f..00000000
--- a/evidence/pool.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package evidence
-
-import (
-	"fmt"
-	"sync"
-
-	clist "github.com/tendermint/tmlibs/clist"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
-
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/types"
-)
-
-// EvidencePool maintains a pool of valid evidence
-// in an EvidenceStore.
-type EvidencePool struct {
-	logger log.Logger
-
-	evidenceStore *EvidenceStore
-	evidenceList  *clist.CList // concurrent linked-list of evidence
-
-	// needed to load validators to verify evidence
-	stateDB dbm.DB
-
-	// latest state
-	mtx   sync.Mutex
-	state sm.State
-}
-
-// NewEvidencePool creates an EvidencePool backed by the given state DB
-// and evidence store, with a no-op logger.
-func NewEvidencePool(stateDB dbm.DB, evidenceStore *EvidenceStore) *EvidencePool {
-	evpool := &EvidencePool{
-		stateDB:       stateDB,
-		state:         sm.LoadState(stateDB),
-		logger:        log.NewNopLogger(),
-		evidenceStore: evidenceStore,
-		evidenceList:  clist.New(),
-	}
-	return evpool
-}
-
-// EvidenceFront returns the first element of the evidence list.
-func (evpool *EvidencePool) EvidenceFront() *clist.CElement {
-	return evpool.evidenceList.Front()
-}
-
-// EvidenceWaitChan returns a channel that closes once the evidence list
-// is non-empty.
-func (evpool *EvidencePool) EvidenceWaitChan() <-chan struct{} {
-	return evpool.evidenceList.WaitChan()
-}
-
-// SetLogger sets the Logger.
-func (evpool *EvidencePool) SetLogger(l log.Logger) {
-	evpool.logger = l
-}
-
-// PriorityEvidence returns the priority evidence.
-func (evpool *EvidencePool) PriorityEvidence() []types.Evidence {
-	return evpool.evidenceStore.PriorityEvidence()
-}
-
-// PendingEvidence returns all uncommitted evidence.
-func (evpool *EvidencePool) PendingEvidence() []types.Evidence {
-	return evpool.evidenceStore.PendingEvidence()
-}
-
-// State returns the current state of the evpool.
-func (evpool *EvidencePool) State() sm.State {
-	evpool.mtx.Lock()
-	defer evpool.mtx.Unlock()
-	return evpool.state
-}
-
-// Update stores the latest state and marks the block's evidence as committed.
-func (evpool *EvidencePool) Update(block *types.Block, state sm.State) {
-
-	// sanity check
-	if state.LastBlockHeight != block.Height {
-		panic(fmt.Sprintf("Failed EvidencePool.Update sanity check: got state.Height=%d with block.Height=%d", state.LastBlockHeight, block.Height))
-	}
-
-	// update the state
-	evpool.mtx.Lock()
-	evpool.state = state
-	evpool.mtx.Unlock()
-
-	// remove evidence from pending and mark committed
-	evpool.MarkEvidenceAsCommitted(block.Height, block.Evidence.Evidence)
-}
-
-// AddEvidence checks the evidence is valid and adds it to the pool.
-func (evpool *EvidencePool) AddEvidence(evidence types.Evidence) (err error) {
-
-	// TODO: check if we already have evidence for this
-	// validator at this height so we don't get spammed
-
-	if err := sm.VerifyEvidence(evpool.stateDB, evpool.State(), evidence); err != nil {
-		return err
-	}
-
-	// fetch the validator and return its voting power as its priority
-	// TODO: something better ?
-	valset, _ := sm.LoadValidators(evpool.stateDB, evidence.Height())
-	_, val := valset.GetByAddress(evidence.Address())
-	priority := val.VotingPower
-
-	added := evpool.evidenceStore.AddNewEvidence(evidence, priority)
-	if !added {
-		// evidence already known, just ignore
-		return
-	}
-
-	evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence)
-
-	// add evidence to clist
-	evpool.evidenceList.PushBack(evidence)
-
-	return nil
-}
-
-// MarkEvidenceAsCommitted marks all the evidence as committed and removes it from the queue.
-func (evpool *EvidencePool) MarkEvidenceAsCommitted(height int64, evidence []types.Evidence) { - // make a map of committed evidence to remove from the clist - blockEvidenceMap := make(map[string]struct{}) - for _, ev := range evidence { - evpool.evidenceStore.MarkEvidenceAsCommitted(ev) - blockEvidenceMap[evMapKey(ev)] = struct{}{} - } - - // remove committed evidence from the clist - maxAge := evpool.State().ConsensusParams.EvidenceParams.MaxAge - evpool.removeEvidence(height, maxAge, blockEvidenceMap) - -} - -func (evpool *EvidencePool) removeEvidence(height, maxAge int64, blockEvidenceMap map[string]struct{}) { - for e := evpool.evidenceList.Front(); e != nil; e = e.Next() { - ev := e.Value.(types.Evidence) - - // Remove the evidence if it's already in a block - // or if it's now too old. - if _, ok := blockEvidenceMap[evMapKey(ev)]; ok || - ev.Height() < height-maxAge { - - // remove from clist - evpool.evidenceList.Remove(e) - e.DetachPrev() - } - } -} - -func evMapKey(ev types.Evidence) string { - return string(ev.Hash()) -} diff --git a/evidence/pool_test.go b/evidence/pool_test.go deleted file mode 100644 index 01907623..00000000 --- a/evidence/pool_test.go +++ /dev/null @@ -1,79 +0,0 @@ -package evidence - -import ( - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" -) - -var mockState = sm.State{} - -func initializeValidatorState(valAddr []byte, height int64) dbm.DB { - stateDB := dbm.NewMemDB() - - // create validator set and state - valSet := &types.ValidatorSet{ - Validators: []*types.Validator{ - {Address: valAddr}, - }, - } - state := sm.State{ - LastBlockHeight: 0, - LastBlockTime: time.Now(), - Validators: valSet, - LastHeightValidatorsChanged: 1, - ConsensusParams: types.ConsensusParams{ - EvidenceParams: types.EvidenceParams{ - MaxAge: 1000000, - }, - }, - } - - // save all states up to height - for i := int64(0); i < height; i++ { - state.LastBlockHeight = i - sm.SaveState(stateDB, state) - } - - return stateDB -} - -func TestEvidencePool(t *testing.T) { - - valAddr := []byte("val1") - height := int64(5) - stateDB := initializeValidatorState(valAddr, height) - store := NewEvidenceStore(dbm.NewMemDB()) - pool := NewEvidencePool(stateDB, store) - - goodEvidence := types.NewMockGoodEvidence(height, 0, valAddr) - badEvidence := types.MockBadEvidence{goodEvidence} - - // bad evidence - err := pool.AddEvidence(badEvidence) - assert.NotNil(t, err) - - var wg sync.WaitGroup - wg.Add(1) - go func() { - <-pool.EvidenceWaitChan() - wg.Done() - }() - - err = pool.AddEvidence(goodEvidence) - assert.Nil(t, err) - wg.Wait() - - assert.Equal(t, 1, pool.evidenceList.Len()) - - // if we send it again, it shouldnt change the size - err = pool.AddEvidence(goodEvidence) - assert.Nil(t, err) - assert.Equal(t, 1, pool.evidenceList.Len()) -} diff --git a/evidence/reactor.go b/evidence/reactor.go deleted file mode 100644 index 5159572e..00000000 --- a/evidence/reactor.go +++ /dev/null @@ -1,227 +0,0 @@ -package evidence - -import ( - "fmt" - "reflect" - "time" - - "github.com/tendermint/go-amino" - clist "github.com/tendermint/tmlibs/clist" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -const ( - EvidenceChannel = byte(0x38) - - maxMsgSize = 1048576 // 1MB TODO make it configurable - - broadcastEvidenceIntervalS = 60 // broadcast uncommitted evidence this often - 
peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount
-)
-
-// EvidenceReactor handles evpool evidence broadcasting amongst peers.
-type EvidenceReactor struct {
-	p2p.BaseReactor
-	evpool   *EvidencePool
-	eventBus *types.EventBus
-}
-
-// NewEvidenceReactor returns a new EvidenceReactor with the given config and evpool.
-func NewEvidenceReactor(evpool *EvidencePool) *EvidenceReactor {
-	evR := &EvidenceReactor{
-		evpool: evpool,
-	}
-	evR.BaseReactor = *p2p.NewBaseReactor("EvidenceReactor", evR)
-	return evR
-}
-
-// SetLogger sets the Logger on the reactor and the underlying Evidence.
-func (evR *EvidenceReactor) SetLogger(l log.Logger) {
-	evR.Logger = l
-	evR.evpool.SetLogger(l)
-}
-
-// OnStart implements cmn.Service.
-func (evR *EvidenceReactor) OnStart() error {
-	return evR.BaseReactor.OnStart()
-}
-
-// GetChannels implements Reactor.
-// It returns the list of channels for this reactor.
-func (evR *EvidenceReactor) GetChannels() []*p2p.ChannelDescriptor {
-	return []*p2p.ChannelDescriptor{
-		&p2p.ChannelDescriptor{
-			ID:       EvidenceChannel,
-			Priority: 5,
-		},
-	}
-}
-
-// AddPeer implements Reactor.
-func (evR *EvidenceReactor) AddPeer(peer p2p.Peer) {
-	go evR.broadcastEvidenceRoutine(peer)
-}
-
-// RemovePeer implements Reactor.
-func (evR *EvidenceReactor) RemovePeer(peer p2p.Peer, reason interface{}) {
-	// nothing to do
-}
-
-// Receive implements Reactor.
-// It adds any received evidence to the evpool.
-func (evR *EvidenceReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
-	msg, err := DecodeMessage(msgBytes)
-	if err != nil {
-		evR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes)
-		evR.Switch.StopPeerForError(src, err)
-		return
-	}
-	evR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg)
-
-	switch msg := msg.(type) {
-	case *EvidenceListMessage:
-		for _, ev := range msg.Evidence {
-			err := evR.evpool.AddEvidence(ev)
-			if err != nil {
-				evR.Logger.Info("Evidence is not valid", "evidence", msg.Evidence, "err", err)
-				// punish peer
-				evR.Switch.StopPeerForError(src, err)
-			}
-		}
-	default:
-		evR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg)))
-	}
-}
-
-// SetEventBus sets the event bus on the reactor.
-func (evR *EvidenceReactor) SetEventBus(b *types.EventBus) {
-	evR.eventBus = b
-}
-
-// Modeled after the mempool routine.
-// - Evidence accumulates in a clist.
-// - Each peer has a routine that iterates through the clist,
-//   sending available evidence to the peer.
-// - If we're waiting for new evidence and the list is not empty,
-//   start iterating from the beginning again.
-func (evR *EvidenceReactor) broadcastEvidenceRoutine(peer p2p.Peer) {
-	var next *clist.CElement
-	for {
-		// This happens because the CElement we were looking at got garbage
-		// collected (removed). That is, .NextWait() returned nil. Go ahead and
-		// start from the beginning.
-		if next == nil {
-			select {
-			case <-evR.evpool.EvidenceWaitChan(): // Wait until evidence is available
-				if next = evR.evpool.EvidenceFront(); next == nil {
-					continue
-				}
-			case <-peer.Quit():
-				return
-			case <-evR.Quit():
-				return
-			}
-		}
-
-		ev := next.Value.(types.Evidence)
-		msg, retry := evR.checkSendEvidenceMessage(peer, ev)
-		if msg != nil {
-			success := peer.Send(EvidenceChannel, cdc.MustMarshalBinaryBare(msg))
-			retry = !success
-		}
-
-		if retry {
-			time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond)
-			continue
-		}
-
-		afterCh := time.After(time.Second * broadcastEvidenceIntervalS)
-		select {
-		case <-afterCh:
-			// start from the beginning every tick.
-			// TODO: only do this if we're at the end of the list!
-			next = nil
-		case <-next.NextWaitChan():
-			// see the start of the for loop for nil check
-			next = next.Next()
-		case <-peer.Quit():
-			return
-		case <-evR.Quit():
-			return
-		}
-	}
-}
-
-// checkSendEvidenceMessage returns the message to send to the peer, or nil if
-// the evidence is not applicable to the peer. If the message is nil, retry
-// reports whether we should sleep and try again.
-func (evR EvidenceReactor) checkSendEvidenceMessage(peer p2p.Peer, ev types.Evidence) (msg EvidenceMessage, retry bool) {
-
-	// make sure the peer is up to date
-	evHeight := ev.Height()
-	peerState, ok := peer.Get(types.PeerStateKey).(PeerState)
-	if !ok {
-		evR.Logger.Info("Found peer without PeerState", "peer", peer)
-		return nil, true
-	}
-
-	// NOTE: We only send evidence to peers where
-	// peerHeight - maxAge <= evidenceHeight <= peerHeight
-	maxAge := evR.evpool.State().ConsensusParams.EvidenceParams.MaxAge
-	peerHeight := peerState.GetHeight()
-	if peerHeight < evHeight {
-		// peer is behind; sleep while it catches up
-		return nil, true
-	} else if peerHeight > evHeight+maxAge {
-		// evidence is too old, skip
-		// NOTE: if evidence is too old for an honest peer,
-		// then we're behind and either it already got committed or it never will!
-		evR.Logger.Info("Not sending peer old evidence", "peerHeight", peerHeight, "evHeight", evHeight, "maxAge", maxAge, "peer", peer)
-		return nil, false
-	}
-
-	// send evidence
-	msg = &EvidenceListMessage{[]types.Evidence{ev}}
-	return msg, false
-}
-
-// PeerState describes the state of a peer.
-type PeerState interface {
-	GetHeight() int64
-}
-
-//-----------------------------------------------------------------------------
-// Messages
-
-// EvidenceMessage is a message sent or received by the EvidenceReactor.
-type EvidenceMessage interface{}
-
-func RegisterEvidenceMessages(cdc *amino.Codec) {
-	cdc.RegisterInterface((*EvidenceMessage)(nil), nil)
-	cdc.RegisterConcrete(&EvidenceListMessage{},
-		"tendermint/evidence/EvidenceListMessage", nil)
-}
-
-// DecodeMessage decodes a byte-array into an EvidenceMessage.
-func DecodeMessage(bz []byte) (msg EvidenceMessage, err error) {
-	if len(bz) > maxMsgSize {
-		return msg, fmt.Errorf("Msg exceeds max size (%d > %d)",
-			len(bz), maxMsgSize)
-	}
-	err = cdc.UnmarshalBinaryBare(bz, &msg)
-	return
-}
-
-//-------------------------------------
-
-// EvidenceListMessage contains a list of evidence.
-type EvidenceListMessage struct {
-	Evidence []types.Evidence
-}
-
-// String returns a string representation of the EvidenceListMessage.
-func (m *EvidenceListMessage) String() string { - return fmt.Sprintf("[EvidenceListMessage %v]", m.Evidence) -} diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go deleted file mode 100644 index 2f1c34e6..00000000 --- a/evidence/reactor_test.go +++ /dev/null @@ -1,182 +0,0 @@ -package evidence - -import ( - "fmt" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/go-kit/kit/log/term" - - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -// evidenceLogger is a TestingLogger which uses a different -// color for each validator ("validator" key must exist). -func evidenceLogger() log.Logger { - return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "validator" { - return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))} - } - } - return term.FgBgColor{} - }) -} - -// connect N evidence reactors through N switches -func makeAndConnectEvidenceReactors(config *cfg.Config, stateDBs []dbm.DB) []*EvidenceReactor { - N := len(stateDBs) - reactors := make([]*EvidenceReactor, N) - logger := evidenceLogger() - for i := 0; i < N; i++ { - - store := NewEvidenceStore(dbm.NewMemDB()) - pool := NewEvidencePool(stateDBs[i], store) - reactors[i] = NewEvidenceReactor(pool) - reactors[i].SetLogger(logger.With("validator", i)) - } - - p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch { - s.AddReactor("EVIDENCE", reactors[i]) - return s - - }, p2p.Connect2Switches) - return reactors -} - -// wait for all evidence on all reactors -func waitForEvidence(t *testing.T, evs types.EvidenceList, reactors []*EvidenceReactor) { - // wait for the evidence in all evpools - wg := new(sync.WaitGroup) - for i := 0; i < len(reactors); i++ { - wg.Add(1) - go _waitForEvidence(t, wg, evs, i, reactors) - } - - done := make(chan struct{}) - go func() { - wg.Wait() - close(done) - }() - - timer := time.After(TIMEOUT) - select { - case <-timer: - t.Fatal("Timed out waiting for evidence") - case <-done: - } -} - -// wait for all evidence on a single evpool -func _waitForEvidence(t *testing.T, wg *sync.WaitGroup, evs types.EvidenceList, reactorIdx int, reactors []*EvidenceReactor) { - - evpool := reactors[reactorIdx].evpool - for len(evpool.PendingEvidence()) != len(evs) { - time.Sleep(time.Millisecond * 100) - } - - reapedEv := evpool.PendingEvidence() - // put the reaped evidence in a map so we can quickly check we got everything - evMap := make(map[string]types.Evidence) - for _, e := range reapedEv { - evMap[string(e.Hash())] = e - } - for i, expectedEv := range evs { - gotEv := evMap[string(expectedEv.Hash())] - assert.Equal(t, expectedEv, gotEv, - fmt.Sprintf("evidence at index %d on reactor %d don't match: %v vs %v", - i, reactorIdx, expectedEv, gotEv)) - } - - wg.Done() -} - -func sendEvidence(t *testing.T, evpool *EvidencePool, valAddr []byte, n int) types.EvidenceList { - evList := make([]types.Evidence, n) - for i := 0; i < n; i++ { - ev := types.NewMockGoodEvidence(int64(i+1), 0, valAddr) - err := evpool.AddEvidence(ev) - assert.Nil(t, err) - evList[i] = ev - } - return evList -} - -var ( - NUM_EVIDENCE = 10 - TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow -) - -func TestReactorBroadcastEvidence(t *testing.T) { - config := cfg.TestConfig() - N := 7 - - // 
create statedb for everyone - stateDBs := make([]dbm.DB, N) - valAddr := []byte("myval") - // we need validators saved for heights at least as high as we have evidence for - height := int64(NUM_EVIDENCE) + 10 - for i := 0; i < N; i++ { - stateDBs[i] = initializeValidatorState(valAddr, height) - } - - // make reactors from statedb - reactors := makeAndConnectEvidenceReactors(config, stateDBs) - - // set the peer height on each reactor - for _, r := range reactors { - for _, peer := range r.Switch.Peers().List() { - ps := peerState{height} - peer.Set(types.PeerStateKey, ps) - } - } - - // send a bunch of valid evidence to the first reactor's evpool - // and wait for them all to be received in the others - evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE) - waitForEvidence(t, evList, reactors) -} - -type peerState struct { - height int64 -} - -func (ps peerState) GetHeight() int64 { - return ps.height -} - -func TestReactorSelectiveBroadcast(t *testing.T) { - config := cfg.TestConfig() - - valAddr := []byte("myval") - height1 := int64(NUM_EVIDENCE) + 10 - height2 := int64(NUM_EVIDENCE) / 2 - - // DB1 is ahead of DB2 - stateDB1 := initializeValidatorState(valAddr, height1) - stateDB2 := initializeValidatorState(valAddr, height2) - - // make reactors from statedb - reactors := makeAndConnectEvidenceReactors(config, []dbm.DB{stateDB1, stateDB2}) - peer := reactors[0].Switch.Peers().List()[0] - ps := peerState{height2} - peer.Set(types.PeerStateKey, ps) - - // send a bunch of valid evidence to the first reactor's evpool - evList := sendEvidence(t, reactors[0].evpool, valAddr, NUM_EVIDENCE) - - // only ones less than the peers height should make it through - waitForEvidence(t, evList[:NUM_EVIDENCE/2], reactors[1:2]) - - // peers should still be connected - peers := reactors[1].Switch.Peers().List() - assert.Equal(t, 1, len(peers)) -} diff --git a/evidence/store.go b/evidence/store.go deleted file mode 100644 index 6af5d75d..00000000 --- a/evidence/store.go +++ /dev/null @@ -1,190 +0,0 @@ -package evidence - -import ( - "fmt" - - "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" -) - -/* -Requirements: - - Valid new evidence must be persisted immediately and never forgotten - - Uncommitted evidence must be continuously broadcast - - Uncommitted evidence has a partial order, the evidence's priority - -Impl: - - First commit atomically in outqueue, pending, lookup. - - Once broadcast, remove from outqueue. No need to sync - - Once committed, atomically remove from pending and update lookup. 
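- - Keys embed the big-endian padded height (see bE below), so iterating a
-   prefix yields evidence in height order; outqueue keys sort by priority first.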
- -Schema for indexing evidence (note you need both height and hash to find a piece of evidence): - -"evidence-lookup"// -> EvidenceInfo -"evidence-outqueue"/// -> EvidenceInfo -"evidence-pending"// -> EvidenceInfo -*/ - -type EvidenceInfo struct { - Committed bool - Priority int64 - Evidence types.Evidence -} - -const ( - baseKeyLookup = "evidence-lookup" // all evidence - baseKeyOutqueue = "evidence-outqueue" // not-yet broadcast - baseKeyPending = "evidence-pending" // broadcast but not committed -) - -func keyLookup(evidence types.Evidence) []byte { - return keyLookupFromHeightAndHash(evidence.Height(), evidence.Hash()) -} - -// big endian padded hex -func bE(h int64) string { - return fmt.Sprintf("%0.16X", h) -} - -func keyLookupFromHeightAndHash(height int64, hash []byte) []byte { - return _key("%s/%s/%X", baseKeyLookup, bE(height), hash) -} - -func keyOutqueue(evidence types.Evidence, priority int64) []byte { - return _key("%s/%s/%s/%X", baseKeyOutqueue, bE(priority), bE(evidence.Height()), evidence.Hash()) -} - -func keyPending(evidence types.Evidence) []byte { - return _key("%s/%s/%X", baseKeyPending, bE(evidence.Height()), evidence.Hash()) -} - -func _key(fmt_ string, o ...interface{}) []byte { - return []byte(fmt.Sprintf(fmt_, o...)) -} - -// EvidenceStore is a store of all the evidence we've seen, including -// evidence that has been committed, evidence that has been verified but not broadcast, -// and evidence that has been broadcast but not yet committed. -type EvidenceStore struct { - db dbm.DB -} - -func NewEvidenceStore(db dbm.DB) *EvidenceStore { - return &EvidenceStore{ - db: db, - } -} - -// PriorityEvidence returns the evidence from the outqueue, sorted by highest priority. -func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { - // reverse the order so highest priority is first - l := store.ListEvidence(baseKeyOutqueue) - l2 := make([]types.Evidence, len(l)) - for i := range l { - l2[i] = l[len(l)-1-i] - } - return l2 -} - -// PendingEvidence returns all known uncommitted evidence. -func (store *EvidenceStore) PendingEvidence() (evidence []types.Evidence) { - return store.ListEvidence(baseKeyPending) -} - -// ListEvidence lists the evidence for the given prefix key. -// It is wrapped by PriorityEvidence and PendingEvidence for convenience. -func (store *EvidenceStore) ListEvidence(prefixKey string) (evidence []types.Evidence) { - iter := dbm.IteratePrefix(store.db, []byte(prefixKey)) - for ; iter.Valid(); iter.Next() { - val := iter.Value() - - var ei EvidenceInfo - err := cdc.UnmarshalBinaryBare(val, &ei) - if err != nil { - panic(err) - } - evidence = append(evidence, ei.Evidence) - } - return evidence -} - -// GetEvidence fetches the evidence with the given height and hash. -func (store *EvidenceStore) GetEvidence(height int64, hash []byte) *EvidenceInfo { - key := keyLookupFromHeightAndHash(height, hash) - val := store.db.Get(key) - - if len(val) == 0 { - return nil - } - var ei EvidenceInfo - err := cdc.UnmarshalBinaryBare(val, &ei) - if err != nil { - panic(err) - } - return &ei -} - -// AddNewEvidence adds the given evidence to the database. -// It returns false if the evidence is already stored. 
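-// The outqueue and pending entries are written with Set and the lookup entry
-// with SetSync, so the write is flushed to disk before this returns.
-//
-// Illustrative usage (mock evidence from the tests):
-//
-//	ev := types.NewMockGoodEvidence(2, 1, []byte("val1"))
-//	store.AddNewEvidence(ev, 10) // true
-//	store.AddNewEvidence(ev, 10) // false: already stored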
-func (store *EvidenceStore) AddNewEvidence(evidence types.Evidence, priority int64) bool { - // check if we already have seen it - ei_ := store.GetEvidence(evidence.Height(), evidence.Hash()) - if ei_ != nil && ei_.Evidence != nil { - return false - } - - ei := EvidenceInfo{ - Committed: false, - Priority: priority, - Evidence: evidence, - } - eiBytes := cdc.MustMarshalBinaryBare(ei) - - // add it to the store - key := keyOutqueue(evidence, priority) - store.db.Set(key, eiBytes) - - key = keyPending(evidence) - store.db.Set(key, eiBytes) - - key = keyLookup(evidence) - store.db.SetSync(key, eiBytes) - - return true -} - -// MarkEvidenceAsBroadcasted removes evidence from Outqueue. -func (store *EvidenceStore) MarkEvidenceAsBroadcasted(evidence types.Evidence) { - ei := store.getEvidenceInfo(evidence) - key := keyOutqueue(evidence, ei.Priority) - store.db.Delete(key) -} - -// MarkEvidenceAsCommitted removes evidence from pending and outqueue and sets the state to committed. -func (store *EvidenceStore) MarkEvidenceAsCommitted(evidence types.Evidence) { - // if its committed, its been broadcast - store.MarkEvidenceAsBroadcasted(evidence) - - pendingKey := keyPending(evidence) - store.db.Delete(pendingKey) - - ei := store.getEvidenceInfo(evidence) - ei.Committed = true - - lookupKey := keyLookup(evidence) - store.db.SetSync(lookupKey, cdc.MustMarshalBinaryBare(ei)) -} - -//--------------------------------------------------- -// utils - -func (store *EvidenceStore) getEvidenceInfo(evidence types.Evidence) EvidenceInfo { - key := keyLookup(evidence) - var ei EvidenceInfo - b := store.db.Get(key) - err := cdc.UnmarshalBinaryBare(b, &ei) - if err != nil { - panic(err) - } - return ei -} diff --git a/evidence/store_test.go b/evidence/store_test.go deleted file mode 100644 index 3fdb3ba6..00000000 --- a/evidence/store_test.go +++ /dev/null @@ -1,109 +0,0 @@ -package evidence - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" -) - -//------------------------------------------- - -func TestStoreAddDuplicate(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewEvidenceStore(db) - - priority := int64(10) - ev := types.NewMockGoodEvidence(2, 1, []byte("val1")) - - added := store.AddNewEvidence(ev, priority) - assert.True(added) - - // cant add twice - added = store.AddNewEvidence(ev, priority) - assert.False(added) -} - -func TestStoreMark(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewEvidenceStore(db) - - // before we do anything, priority/pending are empty - priorityEv := store.PriorityEvidence() - pendingEv := store.PendingEvidence() - assert.Equal(0, len(priorityEv)) - assert.Equal(0, len(pendingEv)) - - priority := int64(10) - ev := types.NewMockGoodEvidence(2, 1, []byte("val1")) - - added := store.AddNewEvidence(ev, priority) - assert.True(added) - - // get the evidence. verify. 
should be uncommitted - ei := store.GetEvidence(ev.Height(), ev.Hash()) - assert.Equal(ev, ei.Evidence) - assert.Equal(priority, ei.Priority) - assert.False(ei.Committed) - - // new evidence should be returns in priority/pending - priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() - assert.Equal(1, len(priorityEv)) - assert.Equal(1, len(pendingEv)) - - // priority is now empty - store.MarkEvidenceAsBroadcasted(ev) - priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() - assert.Equal(0, len(priorityEv)) - assert.Equal(1, len(pendingEv)) - - // priority and pending are now empty - store.MarkEvidenceAsCommitted(ev) - priorityEv = store.PriorityEvidence() - pendingEv = store.PendingEvidence() - assert.Equal(0, len(priorityEv)) - assert.Equal(0, len(pendingEv)) - - // evidence should show committed - ei = store.GetEvidence(ev.Height(), ev.Hash()) - assert.Equal(ev, ei.Evidence) - assert.Equal(priority, ei.Priority) - assert.True(ei.Committed) -} - -func TestStorePriority(t *testing.T) { - assert := assert.New(t) - - db := dbm.NewMemDB() - store := NewEvidenceStore(db) - - // sorted by priority and then height - cases := []struct { - ev types.MockGoodEvidence - priority int64 - }{ - {types.NewMockGoodEvidence(2, 1, []byte("val1")), 17}, - {types.NewMockGoodEvidence(5, 2, []byte("val2")), 15}, - {types.NewMockGoodEvidence(10, 2, []byte("val2")), 13}, - {types.NewMockGoodEvidence(100, 2, []byte("val2")), 11}, - {types.NewMockGoodEvidence(90, 2, []byte("val2")), 11}, - {types.NewMockGoodEvidence(80, 2, []byte("val2")), 11}, - } - - for _, c := range cases { - added := store.AddNewEvidence(c.ev, c.priority) - assert.True(added) - } - - evList := store.PriorityEvidence() - for i, ev := range evList { - assert.Equal(ev, cases[i].ev) - } -} diff --git a/evidence/wire.go b/evidence/wire.go deleted file mode 100644 index 842e0707..00000000 --- a/evidence/wire.go +++ /dev/null @@ -1,25 +0,0 @@ -package evidence - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/types" -) - -var cdc = amino.NewCodec() - -func init() { - RegisterEvidenceMessages(cdc) - crypto.RegisterAmino(cdc) - types.RegisterEvidences(cdc) - RegisterMockEvidences(cdc) // For testing -} - -//------------------------------------------- - -func RegisterMockEvidences(cdc *amino.Codec) { - cdc.RegisterConcrete(types.MockGoodEvidence{}, - "tendermint/MockGoodEvidence", nil) - cdc.RegisterConcrete(types.MockBadEvidence{}, - "tendermint/MockBadEvidence", nil) -} diff --git a/libs/events/Makefile b/libs/events/Makefile deleted file mode 100644 index 696aafff..00000000 --- a/libs/events/Makefile +++ /dev/null @@ -1,9 +0,0 @@ -.PHONY: docs -REPO:=github.com/tendermint/tendermint/libs/events - -docs: - @go get github.com/davecheney/godoc2md - godoc2md $(REPO) > README.md - -test: - go test -v ./... 
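
The amino registrations in `evidence/wire.go` above are what let `DecodeMessage`
round-trip concrete message types through the interface-typed `EvidenceMessage`.
A minimal sketch of that round trip, assuming it runs inside the `evidence`
package's tests (the mock evidence types are only registered on the codec there):

    // Marshal a list message as the interface type, then decode it back.
    var msg EvidenceMessage = &EvidenceListMessage{[]types.Evidence{types.NewMockGoodEvidence(5, 0, []byte("val1"))}}
    bz := cdc.MustMarshalBinaryBare(msg)
    decoded, err := DecodeMessage(bz) // errors if len(bz) > maxMsgSize
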
diff --git a/libs/events/README.md b/libs/events/README.md deleted file mode 100644 index 14aa498f..00000000 --- a/libs/events/README.md +++ /dev/null @@ -1,175 +0,0 @@ - - -# events -`import "github.com/tendermint/tendermint/libs/events"` - -* [Overview](#pkg-overview) -* [Index](#pkg-index) - -## Overview -Pub-Sub in go with event caching - - - - -## Index -* [type EventCache](#EventCache) - * [func NewEventCache(evsw Fireable) *EventCache](#NewEventCache) - * [func (evc *EventCache) FireEvent(event string, data EventData)](#EventCache.FireEvent) - * [func (evc *EventCache) Flush()](#EventCache.Flush) -* [type EventCallback](#EventCallback) -* [type EventData](#EventData) -* [type EventSwitch](#EventSwitch) - * [func NewEventSwitch() EventSwitch](#NewEventSwitch) -* [type Eventable](#Eventable) -* [type Fireable](#Fireable) - - -#### Package files -[event_cache.go](/src/github.com/tendermint/tendermint/libs/events/event_cache.go) [events.go](/src/github.com/tendermint/tendermint/libs/events/events.go) - - - - - - -## type [EventCache](/src/target/event_cache.go?s=116:179#L5) -``` go -type EventCache struct { - // contains filtered or unexported fields -} -``` -An EventCache buffers events for a Fireable -All events are cached. Filtering happens on Flush - - - - - - - -### func [NewEventCache](/src/target/event_cache.go?s=239:284#L11) -``` go -func NewEventCache(evsw Fireable) *EventCache -``` -Create a new EventCache with an EventSwitch as backend - - - - - -### func (\*EventCache) [FireEvent](/src/target/event_cache.go?s=449:511#L24) -``` go -func (evc *EventCache) FireEvent(event string, data EventData) -``` -Cache an event to be fired upon finality. - - - - -### func (\*EventCache) [Flush](/src/target/event_cache.go?s=735:765#L31) -``` go -func (evc *EventCache) Flush() -``` -Fire events by running evsw.FireEvent on all cached events. Blocks. -Clears cached events - - - - -## type [EventCallback](/src/target/events.go?s=4201:4240#L185) -``` go -type EventCallback func(data EventData) -``` - - - - - - - - - -## type [EventData](/src/target/events.go?s=243:294#L14) -``` go -type EventData interface { -} -``` -Generic event data can be typed and registered with tendermint/go-amino -via concrete implementation of this interface - - - - - - - - - - -## type [EventSwitch](/src/target/events.go?s=560:771#L29) -``` go -type EventSwitch interface { - cmn.Service - Fireable - - AddListenerForEvent(listenerID, event string, cb EventCallback) - RemoveListenerForEvent(event string, listenerID string) - RemoveListener(listenerID string) -} -``` - - - - - - -### func [NewEventSwitch](/src/target/events.go?s=917:950#L46) -``` go -func NewEventSwitch() EventSwitch -``` - - - - -## type [Eventable](/src/target/events.go?s=378:440#L20) -``` go -type Eventable interface { - SetEventSwitch(evsw EventSwitch) -} -``` -reactors and other modules should export -this interface to become eventable - - - - - - - - - - -## type [Fireable](/src/target/events.go?s=490:558#L25) -``` go -type Fireable interface { - FireEvent(event string, data EventData) -} -``` -an event switch or cache implements fireable - - - - - - - - - - - - - - -- - - -Generated by [godoc2md](http://godoc.org/github.com/davecheney/godoc2md) diff --git a/libs/events/event_cache.go b/libs/events/event_cache.go deleted file mode 100644 index f508e873..00000000 --- a/libs/events/event_cache.go +++ /dev/null @@ -1,37 +0,0 @@ -package events - -// An EventCache buffers events for a Fireable -// All events are cached. 
Filtering happens on Flush -type EventCache struct { - evsw Fireable - events []eventInfo -} - -// Create a new EventCache with an EventSwitch as backend -func NewEventCache(evsw Fireable) *EventCache { - return &EventCache{ - evsw: evsw, - } -} - -// a cached event -type eventInfo struct { - event string - data EventData -} - -// Cache an event to be fired upon finality. -func (evc *EventCache) FireEvent(event string, data EventData) { - // append to list (go will grow our backing array exponentially) - evc.events = append(evc.events, eventInfo{event, data}) -} - -// Fire events by running evsw.FireEvent on all cached events. Blocks. -// Clears cached events -func (evc *EventCache) Flush() { - for _, ei := range evc.events { - evc.evsw.FireEvent(ei.event, ei.data) - } - // Clear the buffer, since we only add to it with append it's safe to just set it to nil and maybe safe an allocation - evc.events = nil -} diff --git a/libs/events/event_cache_test.go b/libs/events/event_cache_test.go deleted file mode 100644 index ab321da3..00000000 --- a/libs/events/event_cache_test.go +++ /dev/null @@ -1,35 +0,0 @@ -package events - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestEventCache_Flush(t *testing.T) { - evsw := NewEventSwitch() - evsw.Start() - evsw.AddListenerForEvent("nothingness", "", func(data EventData) { - // Check we are not initialising an empty buffer full of zeroed eventInfos in the EventCache - require.FailNow(t, "We should never receive a message on this switch since none are fired") - }) - evc := NewEventCache(evsw) - evc.Flush() - // Check after reset - evc.Flush() - fail := true - pass := false - evsw.AddListenerForEvent("somethingness", "something", func(data EventData) { - if fail { - require.FailNow(t, "Shouldn't see a message until flushed") - } - pass = true - }) - evc.FireEvent("something", struct{ int }{1}) - evc.FireEvent("something", struct{ int }{2}) - evc.FireEvent("something", struct{ int }{3}) - fail = false - evc.Flush() - assert.True(t, pass) -} diff --git a/libs/events/events.go b/libs/events/events.go deleted file mode 100644 index 075f9b42..00000000 --- a/libs/events/events.go +++ /dev/null @@ -1,220 +0,0 @@ -/* -Pub-Sub in go with event caching -*/ -package events - -import ( - "sync" - - cmn "github.com/tendermint/tmlibs/common" -) - -// Generic event data can be typed and registered with tendermint/go-amino -// via concrete implementation of this interface -type EventData interface { - //AssertIsEventData() -} - -// reactors and other modules should export -// this interface to become eventable -type Eventable interface { - SetEventSwitch(evsw EventSwitch) -} - -// an event switch or cache implements fireable -type Fireable interface { - FireEvent(event string, data EventData) -} - -type EventSwitch interface { - cmn.Service - Fireable - - AddListenerForEvent(listenerID, event string, cb EventCallback) - RemoveListenerForEvent(event string, listenerID string) - RemoveListener(listenerID string) -} - -type eventSwitch struct { - cmn.BaseService - - mtx sync.RWMutex - eventCells map[string]*eventCell - listeners map[string]*eventListener -} - -func NewEventSwitch() EventSwitch { - evsw := &eventSwitch{ - eventCells: make(map[string]*eventCell), - listeners: make(map[string]*eventListener), - } - evsw.BaseService = *cmn.NewBaseService(nil, "EventSwitch", evsw) - return evsw -} - -func (evsw *eventSwitch) OnStart() error { - return nil -} - -func (evsw *eventSwitch) OnStop() {} - -func (evsw 
*eventSwitch) AddListenerForEvent(listenerID, event string, cb EventCallback) { - // Get/Create eventCell and listener - evsw.mtx.Lock() - eventCell := evsw.eventCells[event] - if eventCell == nil { - eventCell = newEventCell() - evsw.eventCells[event] = eventCell - } - listener := evsw.listeners[listenerID] - if listener == nil { - listener = newEventListener(listenerID) - evsw.listeners[listenerID] = listener - } - evsw.mtx.Unlock() - - // Add event and listener - eventCell.AddListener(listenerID, cb) - listener.AddEvent(event) -} - -func (evsw *eventSwitch) RemoveListener(listenerID string) { - // Get and remove listener - evsw.mtx.RLock() - listener := evsw.listeners[listenerID] - evsw.mtx.RUnlock() - if listener == nil { - return - } - - evsw.mtx.Lock() - delete(evsw.listeners, listenerID) - evsw.mtx.Unlock() - - // Remove callback for each event. - listener.SetRemoved() - for _, event := range listener.GetEvents() { - evsw.RemoveListenerForEvent(event, listenerID) - } -} - -func (evsw *eventSwitch) RemoveListenerForEvent(event string, listenerID string) { - // Get eventCell - evsw.mtx.Lock() - eventCell := evsw.eventCells[event] - evsw.mtx.Unlock() - - if eventCell == nil { - return - } - - // Remove listenerID from eventCell - numListeners := eventCell.RemoveListener(listenerID) - - // Maybe garbage collect eventCell. - if numListeners == 0 { - // Lock again and double check. - evsw.mtx.Lock() // OUTER LOCK - eventCell.mtx.Lock() // INNER LOCK - if len(eventCell.listeners) == 0 { - delete(evsw.eventCells, event) - } - eventCell.mtx.Unlock() // INNER LOCK - evsw.mtx.Unlock() // OUTER LOCK - } -} - -func (evsw *eventSwitch) FireEvent(event string, data EventData) { - // Get the eventCell - evsw.mtx.RLock() - eventCell := evsw.eventCells[event] - evsw.mtx.RUnlock() - - if eventCell == nil { - return - } - - // Fire event for all listeners in eventCell - eventCell.FireEvent(data) -} - -//----------------------------------------------------------------------------- - -// eventCell handles keeping track of listener callbacks for a given event. 
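-// Callbacks are invoked synchronously under the cell's read lock (see
-// FireEvent below), so a slow listener delays every other listener for that
-// event as well as any goroutine waiting to add or remove one.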
-type eventCell struct { - mtx sync.RWMutex - listeners map[string]EventCallback -} - -func newEventCell() *eventCell { - return &eventCell{ - listeners: make(map[string]EventCallback), - } -} - -func (cell *eventCell) AddListener(listenerID string, cb EventCallback) { - cell.mtx.Lock() - cell.listeners[listenerID] = cb - cell.mtx.Unlock() -} - -func (cell *eventCell) RemoveListener(listenerID string) int { - cell.mtx.Lock() - delete(cell.listeners, listenerID) - numListeners := len(cell.listeners) - cell.mtx.Unlock() - return numListeners -} - -func (cell *eventCell) FireEvent(data EventData) { - cell.mtx.RLock() - for _, listener := range cell.listeners { - listener(data) - } - cell.mtx.RUnlock() -} - -//----------------------------------------------------------------------------- - -type EventCallback func(data EventData) - -type eventListener struct { - id string - - mtx sync.RWMutex - removed bool - events []string -} - -func newEventListener(id string) *eventListener { - return &eventListener{ - id: id, - removed: false, - events: nil, - } -} - -func (evl *eventListener) AddEvent(event string) { - evl.mtx.Lock() - defer evl.mtx.Unlock() - - if evl.removed { - return - } - evl.events = append(evl.events, event) -} - -func (evl *eventListener) GetEvents() []string { - evl.mtx.RLock() - defer evl.mtx.RUnlock() - - events := make([]string, len(evl.events)) - copy(events, evl.events) - return events -} - -func (evl *eventListener) SetRemoved() { - evl.mtx.Lock() - defer evl.mtx.Unlock() - evl.removed = true -} diff --git a/libs/events/events_test.go b/libs/events/events_test.go deleted file mode 100644 index 4995ae73..00000000 --- a/libs/events/events_test.go +++ /dev/null @@ -1,380 +0,0 @@ -package events - -import ( - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -// TestAddListenerForEventFireOnce sets up an EventSwitch, subscribes a single -// listener to an event, and sends a string "data". -func TestAddListenerForEventFireOnce(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - messages := make(chan EventData) - evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - messages <- data - }) - go evsw.FireEvent("event", "data") - received := <-messages - if received != "data" { - t.Errorf("Message received does not match: %v", received) - } -} - -// TestAddListenerForEventFireMany sets up an EventSwitch, subscribes a single -// listener to an event, and sends a thousand integers. -func TestAddListenerForEventFireMany(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum := make(chan uint64) - doneSending := make(chan uint64) - numbers := make(chan uint64, 4) - // subscribe one listener for one event - evsw.AddListenerForEvent("listener", "event", - func(data EventData) { - numbers <- data.(uint64) - }) - // collect received events - go sumReceivedNumbers(numbers, doneSum) - // go fire events - go fireEvents(evsw, "event", doneSending, uint64(1)) - checkSum := <-doneSending - close(numbers) - eventSum := <-doneSum - if checkSum != eventSum { - t.Errorf("Not all messages sent were received.\n") - } -} - -// TestAddListenerForDifferentEvents sets up an EventSwitch, subscribes a single -// listener to three different events and sends a thousand integers for each -// of the three events. 
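-// The single listener's received sum must equal the total sent across all
-// three events, since all three callbacks feed the same numbers channel.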
-func TestAddListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - doneSending3 := make(chan uint64) - numbers := make(chan uint64, 4) - // subscribe one listener to three events - evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers <- data.(uint64) - }) - evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers <- data.(uint64) - }) - evsw.AddListenerForEvent("listener", "event3", - func(data EventData) { - numbers <- data.(uint64) - }) - // collect received events - go sumReceivedNumbers(numbers, doneSum) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1)) - go fireEvents(evsw, "event3", doneSending3, uint64(1)) - var checkSum uint64 = 0 - checkSum += <-doneSending1 - checkSum += <-doneSending2 - checkSum += <-doneSending3 - close(numbers) - eventSum := <-doneSum - if checkSum != eventSum { - t.Errorf("Not all messages sent were received.\n") - } -} - -// TestAddDifferentListenerForDifferentEvents sets up an EventSwitch, -// subscribes a first listener to three events, and subscribes a second -// listener to two of those three events, and then sends a thousand integers -// for each of the three events. -func TestAddDifferentListenerForDifferentEvents(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - doneSending3 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - // collect received events for listener1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for listener2 - go sumReceivedNumbers(numbers2, doneSum2) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) - checkSumEvent1 := <-doneSending1 - checkSumEvent2 := <-doneSending2 - checkSumEvent3 := <-doneSending3 - checkSum1 := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 - checkSum2 := checkSumEvent2 + checkSumEvent3 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSum1 != eventSum1 || - checkSum2 != eventSum2 { - t.Errorf("Not all messages sent were received for different listeners to different events.\n") - } -} - -// TestAddAndRemoveListener sets up an EventSwitch, subscribes a listener to -// two events, fires a thousand integers for the first event, then unsubscribes -// the listener and fires a thousand integers for the 
second event. -func TestAddAndRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - // collect received events for event1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for event2 - go sumReceivedNumbers(numbers2, doneSum2) - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - checkSumEvent1 := <-doneSending1 - // after sending all event1, unsubscribe for all events - evsw.RemoveListener("listener") - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - checkSumEvent2 := <-doneSending2 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSumEvent1 != eventSum1 || - // correct value asserted by preceding tests, suffices to be non-zero - checkSumEvent2 == uint64(0) || - eventSum2 != uint64(0) { - t.Errorf("Not all messages sent were received or unsubscription did not register.\n") - } -} - -// TestRemoveListener does basic tests on adding and removing -func TestRemoveListener(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - count := 10 - sum1, sum2 := 0, 0 - // add some listeners and make sure they work - evsw.AddListenerForEvent("listener", "event1", - func(data EventData) { - sum1++ - }) - evsw.AddListenerForEvent("listener", "event2", - func(data EventData) { - sum2++ - }) - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count, sum1) - assert.Equal(t, count, sum2) - - // remove one by event and make sure it is gone - evsw.RemoveListenerForEvent("event2", "listener") - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count*2, sum1) - assert.Equal(t, count, sum2) - - // remove the listener entirely and make sure both gone - evsw.RemoveListener("listener") - for i := 0; i < count; i++ { - evsw.FireEvent("event1", true) - evsw.FireEvent("event2", true) - } - assert.Equal(t, count*2, sum1) - assert.Equal(t, count, sum2) -} - -// TestAddAndRemoveListenersAsync sets up an EventSwitch, subscribes two -// listeners to three events, and fires a thousand integers for each event. -// These two listeners serve as the baseline validation while other listeners -// are randomly subscribed and unsubscribed. -// More precisely it randomly subscribes new listeners (different from the first -// two listeners) to one of these three events. At the same time it starts -// randomly unsubscribing these additional listeners from all events they are -// at that point subscribed to. -// NOTE: it is important to run this test with race conditions tracking on, -// `go test -race`, to examine for possible race conditions. 
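-// The two baseline listeners must still receive every fired integer exactly
-// once: the checksums compare what the firing goroutines sent with what
-// listener1 and listener2 each received.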
-func TestRemoveListenersAsync(t *testing.T) { - evsw := NewEventSwitch() - err := evsw.Start() - if err != nil { - t.Errorf("Failed to start EventSwitch, error: %v", err) - } - doneSum1 := make(chan uint64) - doneSum2 := make(chan uint64) - doneSending1 := make(chan uint64) - doneSending2 := make(chan uint64) - doneSending3 := make(chan uint64) - numbers1 := make(chan uint64, 4) - numbers2 := make(chan uint64, 4) - // subscribe two listener to three events - evsw.AddListenerForEvent("listener1", "event1", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event2", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener1", "event3", - func(data EventData) { - numbers1 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event1", - func(data EventData) { - numbers2 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event2", - func(data EventData) { - numbers2 <- data.(uint64) - }) - evsw.AddListenerForEvent("listener2", "event3", - func(data EventData) { - numbers2 <- data.(uint64) - }) - // collect received events for event1 - go sumReceivedNumbers(numbers1, doneSum1) - // collect received events for event2 - go sumReceivedNumbers(numbers2, doneSum2) - addListenersStress := func() { - s1 := rand.NewSource(time.Now().UnixNano()) - r1 := rand.New(s1) - for k := uint16(0); k < 400; k++ { - listenerNumber := r1.Intn(100) + 3 - eventNumber := r1.Intn(3) + 1 - go evsw.AddListenerForEvent(fmt.Sprintf("listener%v", listenerNumber), - fmt.Sprintf("event%v", eventNumber), - func(_ EventData) {}) - } - } - removeListenersStress := func() { - s2 := rand.NewSource(time.Now().UnixNano()) - r2 := rand.New(s2) - for k := uint16(0); k < 80; k++ { - listenerNumber := r2.Intn(100) + 3 - go evsw.RemoveListener(fmt.Sprintf("listener%v", listenerNumber)) - } - } - addListenersStress() - // go fire events - go fireEvents(evsw, "event1", doneSending1, uint64(1)) - removeListenersStress() - go fireEvents(evsw, "event2", doneSending2, uint64(1001)) - go fireEvents(evsw, "event3", doneSending3, uint64(2001)) - checkSumEvent1 := <-doneSending1 - checkSumEvent2 := <-doneSending2 - checkSumEvent3 := <-doneSending3 - checkSum := checkSumEvent1 + checkSumEvent2 + checkSumEvent3 - close(numbers1) - close(numbers2) - eventSum1 := <-doneSum1 - eventSum2 := <-doneSum2 - if checkSum != eventSum1 || - checkSum != eventSum2 { - t.Errorf("Not all messages sent were received.\n") - } -} - -//------------------------------------------------------------------------------ -// Helper functions - -// sumReceivedNumbers takes two channels and adds all numbers received -// until the receiving channel `numbers` is closed; it then sends the sum -// on `doneSum` and closes that channel. Expected to be run in a go-routine. -func sumReceivedNumbers(numbers, doneSum chan uint64) { - var sum uint64 = 0 - for { - j, more := <-numbers - sum += j - if !more { - doneSum <- sum - close(doneSum) - return - } - } -} - -// fireEvents takes an EventSwitch and fires a thousand integers under -// a given `event` with the integers mootonically increasing from `offset` -// to `offset` + 999. It additionally returns the addition of all integers -// sent on `doneChan` for assertion that all events have been sent, and enabling -// the test to assert all events have also been received. 
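-// With a given offset o, the checksum is the arithmetic series
-// o + (o+1) + ... + (o+999) = 1000*o + 499500, so streams fired from
-// different offsets produce distinguishable sums.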
-func fireEvents(evsw EventSwitch, event string, doneChan chan uint64,
-	offset uint64) {
-	var sentSum uint64 = 0
-	for i := offset; i <= offset+uint64(999); i++ {
-		sentSum += i
-		evsw.FireEvent(event, i)
-	}
-	doneChan <- sentSum
-	close(doneChan)
-}
diff --git a/libs/pubsub/example_test.go b/libs/pubsub/example_test.go
deleted file mode 100644
index 260521cd..00000000
--- a/libs/pubsub/example_test.go
+++ /dev/null
@@ -1,28 +0,0 @@
-package pubsub_test
-
-import (
-	"context"
-	"testing"
-
-	"github.com/stretchr/testify/require"
-
-	"github.com/tendermint/tmlibs/log"
-
-	"github.com/tendermint/tendermint/libs/pubsub"
-	"github.com/tendermint/tendermint/libs/pubsub/query"
-)
-
-func TestExample(t *testing.T) {
-	s := pubsub.NewServer()
-	s.SetLogger(log.TestingLogger())
-	s.Start()
-	defer s.Stop()
-
-	ctx := context.Background()
-	ch := make(chan interface{}, 1)
-	err := s.Subscribe(ctx, "example-client", query.MustParse("abci.account.name='John'"), ch)
-	require.NoError(t, err)
-	err = s.PublishWithTags(ctx, "Tombstone", pubsub.NewTagMap(map[string]string{"abci.account.name": "John"}))
-	require.NoError(t, err)
-	assertReceive(t, "Tombstone", ch)
-}
diff --git a/libs/pubsub/pubsub.go b/libs/pubsub/pubsub.go
deleted file mode 100644
index 776e0653..00000000
--- a/libs/pubsub/pubsub.go
+++ /dev/null
@@ -1,351 +0,0 @@
-// Package pubsub implements a pub-sub model with a single publisher (Server)
-// and multiple subscribers (clients).
-//
-// That said, you can have multiple publishers by sharing a pointer to the
-// server, or by giving the same channel to each publisher and publishing
-// messages from that channel (fan-in).
-//
-// Clients subscribe to messages, which can be of any type, using a query.
-// When a message is published, it is matched against all queries; if there
-// is a match, the message is pushed to every client subscribed to that
-// query. See the query subpackage for our implementation.
-package pubsub
-
-import (
-	"context"
-	"errors"
-	"sync"
-
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-type operation int
-
-const (
-	sub operation = iota
-	pub
-	unsub
-	shutdown
-)
-
-var (
-	// ErrSubscriptionNotFound is returned when a client tries to unsubscribe
-	// from a subscription that does not exist.
-	ErrSubscriptionNotFound = errors.New("subscription not found")
-
-	// ErrAlreadySubscribed is returned when a client tries to subscribe twice or
-	// more using the same query.
-	ErrAlreadySubscribed = errors.New("already subscribed")
-)
-
-type cmd struct {
-	op       operation
-	query    Query
-	ch       chan<- interface{}
-	clientID string
-	msg      interface{}
-	tags     TagMap
-}
-
-// Query defines an interface for a query to be used for subscribing.
-type Query interface {
-	Matches(tags TagMap) bool
-	String() string
-}
-
-// Server allows clients to subscribe to and unsubscribe from messages,
-// publishes messages with or without tags, and manages internal state.
-type Server struct {
-	cmn.BaseService
-
-	cmds    chan cmd
-	cmdsCap int
-
-	mtx           sync.RWMutex
-	subscriptions map[string]map[string]Query // subscriber -> query (string) -> Query
-}
-
-// Option sets a parameter for the server.
-type Option func(*Server)
-
-// TagMap associates tags with a message. Subscribers' queries are matched
-// against these tags to choose the messages they will receive.
-type TagMap interface {
-	// Get returns the value for a key, or an empty string if no value is
-	// present. The ok result indicates whether the value was found in the tags.
-	Get(key string) (value string, ok bool)
-	// Len returns the number of tags.
- Len() int -} - -type tagMap map[string]string - -var _ TagMap = (*tagMap)(nil) - -// NewTagMap constructs a new immutable tag set from a map. -func NewTagMap(data map[string]string) TagMap { - return tagMap(data) -} - -// Get returns the value for a key, or nil if no value is present. -// The ok result indicates whether value was found in the tags. -func (ts tagMap) Get(key string) (value string, ok bool) { - value, ok = ts[key] - return -} - -// Len returns the number of tags. -func (ts tagMap) Len() int { - return len(ts) -} - -// NewServer returns a new server. See the commentary on the Option functions -// for a detailed description of how to configure buffering. If no options are -// provided, the resulting server's queue is unbuffered. -func NewServer(options ...Option) *Server { - s := &Server{ - subscriptions: make(map[string]map[string]Query), - } - s.BaseService = *cmn.NewBaseService(nil, "PubSub", s) - - for _, option := range options { - option(s) - } - - // if BufferCapacity option was not set, the channel is unbuffered - s.cmds = make(chan cmd, s.cmdsCap) - - return s -} - -// BufferCapacity allows you to specify capacity for the internal server's -// queue. Since the server, given Y subscribers, could only process X messages, -// this option could be used to survive spikes (e.g. high amount of -// transactions during peak hours). -func BufferCapacity(cap int) Option { - return func(s *Server) { - if cap > 0 { - s.cmdsCap = cap - } - } -} - -// BufferCapacity returns capacity of the internal server's queue. -func (s *Server) BufferCapacity() int { - return s.cmdsCap -} - -// Subscribe creates a subscription for the given client. It accepts a channel -// on which messages matching the given query can be received. An error will be -// returned to the caller if the context is canceled or if subscription already -// exist for pair clientID and query. -func (s *Server) Subscribe(ctx context.Context, clientID string, query Query, out chan<- interface{}) error { - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - _, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if ok { - return ErrAlreadySubscribed - } - - select { - case s.cmds <- cmd{op: sub, clientID: clientID, query: query, ch: out}: - s.mtx.Lock() - if _, ok = s.subscriptions[clientID]; !ok { - s.subscriptions[clientID] = make(map[string]Query) - } - // preserve original query - // see Unsubscribe - s.subscriptions[clientID][query.String()] = query - s.mtx.Unlock() - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Unsubscribe removes the subscription on the given query. An error will be -// returned to the caller if the context is canceled or if subscription does -// not exist. -func (s *Server) Unsubscribe(ctx context.Context, clientID string, query Query) error { - var origQuery Query - s.mtx.RLock() - clientSubscriptions, ok := s.subscriptions[clientID] - if ok { - origQuery, ok = clientSubscriptions[query.String()] - } - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } - - // original query is used here because we're using pointers as map keys - select { - case s.cmds <- cmd{op: unsub, clientID: clientID, query: origQuery}: - s.mtx.Lock() - delete(clientSubscriptions, query.String()) - s.mtx.Unlock() - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// UnsubscribeAll removes all client subscriptions. An error will be returned -// to the caller if the context is canceled or if subscription does not exist. 
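-// The server's internal goroutine closes the outgoing channel of every
-// subscription it removes, so a receiver ranging over its channel terminates
-// rather than blocking forever.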
-func (s *Server) UnsubscribeAll(ctx context.Context, clientID string) error { - s.mtx.RLock() - _, ok := s.subscriptions[clientID] - s.mtx.RUnlock() - if !ok { - return ErrSubscriptionNotFound - } - - select { - case s.cmds <- cmd{op: unsub, clientID: clientID}: - s.mtx.Lock() - delete(s.subscriptions, clientID) - s.mtx.Unlock() - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// Publish publishes the given message. An error will be returned to the caller -// if the context is canceled. -func (s *Server) Publish(ctx context.Context, msg interface{}) error { - return s.PublishWithTags(ctx, msg, NewTagMap(make(map[string]string))) -} - -// PublishWithTags publishes the given message with the set of tags. The set is -// matched with clients queries. If there is a match, the message is sent to -// the client. -func (s *Server) PublishWithTags(ctx context.Context, msg interface{}, tags TagMap) error { - select { - case s.cmds <- cmd{op: pub, msg: msg, tags: tags}: - return nil - case <-ctx.Done(): - return ctx.Err() - } -} - -// OnStop implements Service.OnStop by shutting down the server. -func (s *Server) OnStop() { - s.cmds <- cmd{op: shutdown} -} - -// NOTE: not goroutine safe -type state struct { - // query -> client -> ch - queries map[Query]map[string]chan<- interface{} - // client -> query -> struct{} - clients map[string]map[Query]struct{} -} - -// OnStart implements Service.OnStart by starting the server. -func (s *Server) OnStart() error { - go s.loop(state{ - queries: make(map[Query]map[string]chan<- interface{}), - clients: make(map[string]map[Query]struct{}), - }) - return nil -} - -// OnReset implements Service.OnReset -func (s *Server) OnReset() error { - return nil -} - -func (s *Server) loop(state state) { -loop: - for cmd := range s.cmds { - switch cmd.op { - case unsub: - if cmd.query != nil { - state.remove(cmd.clientID, cmd.query) - } else { - state.removeAll(cmd.clientID) - } - case shutdown: - for clientID := range state.clients { - state.removeAll(clientID) - } - break loop - case sub: - state.add(cmd.clientID, cmd.query, cmd.ch) - case pub: - state.send(cmd.msg, cmd.tags) - } - } -} - -func (state *state) add(clientID string, q Query, ch chan<- interface{}) { - // add query if needed - if _, ok := state.queries[q]; !ok { - state.queries[q] = make(map[string]chan<- interface{}) - } - - // create subscription - state.queries[q][clientID] = ch - - // add client if needed - if _, ok := state.clients[clientID]; !ok { - state.clients[clientID] = make(map[Query]struct{}) - } - state.clients[clientID][q] = struct{}{} -} - -func (state *state) remove(clientID string, q Query) { - clientToChannelMap, ok := state.queries[q] - if !ok { - return - } - - ch, ok := clientToChannelMap[clientID] - if ok { - close(ch) - - delete(state.clients[clientID], q) - - // if it not subscribed to anything else, remove the client - if len(state.clients[clientID]) == 0 { - delete(state.clients, clientID) - } - - delete(state.queries[q], clientID) - if len(state.queries[q]) == 0 { - delete(state.queries, q) - } - } -} - -func (state *state) removeAll(clientID string) { - queryMap, ok := state.clients[clientID] - if !ok { - return - } - - for q := range queryMap { - ch := state.queries[q][clientID] - close(ch) - - delete(state.queries[q], clientID) - if len(state.queries[q]) == 0 { - delete(state.queries, q) - } - } - delete(state.clients, clientID) -} - -func (state *state) send(msg interface{}, tags TagMap) { - for q, clientToChannelMap := range state.queries { - if q.Matches(tags) { 
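-			// NOTE: this send blocks until the subscriber reads. With an
-			// unbuffered channel, one slow client stalls delivery to every
-			// other matching subscription.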
- for _, ch := range clientToChannelMap { - ch <- msg - } - } - } -} diff --git a/libs/pubsub/pubsub_test.go b/libs/pubsub/pubsub_test.go deleted file mode 100644 index fd6c11cf..00000000 --- a/libs/pubsub/pubsub_test.go +++ /dev/null @@ -1,253 +0,0 @@ -package pubsub_test - -import ( - "context" - "fmt" - "runtime/debug" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -const ( - clientID = "test-client" -) - -func TestSubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, query.Empty{}, ch) - require.NoError(t, err) - err = s.Publish(ctx, "Ka-Zar") - require.NoError(t, err) - assertReceive(t, "Ka-Zar", ch) - - err = s.Publish(ctx, "Quicksilver") - require.NoError(t, err) - assertReceive(t, "Quicksilver", ch) -} - -func TestDifferentClients(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch1 := make(chan interface{}, 1) - err := s.Subscribe(ctx, "client-1", query.MustParse("tm.events.type='NewBlock'"), ch1) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Iceman", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) - require.NoError(t, err) - assertReceive(t, "Iceman", ch1) - - ch2 := make(chan interface{}, 1) - err = s.Subscribe(ctx, "client-2", query.MustParse("tm.events.type='NewBlock' AND abci.account.name='Igor'"), ch2) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Ultimo", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock", "abci.account.name": "Igor"})) - require.NoError(t, err) - assertReceive(t, "Ultimo", ch1) - assertReceive(t, "Ultimo", ch2) - - ch3 := make(chan interface{}, 1) - err = s.Subscribe(ctx, "client-3", query.MustParse("tm.events.type='NewRoundStep' AND abci.account.name='Igor' AND abci.invoice.number = 10"), ch3) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Valeria Richards", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewRoundStep"})) - require.NoError(t, err) - assert.Zero(t, len(ch3)) -} - -func TestClientSubscribesTwice(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - q := query.MustParse("tm.events.type='NewBlock'") - - ch1 := make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, q, ch1) - require.NoError(t, err) - err = s.PublishWithTags(ctx, "Goblin Queen", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) - require.NoError(t, err) - assertReceive(t, "Goblin Queen", ch1) - - ch2 := make(chan interface{}, 1) - err = s.Subscribe(ctx, clientID, q, ch2) - require.Error(t, err) - - err = s.PublishWithTags(ctx, "Spider-Man", pubsub.NewTagMap(map[string]string{"tm.events.type": "NewBlock"})) - require.NoError(t, err) - assertReceive(t, "Spider-Man", ch1) -} - -func TestUnsubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}) - err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch) - require.NoError(t, err) - err = s.Unsubscribe(ctx, clientID, 
query.MustParse("tm.events.type='NewBlock'")) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - assert.Zero(t, len(ch), "Should not receive anything after Unsubscribe") - - _, ok := <-ch - assert.False(t, ok) -} - -func TestResubscribe(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch := make(chan interface{}) - err := s.Subscribe(ctx, clientID, query.Empty{}, ch) - require.NoError(t, err) - err = s.Unsubscribe(ctx, clientID, query.Empty{}) - require.NoError(t, err) - ch = make(chan interface{}) - err = s.Subscribe(ctx, clientID, query.Empty{}, ch) - require.NoError(t, err) - - err = s.Publish(ctx, "Cable") - require.NoError(t, err) - assertReceive(t, "Cable", ch) -} - -func TestUnsubscribeAll(t *testing.T) { - s := pubsub.NewServer() - s.SetLogger(log.TestingLogger()) - s.Start() - defer s.Stop() - - ctx := context.Background() - ch1, ch2 := make(chan interface{}, 1), make(chan interface{}, 1) - err := s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlock'"), ch1) - require.NoError(t, err) - err = s.Subscribe(ctx, clientID, query.MustParse("tm.events.type='NewBlockHeader'"), ch2) - require.NoError(t, err) - - err = s.UnsubscribeAll(ctx, clientID) - require.NoError(t, err) - - err = s.Publish(ctx, "Nick Fury") - require.NoError(t, err) - assert.Zero(t, len(ch1), "Should not receive anything after UnsubscribeAll") - assert.Zero(t, len(ch2), "Should not receive anything after UnsubscribeAll") - - _, ok := <-ch1 - assert.False(t, ok) - _, ok = <-ch2 - assert.False(t, ok) -} - -func TestBufferCapacity(t *testing.T) { - s := pubsub.NewServer(pubsub.BufferCapacity(2)) - s.SetLogger(log.TestingLogger()) - - assert.Equal(t, 2, s.BufferCapacity()) - - ctx := context.Background() - err := s.Publish(ctx, "Nighthawk") - require.NoError(t, err) - err = s.Publish(ctx, "Sage") - require.NoError(t, err) - - ctx, cancel := context.WithTimeout(ctx, 10*time.Millisecond) - defer cancel() - err = s.Publish(ctx, "Ironclad") - if assert.Error(t, err) { - assert.Equal(t, context.DeadlineExceeded, err) - } -} - -func Benchmark10Clients(b *testing.B) { benchmarkNClients(10, b) } -func Benchmark100Clients(b *testing.B) { benchmarkNClients(100, b) } -func Benchmark1000Clients(b *testing.B) { benchmarkNClients(1000, b) } - -func Benchmark10ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(10, b) } -func Benchmark100ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(100, b) } -func Benchmark1000ClientsOneQuery(b *testing.B) { benchmarkNClientsOneQuery(1000, b) } - -func benchmarkNClients(n int, b *testing.B) { - s := pubsub.NewServer() - s.Start() - defer s.Stop() - - ctx := context.Background() - for i := 0; i < n; i++ { - ch := make(chan interface{}) - go func() { - for range ch { - } - }() - s.Subscribe(ctx, clientID, query.MustParse(fmt.Sprintf("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = %d", i)), ch) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": string(i)})) - } -} - -func benchmarkNClientsOneQuery(n int, b *testing.B) { - s := pubsub.NewServer() - s.Start() - defer s.Stop() - - ctx := context.Background() - q := query.MustParse("abci.Account.Owner = 'Ivan' AND abci.Invoices.Number = 1") - for i := 0; i < n; i++ { - ch := make(chan interface{}) - go func() { - for range ch { - } - }() - 
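		// Editor's note on these benchmarks: in benchmarkNClients above,
		// string(i) encodes the int as the UTF-8 bytes of code point i
		// (string(65) == "A"), not as its decimal form, so the published
		// "abci.Invoices.Number" value almost never equals what a client
		// subscribed to; strconv.Itoa(i) was presumably intended. Here in
		// benchmarkNClientsOneQuery, every iteration reuses the same
		// clientID and query, so (per TestClientSubscribesTwice) only the
		// first Subscribe succeeds and the ignored errors hide that the
		// other n-1 clients are never registered.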
s.Subscribe(ctx, clientID, q, ch) - } - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - s.PublishWithTags(ctx, "Gamora", pubsub.NewTagMap(map[string]string{"abci.Account.Owner": "Ivan", "abci.Invoices.Number": "1"})) - } -} - -/////////////////////////////////////////////////////////////////////////////// -/// HELPERS -/////////////////////////////////////////////////////////////////////////////// - -func assertReceive(t *testing.T, expected interface{}, ch <-chan interface{}, msgAndArgs ...interface{}) { - select { - case actual := <-ch: - if actual != nil { - assert.Equal(t, expected, actual, msgAndArgs...) - } - case <-time.After(1 * time.Second): - t.Errorf("Expected to receive %v from the channel, got nothing after 1s", expected) - debug.PrintStack() - } -} diff --git a/libs/pubsub/query/Makefile b/libs/pubsub/query/Makefile deleted file mode 100644 index 91030ef0..00000000 --- a/libs/pubsub/query/Makefile +++ /dev/null @@ -1,11 +0,0 @@ -gen_query_parser: - @go get github.com/pointlander/peg - peg -inline -switch query.peg - -fuzzy_test: - @go get github.com/dvyukov/go-fuzz/go-fuzz - @go get github.com/dvyukov/go-fuzz/go-fuzz-build - go-fuzz-build github.com/tendermint/tendermint/libs/pubsub/query/fuzz_test - go-fuzz -bin=./fuzz_test-fuzz.zip -workdir=./fuzz_test/output - -.PHONY: gen_query_parser fuzzy_test diff --git a/libs/pubsub/query/empty.go b/libs/pubsub/query/empty.go deleted file mode 100644 index 17d7acef..00000000 --- a/libs/pubsub/query/empty.go +++ /dev/null @@ -1,16 +0,0 @@ -package query - -import "github.com/tendermint/tendermint/libs/pubsub" - -// Empty query matches any set of tags. -type Empty struct { -} - -// Matches always returns true. -func (Empty) Matches(tags pubsub.TagMap) bool { - return true -} - -func (Empty) String() string { - return "empty" -} diff --git a/libs/pubsub/query/empty_test.go b/libs/pubsub/query/empty_test.go deleted file mode 100644 index 6183b6bd..00000000 --- a/libs/pubsub/query/empty_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package query_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestEmptyQueryMatchesAnything(t *testing.T) { - q := query.Empty{} - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{}))) - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Asher": "Roth"}))) - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66"}))) - assert.True(t, q.Matches(pubsub.NewTagMap(map[string]string{"Route": "66", "Billy": "Blue"}))) -} diff --git a/libs/pubsub/query/fuzz_test/main.go b/libs/pubsub/query/fuzz_test/main.go deleted file mode 100644 index 7a46116b..00000000 --- a/libs/pubsub/query/fuzz_test/main.go +++ /dev/null @@ -1,30 +0,0 @@ -package fuzz_test - -import ( - "fmt" - - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func Fuzz(data []byte) int { - sdata := string(data) - q0, err := query.New(sdata) - if err != nil { - return 0 - } - - sdata1 := q0.String() - q1, err := query.New(sdata1) - if err != nil { - panic(err) - } - - sdata2 := q1.String() - if sdata1 != sdata2 { - fmt.Printf("q0: %q\n", sdata1) - fmt.Printf("q1: %q\n", sdata2) - panic("query changed") - } - - return 1 -} diff --git a/libs/pubsub/query/parser_test.go b/libs/pubsub/query/parser_test.go deleted file mode 100644 index 708dee48..00000000 --- a/libs/pubsub/query/parser_test.go +++ /dev/null @@ -1,92 +0,0 @@ -package query_test - -import ( - "testing" - 
- "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -// TODO: fuzzy testing? -func TestParser(t *testing.T) { - cases := []struct { - query string - valid bool - }{ - {"tm.events.type='NewBlock'", true}, - {"tm.events.type = 'NewBlock'", true}, - {"tm.events.name = ''", true}, - {"tm.events.type='TIME'", true}, - {"tm.events.type='DATE'", true}, - {"tm.events.type='='", true}, - {"tm.events.type='TIME", false}, - {"tm.events.type=TIME'", false}, - {"tm.events.type==", false}, - {"tm.events.type=NewBlock", false}, - {">==", false}, - {"tm.events.type 'NewBlock' =", false}, - {"tm.events.type>'NewBlock'", false}, - {"", false}, - {"=", false}, - {"='NewBlock'", false}, - {"tm.events.type=", false}, - - {"tm.events.typeNewBlock", false}, - {"tm.events.type'NewBlock'", false}, - {"'NewBlock'", false}, - {"NewBlock", false}, - {"", false}, - - {"tm.events.type='NewBlock' AND abci.account.name='Igor'", true}, - {"tm.events.type='NewBlock' AND", false}, - {"tm.events.type='NewBlock' AN", false}, - {"tm.events.type='NewBlock' AN tm.events.type='NewBlockHeader'", false}, - {"AND tm.events.type='NewBlock' ", false}, - - {"abci.account.name CONTAINS 'Igor'", true}, - - {"tx.date > DATE 2013-05-03", true}, - {"tx.date < DATE 2013-05-03", true}, - {"tx.date <= DATE 2013-05-03", true}, - {"tx.date >= DATE 2013-05-03", true}, - {"tx.date >= DAT 2013-05-03", false}, - {"tx.date <= DATE2013-05-03", false}, - {"tx.date <= DATE -05-03", false}, - {"tx.date >= DATE 20130503", false}, - {"tx.date >= DATE 2013+01-03", false}, - // incorrect year, month, day - {"tx.date >= DATE 0013-01-03", false}, - {"tx.date >= DATE 2013-31-03", false}, - {"tx.date >= DATE 2013-01-83", false}, - - {"tx.date > TIME 2013-05-03T14:45:00+07:00", true}, - {"tx.date < TIME 2013-05-03T14:45:00-02:00", true}, - {"tx.date <= TIME 2013-05-03T14:45:00Z", true}, - {"tx.date >= TIME 2013-05-03T14:45:00Z", true}, - {"tx.date >= TIME2013-05-03T14:45:00Z", false}, - {"tx.date = IME 2013-05-03T14:45:00Z", false}, - {"tx.date = TIME 2013-05-:45:00Z", false}, - {"tx.date >= TIME 2013-05-03T14:45:00", false}, - {"tx.date >= TIME 0013-00-00T14:45:00Z", false}, - {"tx.date >= TIME 2013+05=03T14:45:00Z", false}, - - {"account.balance=100", true}, - {"account.balance >= 200", true}, - {"account.balance >= -300", false}, - {"account.balance >>= 400", false}, - {"account.balance=33.22.1", false}, - - {"hash='136E18F7E4C348B780CF873A0BF43922E5BAFA63'", true}, - {"hash=136E18F7E4C348B780CF873A0BF43922E5BAFA63", false}, - } - - for _, c := range cases { - _, err := query.New(c.query) - if c.valid { - assert.NoErrorf(t, err, "Query was '%s'", c.query) - } else { - assert.Errorf(t, err, "Query was '%s'", c.query) - } - } -} diff --git a/libs/pubsub/query/query.go b/libs/pubsub/query/query.go deleted file mode 100644 index ec187486..00000000 --- a/libs/pubsub/query/query.go +++ /dev/null @@ -1,339 +0,0 @@ -// Package query provides a parser for a custom query format: -// -// abci.invoice.number=22 AND abci.invoice.owner=Ivan -// -// See query.peg for the grammar, which is a https://en.wikipedia.org/wiki/Parsing_expression_grammar. -// More: https://github.com/PhilippeSigaud/Pegged/wiki/PEG-Basics -// -// It has a support for numbers (integer and floating point), dates and times. -package query - -import ( - "fmt" - "reflect" - "strconv" - "strings" - "time" - - "github.com/tendermint/tendermint/libs/pubsub" -) - -// Query holds the query string and the query parser. 
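//
// A minimal usage sketch (editor's note; it mirrors cases from
// query_test.go below):
//
//	q, err := query.New("tx.gas > 7 AND tx.gas < 9")
//	if err != nil {
//		// handle the parse error
//	}
//	q.Matches(pubsub.NewTagMap(map[string]string{"tx.gas": "8"})) // true
//	q.Conditions() // [{tx.gas OpGreater 7} {tx.gas OpLess 9}]
//
// Tag values are matched as strings and coerced to the operand's type
// (times and dates via TimeLayout/DateLayout, numbers via strconv); a
// value that cannot be converted panics rather than merely failing the
// match.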
-type Query struct { - str string - parser *QueryParser -} - -// Condition represents a single condition within a query and consists of tag -// (e.g. "tx.gas"), operator (e.g. "=") and operand (e.g. "7"). -type Condition struct { - Tag string - Op Operator - Operand interface{} -} - -// New parses the given string and returns a query or error if the string is -// invalid. -func New(s string) (*Query, error) { - p := &QueryParser{Buffer: fmt.Sprintf(`"%s"`, s)} - p.Init() - if err := p.Parse(); err != nil { - return nil, err - } - return &Query{str: s, parser: p}, nil -} - -// MustParse turns the given string into a query or panics; for tests or others -// cases where you know the string is valid. -func MustParse(s string) *Query { - q, err := New(s) - if err != nil { - panic(fmt.Sprintf("failed to parse %s: %v", s, err)) - } - return q -} - -// String returns the original string. -func (q *Query) String() string { - return q.str -} - -// Operator is an operator that defines some kind of relation between tag and -// operand (equality, etc.). -type Operator uint8 - -const ( - // "<=" - OpLessEqual Operator = iota - // ">=" - OpGreaterEqual - // "<" - OpLess - // ">" - OpGreater - // "=" - OpEqual - // "CONTAINS"; used to check if a string contains a certain sub string. - OpContains -) - -const ( - // DateLayout defines a layout for all dates (`DATE date`) - DateLayout = "2006-01-02" - // TimeLayout defines a layout for all times (`TIME time`) - TimeLayout = time.RFC3339 -) - -// Conditions returns a list of conditions. -func (q *Query) Conditions() []Condition { - conditions := make([]Condition, 0) - - buffer, begin, end := q.parser.Buffer, 0, 0 - - var tag string - var op Operator - - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") - for _, token := range q.parser.Tokens() { - switch token.pegRule { - - case rulePegText: - begin, end = int(token.begin), int(token.end) - case ruletag: - tag = buffer[begin:end] - case rulele: - op = OpLessEqual - case rulege: - op = OpGreaterEqual - case rulel: - op = OpLess - case ruleg: - op = OpGreater - case ruleequal: - op = OpEqual - case rulecontains: - op = OpContains - case rulevalue: - // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") - valueWithoutSingleQuotes := buffer[begin+1 : end-1] - conditions = append(conditions, Condition{tag, op, valueWithoutSingleQuotes}) - case rulenumber: - number := buffer[begin:end] - if strings.ContainsAny(number, ".") { // if it looks like a floating-point number - value, err := strconv.ParseFloat(number, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) - } - conditions = append(conditions, Condition{tag, op, value}) - } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) - } - conditions = append(conditions, Condition{tag, op, value}) - } - case ruletime: - value, err := time.Parse(TimeLayout, buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - conditions = append(conditions, Condition{tag, op, value}) - case ruledate: - value, err := time.Parse("2006-01-02", buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - conditions = append(conditions, Condition{tag, op, value}) - } - } - - return conditions -} - -// Matches returns true if the query matches the given set of tags, false otherwise. -// -// For example, query "name=John" matches tags = {"name": "John"}. More -// examples could be found in parser_test.go and query_test.go. -func (q *Query) Matches(tags pubsub.TagMap) bool { - if tags.Len() == 0 { - return false - } - - buffer, begin, end := q.parser.Buffer, 0, 0 - - var tag string - var op Operator - - // tokens must be in the following order: tag ("tx.gas") -> operator ("=") -> operand ("7") - for _, token := range q.parser.Tokens() { - switch token.pegRule { - - case rulePegText: - begin, end = int(token.begin), int(token.end) - case ruletag: - tag = buffer[begin:end] - case rulele: - op = OpLessEqual - case rulege: - op = OpGreaterEqual - case rulel: - op = OpLess - case ruleg: - op = OpGreater - case ruleequal: - op = OpEqual - case rulecontains: - op = OpContains - case rulevalue: - // strip single quotes from value (i.e. 
"'NewBlock'" -> "NewBlock") - valueWithoutSingleQuotes := buffer[begin+1 : end-1] - - // see if the triplet (tag, operator, operand) matches any tag - // "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } - if !match(tag, op, reflect.ValueOf(valueWithoutSingleQuotes), tags) { - return false - } - case rulenumber: - number := buffer[begin:end] - if strings.ContainsAny(number, ".") { // if it looks like a floating-point number - value, err := strconv.ParseFloat(number, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as float64 (should never happen if the grammar is correct)", err, number)) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - } else { - value, err := strconv.ParseInt(number, 10, 64) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as int64 (should never happen if the grammar is correct)", err, number)) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - } - case ruletime: - value, err := time.Parse(TimeLayout, buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / RFC3339 (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - case ruledate: - value, err := time.Parse("2006-01-02", buffer[begin:end]) - if err != nil { - panic(fmt.Sprintf("got %v while trying to parse %s as time.Time / '2006-01-02' (should never happen if the grammar is correct)", err, buffer[begin:end])) - } - if !match(tag, op, reflect.ValueOf(value), tags) { - return false - } - } - } - - return true -} - -// match returns true if the given triplet (tag, operator, operand) matches any tag. -// -// First, it looks up the tag in tags and if it finds one, tries to compare the -// value from it to the operand using the operator. 
-// -// "tx.gas", "=", "7", { "tx.gas": 7, "tx.ID": "4AE393495334" } -func match(tag string, op Operator, operand reflect.Value, tags pubsub.TagMap) bool { - // look up the tag from the query in tags - value, ok := tags.Get(tag) - if !ok { - return false - } - switch operand.Kind() { - case reflect.Struct: // time - operandAsTime := operand.Interface().(time.Time) - // try our best to convert value from tags to time.Time - var ( - v time.Time - err error - ) - if strings.ContainsAny(value, "T") { - v, err = time.Parse(TimeLayout, value) - } else { - v, err = time.Parse(DateLayout, value) - } - if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to time.Time: %v", value, err)) - } - switch op { - case OpLessEqual: - return v.Before(operandAsTime) || v.Equal(operandAsTime) - case OpGreaterEqual: - return v.Equal(operandAsTime) || v.After(operandAsTime) - case OpLess: - return v.Before(operandAsTime) - case OpGreater: - return v.After(operandAsTime) - case OpEqual: - return v.Equal(operandAsTime) - } - case reflect.Float64: - operandFloat64 := operand.Interface().(float64) - var v float64 - // try our best to convert value from tags to float64 - v, err := strconv.ParseFloat(value, 64) - if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) - } - switch op { - case OpLessEqual: - return v <= operandFloat64 - case OpGreaterEqual: - return v >= operandFloat64 - case OpLess: - return v < operandFloat64 - case OpGreater: - return v > operandFloat64 - case OpEqual: - return v == operandFloat64 - } - case reflect.Int64: - operandInt := operand.Interface().(int64) - var v int64 - // if value looks like float, we try to parse it as float - if strings.ContainsAny(value, ".") { - v1, err := strconv.ParseFloat(value, 64) - if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to float64: %v", value, err)) - } - v = int64(v1) - } else { - var err error - // try our best to convert value from tags to int64 - v, err = strconv.ParseInt(value, 10, 64) - if err != nil { - panic(fmt.Sprintf("Failed to convert value %v from tag to int64: %v", value, err)) - } - } - switch op { - case OpLessEqual: - return v <= operandInt - case OpGreaterEqual: - return v >= operandInt - case OpLess: - return v < operandInt - case OpGreater: - return v > operandInt - case OpEqual: - return v == operandInt - } - case reflect.String: - switch op { - case OpEqual: - return value == operand.String() - case OpContains: - return strings.Contains(value, operand.String()) - } - default: - panic(fmt.Sprintf("Unknown kind of operand %v", operand.Kind())) - } - - return false -} diff --git a/libs/pubsub/query/query.peg b/libs/pubsub/query/query.peg deleted file mode 100644 index 739892e4..00000000 --- a/libs/pubsub/query/query.peg +++ /dev/null @@ -1,33 +0,0 @@ -package query - -type QueryParser Peg { -} - -e <- '\"' condition ( ' '+ and ' '+ condition )* '\"' !. - -condition <- tag ' '* (le ' '* (number / time / date) - / ge ' '* (number / time / date) - / l ' '* (number / time / date) - / g ' '* (number / time / date) - / equal ' '* (number / time / date / value) - / contains ' '* value - ) - -tag <- < (![ \t\n\r\\()"'=><] .)+ > -value <- < '\'' (!["'] .)* '\''> -number <- < ('0' - / [1-9] digit* ('.' digit*)?) 
> -digit <- [0-9] -time <- "TIME " < year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit (('-' / '+') digit digit ':' digit digit / 'Z') > -date <- "DATE " < year '-' month '-' day > -year <- ('1' / '2') digit digit digit -month <- ('0' / '1') digit -day <- ('0' / '1' / '2' / '3') digit -and <- "AND" - -equal <- "=" -contains <- "CONTAINS" -le <- "<=" -ge <- ">=" -l <- "<" -g <- ">" diff --git a/libs/pubsub/query/query.peg.go b/libs/pubsub/query/query.peg.go deleted file mode 100644 index c86e4a47..00000000 --- a/libs/pubsub/query/query.peg.go +++ /dev/null @@ -1,1553 +0,0 @@ -// nolint -package query - -import ( - "fmt" - "math" - "sort" - "strconv" -) - -const endSymbol rune = 1114112 - -/* The rule types inferred from the grammar are below. */ -type pegRule uint8 - -const ( - ruleUnknown pegRule = iota - rulee - rulecondition - ruletag - rulevalue - rulenumber - ruledigit - ruletime - ruledate - ruleyear - rulemonth - ruleday - ruleand - ruleequal - rulecontains - rulele - rulege - rulel - ruleg - rulePegText -) - -var rul3s = [...]string{ - "Unknown", - "e", - "condition", - "tag", - "value", - "number", - "digit", - "time", - "date", - "year", - "month", - "day", - "and", - "equal", - "contains", - "le", - "ge", - "l", - "g", - "PegText", -} - -type token32 struct { - pegRule - begin, end uint32 -} - -func (t *token32) String() string { - return fmt.Sprintf("\x1B[34m%v\x1B[m %v %v", rul3s[t.pegRule], t.begin, t.end) -} - -type node32 struct { - token32 - up, next *node32 -} - -func (node *node32) print(pretty bool, buffer string) { - var print func(node *node32, depth int) - print = func(node *node32, depth int) { - for node != nil { - for c := 0; c < depth; c++ { - fmt.Printf(" ") - } - rule := rul3s[node.pegRule] - quote := strconv.Quote(string(([]rune(buffer)[node.begin:node.end]))) - if !pretty { - fmt.Printf("%v %v\n", rule, quote) - } else { - fmt.Printf("\x1B[34m%v\x1B[m %v\n", rule, quote) - } - if node.up != nil { - print(node.up, depth+1) - } - node = node.next - } - } - print(node, 0) -} - -func (node *node32) Print(buffer string) { - node.print(false, buffer) -} - -func (node *node32) PrettyPrint(buffer string) { - node.print(true, buffer) -} - -type tokens32 struct { - tree []token32 -} - -func (t *tokens32) Trim(length uint32) { - t.tree = t.tree[:length] -} - -func (t *tokens32) Print() { - for _, token := range t.tree { - fmt.Println(token.String()) - } -} - -func (t *tokens32) AST() *node32 { - type element struct { - node *node32 - down *element - } - tokens := t.Tokens() - var stack *element - for _, token := range tokens { - if token.begin == token.end { - continue - } - node := &node32{token32: token} - for stack != nil && stack.node.begin >= token.begin && stack.node.end <= token.end { - stack.node.next = node.up - node.up = stack.node - stack = stack.down - } - stack = &element{node: node, down: stack} - } - if stack != nil { - return stack.node - } - return nil -} - -func (t *tokens32) PrintSyntaxTree(buffer string) { - t.AST().Print(buffer) -} - -func (t *tokens32) PrettyPrintSyntaxTree(buffer string) { - t.AST().PrettyPrint(buffer) -} - -func (t *tokens32) Add(rule pegRule, begin, end, index uint32) { - if tree := t.tree; int(index) >= len(tree) { - expanded := make([]token32, 2*len(tree)) - copy(expanded, tree) - t.tree = expanded - } - t.tree[index] = token32{ - pegRule: rule, - begin: begin, - end: end, - } -} - -func (t *tokens32) Tokens() []token32 { - return t.tree -} - -type QueryParser struct { - Buffer string - buffer []rune - 
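	// Editor's note: this file is generated from query.peg by
	// `peg -inline -switch query.peg` (see the Makefile above). The rules
	// table below holds one parse function per grammar rule, indexed by
	// the pegRule constants, with index 0 (ruleUnknown) left nil.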
rules [20]func() bool - parse func(rule ...int) error - reset func() - Pretty bool - tokens32 -} - -func (p *QueryParser) Parse(rule ...int) error { - return p.parse(rule...) -} - -func (p *QueryParser) Reset() { - p.reset() -} - -type textPosition struct { - line, symbol int -} - -type textPositionMap map[int]textPosition - -func translatePositions(buffer []rune, positions []int) textPositionMap { - length, translations, j, line, symbol := len(positions), make(textPositionMap, len(positions)), 0, 1, 0 - sort.Ints(positions) - -search: - for i, c := range buffer { - if c == '\n' { - line, symbol = line+1, 0 - } else { - symbol++ - } - if i == positions[j] { - translations[positions[j]] = textPosition{line, symbol} - for j++; j < length; j++ { - if i != positions[j] { - continue search - } - } - break search - } - } - - return translations -} - -type parseError struct { - p *QueryParser - max token32 -} - -func (e *parseError) Error() string { - tokens, error := []token32{e.max}, "\n" - positions, p := make([]int, 2*len(tokens)), 0 - for _, token := range tokens { - positions[p], p = int(token.begin), p+1 - positions[p], p = int(token.end), p+1 - } - translations := translatePositions(e.p.buffer, positions) - format := "parse error near %v (line %v symbol %v - line %v symbol %v):\n%v\n" - if e.p.Pretty { - format = "parse error near \x1B[34m%v\x1B[m (line %v symbol %v - line %v symbol %v):\n%v\n" - } - for _, token := range tokens { - begin, end := int(token.begin), int(token.end) - error += fmt.Sprintf(format, - rul3s[token.pegRule], - translations[begin].line, translations[begin].symbol, - translations[end].line, translations[end].symbol, - strconv.Quote(string(e.p.buffer[begin:end]))) - } - - return error -} - -func (p *QueryParser) PrintSyntaxTree() { - if p.Pretty { - p.tokens32.PrettyPrintSyntaxTree(p.Buffer) - } else { - p.tokens32.PrintSyntaxTree(p.Buffer) - } -} - -func (p *QueryParser) Init() { - var ( - max token32 - position, tokenIndex uint32 - buffer []rune - ) - p.reset = func() { - max = token32{} - position, tokenIndex = 0, 0 - - p.buffer = []rune(p.Buffer) - if len(p.buffer) == 0 || p.buffer[len(p.buffer)-1] != endSymbol { - p.buffer = append(p.buffer, endSymbol) - } - buffer = p.buffer - } - p.reset() - - _rules := p.rules - tree := tokens32{tree: make([]token32, math.MaxInt16)} - p.parse = func(rule ...int) error { - r := 1 - if len(rule) > 0 { - r = rule[0] - } - matches := p.rules[r]() - p.tokens32 = tree - if matches { - p.Trim(tokenIndex) - return nil - } - return &parseError{p, max} - } - - add := func(rule pegRule, begin uint32) { - tree.Add(rule, begin, position, tokenIndex) - tokenIndex++ - if begin != position && position > max.end { - max = token32{rule, begin, position} - } - } - - matchDot := func() bool { - if buffer[position] != endSymbol { - position++ - return true - } - return false - } - - /*matchChar := func(c byte) bool { - if buffer[position] == c { - position++ - return true - } - return false - }*/ - - /*matchRange := func(lower byte, upper byte) bool { - if c := buffer[position]; c >= lower && c <= upper { - position++ - return true - } - return false - }*/ - - _rules = [...]func() bool{ - nil, - /* 0 e <- <('"' condition (' '+ and ' '+ condition)* '"' !.)> */ - func() bool { - position0, tokenIndex0 := position, tokenIndex - { - position1 := position - if buffer[position] != rune('"') { - goto l0 - } - position++ - if !_rules[rulecondition]() { - goto l0 - } - l2: - { - position3, tokenIndex3 := position, tokenIndex - if buffer[position] != 
rune(' ') { - goto l3 - } - position++ - l4: - { - position5, tokenIndex5 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l5 - } - position++ - goto l4 - l5: - position, tokenIndex = position5, tokenIndex5 - } - { - position6 := position - { - position7, tokenIndex7 := position, tokenIndex - if buffer[position] != rune('a') { - goto l8 - } - position++ - goto l7 - l8: - position, tokenIndex = position7, tokenIndex7 - if buffer[position] != rune('A') { - goto l3 - } - position++ - } - l7: - { - position9, tokenIndex9 := position, tokenIndex - if buffer[position] != rune('n') { - goto l10 - } - position++ - goto l9 - l10: - position, tokenIndex = position9, tokenIndex9 - if buffer[position] != rune('N') { - goto l3 - } - position++ - } - l9: - { - position11, tokenIndex11 := position, tokenIndex - if buffer[position] != rune('d') { - goto l12 - } - position++ - goto l11 - l12: - position, tokenIndex = position11, tokenIndex11 - if buffer[position] != rune('D') { - goto l3 - } - position++ - } - l11: - add(ruleand, position6) - } - if buffer[position] != rune(' ') { - goto l3 - } - position++ - l13: - { - position14, tokenIndex14 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l14 - } - position++ - goto l13 - l14: - position, tokenIndex = position14, tokenIndex14 - } - if !_rules[rulecondition]() { - goto l3 - } - goto l2 - l3: - position, tokenIndex = position3, tokenIndex3 - } - if buffer[position] != rune('"') { - goto l0 - } - position++ - { - position15, tokenIndex15 := position, tokenIndex - if !matchDot() { - goto l15 - } - goto l0 - l15: - position, tokenIndex = position15, tokenIndex15 - } - add(rulee, position1) - } - return true - l0: - position, tokenIndex = position0, tokenIndex0 - return false - }, - /* 1 condition <- <(tag ' '* ((le ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / (ge ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number))) / ((&('=') (equal ' '* ((&('\'') value) | (&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('>') (g ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('<') (l ' '* ((&('D' | 'd') date) | (&('T' | 't') time) | (&('0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9') number)))) | (&('C' | 'c') (contains ' '* value)))))> */ - func() bool { - position16, tokenIndex16 := position, tokenIndex - { - position17 := position - { - position18 := position - { - position19 := position - { - position22, tokenIndex22 := position, tokenIndex - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l22 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l22 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l22 - } - position++ - break - case '\'': - if buffer[position] != rune('\'') { - goto l22 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l22 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l22 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l22 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l22 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l22 - } - position++ - break 
- case '\n': - if buffer[position] != rune('\n') { - goto l22 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l22 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l22 - } - position++ - break - } - } - - goto l16 - l22: - position, tokenIndex = position22, tokenIndex22 - } - if !matchDot() { - goto l16 - } - l20: - { - position21, tokenIndex21 := position, tokenIndex - { - position24, tokenIndex24 := position, tokenIndex - { - switch buffer[position] { - case '<': - if buffer[position] != rune('<') { - goto l24 - } - position++ - break - case '>': - if buffer[position] != rune('>') { - goto l24 - } - position++ - break - case '=': - if buffer[position] != rune('=') { - goto l24 - } - position++ - break - case '\'': - if buffer[position] != rune('\'') { - goto l24 - } - position++ - break - case '"': - if buffer[position] != rune('"') { - goto l24 - } - position++ - break - case ')': - if buffer[position] != rune(')') { - goto l24 - } - position++ - break - case '(': - if buffer[position] != rune('(') { - goto l24 - } - position++ - break - case '\\': - if buffer[position] != rune('\\') { - goto l24 - } - position++ - break - case '\r': - if buffer[position] != rune('\r') { - goto l24 - } - position++ - break - case '\n': - if buffer[position] != rune('\n') { - goto l24 - } - position++ - break - case '\t': - if buffer[position] != rune('\t') { - goto l24 - } - position++ - break - default: - if buffer[position] != rune(' ') { - goto l24 - } - position++ - break - } - } - - goto l21 - l24: - position, tokenIndex = position24, tokenIndex24 - } - if !matchDot() { - goto l21 - } - goto l20 - l21: - position, tokenIndex = position21, tokenIndex21 - } - add(rulePegText, position19) - } - add(ruletag, position18) - } - l26: - { - position27, tokenIndex27 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l27 - } - position++ - goto l26 - l27: - position, tokenIndex = position27, tokenIndex27 - } - { - position28, tokenIndex28 := position, tokenIndex - { - position30 := position - if buffer[position] != rune('<') { - goto l29 - } - position++ - if buffer[position] != rune('=') { - goto l29 - } - position++ - add(rulele, position30) - } - l31: - { - position32, tokenIndex32 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l32 - } - position++ - goto l31 - l32: - position, tokenIndex = position32, tokenIndex32 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l29 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l29 - } - break - default: - if !_rules[rulenumber]() { - goto l29 - } - break - } - } - - goto l28 - l29: - position, tokenIndex = position28, tokenIndex28 - { - position35 := position - if buffer[position] != rune('>') { - goto l34 - } - position++ - if buffer[position] != rune('=') { - goto l34 - } - position++ - add(rulege, position35) - } - l36: - { - position37, tokenIndex37 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l37 - } - position++ - goto l36 - l37: - position, tokenIndex = position37, tokenIndex37 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l34 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l34 - } - break - default: - if !_rules[rulenumber]() { - goto l34 - } - break - } - } - - goto l28 - l34: - position, tokenIndex = position28, tokenIndex28 - { - switch buffer[position] { - case '=': - { - position40 := position - if buffer[position] != 
rune('=') { - goto l16 - } - position++ - add(ruleequal, position40) - } - l41: - { - position42, tokenIndex42 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l42 - } - position++ - goto l41 - l42: - position, tokenIndex = position42, tokenIndex42 - } - { - switch buffer[position] { - case '\'': - if !_rules[rulevalue]() { - goto l16 - } - break - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - case '>': - { - position44 := position - if buffer[position] != rune('>') { - goto l16 - } - position++ - add(ruleg, position44) - } - l45: - { - position46, tokenIndex46 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l46 - } - position++ - goto l45 - l46: - position, tokenIndex = position46, tokenIndex46 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - case '<': - { - position48 := position - if buffer[position] != rune('<') { - goto l16 - } - position++ - add(rulel, position48) - } - l49: - { - position50, tokenIndex50 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l50 - } - position++ - goto l49 - l50: - position, tokenIndex = position50, tokenIndex50 - } - { - switch buffer[position] { - case 'D', 'd': - if !_rules[ruledate]() { - goto l16 - } - break - case 'T', 't': - if !_rules[ruletime]() { - goto l16 - } - break - default: - if !_rules[rulenumber]() { - goto l16 - } - break - } - } - - break - default: - { - position52 := position - { - position53, tokenIndex53 := position, tokenIndex - if buffer[position] != rune('c') { - goto l54 - } - position++ - goto l53 - l54: - position, tokenIndex = position53, tokenIndex53 - if buffer[position] != rune('C') { - goto l16 - } - position++ - } - l53: - { - position55, tokenIndex55 := position, tokenIndex - if buffer[position] != rune('o') { - goto l56 - } - position++ - goto l55 - l56: - position, tokenIndex = position55, tokenIndex55 - if buffer[position] != rune('O') { - goto l16 - } - position++ - } - l55: - { - position57, tokenIndex57 := position, tokenIndex - if buffer[position] != rune('n') { - goto l58 - } - position++ - goto l57 - l58: - position, tokenIndex = position57, tokenIndex57 - if buffer[position] != rune('N') { - goto l16 - } - position++ - } - l57: - { - position59, tokenIndex59 := position, tokenIndex - if buffer[position] != rune('t') { - goto l60 - } - position++ - goto l59 - l60: - position, tokenIndex = position59, tokenIndex59 - if buffer[position] != rune('T') { - goto l16 - } - position++ - } - l59: - { - position61, tokenIndex61 := position, tokenIndex - if buffer[position] != rune('a') { - goto l62 - } - position++ - goto l61 - l62: - position, tokenIndex = position61, tokenIndex61 - if buffer[position] != rune('A') { - goto l16 - } - position++ - } - l61: - { - position63, tokenIndex63 := position, tokenIndex - if buffer[position] != rune('i') { - goto l64 - } - position++ - goto l63 - l64: - position, tokenIndex = position63, tokenIndex63 - if buffer[position] != rune('I') { - goto l16 - } - position++ - } - l63: - { - position65, tokenIndex65 := position, tokenIndex - if buffer[position] != rune('n') { - goto l66 - } - position++ - goto l65 - l66: - position, tokenIndex = position65, 
tokenIndex65 - if buffer[position] != rune('N') { - goto l16 - } - position++ - } - l65: - { - position67, tokenIndex67 := position, tokenIndex - if buffer[position] != rune('s') { - goto l68 - } - position++ - goto l67 - l68: - position, tokenIndex = position67, tokenIndex67 - if buffer[position] != rune('S') { - goto l16 - } - position++ - } - l67: - add(rulecontains, position52) - } - l69: - { - position70, tokenIndex70 := position, tokenIndex - if buffer[position] != rune(' ') { - goto l70 - } - position++ - goto l69 - l70: - position, tokenIndex = position70, tokenIndex70 - } - if !_rules[rulevalue]() { - goto l16 - } - break - } - } - - } - l28: - add(rulecondition, position17) - } - return true - l16: - position, tokenIndex = position16, tokenIndex16 - return false - }, - /* 2 tag <- <<(!((&('<') '<') | (&('>') '>') | (&('=') '=') | (&('\'') '\'') | (&('"') '"') | (&(')') ')') | (&('(') '(') | (&('\\') '\\') | (&('\r') '\r') | (&('\n') '\n') | (&('\t') '\t') | (&(' ') ' ')) .)+>> */ - nil, - /* 3 value <- <<('\'' (!('"' / '\'') .)* '\'')>> */ - func() bool { - position72, tokenIndex72 := position, tokenIndex - { - position73 := position - { - position74 := position - if buffer[position] != rune('\'') { - goto l72 - } - position++ - l75: - { - position76, tokenIndex76 := position, tokenIndex - { - position77, tokenIndex77 := position, tokenIndex - { - position78, tokenIndex78 := position, tokenIndex - if buffer[position] != rune('"') { - goto l79 - } - position++ - goto l78 - l79: - position, tokenIndex = position78, tokenIndex78 - if buffer[position] != rune('\'') { - goto l77 - } - position++ - } - l78: - goto l76 - l77: - position, tokenIndex = position77, tokenIndex77 - } - if !matchDot() { - goto l76 - } - goto l75 - l76: - position, tokenIndex = position76, tokenIndex76 - } - if buffer[position] != rune('\'') { - goto l72 - } - position++ - add(rulePegText, position74) - } - add(rulevalue, position73) - } - return true - l72: - position, tokenIndex = position72, tokenIndex72 - return false - }, - /* 4 number <- <<('0' / ([1-9] digit* ('.' 
digit*)?))>> */ - func() bool { - position80, tokenIndex80 := position, tokenIndex - { - position81 := position - { - position82 := position - { - position83, tokenIndex83 := position, tokenIndex - if buffer[position] != rune('0') { - goto l84 - } - position++ - goto l83 - l84: - position, tokenIndex = position83, tokenIndex83 - if c := buffer[position]; c < rune('1') || c > rune('9') { - goto l80 - } - position++ - l85: - { - position86, tokenIndex86 := position, tokenIndex - if !_rules[ruledigit]() { - goto l86 - } - goto l85 - l86: - position, tokenIndex = position86, tokenIndex86 - } - { - position87, tokenIndex87 := position, tokenIndex - if buffer[position] != rune('.') { - goto l87 - } - position++ - l89: - { - position90, tokenIndex90 := position, tokenIndex - if !_rules[ruledigit]() { - goto l90 - } - goto l89 - l90: - position, tokenIndex = position90, tokenIndex90 - } - goto l88 - l87: - position, tokenIndex = position87, tokenIndex87 - } - l88: - } - l83: - add(rulePegText, position82) - } - add(rulenumber, position81) - } - return true - l80: - position, tokenIndex = position80, tokenIndex80 - return false - }, - /* 5 digit <- <[0-9]> */ - func() bool { - position91, tokenIndex91 := position, tokenIndex - { - position92 := position - if c := buffer[position]; c < rune('0') || c > rune('9') { - goto l91 - } - position++ - add(ruledigit, position92) - } - return true - l91: - position, tokenIndex = position91, tokenIndex91 - return false - }, - /* 6 time <- <(('t' / 'T') ('i' / 'I') ('m' / 'M') ('e' / 'E') ' ' <(year '-' month '-' day 'T' digit digit ':' digit digit ':' digit digit ((('-' / '+') digit digit ':' digit digit) / 'Z'))>)> */ - func() bool { - position93, tokenIndex93 := position, tokenIndex - { - position94 := position - { - position95, tokenIndex95 := position, tokenIndex - if buffer[position] != rune('t') { - goto l96 - } - position++ - goto l95 - l96: - position, tokenIndex = position95, tokenIndex95 - if buffer[position] != rune('T') { - goto l93 - } - position++ - } - l95: - { - position97, tokenIndex97 := position, tokenIndex - if buffer[position] != rune('i') { - goto l98 - } - position++ - goto l97 - l98: - position, tokenIndex = position97, tokenIndex97 - if buffer[position] != rune('I') { - goto l93 - } - position++ - } - l97: - { - position99, tokenIndex99 := position, tokenIndex - if buffer[position] != rune('m') { - goto l100 - } - position++ - goto l99 - l100: - position, tokenIndex = position99, tokenIndex99 - if buffer[position] != rune('M') { - goto l93 - } - position++ - } - l99: - { - position101, tokenIndex101 := position, tokenIndex - if buffer[position] != rune('e') { - goto l102 - } - position++ - goto l101 - l102: - position, tokenIndex = position101, tokenIndex101 - if buffer[position] != rune('E') { - goto l93 - } - position++ - } - l101: - if buffer[position] != rune(' ') { - goto l93 - } - position++ - { - position103 := position - if !_rules[ruleyear]() { - goto l93 - } - if buffer[position] != rune('-') { - goto l93 - } - position++ - if !_rules[rulemonth]() { - goto l93 - } - if buffer[position] != rune('-') { - goto l93 - } - position++ - if !_rules[ruleday]() { - goto l93 - } - if buffer[position] != rune('T') { - goto l93 - } - position++ - if !_rules[ruledigit]() { - goto l93 - } - if !_rules[ruledigit]() { - goto l93 - } - if buffer[position] != rune(':') { - goto l93 - } - position++ - if !_rules[ruledigit]() { - goto l93 - } - if !_rules[ruledigit]() { - goto l93 - } - if buffer[position] != rune(':') { - goto l93 - } - 
position++ - if !_rules[ruledigit]() { - goto l93 - } - if !_rules[ruledigit]() { - goto l93 - } - { - position104, tokenIndex104 := position, tokenIndex - { - position106, tokenIndex106 := position, tokenIndex - if buffer[position] != rune('-') { - goto l107 - } - position++ - goto l106 - l107: - position, tokenIndex = position106, tokenIndex106 - if buffer[position] != rune('+') { - goto l105 - } - position++ - } - l106: - if !_rules[ruledigit]() { - goto l105 - } - if !_rules[ruledigit]() { - goto l105 - } - if buffer[position] != rune(':') { - goto l105 - } - position++ - if !_rules[ruledigit]() { - goto l105 - } - if !_rules[ruledigit]() { - goto l105 - } - goto l104 - l105: - position, tokenIndex = position104, tokenIndex104 - if buffer[position] != rune('Z') { - goto l93 - } - position++ - } - l104: - add(rulePegText, position103) - } - add(ruletime, position94) - } - return true - l93: - position, tokenIndex = position93, tokenIndex93 - return false - }, - /* 7 date <- <(('d' / 'D') ('a' / 'A') ('t' / 'T') ('e' / 'E') ' ' <(year '-' month '-' day)>)> */ - func() bool { - position108, tokenIndex108 := position, tokenIndex - { - position109 := position - { - position110, tokenIndex110 := position, tokenIndex - if buffer[position] != rune('d') { - goto l111 - } - position++ - goto l110 - l111: - position, tokenIndex = position110, tokenIndex110 - if buffer[position] != rune('D') { - goto l108 - } - position++ - } - l110: - { - position112, tokenIndex112 := position, tokenIndex - if buffer[position] != rune('a') { - goto l113 - } - position++ - goto l112 - l113: - position, tokenIndex = position112, tokenIndex112 - if buffer[position] != rune('A') { - goto l108 - } - position++ - } - l112: - { - position114, tokenIndex114 := position, tokenIndex - if buffer[position] != rune('t') { - goto l115 - } - position++ - goto l114 - l115: - position, tokenIndex = position114, tokenIndex114 - if buffer[position] != rune('T') { - goto l108 - } - position++ - } - l114: - { - position116, tokenIndex116 := position, tokenIndex - if buffer[position] != rune('e') { - goto l117 - } - position++ - goto l116 - l117: - position, tokenIndex = position116, tokenIndex116 - if buffer[position] != rune('E') { - goto l108 - } - position++ - } - l116: - if buffer[position] != rune(' ') { - goto l108 - } - position++ - { - position118 := position - if !_rules[ruleyear]() { - goto l108 - } - if buffer[position] != rune('-') { - goto l108 - } - position++ - if !_rules[rulemonth]() { - goto l108 - } - if buffer[position] != rune('-') { - goto l108 - } - position++ - if !_rules[ruleday]() { - goto l108 - } - add(rulePegText, position118) - } - add(ruledate, position109) - } - return true - l108: - position, tokenIndex = position108, tokenIndex108 - return false - }, - /* 8 year <- <(('1' / '2') digit digit digit)> */ - func() bool { - position119, tokenIndex119 := position, tokenIndex - { - position120 := position - { - position121, tokenIndex121 := position, tokenIndex - if buffer[position] != rune('1') { - goto l122 - } - position++ - goto l121 - l122: - position, tokenIndex = position121, tokenIndex121 - if buffer[position] != rune('2') { - goto l119 - } - position++ - } - l121: - if !_rules[ruledigit]() { - goto l119 - } - if !_rules[ruledigit]() { - goto l119 - } - if !_rules[ruledigit]() { - goto l119 - } - add(ruleyear, position120) - } - return true - l119: - position, tokenIndex = position119, tokenIndex119 - return false - }, - /* 9 month <- <(('0' / '1') digit)> */ - func() bool { - position123, 
tokenIndex123 := position, tokenIndex - { - position124 := position - { - position125, tokenIndex125 := position, tokenIndex - if buffer[position] != rune('0') { - goto l126 - } - position++ - goto l125 - l126: - position, tokenIndex = position125, tokenIndex125 - if buffer[position] != rune('1') { - goto l123 - } - position++ - } - l125: - if !_rules[ruledigit]() { - goto l123 - } - add(rulemonth, position124) - } - return true - l123: - position, tokenIndex = position123, tokenIndex123 - return false - }, - /* 10 day <- <(((&('3') '3') | (&('2') '2') | (&('1') '1') | (&('0') '0')) digit)> */ - func() bool { - position127, tokenIndex127 := position, tokenIndex - { - position128 := position - { - switch buffer[position] { - case '3': - if buffer[position] != rune('3') { - goto l127 - } - position++ - break - case '2': - if buffer[position] != rune('2') { - goto l127 - } - position++ - break - case '1': - if buffer[position] != rune('1') { - goto l127 - } - position++ - break - default: - if buffer[position] != rune('0') { - goto l127 - } - position++ - break - } - } - - if !_rules[ruledigit]() { - goto l127 - } - add(ruleday, position128) - } - return true - l127: - position, tokenIndex = position127, tokenIndex127 - return false - }, - /* 11 and <- <(('a' / 'A') ('n' / 'N') ('d' / 'D'))> */ - nil, - /* 12 equal <- <'='> */ - nil, - /* 13 contains <- <(('c' / 'C') ('o' / 'O') ('n' / 'N') ('t' / 'T') ('a' / 'A') ('i' / 'I') ('n' / 'N') ('s' / 'S'))> */ - nil, - /* 14 le <- <('<' '=')> */ - nil, - /* 15 ge <- <('>' '=')> */ - nil, - /* 16 l <- <'<'> */ - nil, - /* 17 g <- <'>'> */ - nil, - nil, - } - p.rules = _rules -} diff --git a/libs/pubsub/query/query_test.go b/libs/pubsub/query/query_test.go deleted file mode 100644 index f0d94099..00000000 --- a/libs/pubsub/query/query_test.go +++ /dev/null @@ -1,87 +0,0 @@ -package query_test - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/libs/pubsub" - "github.com/tendermint/tendermint/libs/pubsub/query" -) - -func TestMatches(t *testing.T) { - var ( - txDate = "2017-01-01" - txTime = "2018-05-03T14:45:00Z" - ) - - testCases := []struct { - s string - tags map[string]string - err bool - matches bool - }{ - {"tm.events.type='NewBlock'", map[string]string{"tm.events.type": "NewBlock"}, false, true}, - - {"tx.gas > 7", map[string]string{"tx.gas": "8"}, false, true}, - {"tx.gas > 7 AND tx.gas < 9", map[string]string{"tx.gas": "8"}, false, true}, - {"body.weight >= 3.5", map[string]string{"body.weight": "3.5"}, false, true}, - {"account.balance < 1000.0", map[string]string{"account.balance": "900"}, false, true}, - {"apples.kg <= 4", map[string]string{"apples.kg": "4.0"}, false, true}, - {"body.weight >= 4.5", map[string]string{"body.weight": fmt.Sprintf("%v", float32(4.5))}, false, true}, - {"oranges.kg < 4 AND watermellons.kg > 10", map[string]string{"oranges.kg": "3", "watermellons.kg": "12"}, false, true}, - {"peaches.kg < 4", map[string]string{"peaches.kg": "5"}, false, false}, - - {"tx.date > DATE 2017-01-01", map[string]string{"tx.date": time.Now().Format(query.DateLayout)}, false, true}, - {"tx.date = DATE 2017-01-01", map[string]string{"tx.date": txDate}, false, true}, - {"tx.date = DATE 2018-01-01", map[string]string{"tx.date": txDate}, false, false}, - - {"tx.time >= TIME 2013-05-03T14:45:00Z", map[string]string{"tx.time": time.Now().Format(query.TimeLayout)}, false, true}, - {"tx.time = TIME 2013-05-03T14:45:00Z", 
map[string]string{"tx.time": txTime}, false, false}, - - {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Igor,Ivan"}, false, true}, - {"abci.owner.name CONTAINS 'Igor'", map[string]string{"abci.owner.name": "Pavel,Ivan"}, false, false}, - } - - for _, tc := range testCases { - q, err := query.New(tc.s) - if !tc.err { - require.Nil(t, err) - } - - if tc.matches { - assert.True(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should match %v", tc.s, tc.tags) - } else { - assert.False(t, q.Matches(pubsub.NewTagMap(tc.tags)), "Query '%s' should not match %v", tc.s, tc.tags) - } - } -} - -func TestMustParse(t *testing.T) { - assert.Panics(t, func() { query.MustParse("=") }) - assert.NotPanics(t, func() { query.MustParse("tm.events.type='NewBlock'") }) -} - -func TestConditions(t *testing.T) { - txTime, err := time.Parse(time.RFC3339, "2013-05-03T14:45:00Z") - require.NoError(t, err) - - testCases := []struct { - s string - conditions []query.Condition - }{ - {s: "tm.events.type='NewBlock'", conditions: []query.Condition{query.Condition{Tag: "tm.events.type", Op: query.OpEqual, Operand: "NewBlock"}}}, - {s: "tx.gas > 7 AND tx.gas < 9", conditions: []query.Condition{query.Condition{Tag: "tx.gas", Op: query.OpGreater, Operand: int64(7)}, query.Condition{Tag: "tx.gas", Op: query.OpLess, Operand: int64(9)}}}, - {s: "tx.time >= TIME 2013-05-03T14:45:00Z", conditions: []query.Condition{query.Condition{Tag: "tx.time", Op: query.OpGreaterEqual, Operand: txTime}}}, - } - - for _, tc := range testCases { - q, err := query.New(tc.s) - require.Nil(t, err) - - assert.Equal(t, tc.conditions, q.Conditions()) - } -} diff --git a/lite/client/main_test.go b/lite/client/main_test.go deleted file mode 100644 index 972d3731..00000000 --- a/lite/client/main_test.go +++ /dev/null @@ -1,25 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/tendermint/abci/example/kvstore" - - nm "github.com/tendermint/tendermint/node" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and merkleeyes) in the background to test against - app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) - code := m.Run() - - // and shut down proper at the end - node.Stop() - node.Wait() - os.Exit(code) -} diff --git a/lite/client/provider.go b/lite/client/provider.go deleted file mode 100644 index 5f3d7245..00000000 --- a/lite/client/provider.go +++ /dev/null @@ -1,141 +0,0 @@ -/* -Package client defines a provider that uses a rpcclient -to get information, which is used to get new headers -and validators directly from a node. -*/ -package client - -import ( - "bytes" - - rpcclient "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// SignStatusClient combines a SignClient and StatusClient. -type SignStatusClient interface { - rpcclient.SignClient - rpcclient.StatusClient -} - -type provider struct { - node SignStatusClient - lastHeight int64 -} - -// NewProvider can wrap any rpcclient to expose it as -// a read-only provider. -func NewProvider(node SignStatusClient) lite.Provider { - return &provider{node: node} -} - -// NewHTTPProvider can connect to a tendermint json-rpc endpoint -// at the given url, and uses that as a read-only provider. 
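//
// A hedged usage sketch (editor's note; the address is illustrative and
// error handling is elided):
//
//	p := client.NewHTTPProvider("tcp://localhost:26657")
//	fc, _ := p.LatestCommit()  // newest full commit the node knows about
//	fc, _ = p.GetByHeight(100) // or one from a specific height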
-func NewHTTPProvider(remote string) lite.Provider { - return &provider{ - node: rpcclient.NewHTTP(remote, "/websocket"), - } -} - -// StatusClient returns the internal node as a StatusClient -func (p *provider) StatusClient() rpcclient.StatusClient { - return p.node -} - -// StoreCommit is a noop, as clients can only read from the chain... -func (p *provider) StoreCommit(_ lite.FullCommit) error { return nil } - -// GetByHash gets the most recent validator set and checks whether it matches the given hash -// -// TODO: improve when the rpc interface supports more functionality -func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - var fc lite.FullCommit - vals, err := p.node.Validators(nil) - // if we get no validators, or a different height, return an error - if err != nil { - return fc, err - } - p.updateHeight(vals.BlockHeight) - vhash := types.NewValidatorSet(vals.Validators).Hash() - if !bytes.Equal(hash, vhash) { - return fc, liteErr.ErrCommitNotFound() - } - return p.seedFromVals(vals) -} - -// GetByHeight gets the validator set by height -func (p *provider) GetByHeight(h int64) (fc lite.FullCommit, err error) { - commit, err := p.node.Commit(&h) - if err != nil { - return fc, err - } - return p.seedFromCommit(commit) -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - commit, err := p.GetLatestCommit() - if err != nil { - return fc, err - } - return p.seedFromCommit(commit) -} - -// GetLatestCommit returns the most recent commit available, handling queries -// for future heights as per the semantics of GetByHeight. -func (p *provider) GetLatestCommit() (*ctypes.ResultCommit, error) { - status, err := p.node.Status() - if err != nil { - return nil, err - } - return p.node.Commit(&status.SyncInfo.LatestBlockHeight) -} - -// CommitFromResult converts an rpc commit result into a lite.Commit.
-func CommitFromResult(result *ctypes.ResultCommit) lite.Commit { - return (lite.Commit)(result.SignedHeader) -} - -func (p *provider) seedFromVals(vals *ctypes.ResultValidators) (lite.FullCommit, error) { - // now get the commits and build a full commit - commit, err := p.node.Commit(&vals.BlockHeight) - if err != nil { - return lite.FullCommit{}, err - } - fc := lite.NewFullCommit( - CommitFromResult(commit), - types.NewValidatorSet(vals.Validators), - ) - return fc, nil -} - -func (p *provider) seedFromCommit(commit *ctypes.ResultCommit) (fc lite.FullCommit, err error) { - fc.Commit = CommitFromResult(commit) - - // now get the proper validators - vals, err := p.node.Validators(&commit.Header.Height) - if err != nil { - return fc, err - } - - // make sure they match the commit (as we cannot enforce height) - vset := types.NewValidatorSet(vals.Validators) - if !bytes.Equal(vset.Hash(), commit.Header.ValidatorsHash) { - return fc, liteErr.ErrValidatorsChanged() - } - - p.updateHeight(commit.Header.Height) - fc.Validators = vset - return fc, nil -} - -func (p *provider) updateHeight(h int64) { - if h > p.lastHeight { - p.lastHeight = h - } -} diff --git a/lite/client/provider_test.go b/lite/client/provider_test.go deleted file mode 100644 index 94d47da3..00000000 --- a/lite/client/provider_test.go +++ /dev/null @@ -1,63 +0,0 @@ -package client - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" - rpcclient "github.com/tendermint/tendermint/rpc/client" - rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" -) - -func TestProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - cfg := rpctest.GetConfig() - rpcAddr := cfg.RPC.ListenAddress - genDoc, _ := types.GenesisDocFromFile(cfg.GenesisFile()) - chainID := genDoc.ChainID - p := NewHTTPProvider(rpcAddr) - require.NotNil(t, p) - - // let it produce some blocks - err := rpcclient.WaitForHeight(p.(*provider).node, 6, nil) - require.Nil(err) - - // let's get the highest block - seed, err := p.LatestCommit() - - require.Nil(err, "%+v", err) - sh := seed.Height() - vhash := seed.Header.ValidatorsHash - assert.True(sh < 5000) - - // let's check this is valid somehow - assert.Nil(seed.ValidateBasic(chainID)) - cert := lite.NewStaticCertifier(chainID, seed.Validators) - - // historical queries now work :) - lower := sh - 5 - seed, err = p.GetByHeight(lower) - assert.Nil(err, "%+v", err) - assert.Equal(lower, seed.Height()) - - // also get by hash (given the match) - seed, err = p.GetByHash(vhash) - require.Nil(err, "%+v", err) - require.Equal(vhash, seed.Header.ValidatorsHash) - err = cert.Certify(seed.Commit) - assert.Nil(err, "%+v", err) - - // get by hash fails without match - seed, err = p.GetByHash([]byte("foobar")) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // storing the seed silently ignored - err = p.StoreCommit(seed) - assert.Nil(err, "%+v", err) -} diff --git a/lite/commit.go b/lite/commit.go deleted file mode 100644 index 11ae6d7f..00000000 --- a/lite/commit.go +++ /dev/null @@ -1,99 +0,0 @@ -package lite - -import ( - "bytes" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// Certifier checks the votes to make sure the block really is signed properly. 
-// Certifier must know the current set of validators by some other means. -type Certifier interface { - Certify(check Commit) error - ChainID() string -} - -// Commit is basically the rpc /commit response, but extended -// -// This is the basepoint for proving anything on the blockchain. It contains -// a signed header. If the signatures are valid and > 2/3 of the known set, -// we can store this checkpoint and use it to prove any number of aspects of -// the system: such as txs, abci state, validator sets, etc... -type Commit types.SignedHeader - -// FullCommit is a commit and the actual validator set, -// the base info you need to update to a given point, -// assuming knowledge of some previous validator set -type FullCommit struct { - Commit `json:"commit"` - Validators *types.ValidatorSet `json:"validator_set"` -} - -// NewFullCommit returns a new FullCommit. -func NewFullCommit(commit Commit, vals *types.ValidatorSet) FullCommit { - return FullCommit{ - Commit: commit, - Validators: vals, - } -} - -// Height returns the height of the header. -func (c Commit) Height() int64 { - if c.Header == nil { - return 0 - } - return c.Header.Height -} - -// ValidatorsHash returns the hash of the validator set. -func (c Commit) ValidatorsHash() []byte { - if c.Header == nil { - return nil - } - return c.Header.ValidatorsHash -} - -// ValidateBasic does basic consistency checks and makes sure the headers -// and commits are all consistent and refer to our chain. -// -// Make sure to use a Verifier to validate that the signatures actually provide -// a sufficiently strong proof for this header's validity. -func (c Commit) ValidateBasic(chainID string) error { - // make sure the header is reasonable - if c.Header == nil { - return errors.New("Commit missing header") - } - if c.Header.ChainID != chainID { - return errors.Errorf("Header belongs to another chain '%s' not '%s'", - c.Header.ChainID, chainID) - } - - if c.Commit == nil { - return errors.New("Commit missing signatures") - } - - // make sure the header and commit match (height and hash) - if c.Commit.Height() != c.Header.Height { - return liteErr.ErrHeightMismatch(c.Commit.Height(), c.Header.Height) - } - hhash := c.Header.Hash() - chash := c.Commit.BlockID.Hash - if !bytes.Equal(hhash, chash) { - return errors.Errorf("Commits sign block %X header is block %X", - chash, hhash) - } - - // make sure the commit is reasonable - err := c.Commit.ValidateBasic() - if err != nil { - return errors.WithStack(err) - } - - // looks good, we just need to make sure the signatures are really from - // empowered validators - return nil -} diff --git a/lite/doc.go b/lite/doc.go deleted file mode 100644 index 89dc702f..00000000 --- a/lite/doc.go +++ /dev/null @@ -1,133 +0,0 @@ -/* -Package lite allows you to securely validate headers -without a full node. - -This library pulls together all the crypto and algorithms, -so given a relatively recent (< unbonding period) known -validator set, one can get indisputable proof that data is in -the chain (current state) or detect if the node is lying to -the client. - -Tendermint RPC exposes a lot of info, but a malicious node -could return any data it wants in response to queries, or even to requests -for block headers, making up fake signatures from non-existent -validators to justify it. This is a lot of logic to get -right, so it is contained in a small, easy-to-use library -that does it for you, letting you just build a nice UI.
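A minimal sketch of that flow, mirroring the package's own tests; the chain ID and RPC address are assumptions. Fetch a seed over RPC, run the basic consistency checks, then certify against the validator set you have decided to trust:

```go
package main

import (
	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/lite/client"
)

func main() {
	chainID := "my-chain"                                // assumption
	p := client.NewHTTPProvider("tcp://localhost:46657") // hypothetical address

	// Fetch a recent FullCommit and run the basic consistency checks.
	seed, err := p.LatestCommit()
	if err != nil {
		panic(err)
	}
	if err := seed.ValidateBasic(chainID); err != nil {
		panic(err)
	}

	// Trust the seed's validator set, then verify its commit cryptographically.
	cert := lite.NewStaticCertifier(chainID, seed.Validators)
	if err := cert.Certify(seed.Commit); err != nil {
		panic(err)
	}
}
```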
- -We design for clients who have no strong trust relationship -with any tendermint node, just the validator set as a whole. -Beyond building nice mobile or desktop applications, the -cosmos hub is another important example of a client -that needs undeniable proof without syncing the full chain, -in order to efficiently implement IBC. - -Commits - -There are two main data structures that we pass around - Commit -and FullCommit. Both of them mirror what information is -exposed in tendermint rpc. - -Commit is a block header along with enough validator signatures -to prove its validity (> 2/3 of the voting power). A FullCommit -is a Commit along with the full validator set. When the -validator set doesn't change, the Commit is enough, but since -the block header only has a hash, we need the FullCommit to -follow any changes to the validator set. - -Certifiers - -A Certifier validates a new Commit given the currently known -state. There are three different types of Certifiers exposed, -each one building on the last one, with additional complexity. - -Static - given the validator set upon initialization. Verifies -all signatures against that set and if the validator set -changes, it will reject all headers. - -Dynamic - this wraps Static and has the same Certify -method. However, it adds an Update method, which can be called -with a FullCommit when the validator set changes. If it can -prove this is a valid transition, it will update the validator -set. - -Inquiring - this wraps Dynamic and implements an auto-update -strategy on top of the Dynamic update. If a call to -Certify fails because the validator set has changed, then it -attempts to find a FullCommit and Update to that header. -To get these FullCommits, it makes use of a Provider. - -Providers - -A Provider allows us to store and retrieve the FullCommits, -to provide memory to the Inquiring Certifier. - -NewMemStoreProvider - in-memory cache. - -files.NewProvider - disk backed storage. - -client.NewHTTPProvider - query tendermint rpc. - -NewCacheProvider - combine multiple providers. - -The suggested use for local light clients is -client.NewHTTPProvider for getting new data (Source), -and NewCacheProvider(NewMemStoreProvider(), -files.NewProvider()) to store confirmed headers (Trusted) - -How We Track Validators - -Unless you want to blindly trust the node you talk with, you -need to trace every response back to a hash in a block header -and validate that the commit signatures of that block header match -the proper validator set. If there is a constant validator -set, you store it locally upon initialization of the client, -and check against that every time. - -Once there is a dynamic validator set, the issue of -verifying a block becomes a bit more tricky. There is -background information in a -github issue (https://github.com/tendermint/tendermint/issues/377). - -In short, if there is a block at height H with a known -(trusted) validator set V, and another block at height H' -(H' > H) with validator set V' != V, then we want a way to -safely update to it. - -First, get the new (unconfirmed) validator set V' and -verify that H' is internally consistent and properly signed by -this V'. Assuming it is a valid block, we check that at -least 2/3 of the validators in V also signed it, meaning -it would also be valid under our old assumptions. -That should be enough, but we can also check that -V counts for at least 2/3 of the total votes in H' -for extra safety (we can have a discussion if this is -strictly required).
If we can verify all this, -then we can accept H' and V' as valid and use that to -validate all blocks X > H'. - -If we cannot update directly from H -> H' because there was -too much change to the validator set, then we can look for -some Hm (H < Hm < H') with a validator set Vm. Then we try -to update H -> Hm and Hm -> H' in two separate steps. -If one of these steps doesn't work, then we continue -bisecting, until we eventually have to externally -validate the validator set changes at every block. - -Since we never trust any server in this protocol, only the -signatures themselves, it doesn't matter if the seed comes -from a (possibly malicious) node or a (possibly malicious) user. -We can accept it or reject it based only on our trusted -validator set and cryptographic proofs. This makes it -extremely important to verify that you have the proper -validator set when initializing the client, as that is the -root of all trust. - -Of course, this assumes that the known block is within the -unbonding period to avoid the "nothing at stake" problem. -If you haven't seen the state in a few months, you will need -to manually verify the new validator set hash using off-chain -means (the same as getting the initial hash). - -*/ -package lite diff --git a/lite/dynamic_certifier.go b/lite/dynamic_certifier.go deleted file mode 100644 index 0ddace8b..00000000 --- a/lite/dynamic_certifier.go +++ /dev/null @@ -1,96 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*DynamicCertifier)(nil) - -// DynamicCertifier uses a StaticCertifier for Certify, but adds an -// Update method to allow for a change of validators. -// -// You can pass in a FullCommit with another validator set, -// and if this is a provably secure transition (< 1/3 change, -// sufficient signatures), then it will update the -// validator set for the next Certify call. -// For security, it will only follow validator set changes -// going forward. -type DynamicCertifier struct { - cert *StaticCertifier - lastHeight int64 -} - -// NewDynamicCertifier returns a new dynamic certifier. -func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier { - return &DynamicCertifier{ - cert: NewStaticCertifier(chainID, vals), - lastHeight: height, - } -} - -// ChainID returns the chain id of this certifier. -// Implements Certifier. -func (dc *DynamicCertifier) ChainID() string { - return dc.cert.ChainID() -} - -// Validators returns the validators of this certifier. -func (dc *DynamicCertifier) Validators() *types.ValidatorSet { - return dc.cert.vSet -} - -// Hash returns the hash of this certifier. -func (dc *DynamicCertifier) Hash() []byte { - return dc.cert.Hash() -} - -// LastHeight returns the last height of this certifier. -func (dc *DynamicCertifier) LastHeight() int64 { - return dc.lastHeight -} - -// Certify verifies whether the commit is valid, updating the last seen height -// if it is and returning an error if it is not. -// Implements Certifier. -func (dc *DynamicCertifier) Certify(check Commit) error { - err := dc.cert.Certify(check) - if err == nil { - // update last seen height if input is valid - dc.lastHeight = check.Height() - } - return err -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so.
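Update is also the primitive behind the bisection described in the package docs above. A simplified sketch of that divide-and-conquer loop, assuming a hypothetical tryUpdate helper standing in for a provider lookup plus Update (compare updateToHeight in the inquiring certifier further down):

```go
package sketch

import "errors"

// tryUpdate is a hypothetical stand-in for "fetch the FullCommit at h and
// attempt DynamicCertifier.Update"; it reports whether the validator set
// changed too much to verify in one step.
type tryUpdate func(h int64) (tooMuchChange bool, err error)

// updateTo bisects (start, end] until every hop is a verifiable
// transition, as described in the package documentation.
func updateTo(start, end int64, try tryUpdate) error {
	tooMuch, err := try(end)
	if err != nil || !tooMuch {
		return err
	}
	mid := (start + end) / 2
	if mid == start || mid == end {
		return errors.New("no path of validators found")
	}
	// First bring the trusted state forward to mid, then finish the hop.
	if err := updateTo(start, mid, try); err != nil {
		return err
	}
	return updateTo(mid, end, try)
}
```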
-// -// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr) -func (dc *DynamicCertifier) Update(fc FullCommit) error { - // ignore all checkpoints in the past -> only to the future - h := fc.Height() - if h <= dc.lastHeight { - return liteErr.ErrPastTime() - } - - // first, verify if the input is self-consistent.... - err := fc.ValidateBasic(dc.ChainID()) - if err != nil { - return err - } - - // now, make sure not too much change... meaning this commit - // would be approved by the currently known validator set - // as well as the new set - commit := fc.Commit.Commit - err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit) - if err != nil { - return liteErr.ErrTooMuchChange() - } - - // looks good, we can update - dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators) - dc.lastHeight = h - return nil -} diff --git a/lite/dynamic_certifier_test.go b/lite/dynamic_certifier_test.go deleted file mode 100644 index 88c145f9..00000000 --- a/lite/dynamic_certifier_test.go +++ /dev/null @@ -1,130 +0,0 @@ -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/types" - - "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/errors" -) - -// TestDynamicCert just makes sure it still works like StaticCert -func TestDynamicCert(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - assert := assert.New(t) - // require := require.New(t) - - keys := lite.GenValKeys(4) - // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) - // and a certifier based on our known set - chainID := "test-dyno" - cert := lite.NewDynamicCertifier(chainID, vals, 0) - - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect validator change error - }{ - // perfect, signed by everyone - {keys, vals, 1, 0, len(keys), true, false}, - // skip little guy is okay - {keys, vals, 2, 1, len(keys), true, false}, - // but not the big guy - {keys, vals, 3, 0, len(keys) - 1, false, false}, - // even changing the power a little bit breaks the static validator - // the sigs are enough, but the validator hash is unknown - {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true}, - } - - for _, tc := range cases { - check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(check) - if tc.proper { - assert.Nil(err, "%+v", err) - assert.Equal(cert.LastHeight(), tc.height) - } else { - assert.NotNil(err) - if tc.changed { - assert.True(errors.IsValidatorsChangedErr(err), "%+v", err) - } - } - } -} - -// TestDynamicUpdate makes sure we update safely and sanely -func TestDynamicUpdate(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - chainID := "test-dyno-up" - keys := lite.GenValKeys(5) - vals := keys.ToValidators(20, 0) - cert := lite.NewDynamicCertifier(chainID, vals, 40) - - // one valid block to give us a sense of time - h := int64(100) - good := keys.GenCommit(chainID, h, nil, vals, []byte("foo"), []byte("params"), []byte("results"), 0, len(keys)) - err := cert.Certify(good) - require.Nil(err, "%+v", err) - - // some new sets to try later - keys2 := keys.Extend(2) - keys3 := keys2.Extend(4) - - // we try to update with 
some blocks - cases := []struct { - keys lite.ValKeys - vals *types.ValidatorSet - height int64 - first, last int // who actually signs - proper bool // true -> expect no error - changed bool // true -> expect too much change error - }{ - // same validator set, well signed, of course it is okay - {keys, vals, h + 10, 0, len(keys), true, false}, - // same validator set, poorly signed, fails - {keys, vals, h + 20, 2, len(keys), false, false}, - - // shift the power a little, works if properly signed - {keys, keys.ToValidators(10, 0), h + 30, 1, len(keys), true, false}, - // but not on a poor signature - {keys, keys.ToValidators(10, 0), h + 40, 2, len(keys), false, false}, - // and not if it was in the past - {keys, keys.ToValidators(10, 0), h + 25, 0, len(keys), false, false}, - - // let's try to adjust to a whole new validator set (we have 5/7 of the votes) - {keys2, keys2.ToValidators(10, 0), h + 33, 0, len(keys2), true, false}, - - // properly signed but too much change, not allowed (only 7/11 validators known) - {keys3, keys3.ToValidators(10, 0), h + 50, 0, len(keys3), false, true}, - } - - for _, tc := range cases { - fc := tc.keys.GenFullCommit(chainID, tc.height, nil, tc.vals, - []byte("bar"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Update(fc) - if tc.proper { - assert.Nil(err, "%d: %+v", tc.height, err) - // we update last seen height - assert.Equal(cert.LastHeight(), tc.height) - // and we update the proper validators - assert.EqualValues(fc.Header.ValidatorsHash, cert.Hash()) - } else { - assert.NotNil(err, "%d", tc.height) - // we don't update the height - assert.NotEqual(cert.LastHeight(), tc.height) - if tc.changed { - assert.True(errors.IsTooMuchChangeErr(err), - "%d: %+v", tc.height, err) - } - } - } -} diff --git a/lite/errors/errors.go b/lite/errors/errors.go deleted file mode 100644 index 99e42a0b..00000000 --- a/lite/errors/errors.go +++ /dev/null @@ -1,92 +0,0 @@ -package errors - -import ( - "fmt" - - "github.com/pkg/errors" -) - -var ( - errValidatorsChanged = fmt.Errorf("Validators differ between header and certifier") - errCommitNotFound = fmt.Errorf("Commit not found by provider") - errTooMuchChange = fmt.Errorf("Validators change too much to safely update") - errPastTime = fmt.Errorf("Update older than certifier height") - errNoPathFound = fmt.Errorf("Cannot find a path of validators") -) - -// IsCommitNotFoundErr checks whether an error is due to missing data -func IsCommitNotFoundErr(err error) bool { - return err != nil && (errors.Cause(err) == errCommitNotFound) -} - -// ErrCommitNotFound indicates that the requested commit was not found. -func ErrCommitNotFound() error { - return errors.WithStack(errCommitNotFound) -} - -// IsValidatorsChangedErr checks whether an error is due -// to a differing validator set. -func IsValidatorsChangedErr(err error) bool { - return err != nil && (errors.Cause(err) == errValidatorsChanged) -} - -// ErrValidatorsChanged indicates that the validator set was changed between two commits. -func ErrValidatorsChanged() error { - return errors.WithStack(errValidatorsChanged) -} - -// IsTooMuchChangeErr checks whether an error is due to too much change -// between two validator sets. -func IsTooMuchChangeErr(err error) bool { - return err != nil && (errors.Cause(err) == errTooMuchChange) -} - -// ErrTooMuchChange indicates that the underlying validator set was changed by >1/3. -func ErrTooMuchChange() error { - return errors.WithStack(errTooMuchChange) -} - -// IsPastTimeErr checks whether an error is due to an update older than the certifier height.
-func IsPastTimeErr(err error) bool { - return err != nil && (errors.Cause(err) == errPastTime) -} - -// ErrPastTime indicates that the update is older than the certifier height. -func ErrPastTime() error { - return errors.WithStack(errPastTime) -} - -// IsNoPathFoundErr checks whether an error is due to no path of -// validators in the provider from where we are to where we want to be -func IsNoPathFoundErr(err error) bool { - return err != nil && (errors.Cause(err) == errNoPathFound) -} - -// ErrNoPathFound indicates that no path of validators was found. -func ErrNoPathFound() error { - return errors.WithStack(errNoPathFound) -} - -//-------------------------------------------- - -type errHeightMismatch struct { - h1, h2 int64 -} - -func (e errHeightMismatch) Error() string { - return fmt.Sprintf("Blocks don't match - %d vs %d", e.h1, e.h2) -} - -// IsHeightMismatchErr checks whether an error is due to data from different blocks -func IsHeightMismatchErr(err error) bool { - if err == nil { - return false - } - _, ok := errors.Cause(err).(errHeightMismatch) - return ok -} - -// ErrHeightMismatch returns a mismatch error with a stack trace. -func ErrHeightMismatch(h1, h2 int64) error { - return errors.WithStack(errHeightMismatch{h1, h2}) -} diff --git a/lite/errors/errors_test.go b/lite/errors/errors_test.go deleted file mode 100644 index 479215e4..00000000 --- a/lite/errors/errors_test.go +++ /dev/null @@ -1,18 +0,0 @@ -package errors - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorHeight(t *testing.T) { - e1 := ErrHeightMismatch(2, 3) - e1.Error() - assert.True(t, IsHeightMismatchErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsHeightMismatchErr(e2)) - assert.False(t, IsHeightMismatchErr(nil)) -} diff --git a/lite/files/commit.go b/lite/files/commit.go deleted file mode 100644 index 8a7e4721..00000000 --- a/lite/files/commit.go +++ /dev/null @@ -1,93 +0,0 @@ -package files - -import ( - "io/ioutil" - "os" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -const ( - // MaxFullCommitSize is the maximum number of bytes we will - // read in for a full commit to avoid excessive allocations - // in the deserializer - MaxFullCommitSize = 1024 * 1024 -) - -// SaveFullCommit exports the seed in binary / go-amino style -func SaveFullCommit(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.MarshalBinaryWriter(f, fc) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// SaveFullCommitJSON exports the seed in a json format -func SaveFullCommitJSON(fc lite.FullCommit, path string) error { - f, err := os.Create(path) - if err != nil { - return errors.WithStack(err) - } - defer f.Close() - bz, err := cdc.MarshalJSON(fc) - if err != nil { - return errors.WithStack(err) - } - _, err = f.Write(bz) - if err != nil { - return errors.WithStack(err) - } - return nil -} - -// LoadFullCommit loads the full commit from the file system. -func LoadFullCommit(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - _, err = cdc.UnmarshalBinaryReader(f, &fc, 0) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} - -// LoadFullCommitJSON loads the commit from the file system in JSON format.
-func LoadFullCommitJSON(path string) (lite.FullCommit, error) { - var fc lite.FullCommit - f, err := os.Open(path) - if err != nil { - if os.IsNotExist(err) { - return fc, liteErr.ErrCommitNotFound() - } - return fc, errors.WithStack(err) - } - defer f.Close() - - bz, err := ioutil.ReadAll(f) - if err != nil { - return fc, errors.WithStack(err) - } - err = cdc.UnmarshalJSON(bz, &fc) - if err != nil { - return fc, errors.WithStack(err) - } - return fc, nil -} diff --git a/lite/files/commit_test.go b/lite/files/commit_test.go deleted file mode 100644 index e0235ba2..00000000 --- a/lite/files/commit_test.go +++ /dev/null @@ -1,66 +0,0 @@ -package files - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/lite" -) - -func tmpFile() string { - suffix := cmn.RandStr(16) - return filepath.Join(os.TempDir(), "fc-test-"+suffix) -} - -func TestSerializeFullCommits(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // some constants - appHash := []byte("some crazy thing") - chainID := "ser-ial" - h := int64(25) - - // build a fc - keys := lite.GenValKeys(5) - vals := keys.ToValidators(10, 0) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - - require.Equal(h, fc.Height()) - require.Equal(vals.Hash(), fc.ValidatorsHash()) - - // try read/write with json - jfile := tmpFile() - defer os.Remove(jfile) - jseed, err := LoadFullCommitJSON(jfile) - assert.NotNil(err) - err = SaveFullCommitJSON(fc, jfile) - require.Nil(err) - jseed, err = LoadFullCommitJSON(jfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, jseed.Height()) - assert.Equal(vals.Hash(), jseed.ValidatorsHash()) - - // try read/write with binary - bfile := tmpFile() - defer os.Remove(bfile) - bseed, err := LoadFullCommit(bfile) - assert.NotNil(err) - err = SaveFullCommit(fc, bfile) - require.Nil(err) - bseed, err = LoadFullCommit(bfile) - assert.Nil(err, "%+v", err) - assert.Equal(h, bseed.Height()) - assert.Equal(vals.Hash(), bseed.ValidatorsHash()) - - // make sure they don't read the other format (different) - _, err = LoadFullCommit(jfile) - assert.NotNil(err) - _, err = LoadFullCommitJSON(bfile) - assert.NotNil(err) -} diff --git a/lite/files/provider.go b/lite/files/provider.go deleted file mode 100644 index 327b0331..00000000 --- a/lite/files/provider.go +++ /dev/null @@ -1,139 +0,0 @@ -/* -Package files defines a Provider that stores all data in the filesystem - -We assume the same validator hash may be reused by many different -headers/Commits, and thus store it separately. This leaves us -with three issues: - - 1. Given a validator hash, retrieve the validator set if previously stored - 2. Given a block height, find the Commit with the highest height <= h - 3. 
Given a FullCommit, store it quickly to satisfy 1 and 2 - -Note that we do not worry about caching, as that can be achieved by -pairing this with a MemStoreProvider and CacheProvider from certifiers -*/ -package files - -import ( - "encoding/hex" - "fmt" - "math" - "os" - "path/filepath" - "sort" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// nolint -const ( - Ext = ".tsd" - ValDir = "validators" - CheckDir = "checkpoints" - dirPerm = os.FileMode(0755) - //filePerm = os.FileMode(0644) -) - -type provider struct { - valDir string - checkDir string -} - -// NewProvider creates the parent dir and subdirs -// for validators and checkpoints as needed -func NewProvider(dir string) lite.Provider { - valDir := filepath.Join(dir, ValDir) - checkDir := filepath.Join(dir, CheckDir) - for _, d := range []string{valDir, checkDir} { - err := os.MkdirAll(d, dirPerm) - if err != nil { - panic(err) - } - } - return &provider{valDir: valDir, checkDir: checkDir} -} - -func (p *provider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) + Ext -} - -func (p *provider) encodeHeight(h int64) string { - // pad up to 10^12 for height... - return fmt.Sprintf("%012d%s", h, Ext) -} - -// StoreCommit saves a full commit after it has been verified. -func (p *provider) StoreCommit(fc lite.FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - paths := []string{ - filepath.Join(p.checkDir, p.encodeHeight(fc.Height())), - filepath.Join(p.valDir, p.encodeHash(fc.Header.ValidatorsHash)), - } - for _, path := range paths { - err := SaveFullCommit(fc, path) - // unknown error in creating or writing immediately breaks - if err != nil { - return err - } - } - return nil -} - -// GetByHeight returns the closest commit with height <= h. -func (p *provider) GetByHeight(h int64) (lite.FullCommit, error) { - // first we look for exact match, then search... - path := filepath.Join(p.checkDir, p.encodeHeight(h)) - fc, err := LoadFullCommit(path) - if liteErr.IsCommitNotFoundErr(err) { - path, err = p.searchForHeight(h) - if err == nil { - fc, err = LoadFullCommit(path) - } - } - return fc, err -} - -// LatestCommit returns the newest commit stored. -func (p *provider) LatestCommit() (fc lite.FullCommit, err error) { - // Note to future: please update by 2077 to avoid rollover - return p.GetByHeight(math.MaxInt32 - 1) -} - -// search for height, looks for a file with highest height < h -// return certifiers.ErrCommitNotFound() if not there... -func (p *provider) searchForHeight(h int64) (string, error) { - d, err := os.Open(p.checkDir) - if err != nil { - return "", errors.WithStack(err) - } - files, err := d.Readdirnames(0) - - d.Close() - if err != nil { - return "", errors.WithStack(err) - } - - desired := p.encodeHeight(h) - sort.Strings(files) - i := sort.SearchStrings(files, desired) - if i == 0 { - return "", liteErr.ErrCommitNotFound() - } - found := files[i-1] - path := filepath.Join(p.checkDir, found) - return path, errors.WithStack(err) -} - -// GetByHash returns a commit exactly matching this validator hash. 
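The zero-padding in encodeHeight above is what makes searchForHeight's use of sort.SearchStrings correct: plain decimal filenames do not sort numerically, padded ones do. A standalone illustration (the heights are arbitrary):

```go
package main

import (
	"fmt"
	"sort"
)

func main() {
	// Mirror encodeHeight: pad heights to 12 digits before the extension.
	enc := func(h int64) string { return fmt.Sprintf("%012d%s", h, ".tsd") }

	files := []string{enc(20), enc(100), enc(90)}
	sort.Strings(files) // lexicographic order now matches numeric order

	// As in searchForHeight: the entry just before the insertion point
	// is the highest stored height below the query.
	i := sort.SearchStrings(files, enc(95))
	fmt.Println(files[i-1]) // 000000000090.tsd
}
```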
-func (p *provider) GetByHash(hash []byte) (lite.FullCommit, error) { - path := filepath.Join(p.valDir, p.encodeHash(hash)) - return LoadFullCommit(path) -} diff --git a/lite/files/provider_test.go b/lite/files/provider_test.go deleted file mode 100644 index 5deebb1a..00000000 --- a/lite/files/provider_test.go +++ /dev/null @@ -1,96 +0,0 @@ -package files_test - -import ( - "bytes" - "errors" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/lite/files" -) - -func checkEqual(stored, loaded lite.FullCommit, chainID string) error { - err := loaded.ValidateBasic(chainID) - if err != nil { - return err - } - if !bytes.Equal(stored.ValidatorsHash(), loaded.ValidatorsHash()) { - return errors.New("Different block hashes") - } - return nil -} - -func TestFileProvider(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - dir, err := ioutil.TempDir("", "fileprovider-test") - assert.Nil(err) - defer os.RemoveAll(dir) - p := files.NewProvider(dir) - - chainID := "test-files" - appHash := []byte("some-data") - keys := lite.GenValKeys(5) - count := 10 - - // make a bunch of seeds... - seeds := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // two seeds for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... - vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - check := keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - seeds[i] = lite.NewFullCommit(check, vals) - } - - // check provider is empty - seed, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - seed, err = p.GetByHash(seeds[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range seeds { - err = p.StoreCommit(s) - require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - // by height as well - s2, err = p.GetByHeight(s.Height()) - err = checkEqual(s, s2, chainID) - assert.Nil(err) - } - - // make sure we get the last hash if we overstep - seed, err = p.GetByHeight(5000) - if assert.Nil(err, "%+v", err) { - assert.Equal(seeds[count-1].Height(), seed.Height()) - err = checkEqual(seeds[count-1], seed, chainID) - assert.Nil(err) - } - - // and middle ones as well - seed, err = p.GetByHeight(47) - if assert.Nil(err, "%+v", err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, seed.Height()) - } - - // and proper error for too low - _, err = p.GetByHeight(5) - assert.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) -} diff --git a/lite/files/wire.go b/lite/files/wire.go deleted file mode 100644 index 99f98931..00000000 --- a/lite/files/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package files - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/lite/helpers.go b/lite/helpers.go deleted file mode 100644 index 714675af..00000000 --- a/lite/helpers.go +++ /dev/null @@ -1,153 +0,0 @@ -package lite - -import ( - "time" - - crypto "github.com/tendermint/go-crypto" - - 
"github.com/tendermint/tendermint/types" -) - -// ValKeys is a helper for testing. -// -// It lets us simulate signing with many keys, either ed25519 or secp256k1. -// The main use case is to create a set, and call GenCommit -// to get properly signed header for testing. -// -// You can set different weights of validators each time you call -// ToValidators, and can optionally extend the validator set later -// with Extend or ExtendSecp -type ValKeys []crypto.PrivKey - -// GenValKeys produces an array of private keys to generate commits. -func GenValKeys(n int) ValKeys { - res := make(ValKeys, n) - for i := range res { - res[i] = crypto.GenPrivKeyEd25519() - } - return res -} - -// Change replaces the key at index i. -func (v ValKeys) Change(i int) ValKeys { - res := make(ValKeys, len(v)) - copy(res, v) - res[i] = crypto.GenPrivKeyEd25519() - return res -} - -// Extend adds n more keys (to remove, just take a slice). -func (v ValKeys) Extend(n int) ValKeys { - extra := GenValKeys(n) - return append(v, extra...) -} - -// GenSecpValKeys produces an array of secp256k1 private keys to generate commits. -func GenSecpValKeys(n int) ValKeys { - res := make(ValKeys, n) - for i := range res { - res[i] = crypto.GenPrivKeySecp256k1() - } - return res -} - -// ExtendSecp adds n more secp256k1 keys (to remove, just take a slice). -func (v ValKeys) ExtendSecp(n int) ValKeys { - extra := GenSecpValKeys(n) - return append(v, extra...) -} - -// ToValidators produces a list of validators from the set of keys -// The first key has weight `init` and it increases by `inc` every step -// so we can have all the same weight, or a simple linear distribution -// (should be enough for testing). -func (v ValKeys) ToValidators(init, inc int64) *types.ValidatorSet { - res := make([]*types.Validator, len(v)) - for i, k := range v { - res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) - } - return types.NewValidatorSet(res) -} - -// signHeader properly signs the header with all keys from first to last exclusive. -func (v ValKeys) signHeader(header *types.Header, first, last int) *types.Commit { - votes := make([]*types.Vote, len(v)) - - // we need this list to keep the ordering... 
- vset := v.ToValidators(1, 0) - - // fill in the votes we want - for i := first; i < last && i < len(v); i++ { - vote := makeVote(header, vset, v[i]) - votes[vote.ValidatorIndex] = vote - } - - res := &types.Commit{ - BlockID: types.BlockID{Hash: header.Hash()}, - Precommits: votes, - } - return res -} - -func makeVote(header *types.Header, vals *types.ValidatorSet, key crypto.PrivKey) *types.Vote { - addr := key.PubKey().Address() - idx, _ := vals.GetByAddress(addr) - vote := &types.Vote{ - ValidatorAddress: addr, - ValidatorIndex: idx, - Height: header.Height, - Round: 1, - Timestamp: time.Now().UTC(), - Type: types.VoteTypePrecommit, - BlockID: types.BlockID{Hash: header.Hash()}, - } - // Sign it - signBytes := vote.SignBytes(header.ChainID) - vote.Signature = key.Sign(signBytes) - return vote -} - -// Silences warning that vals can also be merkle.Hashable -// nolint: interfacer -func genHeader(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { - - return &types.Header{ - ChainID: chainID, - Height: height, - Time: time.Now(), - NumTxs: int64(len(txs)), - TotalTxs: int64(len(txs)), - // LastBlockID - // LastCommitHash - ValidatorsHash: vals.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, - } -} - -// GenCommit calls genHeader and signHeader and combines them into a Commit. -func (v ValKeys) GenCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) Commit { - - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - check := Commit{ - Header: header, - Commit: v.signHeader(header, first, last), - } - return check -} - -// GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. -func (v ValKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - vals *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { - - header := genHeader(chainID, height, txs, vals, appHash, consHash, resHash) - commit := Commit{ - Header: header, - Commit: v.signHeader(header, first, last), - } - return NewFullCommit(commit, vals) -} diff --git a/lite/inquiring_certifier.go b/lite/inquiring_certifier.go deleted file mode 100644 index 042bd08e..00000000 --- a/lite/inquiring_certifier.go +++ /dev/null @@ -1,163 +0,0 @@ -package lite - -import ( - "github.com/tendermint/tendermint/types" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -var _ Certifier = (*InquiringCertifier)(nil) - -// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call -// to Certify fails due to a change in the validator set, InquiringCertifier will try to find a -// previous FullCommit which it can use to safely update the validator set. It uses a source -// provider to obtain the needed FullCommits. It stores properly validated data on the local system. -type InquiringCertifier struct { - cert *DynamicCertifier - // These are only properly validated data, from local system - trusted Provider - // This is a source of new info, like a node rpc, or other import method - Source Provider -} - -// NewInquiringCertifier returns a new Inquiring object. It uses the trusted provider to store -// validated data and the source provider to obtain missing FullCommits. -// -// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The source -// provider should be a client.HTTPProvider.
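Putting the doc comment above into practice, a sketch of calling the constructor that follows with the provider stack the package docs suggest; the directory and RPC address are assumptions:

```go
package sketch

import (
	"github.com/tendermint/tendermint/lite"
	"github.com/tendermint/tendermint/lite/client"
	"github.com/tendermint/tendermint/lite/files"
)

func newCertifier(chainID string, firstSeed lite.FullCommit) (*lite.InquiringCertifier, error) {
	// Trusted store: in-memory cache in front of disk-backed storage.
	trusted := lite.NewCacheProvider(
		lite.NewMemStoreProvider(),
		files.NewProvider("/tmp/lite-data"), // hypothetical directory
	)
	// Source of new, as-yet-untrusted data: a node's RPC endpoint.
	source := client.NewHTTPProvider("tcp://localhost:46657") // hypothetical address

	return lite.NewInquiringCertifier(chainID, firstSeed, trusted, source)
}
```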
-func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider, - source Provider) (*InquiringCertifier, error) { - - // store the data in trusted - err := trusted.StoreCommit(fc) - if err != nil { - return nil, err - } - - return &InquiringCertifier{ - cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()), - trusted: trusted, - Source: source, - }, nil -} - -// ChainID returns the chain id. -// Implements Certifier. -func (ic *InquiringCertifier) ChainID() string { - return ic.cert.ChainID() -} - -// Validators returns the validator set. -func (ic *InquiringCertifier) Validators() *types.ValidatorSet { - return ic.cert.cert.vSet -} - -// LastHeight returns the last height. -func (ic *InquiringCertifier) LastHeight() int64 { - return ic.cert.lastHeight -} - -// Certify makes sure this checkpoint is valid. -// -// If the validators have changed since the last known time, it looks -// for a path to prove the new validators. -// -// On success, it will store the checkpoint in the store for later viewing. -// Implements Certifier. -func (ic *InquiringCertifier) Certify(commit Commit) error { - err := ic.useClosestTrust(commit.Height()) - if err != nil { - return err - } - - err = ic.cert.Certify(commit) - if !liteErr.IsValidatorsChangedErr(err) { - return err - } - err = ic.updateToHash(commit.Header.ValidatorsHash) - if err != nil { - return err - } - - err = ic.cert.Certify(commit) - if err != nil { - return err - } - - // store the new checkpoint - return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators())) -} - -// Update will verify if this is a valid change and update -// the certifying validator set if safe to do so. -func (ic *InquiringCertifier) Update(fc FullCommit) error { - err := ic.useClosestTrust(fc.Height()) - if err != nil { - return err - } - - err = ic.cert.Update(fc) - if err == nil { - err = ic.trusted.StoreCommit(fc) - } - return err -} - -func (ic *InquiringCertifier) useClosestTrust(h int64) error { - closest, err := ic.trusted.GetByHeight(h) - if err != nil { - return err - } - - // if the best seed is not the one we currently use, - // let's just reset the dynamic validator - if closest.Height() != ic.LastHeight() { - ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height()) - } - return nil -} - -// updateToHash gets the validator hash we want to update to -// if IsTooMuchChangeErr, we try to find a path by binary search over height -func (ic *InquiringCertifier) updateToHash(vhash []byte) error { - // try to get the match, and update - fc, err := ic.Source.GetByHash(vhash) - if err != nil { - return err - } - err = ic.cert.Update(fc) - // handle IsTooMuchChangeErr by using divide and conquer - if liteErr.IsTooMuchChangeErr(err) { - err = ic.updateToHeight(fc.Height()) - } - return err -} - -// updateToHeight will use divide-and-conquer to find a path to h -func (ic *InquiringCertifier) updateToHeight(h int64) error { - // try to update to this height (with checks) - fc, err := ic.Source.GetByHeight(h) - if err != nil { - return err - } - start, end := ic.LastHeight(), fc.Height() - if end <= start { - return liteErr.ErrNoPathFound() - } - err = ic.Update(fc) - - // we can handle IsTooMuchChangeErr specially - if !liteErr.IsTooMuchChangeErr(err) { - return err - } - - // try to update to mid - mid := (start + end) / 2 - err = ic.updateToHeight(mid) - if err != nil { - return err - } - - // if we made it to mid, we recurse - return ic.updateToHeight(h) -} diff --git a/lite/inquiring_certifier_test.go
b/lite/inquiring_certifier_test.go deleted file mode 100644 index db8160bd..00000000 --- a/lite/inquiring_certifier_test.go +++ /dev/null @@ -1,173 +0,0 @@ -// nolint: vetshadow -package lite_test - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" -) - -func TestInquirerValidPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "inquiry-test" - consHash := []byte("params") - resHash := []byte("results") - count := 50 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) - vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - require.Nil(err) - - // this should fail validation.... - commit := commits[count-1].Commit - err = cert.Certify(commit) - require.NotNil(err) - - // adding a few commits in the middle should be insufficient - for i := 10; i < 13; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerMinimalPath(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "minimal-path" - consHash := []byte("other-params") - count := 12 - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the validators, so we are just below 2/3 - keys = keys.Extend(len(keys)/2 - 1) - vals := keys.ToValidators(vote, 0) - h := int64(5 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // this should fail validation.... 
- commit := commits[count-1].Commit - err := cert.Certify(commit) - require.NotNil(err) - - // add a few seed in the middle should be insufficient - for i := 5; i < 8; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.NotNil(err) - - // with more info, we succeed - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - err = cert.Certify(commit) - assert.Nil(err, "%+v", err) -} - -func TestInquirerVerifyHistorical(t *testing.T) { - assert, require := assert.New(t), require.New(t) - trust := lite.NewMemStoreProvider() - source := lite.NewMemStoreProvider() - - // set up the validators to generate test blocks - var vote int64 = 10 - keys := lite.GenValKeys(5) - - // construct a bunch of commits, each with one more height than the last - chainID := "inquiry-test" - count := 10 - consHash := []byte("special-params") - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // extend the keys by 1 each time - keys = keys.Extend(1) - vals := keys.ToValidators(vote, 0) - h := int64(20 + 10*i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, - len(keys)) - } - - // initialize a certifier with the initial state - cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source) - - // store a few commits as trust - for _, i := range []int{2, 5} { - trust.StoreCommit(commits[i]) - } - - // let's see if we can jump forward using trusted commits - err := source.StoreCommit(commits[7]) - require.Nil(err, "%+v", err) - check := commits[7].Commit - err = cert.Certify(check) - require.Nil(err, "%+v", err) - assert.Equal(check.Height(), cert.LastHeight()) - - // add access to all commits via untrusted source - for i := 0; i < count; i++ { - err := source.StoreCommit(commits[i]) - require.Nil(err) - } - - // try to check an unknown seed in the past - mid := commits[3].Commit - err = cert.Certify(mid) - require.Nil(err, "%+v", err) - assert.Equal(mid.Height(), cert.LastHeight()) - - // and jump all the way forward again - end := commits[count-1].Commit - err = cert.Certify(end) - require.Nil(err, "%+v", err) - assert.Equal(end.Height(), cert.LastHeight()) -} diff --git a/lite/memprovider.go b/lite/memprovider.go deleted file mode 100644 index ac0d8321..00000000 --- a/lite/memprovider.go +++ /dev/null @@ -1,152 +0,0 @@ -package lite - -import ( - "encoding/hex" - "sort" - "sync" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -type memStoreProvider struct { - mtx sync.RWMutex - // byHeight is always sorted by Height... need to support range search (nil, h] - // btree would be more efficient for larger sets - byHeight fullCommits - byHash map[string]FullCommit - - sorted bool -} - -// fullCommits just exists to allow easy sorting -type fullCommits []FullCommit - -func (s fullCommits) Len() int { return len(s) } -func (s fullCommits) Swap(i, j int) { s[i], s[j] = s[j], s[i] } -func (s fullCommits) Less(i, j int) bool { - return s[i].Height() < s[j].Height() -} - -// NewMemStoreProvider returns a new in-memory provider. -func NewMemStoreProvider() Provider { - return &memStoreProvider{ - byHeight: fullCommits{}, - byHash: map[string]FullCommit{}, - } -} - -func (m *memStoreProvider) encodeHash(hash []byte) string { - return hex.EncodeToString(hash) -} - -// StoreCommit stores a FullCommit after verifying it. 
-func (m *memStoreProvider) StoreCommit(fc FullCommit) error { - // make sure the fc is self-consistent before saving - err := fc.ValidateBasic(fc.Commit.Header.ChainID) - if err != nil { - return err - } - - // store the valid fc - key := m.encodeHash(fc.ValidatorsHash()) - - m.mtx.Lock() - defer m.mtx.Unlock() - m.byHash[key] = fc - m.byHeight = append(m.byHeight, fc) - m.sorted = false - return nil -} - -// GetByHeight returns the stored FullCommit with the greatest height <= h, -// or an error if no such commit is found. -func (m *memStoreProvider) GetByHeight(h int64) (FullCommit, error) { - // By heuristics, GetByHeight with linear search is fast enough - // for about 50 keys but after that, it needs binary search. - // See https://github.com/tendermint/tendermint/pull/1043#issue-285188242 - m.mtx.RLock() - n := len(m.byHeight) - m.mtx.RUnlock() - - if n <= 50 { - return m.getByHeightLinearSearch(h) - } - return m.getByHeightBinarySearch(h) -} - -func (m *memStoreProvider) sortByHeightIfNecessaryLocked() { - if !m.sorted { - sort.Sort(m.byHeight) - m.sorted = true - } -} - -func (m *memStoreProvider) getByHeightLinearSearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - // search from highest to lowest - for i := len(m.byHeight) - 1; i >= 0; i-- { - if fc := m.byHeight[i]; fc.Height() <= h { - return fc, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -func (m *memStoreProvider) getByHeightBinarySearch(h int64) (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - m.sortByHeightIfNecessaryLocked() - low, high := 0, len(m.byHeight)-1 - var mid int - var hmid int64 - var midFC FullCommit - // Our goal is to either find: - // * the item whose height matches the query exactly, or - // * the item with the greatest height <= the query - for low <= high { - mid = int(uint(low+high) >> 1) // Avoid an overflow - midFC = m.byHeight[mid] - hmid = midFC.Height() - switch { - case hmid == h: - return midFC, nil - case hmid < h: - low = mid + 1 - case hmid > h: - high = mid - 1 - } - } - - if high >= 0 { - if highFC := m.byHeight[high]; highFC.Height() < h { - return highFC, nil - } - } - return FullCommit{}, liteErr.ErrCommitNotFound() -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (m *memStoreProvider) GetByHash(hash []byte) (FullCommit, error) { - m.mtx.RLock() - defer m.mtx.RUnlock() - - fc, ok := m.byHash[m.encodeHash(hash)] - if !ok { - return fc, liteErr.ErrCommitNotFound() - } - return fc, nil -} - -// LatestCommit returns the latest FullCommit or an error if no commits exist.
-func (m *memStoreProvider) LatestCommit() (FullCommit, error) { - m.mtx.Lock() - defer m.mtx.Unlock() - - l := len(m.byHeight) - if l == 0 { - return FullCommit{}, liteErr.ErrCommitNotFound() - } - m.sortByHeightIfNecessaryLocked() - return m.byHeight[l-1], nil -} diff --git a/lite/performance_test.go b/lite/performance_test.go deleted file mode 100644 index 8cd522cb..00000000 --- a/lite/performance_test.go +++ /dev/null @@ -1,365 +0,0 @@ -package lite - -import ( - "fmt" - "math/rand" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -func TestMemStoreProvidergetByHeightBinaryAndLinearSameResult(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - - // Store a bunch of commits at specific heights - // and then ensure that: - // * getByHeightLinearSearch - // * getByHeightBinarySearch - // both return the exact same result - - // 1. Non-existent height commits - nonExistent := []int64{-1000, -1, 0, 1, 10, 11, 17, 31, 67, 1000, 1e9} - ensureNonExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, nonExistent) - ensureNonExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, nonExistent) - - // 2. Save some known height commits - knownHeights := []int64{0, 1, 7, 9, 12, 13, 18, 44, 23, 16, 1024, 100, 199, 1e9} - createAndStoreCommits(t, p, knownHeights) - - // 3. Now check if those heights are retrieved - ensureExistentCommitsAtHeight(t, "getByHeightLinearSearch", p.getByHeightLinearSearch, knownHeights) - ensureExistentCommitsAtHeight(t, "getByHeightBinarySearch", p.getByHeightBinarySearch, knownHeights) - - // 4. And now for the height probing to ensure that any height - // requested returns a fullCommit of height <= requestedHeight. 
- comparegetByHeightAlgorithms(t, p, 0, 0) - comparegetByHeightAlgorithms(t, p, 1, 1) - comparegetByHeightAlgorithms(t, p, 2, 1) - comparegetByHeightAlgorithms(t, p, 5, 1) - comparegetByHeightAlgorithms(t, p, 7, 7) - comparegetByHeightAlgorithms(t, p, 10, 9) - comparegetByHeightAlgorithms(t, p, 12, 12) - comparegetByHeightAlgorithms(t, p, 14, 13) - comparegetByHeightAlgorithms(t, p, 19, 18) - comparegetByHeightAlgorithms(t, p, 43, 23) - comparegetByHeightAlgorithms(t, p, 45, 44) - comparegetByHeightAlgorithms(t, p, 1025, 1024) - comparegetByHeightAlgorithms(t, p, 101, 100) - comparegetByHeightAlgorithms(t, p, 1e3, 199) - comparegetByHeightAlgorithms(t, p, 1e4, 1024) - comparegetByHeightAlgorithms(t, p, 1e9, 1e9) - comparegetByHeightAlgorithms(t, p, 1e9+1, 1e9) -} - -func createAndStoreCommits(t *testing.T, p Provider, heights []int64) { - chainID := "cache-best-height-binary-and-linear" - appHash := []byte("0xdeadbeef") - keys := GenValKeys(len(heights) / 2) - - for _, h := range heights { - vals := keys.ToValidators(10, int64(len(heights)/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} - -func comparegetByHeightAlgorithms(t *testing.T, p *memStoreProvider, ask, expect int64) { - algos := map[string]func(int64) (FullCommit, error){ - "getHeightByLinearSearch": p.getByHeightLinearSearch, - "getHeightByBinarySearch": p.getByHeightBinarySearch, - } - - for algo, fn := range algos { - fc, err := fn(ask) - // t.Logf("%s got=%v want=%d", algo, expect, fc.Height()) - require.Nil(t, err, "%s: %+v", algo, err) - if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "%s: %+v", algo, err) - } - } -} - -var blankFullCommit FullCommit - -func ensureNonExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.NotNil(t, err, "#%d: %s: height=%d should return non-nil error", i, prefix, qh) - assert.Equal(t, fc, blankFullCommit, "#%d: %s: height=%d\ngot =%+v\nwant=%+v", i, prefix, qh, fc, blankFullCommit) - } -} - -func ensureExistentCommitsAtHeight(t *testing.T, prefix string, fn func(int64) (FullCommit, error), data []int64) { - for i, qh := range data { - fc, err := fn(qh) - assert.Nil(t, err, "#%d: %s: height=%d should not return an error: %v", i, prefix, qh, err) - assert.NotEqual(t, fc, blankFullCommit, "#%d: %s: height=%d got a blankCommit", i, prefix, qh) - } -} - -func BenchmarkGenCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkGenCommit(b, keys) -} - -func BenchmarkGenCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkGenCommit(b, keys) -} - -func benchmarkGenCommit(b *testing.B, keys ValKeys) { - chainID := fmt.Sprintf("bench-%d", len(keys)) - vals := keys.ToValidators(20, 10) - for i := 0; i < b.N; i++ { - h := int64(1 + i) - appHash := []byte(fmt.Sprintf("h=%d", h)) - resHash := []byte(fmt.Sprintf("res=%d", h)) - keys.GenCommit(chainID, h, nil, vals, appHash, []byte("params"), resHash, 0, len(keys)) - } -} - -// this benchmarks generating one key -func BenchmarkGenValKeys(b *testing.B) { - keys := GenValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} 
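The height probes above assert that both lookups return the greatest stored height at or below the query. A standalone sketch of that search, using the same overflow-safe midpoint as getByHeightBinarySearch (not the package's code, just the same invariant):

```go
package main

import "fmt"

// searchLE returns the greatest element of the sorted slice that is <= h.
func searchLE(heights []int64, h int64) (int64, bool) {
	low, high := 0, len(heights)-1
	for low <= high {
		mid := int(uint(low+high) >> 1) // avoid overflow, as in the original
		switch {
		case heights[mid] == h:
			return heights[mid], true
		case heights[mid] < h:
			low = mid + 1
		default:
			high = mid - 1
		}
	}
	if high >= 0 {
		return heights[high], true
	}
	return 0, false
}

func main() {
	heights := []int64{20, 30, 40, 50}
	fmt.Println(searchLE(heights, 45)) // 40 true: closest stored height <= 45
	fmt.Println(searchLE(heights, 5))  // 0 false: nothing at or below 5
}
```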
- -// this benchmarks generating one key -func BenchmarkGenSecpValKeys(b *testing.B) { - keys := GenSecpValKeys(20) - for i := 0; i < b.N; i++ { - keys = keys.Extend(1) - } -} - -func BenchmarkToValidators20(b *testing.B) { - benchmarkToValidators(b, 20) -} - -func BenchmarkToValidators100(b *testing.B) { - benchmarkToValidators(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidators(b *testing.B, nodes int) { - keys := GenValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkToValidatorsSec100(b *testing.B) { - benchmarkToValidatorsSec(b, 100) -} - -// this benchmarks constructing the validator set (.PubKey() * nodes) -func benchmarkToValidatorsSec(b *testing.B, nodes int) { - keys := GenSecpValKeys(nodes) - for i := 1; i <= b.N; i++ { - keys.ToValidators(int64(2*i), int64(i)) - } -} - -func BenchmarkCertifyCommit20(b *testing.B) { - keys := GenValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommit100(b *testing.B) { - keys := GenValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec20(b *testing.B) { - keys := GenSecpValKeys(20) - benchmarkCertifyCommit(b, keys) -} - -func BenchmarkCertifyCommitSec100(b *testing.B) { - keys := GenSecpValKeys(100) - benchmarkCertifyCommit(b, keys) -} - -func benchmarkCertifyCommit(b *testing.B, keys ValKeys) { - chainID := "bench-certify" - vals := keys.ToValidators(20, 10) - cert := NewStaticCertifier(chainID, vals) - check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys)) - for i := 0; i < b.N; i++ { - err := cert.Certify(check) - if err != nil { - panic(err) - } - } - -} - -type algo bool - -const ( - linearSearch = true - binarySearch = false -) - -// Lazy load the commits -var fcs5, fcs50, fcs100, fcs500, fcs1000 []FullCommit -var h5, h50, h100, h500, h1000 []int64 -var commitsOnce sync.Once - -func lazyGenerateFullCommits(b *testing.B) { - b.Logf("Generating FullCommits") - commitsOnce.Do(func() { - fcs5, h5 = genFullCommits(nil, nil, 5) - b.Logf("Generated 5 FullCommits") - fcs50, h50 = genFullCommits(fcs5, h5, 50) - b.Logf("Generated 50 FullCommits") - fcs100, h100 = genFullCommits(fcs50, h50, 100) - b.Logf("Generated 100 FullCommits") - fcs500, h500 = genFullCommits(fcs100, h100, 500) - b.Logf("Generated 500 FullCommits") - fcs1000, h1000 = genFullCommits(fcs500, h500, 1000) - b.Logf("Generated 1000 FullCommits") - }) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightLinearSearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, linearSearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch5(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs5, h5, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch50(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs50, h50, binarySearch) -} 
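The benchmarks above share expensive fixtures through a sync.Once guard, so the FullCommits are generated only the first time any benchmark needs them, and ResetTimer keeps that setup out of the measurement. A self-contained sketch of the same lazy-fixture pattern, with illustrative names (lazyFixtures, BenchmarkSum) standing in for the real generators:

```go
package main

import (
	"fmt"
	"sync"
	"testing"
)

// Fixtures shared by several benchmarks; built once, on first use.
var (
	fixtures     []int64
	fixturesOnce sync.Once
)

func lazyFixtures(b *testing.B) []int64 {
	fixturesOnce.Do(func() {
		b.Log("building fixtures once")
		for i := int64(0); i < 1000; i++ {
			fixtures = append(fixtures, 20+10*i) // mirrors the 20, 30, 40... heights above
		}
	})
	return fixtures
}

func BenchmarkSum(b *testing.B) {
	data := lazyFixtures(b)
	b.ResetTimer() // exclude fixture construction from the measurement
	var sum int64
	for i := 0; i < b.N; i++ {
		for _, v := range data {
			sum += v
		}
	}
	_ = sum
}

func main() { fmt.Println(testing.Benchmark(BenchmarkSum)) }
```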
- -func BenchmarkMemStoreProviderGetByHeightBinarySearch100(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs100, h100, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch500(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs500, h500, binarySearch) -} - -func BenchmarkMemStoreProviderGetByHeightBinarySearch1000(b *testing.B) { - benchmarkMemStoreProvidergetByHeight(b, fcs1000, h1000, binarySearch) -} - -var rng = rand.New(rand.NewSource(10)) - -func benchmarkMemStoreProvidergetByHeight(b *testing.B, fcs []FullCommit, fHeights []int64, algo algo) { - lazyGenerateFullCommits(b) - - b.StopTimer() - mp := NewMemStoreProvider() - for i, fc := range fcs { - if err := mp.StoreCommit(fc); err != nil { - b.Fatalf("FullCommit #%d: err: %v", i, err) - } - } - qHeights := make([]int64, len(fHeights)) - copy(qHeights, fHeights) - // Append some non-existent heights to trigger the worst cases. - qHeights = append(qHeights, 19, -100, -10000, 1e7, -17, 31, -1e9) - - memP := mp.(*memStoreProvider) - searchFn := memP.getByHeightLinearSearch - if algo == binarySearch { // nolint - searchFn = memP.getByHeightBinarySearch - } - - hPerm := rng.Perm(len(qHeights)) - b.StartTimer() - b.ResetTimer() - for i := 0; i < b.N; i++ { - for _, j := range hPerm { - h := qHeights[j] - if _, err := searchFn(h); err != nil { - } - } - } - b.ReportAllocs() -} - -func genFullCommits(prevFC []FullCommit, prevH []int64, want int) ([]FullCommit, []int64) { - fcs := make([]FullCommit, len(prevFC)) - copy(fcs, prevFC) - heights := make([]int64, len(prevH)) - copy(heights, prevH) - - appHash := []byte("benchmarks") - chainID := "benchmarks-gen-full-commits" - n := want - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - vals := keys.ToValidators(10, int64(n/2)) - h := int64(20 + 10*i) - fcs = append(fcs, keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5)) - heights = append(heights, h) - } - return fcs, heights -} - -func TestMemStoreProviderLatestCommitAlwaysUsesSorted(t *testing.T) { - p := NewMemStoreProvider().(*memStoreProvider) - // 1. With no commits yet stored, it should return ErrCommitNotFound - got, err := p.LatestCommit() - require.Equal(t, err.Error(), liteErr.ErrCommitNotFound().Error(), "should return ErrCommitNotFound()") - require.Equal(t, got, blankFullCommit, "With no fullcommits, it should return a blank FullCommit") - - // 2. Generate some full commits now and we'll add them unsorted. - genAndStoreCommitsOfHeight(t, p, 27, 100, 1, 12, 1000, 17, 91) - fc, err := p.LatestCommit() - require.Nil(t, err, "with commits saved no error expected") - require.NotEqual(t, fc, blankFullCommit, "with commits saved no blank FullCommit") - require.Equal(t, fc.Height(), int64(1000), "the latest commit i.e. the largest expected") -} - -func genAndStoreCommitsOfHeight(t *testing.T, p Provider, heights ...int64) { - n := len(heights) - appHash := []byte("tests") - chainID := "tests-gen-full-commits" - keys := GenValKeys(2 + (n / 3)) - for i := 0; i < n; i++ { - h := heights[i] - vals := keys.ToValidators(10, int64(n/2)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p.StoreCommit(fc) - require.NoError(t, err, "StoreCommit height=%d", h) - } -} diff --git a/lite/provider.go b/lite/provider.go deleted file mode 100644 index 22dc964a..00000000 --- a/lite/provider.go +++ /dev/null @@ -1,103 +0,0 @@ -package lite - -// Provider is used to get more validators by other means. 
-// -// Examples: MemProvider, files.Provider, client.Provider, CacheProvider.... -type Provider interface { - // StoreCommit saves a FullCommit after we have verified it, - // so we can query for it later. Important for updating our - // store of trusted commits. - StoreCommit(fc FullCommit) error - // GetByHeight returns the closest commit with height <= h. - GetByHeight(h int64) (FullCommit, error) - // GetByHash returns a commit exactly matching this validator hash. - GetByHash(hash []byte) (FullCommit, error) - // LatestCommit returns the newest commit stored. - LatestCommit() (FullCommit, error) -} - -// cacheProvider allows you to place one or more caches in front of a source -// Provider. It runs through them in order until a match is found. -// So you can keep a local cache, and check with the network if -// no data is there. -type cacheProvider struct { - Providers []Provider -} - -// NewCacheProvider returns a new provider which wraps multiple other providers. -func NewCacheProvider(providers ...Provider) Provider { - return cacheProvider{ - Providers: providers, - } -} - -// StoreCommit tries to add the seed to all providers. -// -// Aborts on first error it encounters (closest provider) -func (c cacheProvider) StoreCommit(fc FullCommit) (err error) { - for _, p := range c.Providers { - err = p.StoreCommit(fc) - if err != nil { - break - } - } - return err -} - -// GetByHeight should return the closest possible match from all providers. -// -// The Cache is usually organized in order from cheapest call (memory) -// to most expensive calls (disk/network). However, since GetByHeight returns -// a FullCommit at h' <= h, if the memory has a seed at h-10, but the network would -// give us the exact match, a naive "stop at first non-error" would hide -// the actual desired results. -// -// Thus, we query each provider in order until we find an exact match -// or we finished querying them all. If at least one returned a non-error, -// then this returns the best match (minimum h-h'). -func (c cacheProvider) GetByHeight(h int64) (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.GetByHeight(h) - if err == nil { - if tfc.Height() > fc.Height() { - fc = tfc - } - if tfc.Height() == h { - break - } - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err -} - -// GetByHash returns the FullCommit for the hash or an error if the commit is not found. -func (c cacheProvider) GetByHash(hash []byte) (fc FullCommit, err error) { - for _, p := range c.Providers { - fc, err = p.GetByHash(hash) - if err == nil { - break - } - } - return fc, err -} - -// LatestCommit returns the latest FullCommit or an error if no commit exists. 
-func (c cacheProvider) LatestCommit() (fc FullCommit, err error) { - for _, p := range c.Providers { - var tfc FullCommit - tfc, err = p.LatestCommit() - if err == nil && tfc.Height() > fc.Height() { - fc = tfc - } - } - // even if the last one had an error, if any was a match, this is good - if fc.Height() > 0 { - err = nil - } - return fc, err -} diff --git a/lite/provider_test.go b/lite/provider_test.go deleted file mode 100644 index 77b5b1a8..00000000 --- a/lite/provider_test.go +++ /dev/null @@ -1,149 +0,0 @@ -// nolint: vetshadow -package lite_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/lite" - liteErr "github.com/tendermint/tendermint/lite/errors" -) - -// missingProvider doesn't store anything, always a miss -// Designed as a mock for testing -type missingProvider struct{} - -// NewMissingProvider returns a provider which does not store anything and always misses. -func NewMissingProvider() lite.Provider { - return missingProvider{} -} - -func (missingProvider) StoreCommit(lite.FullCommit) error { return nil } -func (missingProvider) GetByHeight(int64) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} -func (missingProvider) GetByHash([]byte) (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} -func (missingProvider) LatestCommit() (lite.FullCommit, error) { - return lite.FullCommit{}, liteErr.ErrCommitNotFound() -} - -func TestMemProvider(t *testing.T) { - p := lite.NewMemStoreProvider() - checkProvider(t, p, "test-mem", "empty") -} - -func TestCacheProvider(t *testing.T) { - p := lite.NewCacheProvider( - NewMissingProvider(), - lite.NewMemStoreProvider(), - NewMissingProvider(), - ) - checkProvider(t, p, "test-cache", "kjfhekfhkewhgit") -} - -func checkProvider(t *testing.T, p lite.Provider, chainID, app string) { - assert, require := assert.New(t), require.New(t) - appHash := []byte(app) - keys := lite.GenValKeys(5) - count := 10 - - // make a bunch of commits... - commits := make([]lite.FullCommit, count) - for i := 0; i < count; i++ { - // two commits for each validator, to check how we handle dups - // (10, 0), (10, 1), (10, 1), (10, 2), (10, 2), ... 
- vals := keys.ToValidators(10, int64(count/2)) - h := int64(20 + 10*i) - commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - } - - // check provider is empty - fc, err := p.GetByHeight(20) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - fc, err = p.GetByHash(commits[3].ValidatorsHash()) - require.NotNil(err) - assert.True(liteErr.IsCommitNotFoundErr(err)) - - // now add them all to the provider - for _, s := range commits { - err = p.StoreCommit(s) - require.Nil(err) - // and make sure we can get it back - s2, err := p.GetByHash(s.ValidatorsHash()) - assert.Nil(err) - assert.Equal(s, s2) - // by height as well - s2, err = p.GetByHeight(s.Height()) - assert.Nil(err) - assert.Equal(s, s2) - } - - // make sure we get the last hash if we overstep - fc, err = p.GetByHeight(5000) - if assert.Nil(err) { - assert.Equal(commits[count-1].Height(), fc.Height()) - assert.Equal(commits[count-1], fc) - } - - // and middle ones as well - fc, err = p.GetByHeight(47) - if assert.Nil(err) { - // we only step by 10, so 40 must be the one below this - assert.EqualValues(40, fc.Height()) - } - -} - -// this will make a get height, and if it is good, set the data as well -func checkGetHeight(t *testing.T, p lite.Provider, ask, expect int64) { - fc, err := p.GetByHeight(ask) - require.Nil(t, err, "GetByHeight") - if assert.Equal(t, expect, fc.Height()) { - err = p.StoreCommit(fc) - require.Nil(t, err, "StoreCommit") - } -} - -func TestCacheGetsBestHeight(t *testing.T) { - // assert, require := assert.New(t), require.New(t) - require := require.New(t) - - // we will write data to the second level of the cache (p2), - // and see what gets cached, stored in - p := lite.NewMemStoreProvider() - p2 := lite.NewMemStoreProvider() - cp := lite.NewCacheProvider(p, p2) - - chainID := "cache-best-height" - appHash := []byte("01234567") - keys := lite.GenValKeys(5) - count := 10 - - // set a bunch of commits - for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) - h := int64(10 * (i + 1)) - fc := keys.GenFullCommit(chainID, h, nil, vals, appHash, []byte("params"), []byte("results"), 0, 5) - err := p2.StoreCommit(fc) - require.NoError(err) - } - - // let's get a few heights from the cache and set them proper - checkGetHeight(t, cp, 57, 50) - checkGetHeight(t, cp, 33, 30) - - // make sure they are set in p as well (but nothing else) - checkGetHeight(t, p, 44, 30) - checkGetHeight(t, p, 50, 50) - checkGetHeight(t, p, 99, 50) - - // now, query the cache for a higher value - checkGetHeight(t, p2, 99, 90) - checkGetHeight(t, cp, 99, 90) -} diff --git a/lite/proxy/block.go b/lite/proxy/block.go deleted file mode 100644 index 4cff9ee6..00000000 --- a/lite/proxy/block.go +++ /dev/null @@ -1,49 +0,0 @@ -package proxy - -import ( - "bytes" - - "github.com/pkg/errors" - - "github.com/tendermint/tendermint/lite" - certerr "github.com/tendermint/tendermint/lite/errors" - "github.com/tendermint/tendermint/types" -) - -func ValidateBlockMeta(meta *types.BlockMeta, check lite.Commit) error { - if meta == nil { - return errors.New("expecting a non-nil BlockMeta") - } - // TODO: check the BlockID?? 
- return ValidateHeader(meta.Header, check) -} - -func ValidateBlock(meta *types.Block, check lite.Commit) error { - if meta == nil { - return errors.New("expecting a non-nil Block") - } - err := ValidateHeader(meta.Header, check) - if err != nil { - return err - } - if !bytes.Equal(meta.Data.Hash(), meta.Header.DataHash) { - return errors.New("Data hash doesn't match header") - } - return nil -} - -func ValidateHeader(head *types.Header, check lite.Commit) error { - if head == nil { - return errors.New("expecting a non-nil Header") - } - // make sure they are for the same height (obvious fail) - if head.Height != check.Height() { - return certerr.ErrHeightMismatch(head.Height, check.Height()) - } - // check if they are equal by using hashes - chead := check.Header - if !bytes.Equal(head.Hash(), chead.Hash()) { - return errors.New("Headers don't match") - } - return nil -} diff --git a/lite/proxy/certifier.go b/lite/proxy/certifier.go deleted file mode 100644 index 6e319dc0..00000000 --- a/lite/proxy/certifier.go +++ /dev/null @@ -1,35 +0,0 @@ -package proxy - -import ( - "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - "github.com/tendermint/tendermint/lite/files" -) - -func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) { - trust := lite.NewCacheProvider( - lite.NewMemStoreProvider(), - files.NewProvider(rootDir), - ) - - source := certclient.NewHTTPProvider(nodeAddr) - - // XXX: total insecure hack to avoid `init` - fc, err := source.LatestCommit() - /* XXX - // this gets the most recent verified commit - fc, err := trust.LatestCommit() - if certerr.IsCommitNotFoundErr(err) { - return nil, errors.New("Please run init first to establish a root of trust") - }*/ - if err != nil { - return nil, err - } - - cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source) - if err != nil { - return nil, err - } - - return cert, nil -} diff --git a/lite/proxy/errors.go b/lite/proxy/errors.go deleted file mode 100644 index 5a2713e3..00000000 --- a/lite/proxy/errors.go +++ /dev/null @@ -1,22 +0,0 @@ -package proxy - -import ( - "fmt" - - "github.com/pkg/errors" -) - -//-------------------------------------------- - -var errNoData = fmt.Errorf("No data returned for query") - -// IsNoDataErr checks whether an error is due to a query returning empty data -func IsNoDataErr(err error) bool { - return errors.Cause(err) == errNoData -} - -func ErrNoData() error { - return errors.WithStack(errNoData) -} - -//-------------------------------------------- diff --git a/lite/proxy/errors_test.go b/lite/proxy/errors_test.go deleted file mode 100644 index 7f51be50..00000000 --- a/lite/proxy/errors_test.go +++ /dev/null @@ -1,17 +0,0 @@ -package proxy - -import ( - "errors" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestErrorNoData(t *testing.T) { - e1 := ErrNoData() - assert.True(t, IsNoDataErr(e1)) - - e2 := errors.New("foobar") - assert.False(t, IsNoDataErr(e2)) - assert.False(t, IsNoDataErr(nil)) -} diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go deleted file mode 100644 index fe10399d..00000000 --- a/lite/proxy/proxy.go +++ /dev/null @@ -1,77 +0,0 @@ -package proxy - -import ( - "net/http" - - "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/log" - - rpcclient "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpc "github.com/tendermint/tendermint/rpc/lib/server" -) - 
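The errors.go shown above follows a common pkg/errors idiom: keep the sentinel private, expose a constructor that attaches a stack trace via errors.WithStack, and expose a predicate that matches via errors.Cause so the sentinel is still recognized after callers wrap it. A minimal sketch of the same idiom; errNotReady and its helpers are illustrative names mirroring the errNoData trio above:

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

// Private sentinel: callers match it via the predicate, never directly.
var errNotReady = fmt.Errorf("resource not ready")

// ErrNotReady returns the sentinel with a stack trace attached.
func ErrNotReady() error { return errors.WithStack(errNotReady) }

// IsNotReadyErr reports whether err is (or wraps) the sentinel.
func IsNotReadyErr(err error) bool { return errors.Cause(err) == errNotReady }

func main() {
	err := errors.Wrap(ErrNotReady(), "loading state")
	fmt.Println(IsNotReadyErr(err))                 // true: Cause unwraps both layers
	fmt.Println(IsNotReadyErr(errors.New("other"))) // false
}
```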
-const (
- wsEndpoint = "/websocket"
-)
-
-// StartProxy will start the websocket manager on the client,
-// set up the rpc routes to proxy via the given client,
-// and start up an http/rpc server on the location given by listenAddr (e.g. :1234)
-func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error {
- err := c.Start()
- if err != nil {
- return err
- }
-
- cdc := amino.NewCodec()
- ctypes.RegisterAmino(cdc)
- r := RPCRoutes(c)
-
- // build the handler...
- mux := http.NewServeMux()
- rpc.RegisterRPCFuncs(mux, r, cdc, logger)
-
- wm := rpc.NewWebsocketManager(r, cdc, rpc.EventSubscriber(c))
- wm.SetLogger(logger)
- core.SetLogger(logger)
- mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)
-
- _, err = rpc.StartHTTPServer(listenAddr, mux, logger)
-
- return err
-}
-
-// RPCRoutes just routes everything to the given client, as if it were
-// a tendermint fullnode.
-//
-// If we want security, the client must implement it as a secure client.
-func RPCRoutes(c rpcclient.Client) map[string]*rpc.RPCFunc {
-
- return map[string]*rpc.RPCFunc{
- // Subscribe/unsubscribe are reserved for websocket events.
- // We can just use the core tendermint impl, which uses the
- // EventSwitch we registered in NewWebsocketManager above
- "subscribe": rpc.NewWSRPCFunc(core.Subscribe, "query"),
- "unsubscribe": rpc.NewWSRPCFunc(core.Unsubscribe, "query"),
-
- // info API
- "status": rpc.NewRPCFunc(c.Status, ""),
- "blockchain": rpc.NewRPCFunc(c.BlockchainInfo, "minHeight,maxHeight"),
- "genesis": rpc.NewRPCFunc(c.Genesis, ""),
- "block": rpc.NewRPCFunc(c.Block, "height"),
- "commit": rpc.NewRPCFunc(c.Commit, "height"),
- "tx": rpc.NewRPCFunc(c.Tx, "hash,prove"),
- "validators": rpc.NewRPCFunc(c.Validators, ""),
-
- // broadcast API
- "broadcast_tx_commit": rpc.NewRPCFunc(c.BroadcastTxCommit, "tx"),
- "broadcast_tx_sync": rpc.NewRPCFunc(c.BroadcastTxSync, "tx"),
- "broadcast_tx_async": rpc.NewRPCFunc(c.BroadcastTxAsync, "tx"),
-
- // abci API
- "abci_query": rpc.NewRPCFunc(c.ABCIQuery, "path,data,prove"),
- "abci_info": rpc.NewRPCFunc(c.ABCIInfo, ""),
- }
-}
diff --git a/lite/proxy/query.go b/lite/proxy/query.go
deleted file mode 100644
index 9c9557f8..00000000
--- a/lite/proxy/query.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package proxy
-
-import (
- "github.com/pkg/errors"
-
- cmn "github.com/tendermint/tmlibs/common"
-
- "github.com/tendermint/tendermint/lite"
- "github.com/tendermint/tendermint/lite/client"
- certerr "github.com/tendermint/tendermint/lite/errors"
- rpcclient "github.com/tendermint/tendermint/rpc/client"
- ctypes "github.com/tendermint/tendermint/rpc/core/types"
-)
-
-// KeyProof represents a proof of existence or absence of a single key.
-// Copied from iavl repo. TODO
-type KeyProof interface {
- // Verify verifies the proof is valid. To verify absence,
- // the value should be nil.
- Verify(key, value, root []byte) error
-
- // Root returns the root hash of the proof.
- Root() []byte
-
- // Bytes serializes the proof.
- Bytes() []byte
-}
-
-// GetWithProof will query the key on the given node, and verify it has
-// a valid proof, as defined by the certifier.
-//
-// If there is any error in checking, returns an error.
-// If val is non-empty, proof should be KeyExistsProof;
-// if val is empty, proof should be KeyMissingProof.
-func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client,
- cert lite.Certifier) (
- val cmn.HexBytes, height int64, proof KeyProof, err error) {
-
- if reqHeight < 0 {
- err = errors.Errorf("Height cannot be negative")
- return
- }
-
- _resp, proof, err := GetWithProofOptions("/key", key,
- rpcclient.ABCIQueryOptions{Height: reqHeight},
- node, cert)
- if _resp != nil {
- resp := _resp.Response
- val, height = resp.Value, resp.Height
- }
- return val, height, proof, err
-}
-
-// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions
-func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions,
- node rpcclient.Client, cert lite.Certifier) (
- *ctypes.ResultABCIQuery, KeyProof, error) {
-
- _resp, err := node.ABCIQueryWithOptions(path, key, opts)
- if err != nil {
- return nil, nil, err
- }
- resp := _resp.Response
-
- // make sure the proof is the proper height
- if resp.IsErr() {
- err = errors.Errorf("Query error for key %X: %d", key, resp.Code)
- return nil, nil, err
- }
- if len(resp.Key) == 0 || len(resp.Proof) == 0 {
- return nil, nil, ErrNoData()
- }
- if resp.Height == 0 {
- return nil, nil, errors.New("Height returned is zero")
- }
-
- // AppHash for height H is in header H+1
- commit, err := GetCertifiedCommit(resp.Height+1, node, cert)
- if err != nil {
- return nil, nil, err
- }
-
- _ = commit
- return &ctypes.ResultABCIQuery{Response: resp}, nil, nil
-
- /* // TODO refactor so iavl stuff is not in tendermint core
- // https://github.com/tendermint/tendermint/issues/1183
- if len(resp.Value) > 0 {
- // The key was found, construct a proof of existence.
- proof, err := iavl.ReadKeyProof(resp.Proof)
- if err != nil {
- return nil, nil, errors.Wrap(err, "Error reading proof")
- }
-
- eproof, ok := proof.(*iavl.KeyExistsProof)
- if !ok {
- return nil, nil, errors.New("Expected KeyExistsProof for non-empty value")
- }
-
- // Validate the proof against the certified header to ensure data integrity.
- err = eproof.Verify(resp.Key, resp.Value, commit.Header.AppHash)
- if err != nil {
- return nil, nil, errors.Wrap(err, "Couldn't verify proof")
- }
- return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil
- }
-
- // The key wasn't found, construct a proof of non-existence.
- proof, err := iavl.ReadKeyProof(resp.Proof)
- if err != nil {
- return nil, nil, errors.Wrap(err, "Error reading proof")
- }
-
- aproof, ok := proof.(*iavl.KeyAbsentProof)
- if !ok {
- return nil, nil, errors.New("Expected KeyAbsentProof for empty Value")
- }
-
- // Validate the proof against the certified header to ensure data integrity.
- err = aproof.Verify(resp.Key, nil, commit.Header.AppHash)
- if err != nil {
- return nil, nil, errors.Wrap(err, "Couldn't verify proof")
- }
- return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData()
- */
-}
-
-// GetCertifiedCommit gets the signed header for a given height
-// and certifies it. Returns an error if unable to get a proven header.
-func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) {
-
- // FIXME: cannot use cert.GetByHeight for now, as it also requires
- // Validators and will fail on querying tendermint for non-current height.
- // When this is supported, we should use it instead...
- rpcclient.WaitForHeight(node, h, nil) - cresp, err := node.Commit(&h) - if err != nil { - return lite.Commit{}, err - } - - commit := client.CommitFromResult(cresp) - // validate downloaded checkpoint with our request and trust store. - if commit.Height() != h { - return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height()) - } - - if err = cert.Certify(commit); err != nil { - return lite.Commit{}, err - } - - return commit, nil -} diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go deleted file mode 100644 index d6dcccc9..00000000 --- a/lite/proxy/query_test.go +++ /dev/null @@ -1,139 +0,0 @@ -package proxy - -import ( - "fmt" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/abci/example/kvstore" - - "github.com/tendermint/tendermint/lite" - certclient "github.com/tendermint/tendermint/lite/client" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/rpc/client" - rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" -) - -var node *nm.Node - -// TODO fix tests!! - -func TestMain(m *testing.M) { - app := kvstore.NewKVStoreApplication() - - node = rpctest.StartTendermint(app) - - code := m.Run() - - node.Stop() - node.Wait() - os.Exit(code) -} - -func kvstoreTx(k, v []byte) []byte { - return []byte(fmt.Sprintf("%s=%s", k, v)) -} - -func _TestAppProofs(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - cl := client.NewLocal(node) - client.WaitForHeight(cl, 1, nil) - - k := []byte("my-key") - v := []byte("my-value") - - tx := kvstoreTx(k, v) - br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) - require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) - require.EqualValues(0, br.DeliverTx.Code) - brh := br.Height - - // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) - require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) - - client.WaitForHeight(cl, 3, nil) - latest, err := source.LatestCommit() - require.NoError(err, "%+v", err) - rootHash := latest.Header.AppHash - - // verify a query before the tx block has no data (and valid non-exist proof) - bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) - fmt.Println(bs, height, proof, err) - require.NotNil(err) - require.True(IsNoDataErr(err), err.Error()) - require.Nil(bs) - - // but given that block it is good - bs, height, proof, err = GetWithProof(k, brh, cl, cert) - require.NoError(err, "%+v", err) - require.NotNil(proof) - require.True(height >= int64(latest.Header.Height)) - - // Alexis there is a bug here, somehow the above code gives us rootHash = nil - // and proof.Verify doesn't care, while proofNotExists.Verify fails. - // I am hacking this in to make it pass, but please investigate further. - rootHash = proof.Root() - - //err = wire.ReadBinaryBytes(bs, &data) - //require.NoError(err, "%+v", err) - assert.EqualValues(v, bs) - err = proof.Verify(k, bs, rootHash) - assert.NoError(err, "%+v", err) - - // Test non-existing key. 
- missing := []byte("my-missing-key") - bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsNoDataErr(err)) - require.Nil(bs) - require.NotNil(proof) - err = proof.Verify(missing, nil, rootHash) - assert.NoError(err, "%+v", err) - err = proof.Verify(k, nil, rootHash) - assert.Error(err) -} - -func _TestTxProofs(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - cl := client.NewLocal(node) - client.WaitForHeight(cl, 1, nil) - - tx := kvstoreTx([]byte("key-a"), []byte("value-a")) - br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) - require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) - require.EqualValues(0, br.DeliverTx.Code) - brh := br.Height - - source := certclient.NewProvider(cl) - seed, err := source.GetByHeight(brh - 2) - require.NoError(err, "%+v", err) - cert := lite.NewStaticCertifier("my-chain", seed.Validators) - - // First let's make sure a bogus transaction hash returns a valid non-existence proof. - key := types.Tx([]byte("bogus")).Hash() - res, err := cl.Tx(key, true) - require.NotNil(err) - require.Contains(err.Error(), "not found") - - // Now let's check with the real tx hash. - key = types.Tx(tx).Hash() - res, err = cl.Tx(key, true) - require.NoError(err, "%+v", err) - require.NotNil(res) - err = res.Proof.Validate(key) - assert.NoError(err, "%+v", err) - - commit, err := GetCertifiedCommit(br.Height, cl, cert) - require.Nil(err, "%+v", err) - require.Equal(res.Proof.RootHash, commit.Header.DataHash) -} diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go deleted file mode 100644 index 782a6aab..00000000 --- a/lite/proxy/validate_test.go +++ /dev/null @@ -1,218 +0,0 @@ -package proxy_test - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/lite" - "github.com/tendermint/tendermint/lite/proxy" - "github.com/tendermint/tendermint/types" -) - -var ( - deadBeefTxs = types.Txs{[]byte("DE"), []byte("AD"), []byte("BE"), []byte("EF")} - deadBeefHash = deadBeefTxs.Hash() - testTime1 = time.Date(2018, 1, 1, 1, 1, 1, 1, time.UTC) - testTime2 = time.Date(2017, 1, 2, 1, 1, 1, 1, time.UTC) -) - -var hdrHeight11 = &types.Header{ - Height: 11, - Time: testTime1, - ValidatorsHash: []byte("Tendermint"), -} - -func TestValidateBlock(t *testing.T) { - tests := []struct { - block *types.Block - commit lite.Commit - wantErr string - }{ - { - block: nil, wantErr: "non-nil Block", - }, - { - block: &types.Block{}, wantErr: "nil Header", - }, - { - block: &types.Block{Header: new(types.Header)}, - }, - - // Start Header.Height mismatch test - { - block: &types.Block{Header: &types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", - }, - - { - block: &types.Block{Header: &types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - }, - // End Header.Height mismatch test - - // Start Header.Hash mismatch test - { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", - }, - - { - block: &types.Block{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, - }, - // End Header.Hash mismatch test - - // Start Header.Data hash mismatch test - { - block: &types.Block{ - Header: &types.Header{Height: 11}, - Data: &types.Data{Txs: []types.Tx{[]byte("0xDE"), []byte("AD")}}, - }, - commit: lite.Commit{ - Header: &types.Header{Height: 11}, - Commit: &types.Commit{BlockID: 
types.BlockID{Hash: []byte("0xDEADBEEF")}}, - }, - wantErr: "Data hash doesn't match header", - }, - { - block: &types.Block{ - Header: &types.Header{Height: 11, DataHash: deadBeefHash}, - Data: &types.Data{Txs: deadBeefTxs}, - }, - commit: lite.Commit{ - Header: &types.Header{Height: 11}, - Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, - }, - }, - // End Header.Data hash mismatch test - } - - for i, tt := range tests { - err := proxy.ValidateBlock(tt.block, tt.commit) - if tt.wantErr != "" { - if err == nil { - assert.FailNowf(t, "Unexpectedly passed", "#%d", i) - } else { - assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i) - } - continue - } - - assert.Nil(t, err, "#%d: expecting a nil error", i) - } -} - -func TestValidateBlockMeta(t *testing.T) { - tests := []struct { - meta *types.BlockMeta - commit lite.Commit - wantErr string - }{ - { - meta: nil, wantErr: "non-nil BlockMeta", - }, - { - meta: &types.BlockMeta{}, wantErr: "non-nil Header", - }, - { - meta: &types.BlockMeta{Header: new(types.Header)}, - }, - - // Start Header.Height mismatch test - { - meta: &types.BlockMeta{Header: &types.Header{Height: 10}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "don't match - 10 vs 11", - }, - - { - meta: &types.BlockMeta{Header: &types.Header{Height: 11}}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - }, - // End Header.Height mismatch test - - // Start Headers don't match test - { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: &types.Header{Height: 11}}, - wantErr: "Headers don't match", - }, - - { - meta: &types.BlockMeta{Header: hdrHeight11}, - commit: lite.Commit{Header: hdrHeight11}, - }, - - { - meta: &types.BlockMeta{ - Header: &types.Header{ - Height: 11, - ValidatorsHash: []byte("lite-test"), - // TODO: should be able to use empty time after Amino upgrade - Time: testTime1, - }, - }, - commit: lite.Commit{ - Header: &types.Header{Height: 11, DataHash: deadBeefHash}, - }, - wantErr: "Headers don't match", - }, - - { - meta: &types.BlockMeta{ - Header: &types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime1, - }, - }, - commit: lite.Commit{ - Header: &types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, - }, - Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, - }, - wantErr: "Headers don't match", - }, - - { - meta: &types.BlockMeta{ - Header: &types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, - }, - }, - commit: lite.Commit{ - Header: &types.Header{ - Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint-x"), - Time: testTime2, - }, - Commit: &types.Commit{BlockID: types.BlockID{Hash: []byte("DEADBEEF")}}, - }, - wantErr: "Headers don't match", - }, - // End Headers don't match test - } - - for i, tt := range tests { - err := proxy.ValidateBlockMeta(tt.meta, tt.commit) - if tt.wantErr != "" { - if err == nil { - assert.FailNowf(t, "Unexpectedly passed", "#%d: wanted error %q", i, tt.wantErr) - } else { - assert.Contains(t, err.Error(), tt.wantErr, "#%d should contain the substring\n\n", i) - } - continue - } - - assert.Nil(t, err, "#%d: expecting a nil error", i) - } -} diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go deleted file mode 100644 index 5fb12a40..00000000 --- a/lite/proxy/wrapper.go +++ /dev/null @@ -1,187 +0,0 @@ 
-package proxy
-
-import (
- cmn "github.com/tendermint/tmlibs/common"
-
- "github.com/tendermint/tendermint/lite"
- certclient "github.com/tendermint/tendermint/lite/client"
- rpcclient "github.com/tendermint/tendermint/rpc/client"
- ctypes "github.com/tendermint/tendermint/rpc/core/types"
-)
-
-var _ rpcclient.Client = Wrapper{}
-
-// Wrapper wraps an rpcclient with a Certifier and double-checks any input that is
-// provable before passing it along. Allows you to make any rpcclient fully secure.
-type Wrapper struct {
- rpcclient.Client
- cert *lite.InquiringCertifier
-}
-
-// SecureClient uses a given certifier to wrap a connection to an untrusted
-// host and return a cryptographically secure rpc client.
-//
-// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface.
-func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper {
- wrap := Wrapper{c, cert}
- // TODO: no longer possible as no more such interface exposed....
- // if we wrap http client, then we can swap out the event switch to filter
- // if hc, ok := c.(*rpcclient.HTTP); ok {
- //   evt := hc.WSEvents.EventSwitch
- //   hc.WSEvents.EventSwitch = WrappedSwitch{evt, wrap}
- // }
- return wrap
-}
-
-// ABCIQueryWithOptions exposes all options for the ABCI query and verifies the returned proof
-func (w Wrapper) ABCIQueryWithOptions(path string, data cmn.HexBytes,
- opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
-
- res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert)
- return res, err
-}
-
-// ABCIQuery uses default options for the ABCI query and verifies the returned proof
-func (w Wrapper) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
- return w.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions)
-}
-
-// Tx queries for a given tx and verifies the proof if it was requested
-func (w Wrapper) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) {
- res, err := w.Client.Tx(hash, prove)
- if !prove || err != nil {
- return res, err
- }
- h := int64(res.Height)
- check, err := GetCertifiedCommit(h, w.Client, w.cert)
- if err != nil {
- return res, err
- }
- err = res.Proof.Validate(check.Header.DataHash)
- return res, err
-}
-
-// BlockchainInfo requests a list of headers and verifies them all...
-// Rather expensive.
-//
-// TODO: optimize this if used for anything needing performance
-func (w Wrapper) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) {
- r, err := w.Client.BlockchainInfo(minHeight, maxHeight)
- if err != nil {
- return nil, err
- }
-
- // go and verify every blockmeta in the result....
- for _, meta := range r.BlockMetas { - // get a checkpoint to verify from - c, err := w.Commit(&meta.Header.Height) - if err != nil { - return nil, err - } - check := certclient.CommitFromResult(c) - err = ValidateBlockMeta(meta, check) - if err != nil { - return nil, err - } - } - - return r, nil -} - -// Block returns an entire block and verifies all signatures -func (w Wrapper) Block(height *int64) (*ctypes.ResultBlock, error) { - r, err := w.Client.Block(height) - if err != nil { - return nil, err - } - // get a checkpoint to verify from - c, err := w.Commit(height) - if err != nil { - return nil, err - } - check := certclient.CommitFromResult(c) - - // now verify - err = ValidateBlockMeta(r.BlockMeta, check) - if err != nil { - return nil, err - } - err = ValidateBlock(r.Block, check) - if err != nil { - return nil, err - } - return r, nil -} - -// Commit downloads the Commit and certifies it with the lite. -// -// This is the foundation for all other verification in this module -func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { - rpcclient.WaitForHeight(w.Client, *height, nil) - r, err := w.Client.Commit(height) - // if we got it, then certify it - if err == nil { - check := certclient.CommitFromResult(r) - err = w.cert.Certify(check) - } - return r, err -} - -// // WrappedSwitch creates a websocket connection that auto-verifies any info -// // coming through before passing it along. -// // -// // Since the verification takes 1-2 rpc calls, this is obviously only for -// // relatively low-throughput situations that can tolerate a bit extra latency -// type WrappedSwitch struct { -// types.EventSwitch -// client rpcclient.Client -// } - -// // FireEvent verifies any block or header returned from the eventswitch -// func (s WrappedSwitch) FireEvent(event string, data events.EventData) { -// tm, ok := data.(types.TMEventData) -// if !ok { -// fmt.Printf("bad type %#v\n", data) -// return -// } - -// // check to validate it if possible, and drop if not valid -// switch t := tm.(type) { -// case types.EventDataNewBlockHeader: -// err := verifyHeader(s.client, t.Header) -// if err != nil { -// fmt.Printf("Invalid header: %#v\n", err) -// return -// } -// case types.EventDataNewBlock: -// err := verifyBlock(s.client, t.Block) -// if err != nil { -// fmt.Printf("Invalid block: %#v\n", err) -// return -// } -// // TODO: can we verify tx as well? 
anything else
-// }

-// // looks good, we fire it
-// s.EventSwitch.FireEvent(event, data)
-// }

-// func verifyHeader(c rpcclient.Client, head *types.Header) error {
-// // get a checkpoint to verify from
-// commit, err := c.Commit(&head.Height)
-// if err != nil {
-// return err
-// }
-// check := certclient.CommitFromResult(commit)
-// return ValidateHeader(head, check)
-// }
-//
-// func verifyBlock(c rpcclient.Client, block *types.Block) error {
-// // get a checkpoint to verify from
-// commit, err := c.Commit(&block.Height)
-// if err != nil {
-// return err
-// }
-// check := certclient.CommitFromResult(commit)
-// return ValidateBlock(block, check)
-// }
diff --git a/lite/static_certifier.go b/lite/static_certifier.go
deleted file mode 100644
index 1ec3b809..00000000
--- a/lite/static_certifier.go
+++ /dev/null
@@ -1,73 +0,0 @@
-package lite
-
-import (
- "bytes"
-
- "github.com/pkg/errors"
-
- "github.com/tendermint/tendermint/types"
-
- liteErr "github.com/tendermint/tendermint/lite/errors"
-)
-
-var _ Certifier = (*StaticCertifier)(nil)
-
-// StaticCertifier assumes a static set of validators, set on
-// initialization, and checks against them.
-// The signatures on every header are checked for > 2/3 votes
-// against the known validator set upon Certify.
-//
-// Good for testing or really simple chains. A building block
-// to support real-world functionality.
-type StaticCertifier struct {
- chainID string
- vSet *types.ValidatorSet
- vhash []byte
-}
-
-// NewStaticCertifier returns a new certifier with a static validator set.
-func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier {
- return &StaticCertifier{
- chainID: chainID,
- vSet: vals,
- }
-}
-
-// ChainID returns the chain id.
-// Implements Certifier.
-func (sc *StaticCertifier) ChainID() string {
- return sc.chainID
-}
-
-// Validators returns the validator set.
-func (sc *StaticCertifier) Validators() *types.ValidatorSet {
- return sc.vSet
-}
-
-// Hash returns the hash of the validator set.
-func (sc *StaticCertifier) Hash() []byte {
- if len(sc.vhash) == 0 {
- sc.vhash = sc.vSet.Hash()
- }
- return sc.vhash
-}
-
-// Certify makes sure that the commit is valid.
-// Implements Certifier.
-func (sc *StaticCertifier) Certify(commit Commit) error {
- // do basic sanity checks
- err := commit.ValidateBasic(sc.chainID)
- if err != nil {
- return err
- }
-
- // make sure it has the same validator set we have (static means static)
- if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) {
- return liteErr.ErrValidatorsChanged()
- }
-
- // then make sure we have the proper signatures for this
- err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID,
- commit.Header.Height, commit.Commit)
- return errors.WithStack(err)
-}
diff --git a/lite/static_certifier_test.go b/lite/static_certifier_test.go
deleted file mode 100644
index 03567daa..00000000
--- a/lite/static_certifier_test.go
+++ /dev/null
@@ -1,59 +0,0 @@
-package lite_test
-
-import (
- "testing"
-
- "github.com/stretchr/testify/assert"
-
- "github.com/tendermint/tendermint/types"
-
- "github.com/tendermint/tendermint/lite"
- liteErr "github.com/tendermint/tendermint/lite/errors"
-)
-
-func TestStaticCert(t *testing.T) {
- // assert, require := assert.New(t), require.New(t)
- assert := assert.New(t)
- // require := require.New(t)
-
- keys := lite.GenValKeys(4)
- // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do!
- vals := keys.ToValidators(20, 10)
- // and a certifier based on our known set
- chainID := "test-static"
- cert := lite.NewStaticCertifier(chainID, vals)
-
- cases := []struct {
- keys lite.ValKeys
- vals *types.ValidatorSet
- height int64
- first, last int // who actually signs
- proper bool // true -> expect no error
- changed bool // true -> expect validator change error
- }{
- // perfect, signed by everyone
- {keys, vals, 1, 0, len(keys), true, false},
- // skip little guy is okay
- {keys, vals, 2, 1, len(keys), true, false},
- // but not the big guy
- {keys, vals, 3, 0, len(keys) - 1, false, false},
- // even changing the power a little bit breaks the static validator:
- // the sigs are enough, but the validator hash is unknown
- {keys, keys.ToValidators(20, 11), 4, 0, len(keys), false, true},
- }
-
- for _, tc := range cases {
- check := tc.keys.GenCommit(chainID, tc.height, nil, tc.vals,
- []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last)
- err := cert.Certify(check)
- if tc.proper {
- assert.Nil(err, "%+v", err)
- } else {
- assert.NotNil(err)
- if tc.changed {
- assert.True(liteErr.IsValidatorsChangedErr(err), "%+v", err)
- }
- }
- }
-
-}
diff --git a/mempool/mempool.go b/mempool/mempool.go
deleted file mode 100644
index 418470a7..00000000
--- a/mempool/mempool.go
+++ /dev/null
@@ -1,524 +0,0 @@
-package mempool
-
-import (
- "bytes"
- "container/list"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/pkg/errors"
-
- abci "github.com/tendermint/abci/types"
- auto "github.com/tendermint/tmlibs/autofile"
- "github.com/tendermint/tmlibs/clist"
- cmn "github.com/tendermint/tmlibs/common"
- "github.com/tendermint/tmlibs/log"
-
- cfg "github.com/tendermint/tendermint/config"
- "github.com/tendermint/tendermint/proxy"
- "github.com/tendermint/tendermint/types"
-)
-
-/*

-The mempool pushes new txs onto the proxyAppConn.
-It gets a stream of (req, res) tuples from the proxy.
-The mempool stores good txs in a concurrent linked-list.
-
-Multiple concurrent go-routines can traverse this linked-list
-safely by calling .NextWait() on each element.
-
-So we have several go-routines:
-1. Consensus calling Update() and Reap() synchronously
-2. Many mempool reactor's peer routines calling CheckTx()
-3. Many mempool reactor's peer routines traversing the txs linked list
-4. Another goroutine calling GarbageCollectTxs() periodically
-
-To manage these goroutines, there are three methods of locking.
-1. Mutations to the linked-list are protected by an internal mtx (CList is goroutine-safe)
-2. Mutations to the linked-list elements are atomic
-3. CheckTx() calls can be paused upon Update() and Reap(), protected by .proxyMtx
-
-Garbage collection of old elements from mempool.txs is handled via
-the DetachPrev() call, which makes old elements unreachable by the
-peer broadcastTxRoutine(), so they are garbage collected automatically.
-
-TODO: Better handle abci client errors. (make it automatically handle connection errors)
-
-*/
-
-var (
- // ErrTxInCache is returned to the client if we saw tx earlier
- ErrTxInCache = errors.New("Tx already exists in cache")
-
- // ErrMempoolIsFull means Tendermint & an application can't handle that much load
- ErrMempoolIsFull = errors.New("Mempool is full")
-)
-
-// Mempool is an ordered in-memory pool for transactions before they are proposed in a consensus
-// round. Transaction validity is checked using the CheckTx abci message before the transaction is
-// added to the pool. 
The Mempool uses a concurrent list structure for storing transactions that -// can be efficiently accessed by multiple concurrent readers. -type Mempool struct { - config *cfg.MempoolConfig - - proxyMtx sync.Mutex - proxyAppConn proxy.AppConnMempool - txs *clist.CList // concurrent linked-list of good txs - counter int64 // simple incrementing counter - height int64 // the last block Update()'d to - rechecking int32 // for re-checking filtered txs on Update() - recheckCursor *clist.CElement // next expected response - recheckEnd *clist.CElement // re-checking stops here - notifiedTxsAvailable bool - txsAvailable chan int64 // fires the next height once for each height, when the mempool is not empty - - // Keep a cache of already-seen txs. - // This reduces the pressure on the proxyApp. - cache *txCache - - // A log of mempool txs - wal *auto.AutoFile - - logger log.Logger - - metrics *Metrics -} - -// MempoolOption sets an optional parameter on the Mempool. -type MempoolOption func(*Mempool) - -// NewMempool returns a new Mempool with the given configuration and connection to an application. -func NewMempool( - config *cfg.MempoolConfig, - proxyAppConn proxy.AppConnMempool, - height int64, - options ...MempoolOption, -) *Mempool { - mempool := &Mempool{ - config: config, - proxyAppConn: proxyAppConn, - txs: clist.New(), - counter: 0, - height: height, - rechecking: 0, - recheckCursor: nil, - recheckEnd: nil, - logger: log.NewNopLogger(), - cache: newTxCache(config.CacheSize), - metrics: NopMetrics(), - } - proxyAppConn.SetResponseCallback(mempool.resCb) - for _, option := range options { - option(mempool) - } - return mempool -} - -// EnableTxsAvailable initializes the TxsAvailable channel, -// ensuring it will trigger once every height when transactions are available. -// NOTE: not thread safe - should only be called once, on startup -func (mem *Mempool) EnableTxsAvailable() { - mem.txsAvailable = make(chan int64, 1) -} - -// SetLogger sets the Logger. -func (mem *Mempool) SetLogger(l log.Logger) { - mem.logger = l -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) MempoolOption { - return func(mem *Mempool) { mem.metrics = metrics } -} - -// CloseWAL closes and discards the underlying WAL file. -// Any further writes will not be relayed to disk. -func (mem *Mempool) CloseWAL() bool { - if mem == nil { - return false - } - - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - if mem.wal == nil { - return false - } - if err := mem.wal.Close(); err != nil && mem.logger != nil { - mem.logger.Error("Mempool.CloseWAL", "err", err) - } - mem.wal = nil - return true -} - -func (mem *Mempool) InitWAL() { - walDir := mem.config.WalDir() - if walDir != "" { - err := cmn.EnsureDir(walDir, 0700) - if err != nil { - cmn.PanicSanity(errors.Wrap(err, "Error ensuring Mempool wal dir")) - } - af, err := auto.OpenAutoFile(walDir + "/wal") - if err != nil { - cmn.PanicSanity(errors.Wrap(err, "Error opening Mempool wal file")) - } - mem.wal = af - } -} - -// Lock locks the mempool. The consensus must be able to hold lock to safely update. -func (mem *Mempool) Lock() { - mem.proxyMtx.Lock() -} - -// Unlock unlocks the mempool. -func (mem *Mempool) Unlock() { - mem.proxyMtx.Unlock() -} - -// Size returns the number of transactions in the mempool. -func (mem *Mempool) Size() int { - return mem.txs.Len() -} - -// Flushes the mempool connection to ensure async resCb calls are done e.g. -// from CheckTx. 
-func (mem *Mempool) FlushAppConn() error { - return mem.proxyAppConn.FlushSync() -} - -// Flush removes all transactions from the mempool and cache -func (mem *Mempool) Flush() { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - mem.cache.Reset() - - for e := mem.txs.Front(); e != nil; e = e.Next() { - mem.txs.Remove(e) - e.DetachPrev() - } -} - -// TxsFront returns the first transaction in the ordered list for peer -// goroutines to call .NextWait() on. -func (mem *Mempool) TxsFront() *clist.CElement { - return mem.txs.Front() -} - -// TxsWaitChan returns a channel to wait on transactions. It will be closed -// once the mempool is not empty (ie. the internal `mem.txs` has at least one -// element) -func (mem *Mempool) TxsWaitChan() <-chan struct{} { - return mem.txs.WaitChan() -} - -// CheckTx executes a new transaction against the application to determine its validity -// and whether it should be added to the mempool. -// It blocks if we're waiting on Update() or Reap(). -// cb: A callback from the CheckTx command. -// It gets called from another goroutine. -// CONTRACT: Either cb will get called, or err returned. -func (mem *Mempool) CheckTx(tx types.Tx, cb func(*abci.Response)) (err error) { - mem.proxyMtx.Lock() - defer mem.proxyMtx.Unlock() - - if mem.Size() >= mem.config.Size { - return ErrMempoolIsFull - } - - // CACHE - if !mem.cache.Push(tx) { - return ErrTxInCache - } - // END CACHE - - // WAL - if mem.wal != nil { - // TODO: Notify administrators when WAL fails - _, err := mem.wal.Write([]byte(tx)) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - _, err = mem.wal.Write([]byte("\n")) - if err != nil { - mem.logger.Error("Error writing to WAL", "err", err) - } - } - // END WAL - - // NOTE: proxyAppConn may error if tx buffer is full - if err = mem.proxyAppConn.Error(); err != nil { - return err - } - reqRes := mem.proxyAppConn.CheckTxAsync(tx) - if cb != nil { - reqRes.SetCallback(cb) - } - - return nil -} - -// ABCI callback function -func (mem *Mempool) resCb(req *abci.Request, res *abci.Response) { - if mem.recheckCursor == nil { - mem.resCbNormal(req, res) - } else { - mem.resCbRecheck(req, res) - } - mem.metrics.Size.Set(float64(mem.Size())) -} - -func (mem *Mempool) resCbNormal(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - tx := req.GetCheckTx().Tx - if r.CheckTx.Code == abci.CodeTypeOK { - mem.counter++ - memTx := &mempoolTx{ - counter: mem.counter, - height: mem.height, - tx: tx, - } - mem.txs.PushBack(memTx) - mem.logger.Info("Added good transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) - mem.notifyTxsAvailable() - } else { - // ignore bad transaction - mem.logger.Info("Rejected bad transaction", "tx", fmt.Sprintf("%X", types.Tx(tx).Hash()), "res", r) - - // remove from cache (it might be good later) - mem.cache.Remove(tx) - } - default: - // ignore other messages - } -} - -func (mem *Mempool) resCbRecheck(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_CheckTx: - memTx := mem.recheckCursor.Value.(*mempoolTx) - if !bytes.Equal(req.GetCheckTx().Tx, memTx.tx) { - cmn.PanicSanity(cmn.Fmt("Unexpected tx response from proxy during recheck\n"+ - "Expected %X, got %X", r.CheckTx.Data, memTx.tx)) - } - if r.CheckTx.Code == abci.CodeTypeOK { - // Good, nothing to do. - } else { - // Tx became invalidated due to newly committed block. 
- mem.txs.Remove(mem.recheckCursor)
- mem.recheckCursor.DetachPrev()
-
- // remove from cache (it might be good later)
- mem.cache.Remove(req.GetCheckTx().Tx)
- }
- if mem.recheckCursor == mem.recheckEnd {
- mem.recheckCursor = nil
- } else {
- mem.recheckCursor = mem.recheckCursor.Next()
- }
- if mem.recheckCursor == nil {
- // Done!
- atomic.StoreInt32(&mem.rechecking, 0)
- mem.logger.Info("Done rechecking txs")
-
- // in case the recheck removed all txs
- if mem.Size() > 0 {
- mem.notifyTxsAvailable()
- }
- }
- default:
- // ignore other messages
- }
-}
-
-// TxsAvailable returns a channel which fires once for every height,
-// and only when transactions are available in the mempool.
-// NOTE: the returned channel may be nil if EnableTxsAvailable was not called.
-func (mem *Mempool) TxsAvailable() <-chan int64 {
- return mem.txsAvailable
-}
-
-func (mem *Mempool) notifyTxsAvailable() {
- if mem.Size() == 0 {
- panic("notified txs available but mempool is empty!")
- }
- if mem.txsAvailable != nil && !mem.notifiedTxsAvailable {
- select {
- case mem.txsAvailable <- mem.height + 1:
- default:
- }
-
- mem.notifiedTxsAvailable = true
- }
-}
-
-// Reap returns a list of transactions currently in the mempool.
-// If maxTxs is -1, there is no cap on the number of returned transactions.
-func (mem *Mempool) Reap(maxTxs int) types.Txs {
- mem.proxyMtx.Lock()
- defer mem.proxyMtx.Unlock()
-
- for atomic.LoadInt32(&mem.rechecking) > 0 {
- // TODO: Something better?
- time.Sleep(time.Millisecond * 10)
- }
-
- txs := mem.collectTxs(maxTxs)
- return txs
-}
-
-// maxTxs: -1 means uncapped, 0 means none
-func (mem *Mempool) collectTxs(maxTxs int) types.Txs {
- if maxTxs == 0 {
- return []types.Tx{}
- } else if maxTxs < 0 {
- maxTxs = mem.txs.Len()
- }
- txs := make([]types.Tx, 0, cmn.MinInt(mem.txs.Len(), maxTxs))
- for e := mem.txs.Front(); e != nil && len(txs) < maxTxs; e = e.Next() {
- memTx := e.Value.(*mempoolTx)
- txs = append(txs, memTx.tx)
- }
- return txs
-}
-
-// Update informs the mempool that the given txs were committed and can be discarded.
-// NOTE: this should be called *after* block is committed by consensus.
-// NOTE: unsafe; Lock/Unlock must be managed by caller
-func (mem *Mempool) Update(height int64, txs types.Txs) error {
- // First, create a lookup map of txns in new txs.
- txsMap := make(map[string]struct{})
- for _, tx := range txs {
- txsMap[string(tx)] = struct{}{}
- }
-
- // Set height
- mem.height = height
- mem.notifiedTxsAvailable = false
-
- // Remove transactions that are already in txs.
- goodTxs := mem.filterTxs(txsMap)
- // Recheck mempool txs if any txs were committed in the block
- // NOTE/XXX: in some apps a tx could be invalidated due to EndBlock,
- // so we really still do need to recheck, but this is for debugging
- if mem.config.Recheck && (mem.config.RecheckEmpty || len(goodTxs) > 0) {
- mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
- mem.recheckTxs(goodTxs)
- // At this point, mem.txs are being rechecked.
- // mem.recheckCursor re-scans mem.txs and possibly removes some txs.
- // Before mem.Reap(), we should wait for mem.recheckCursor to be nil.
- }
- mem.metrics.Size.Set(float64(mem.Size()))
- return nil
-}
-
-func (mem *Mempool) filterTxs(blockTxsMap map[string]struct{}) []types.Tx {
- goodTxs := make([]types.Tx, 0, mem.txs.Len())
- for e := mem.txs.Front(); e != nil; e = e.Next() {
- memTx := e.Value.(*mempoolTx)
- // Remove the tx if it's already in a block.
- if _, ok := blockTxsMap[string(memTx.tx)]; ok { - // remove from clist - mem.txs.Remove(e) - e.DetachPrev() - - // NOTE: we don't remove committed txs from the cache. - continue - } - // Good tx! - goodTxs = append(goodTxs, memTx.tx) - } - return goodTxs -} - -// NOTE: pass in goodTxs because mem.txs can mutate concurrently. -func (mem *Mempool) recheckTxs(goodTxs []types.Tx) { - if len(goodTxs) == 0 { - return - } - atomic.StoreInt32(&mem.rechecking, 1) - mem.recheckCursor = mem.txs.Front() - mem.recheckEnd = mem.txs.Back() - - // Push txs to proxyAppConn - // NOTE: resCb() may be called concurrently. - for _, tx := range goodTxs { - mem.proxyAppConn.CheckTxAsync(tx) - } - mem.proxyAppConn.FlushAsync() -} - -//-------------------------------------------------------------------------------- - -// mempoolTx is a transaction that successfully ran -type mempoolTx struct { - counter int64 // a simple incrementing counter - height int64 // height that this tx had been validated in - tx types.Tx // -} - -// Height returns the height for this transaction -func (memTx *mempoolTx) Height() int64 { - return atomic.LoadInt64(&memTx.height) -} - -//-------------------------------------------------------------------------------- - -// txCache maintains a cache of transactions. -type txCache struct { - mtx sync.Mutex - size int - map_ map[string]struct{} - list *list.List // to remove oldest tx when cache gets too big -} - -// newTxCache returns a new txCache. -func newTxCache(cacheSize int) *txCache { - return &txCache{ - size: cacheSize, - map_: make(map[string]struct{}, cacheSize), - list: list.New(), - } -} - -// Reset resets the txCache to empty. -func (cache *txCache) Reset() { - cache.mtx.Lock() - cache.map_ = make(map[string]struct{}, cache.size) - cache.list.Init() - cache.mtx.Unlock() -} - -// Push adds the given tx to the txCache. It returns false if tx is already in the cache. -func (cache *txCache) Push(tx types.Tx) bool { - cache.mtx.Lock() - defer cache.mtx.Unlock() - - if _, exists := cache.map_[string(tx)]; exists { - return false - } - - if cache.list.Len() >= cache.size { - popped := cache.list.Front() - poppedTx := popped.Value.(types.Tx) - // NOTE: the tx may have already been removed from the map - // but deleting a non-existent element is fine - delete(cache.map_, string(poppedTx)) - cache.list.Remove(popped) - } - cache.map_[string(tx)] = struct{}{} - cache.list.PushBack(tx) - return true -} - -// Remove removes the given tx from the cache. 
-func (cache *txCache) Remove(tx types.Tx) {
-	cache.mtx.Lock()
-	delete(cache.map_, string(tx))
-	cache.mtx.Unlock()
-}
diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go
deleted file mode 100644
index a67adf6d..00000000
--- a/mempool/mempool_test.go
+++ /dev/null
@@ -1,286 +0,0 @@
-package mempool
-
-import (
-	"crypto/md5"
-	"crypto/rand"
-	"encoding/binary"
-	"fmt"
-	"io/ioutil"
-	"os"
-	"path/filepath"
-	"testing"
-	"time"
-
-	"github.com/tendermint/abci/example/counter"
-	"github.com/tendermint/abci/example/kvstore"
-	abci "github.com/tendermint/abci/types"
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
-
-	cfg "github.com/tendermint/tendermint/config"
-	"github.com/tendermint/tendermint/proxy"
-	"github.com/tendermint/tendermint/types"
-
-	"github.com/stretchr/testify/require"
-)
-
-func newMempoolWithApp(cc proxy.ClientCreator) *Mempool {
-	config := cfg.ResetTestRoot("mempool_test")
-
-	appConnMem, _ := cc.NewABCIClient()
-	appConnMem.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "mempool"))
-	err := appConnMem.Start()
-	if err != nil {
-		panic(err)
-	}
-	mempool := NewMempool(config.Mempool, appConnMem, 0)
-	mempool.SetLogger(log.TestingLogger())
-	return mempool
-}
-
-func ensureNoFire(t *testing.T, ch <-chan int64, timeoutMS int) {
-	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
-	select {
-	case <-ch:
-		t.Fatal("Expected not to fire")
-	case <-timer.C:
-	}
-}
-
-func ensureFire(t *testing.T, ch <-chan int64, timeoutMS int) {
-	timer := time.NewTimer(time.Duration(timeoutMS) * time.Millisecond)
-	select {
-	case <-ch:
-	case <-timer.C:
-		t.Fatal("Expected to fire")
-	}
-}
-
-func checkTxs(t *testing.T, mempool *Mempool, count int) types.Txs {
-	txs := make(types.Txs, count)
-	for i := 0; i < count; i++ {
-		txBytes := make([]byte, 20)
-		txs[i] = txBytes
-		_, err := rand.Read(txBytes)
-		if err != nil {
-			t.Error(err)
-		}
-		if err := mempool.CheckTx(txBytes, nil); err != nil {
-			t.Fatalf("Error after CheckTx: %v", err)
-		}
-	}
-	return txs
-}
-
-func TestTxsAvailable(t *testing.T) {
-	app := kvstore.NewKVStoreApplication()
-	cc := proxy.NewLocalClientCreator(app)
-	mempool := newMempoolWithApp(cc)
-	mempool.EnableTxsAvailable()
-
-	timeoutMS := 500
-
-	// with no txs, it shouldn't fire
-	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
-
-	// send a bunch of txs, it should only fire once
-	txs := checkTxs(t, mempool, 100)
-	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
-	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
-
-	// call update with half the txs.
-	// it should fire once now for the new height
-	// since there are still txs left
-	committedTxs, txs := txs[:50], txs[50:]
-	if err := mempool.Update(1, committedTxs); err != nil {
-		t.Error(err)
-	}
-	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
-	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
-
-	// send a bunch more txs. we already fired for this height so it shouldn't fire again
-	moreTxs := checkTxs(t, mempool, 50)
-	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
-
-	// now call update with all the txs. it should not fire as there are no txs left
-	committedTxs = append(txs, moreTxs...)
-	if err := mempool.Update(2, committedTxs); err != nil {
-		t.Error(err)
-	}
-	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
-
-	// send a bunch more txs, it should only fire once
-	checkTxs(t, mempool, 100)
-	ensureFire(t, mempool.TxsAvailable(), timeoutMS)
-	ensureNoFire(t, mempool.TxsAvailable(), timeoutMS)
-}
-
-func TestSerialReap(t *testing.T) {
-	app := counter.NewCounterApplication(true)
-	app.SetOption(abci.RequestSetOption{"serial", "on"})
-	cc := proxy.NewLocalClientCreator(app)
-
-	mempool := newMempoolWithApp(cc)
-	appConnCon, _ := cc.NewABCIClient()
-	appConnCon.SetLogger(log.TestingLogger().With("module", "abci-client", "connection", "consensus"))
-	err := appConnCon.Start()
-	require.Nil(t, err)
-
-	cacheMap := make(map[string]struct{})
-	deliverTxsRange := func(start, end int) {
-		// Deliver some txs.
-		for i := start; i < end; i++ {
-
-			// This will succeed
-			txBytes := make([]byte, 8)
-			binary.BigEndian.PutUint64(txBytes, uint64(i))
-			err := mempool.CheckTx(txBytes, nil)
-			_, cached := cacheMap[string(txBytes)]
-			if cached {
-				require.NotNil(t, err, "expected error for cached tx")
-			} else {
-				require.Nil(t, err, "expected no err for uncached tx")
-			}
-			cacheMap[string(txBytes)] = struct{}{}
-
-			// Duplicates are cached and should return an error
-			err = mempool.CheckTx(txBytes, nil)
-			require.NotNil(t, err, "Expected error after CheckTx on duplicated tx")
-		}
-	}
-
-	reapCheck := func(exp int) {
-		txs := mempool.Reap(-1)
-		require.Equal(t, len(txs), exp, cmn.Fmt("Expected to reap %v txs but got %v", exp, len(txs)))
-	}
-
-	updateRange := func(start, end int) {
-		txs := make([]types.Tx, 0)
-		for i := start; i < end; i++ {
-			txBytes := make([]byte, 8)
-			binary.BigEndian.PutUint64(txBytes, uint64(i))
-			txs = append(txs, txBytes)
-		}
-		if err := mempool.Update(0, txs); err != nil {
-			t.Error(err)
-		}
-	}
-
-	commitRange := func(start, end int) {
-		// Deliver some txs.
-		for i := start; i < end; i++ {
-			txBytes := make([]byte, 8)
-			binary.BigEndian.PutUint64(txBytes, uint64(i))
-			res, err := appConnCon.DeliverTxSync(txBytes)
-			if err != nil {
-				t.Errorf("Client error committing tx: %v", err)
-			}
-			if res.IsErr() {
-				t.Errorf("Error committing tx. Code:%v result:%X log:%v",
-					res.Code, res.Data, res.Log)
-			}
-		}
-		res, err := appConnCon.CommitSync()
-		if err != nil {
-			t.Errorf("Client error committing: %v", err)
-		}
-		if len(res.Data) != 8 {
-			t.Errorf("Error committing. Hash:%X", res.Data)
-		}
-	}
-
-	//----------------------------------------
-
-	// Deliver some txs.
-	deliverTxsRange(0, 100)
-
-	// Reap the txs.
-	reapCheck(100)
-
-	// Reap again. We should get the same amount.
-	reapCheck(100)
-
-	// Deliver 0 to 999, we should reap 900 new txs
-	// because 100 were already counted.
-	deliverTxsRange(0, 1000)
-
-	// Reap the txs.
-	reapCheck(1000)
-
-	// Reap again. We should get the same amount.
-	reapCheck(1000)
-
-	// Commit from the consensus AppConn
-	commitRange(0, 500)
-	updateRange(0, 500)
-
-	// We should have 500 left.
-	reapCheck(500)
-
-	// Deliver 100 invalid txs and 100 valid txs
-	deliverTxsRange(900, 1100)
-
-	// We should have 600 now.
-	reapCheck(600)
-}
-
-func TestMempoolCloseWAL(t *testing.T) {
-	// 1. Create the temporary directory for mempool and WAL testing.
-	rootDir, err := ioutil.TempDir("", "mempool-test")
-	require.Nil(t, err, "expecting successful tmpdir creation")
-	defer os.RemoveAll(rootDir)
-
-	// 2. Ensure that it doesn't contain any elements -- Sanity check
-	m1, err := filepath.Glob(filepath.Join(rootDir, "*"))
-	require.Nil(t, err, "successful globbing expected")
-	require.Equal(t, 0, len(m1), "no matches yet")
-
-	// 3. Create the mempool
-	wcfg := cfg.DefaultMempoolConfig()
-	wcfg.RootDir = rootDir
-	app := kvstore.NewKVStoreApplication()
-	cc := proxy.NewLocalClientCreator(app)
-	appConnMem, _ := cc.NewABCIClient()
-	mempool := NewMempool(wcfg, appConnMem, 10)
-	mempool.InitWAL()
-
-	// 4. Ensure that the directory contains the WAL file
-	m2, err := filepath.Glob(filepath.Join(rootDir, "*"))
-	require.Nil(t, err, "successful globbing expected")
-	require.Equal(t, 1, len(m2), "expecting the WAL file to match")
-
-	// 5. Write some contents to the WAL
-	mempool.CheckTx(types.Tx([]byte("foo")), nil)
-	walFilepath := mempool.wal.Path
-	sum1 := checksumFile(walFilepath, t)
-
-	// 6. Sanity check to ensure that the written TX matches the expectation.
-	require.Equal(t, sum1, checksumIt([]byte("foo\n")), "foo with a newline should be written")
-
-	// 7. Invoke CloseWAL() and ensure it discards the WAL,
-	// so any subsequent writes won't go through.
-	require.True(t, mempool.CloseWAL(), "CloseWAL should succeed the first time")
-	mempool.CheckTx(types.Tx([]byte("bar")), nil)
-	sum2 := checksumFile(walFilepath, t)
-	require.Equal(t, sum1, sum2, "expected no change to the WAL after invoking CloseWAL() since it was discarded")
-
-	// 8. Second CloseWAL should do nothing
-	require.False(t, mempool.CloseWAL(), "second CloseWAL should be a no-op")
-
-	// 9. Sanity check to ensure that the WAL file still exists
-	m3, err := filepath.Glob(filepath.Join(rootDir, "*"))
-	require.Nil(t, err, "successful globbing expected")
-	require.Equal(t, 1, len(m3), "expecting the WAL file to match")
-}
-
-func checksumIt(data []byte) string {
-	h := md5.New()
-	h.Write(data)
-	return fmt.Sprintf("%x", h.Sum(nil))
-}
-
-func checksumFile(p string, t *testing.T) string {
-	data, err := ioutil.ReadFile(p)
-	require.Nil(t, err, "expecting successful read of %q", p)
-	return checksumIt(data)
-}
diff --git a/mempool/metrics.go b/mempool/metrics.go
deleted file mode 100644
index f381678c..00000000
--- a/mempool/metrics.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package mempool
-
-import (
-	"github.com/go-kit/kit/metrics"
-	"github.com/go-kit/kit/metrics/discard"
-
-	prometheus "github.com/go-kit/kit/metrics/prometheus"
-	stdprometheus "github.com/prometheus/client_golang/prometheus"
-)
-
-// Metrics contains metrics exposed by this package.
-// See MetricsProvider for descriptions.
-type Metrics struct {
-	// Size of the mempool.
-	Size metrics.Gauge
-}
-
-// PrometheusMetrics returns Metrics built using the Prometheus client library.
-func PrometheusMetrics() *Metrics {
-	return &Metrics{
-		Size: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
-			Subsystem: "mempool",
-			Name:      "size",
-			Help:      "Size of the mempool (number of uncommitted transactions).",
-		}, []string{}),
-	}
-}
-
-// NopMetrics returns no-op Metrics.
-func NopMetrics() *Metrics { - return &Metrics{ - Size: discard.NewGauge(), - } -} diff --git a/mempool/reactor.go b/mempool/reactor.go deleted file mode 100644 index 54a3c32f..00000000 --- a/mempool/reactor.go +++ /dev/null @@ -1,188 +0,0 @@ -package mempool - -import ( - "fmt" - "reflect" - "time" - - abci "github.com/tendermint/abci/types" - "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/clist" - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/types" -) - -const ( - MempoolChannel = byte(0x30) - - maxMsgSize = 1048576 // 1MB TODO make it configurable - peerCatchupSleepIntervalMS = 100 // If peer is behind, sleep this amount -) - -// MempoolReactor handles mempool tx broadcasting amongst peers. -type MempoolReactor struct { - p2p.BaseReactor - config *cfg.MempoolConfig - Mempool *Mempool -} - -// NewMempoolReactor returns a new MempoolReactor with the given config and mempool. -func NewMempoolReactor(config *cfg.MempoolConfig, mempool *Mempool) *MempoolReactor { - memR := &MempoolReactor{ - config: config, - Mempool: mempool, - } - memR.BaseReactor = *p2p.NewBaseReactor("MempoolReactor", memR) - return memR -} - -// SetLogger sets the Logger on the reactor and the underlying Mempool. -func (memR *MempoolReactor) SetLogger(l log.Logger) { - memR.Logger = l - memR.Mempool.SetLogger(l) -} - -// GetChannels implements Reactor. -// It returns the list of channels for this reactor. -func (memR *MempoolReactor) GetChannels() []*p2p.ChannelDescriptor { - return []*p2p.ChannelDescriptor{ - { - ID: MempoolChannel, - Priority: 5, - }, - } -} - -// AddPeer implements Reactor. -// It starts a broadcast routine ensuring all txs are forwarded to the given peer. -func (memR *MempoolReactor) AddPeer(peer p2p.Peer) { - go memR.broadcastTxRoutine(peer) -} - -// RemovePeer implements Reactor. -func (memR *MempoolReactor) RemovePeer(peer p2p.Peer, reason interface{}) { - // broadcast routine checks if peer is gone and returns -} - -// Receive implements Reactor. -// It adds any received transactions to the mempool. -func (memR *MempoolReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { - msg, err := DecodeMessage(msgBytes) - if err != nil { - memR.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) - memR.Switch.StopPeerForError(src, err) - return - } - memR.Logger.Debug("Receive", "src", src, "chId", chID, "msg", msg) - - switch msg := msg.(type) { - case *TxMessage: - err := memR.Mempool.CheckTx(msg.Tx, nil) - if err != nil { - memR.Logger.Info("Could not check tx", "tx", msg.Tx, "err", err) - } - // broadcasting happens from go routines per peer - default: - memR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) - } -} - -// BroadcastTx is an alias for Mempool.CheckTx. Broadcasting itself happens in peer routines. -func (memR *MempoolReactor) BroadcastTx(tx types.Tx, cb func(*abci.Response)) error { - return memR.Mempool.CheckTx(tx, cb) -} - -// PeerState describes the state of a peer. -type PeerState interface { - GetHeight() int64 -} - -// Send new mempool txs to peer. -func (memR *MempoolReactor) broadcastTxRoutine(peer p2p.Peer) { - if !memR.config.Broadcast { - return - } - - var next *clist.CElement - for { - // This happens because the CElement we were looking at got garbage - // collected (removed). That is, .NextWait() returned nil. Go ahead and - // start from the beginning. 
- if next == nil { - select { - case <-memR.Mempool.TxsWaitChan(): // Wait until a tx is available - if next = memR.Mempool.TxsFront(); next == nil { - continue - } - case <-peer.Quit(): - return - case <-memR.Quit(): - return - } - } - - memTx := next.Value.(*mempoolTx) - // make sure the peer is up to date - height := memTx.Height() - if peerState_i := peer.Get(types.PeerStateKey); peerState_i != nil { - peerState := peerState_i.(PeerState) - if peerState.GetHeight() < height-1 { // Allow for a lag of 1 block - time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) - continue - } - } - // send memTx - msg := &TxMessage{Tx: memTx.tx} - success := peer.Send(MempoolChannel, cdc.MustMarshalBinaryBare(msg)) - if !success { - time.Sleep(peerCatchupSleepIntervalMS * time.Millisecond) - continue - } - - select { - case <-next.NextWaitChan(): - // see the start of the for loop for nil check - next = next.Next() - case <-peer.Quit(): - return - case <-memR.Quit(): - return - } - } -} - -//----------------------------------------------------------------------------- -// Messages - -// MempoolMessage is a message sent or received by the MempoolReactor. -type MempoolMessage interface{} - -func RegisterMempoolMessages(cdc *amino.Codec) { - cdc.RegisterInterface((*MempoolMessage)(nil), nil) - cdc.RegisterConcrete(&TxMessage{}, "tendermint/mempool/TxMessage", nil) -} - -// DecodeMessage decodes a byte-array into a MempoolMessage. -func DecodeMessage(bz []byte) (msg MempoolMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinaryBare(bz, &msg) - return -} - -//------------------------------------- - -// TxMessage is a MempoolMessage containing a transaction. -type TxMessage struct { - Tx types.Tx -} - -// String returns a string representation of the TxMessage. -func (m *TxMessage) String() string { - return fmt.Sprintf("[TxMessage %v]", m.Tx) -} diff --git a/mempool/reactor_test.go b/mempool/reactor_test.go deleted file mode 100644 index 0a6d0915..00000000 --- a/mempool/reactor_test.go +++ /dev/null @@ -1,157 +0,0 @@ -package mempool - -import ( - "fmt" - "sync" - "testing" - "time" - - "github.com/fortytw2/leaktest" - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - - "github.com/go-kit/kit/log/term" - - "github.com/tendermint/abci/example/kvstore" - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" -) - -// mempoolLogger is a TestingLogger which uses a different -// color for each validator ("validator" key must exist). 
-func mempoolLogger() log.Logger {
-	return log.TestingLoggerWithColorFn(func(keyvals ...interface{}) term.FgBgColor {
-		for i := 0; i < len(keyvals)-1; i += 2 {
-			if keyvals[i] == "validator" {
-				return term.FgBgColor{Fg: term.Color(uint8(keyvals[i+1].(int) + 1))}
-			}
-		}
-		return term.FgBgColor{}
-	})
-}
-
-// connect N mempool reactors through N switches
-func makeAndConnectMempoolReactors(config *cfg.Config, N int) []*MempoolReactor {
-	reactors := make([]*MempoolReactor, N)
-	logger := mempoolLogger()
-	for i := 0; i < N; i++ {
-		app := kvstore.NewKVStoreApplication()
-		cc := proxy.NewLocalClientCreator(app)
-		mempool := newMempoolWithApp(cc)
-
-		reactors[i] = NewMempoolReactor(config.Mempool, mempool) // so we don't start the consensus states
-		reactors[i].SetLogger(logger.With("validator", i))
-	}
-
-	p2p.MakeConnectedSwitches(config.P2P, N, func(i int, s *p2p.Switch) *p2p.Switch {
-		s.AddReactor("MEMPOOL", reactors[i])
-		return s
-
-	}, p2p.Connect2Switches)
-	return reactors
-}
-
-// wait for all txs on all reactors
-func waitForTxs(t *testing.T, txs types.Txs, reactors []*MempoolReactor) {
-	// wait for the txs in all mempools
-	wg := new(sync.WaitGroup)
-	for i := 0; i < len(reactors); i++ {
-		wg.Add(1)
-		go _waitForTxs(t, wg, txs, i, reactors)
-	}
-
-	done := make(chan struct{})
-	go func() {
-		wg.Wait()
-		close(done)
-	}()
-
-	timer := time.After(TIMEOUT)
-	select {
-	case <-timer:
-		t.Fatal("Timed out waiting for txs")
-	case <-done:
-	}
-}
-
-// wait for all txs on a single mempool
-func _waitForTxs(t *testing.T, wg *sync.WaitGroup, txs types.Txs, reactorIdx int, reactors []*MempoolReactor) {
-
-	mempool := reactors[reactorIdx].Mempool
-	for mempool.Size() != len(txs) {
-		time.Sleep(time.Millisecond * 100)
-	}
-
-	reapedTxs := mempool.Reap(len(txs))
-	for i, tx := range txs {
-		assert.Equal(t, tx, reapedTxs[i], fmt.Sprintf("txs at index %d on reactor %d don't match: %v vs %v", i, reactorIdx, tx, reapedTxs[i]))
-	}
-	wg.Done()
-}
-
-const (
-	NUM_TXS = 1000
-	TIMEOUT = 120 * time.Second // ridiculously high because CircleCI is slow
-)
-
-func TestReactorBroadcastTxMessage(t *testing.T) {
-	config := cfg.TestConfig()
-	const N = 4
-	reactors := makeAndConnectMempoolReactors(config, N)
-	defer func() {
-		for _, r := range reactors {
-			r.Stop()
-		}
-	}()
-
-	// send a bunch of txs to the first reactor's mempool
-	// and wait for them all to be received in the others
-	txs := checkTxs(t, reactors[0].Mempool, NUM_TXS)
-	waitForTxs(t, txs, reactors)
-}
-
-func TestBroadcastTxForPeerStopsWhenPeerStops(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	config := cfg.TestConfig()
-	const N = 2
-	reactors := makeAndConnectMempoolReactors(config, N)
-	defer func() {
-		for _, r := range reactors {
-			r.Stop()
-		}
-	}()
-
-	// stop peer
-	sw := reactors[1].Switch
-	sw.StopPeerForError(sw.Peers().List()[0], errors.New("some reason"))
-
-	// check that we are not leaking any go-routines
-	// i.e. broadcastTxRoutine finishes when peer is stopped
-	leaktest.CheckTimeout(t, 10*time.Second)()
-}
-
-func TestBroadcastTxForPeerStopsWhenReactorStops(t *testing.T) {
-	if testing.Short() {
-		t.Skip("skipping test in short mode.")
-	}
-
-	config := cfg.TestConfig()
-	const N = 2
-	reactors := makeAndConnectMempoolReactors(config, N)
-
-	// stop reactors
-	for _, r := range reactors {
-		r.Stop()
-	}
-
-	// check that we are not leaking any go-routines
-	// i.e. broadcastTxRoutine finishes when reactor is stopped
-	leaktest.CheckTimeout(t, 10*time.Second)()
-}
diff --git a/mempool/wire.go b/mempool/wire.go
deleted file mode 100644
index ed089726..00000000
--- a/mempool/wire.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package mempool
-
-import (
-	"github.com/tendermint/go-amino"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	RegisterMempoolMessages(cdc)
-}
diff --git a/networks/local/Makefile b/networks/local/Makefile
deleted file mode 100644
index 98517851..00000000
--- a/networks/local/Makefile
+++ /dev/null
@@ -1,7 +0,0 @@
-# Makefile for the "localnode" docker image.
-
-all:
-	docker build --tag tendermint/localnode localnode
-
-.PHONY: all
-
diff --git a/networks/local/README.md b/networks/local/README.md
deleted file mode 100644
index 554abdf4..00000000
--- a/networks/local/README.md
+++ /dev/null
@@ -1,79 +0,0 @@
-# Local Cluster with Docker Compose
-
-## Requirements
-
-- [Install tendermint](/docs/install.rst)
-- [Install docker](https://docs.docker.com/engine/installation/)
-- [Install docker-compose](https://docs.docker.com/compose/install/)
-
-## Build
-
-Build the `tendermint` binary and the `tendermint/localnode` docker image.
-
-Note the binary will be mounted into the container so it can be updated without
-rebuilding the image.
-
-```
-cd $GOPATH/src/github.com/tendermint/tendermint
-
-# Build the linux binary in ./build
-make build-linux
-
-# Build tendermint/localnode image
-make build-docker-localnode
-```
-
-
-## Run a testnet
-
-To start a 4 node testnet run:
-
-```
-make localnet-start
-```
-
-This creates a 4-node network using the localnode image. Each node exposes its
-P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660,
-26661-26662, and 26663-26664 respectively, so the RPC servers are reachable on
-host ports 26657, 26660, 26662, and 26664.
-
-To update the binary, just rebuild it and restart the nodes:
-
-```
-make build-linux
-make localnet-stop
-make localnet-start
-```
-
-## Configuration
-
-The `make localnet-start` target creates files for a 4-node testnet in `./build` by
-calling the `tendermint testnet` command.
-
-The `./build` directory is mounted to the `/tendermint` mount point to attach the
-binary and config files to the container.
-
-For instance, to create a single node testnet:
-
-```
-cd $GOPATH/src/github.com/tendermint/tendermint
-
-# Clear the build folder
-rm -rf ./build
-
-# Build binary
-make build-linux
-
-# Create configuration
-docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
-
-# Run the node
-docker run -v `pwd`/build:/tendermint tendermint/localnode
-
-```
-
-## Logging
-
-The log is saved under the attached volume, in the `tendermint.log` file. If the
-`LOG` environment variable is set to `stdout` at start, the log is not saved but
-printed to the screen.
-
-## Special binaries
-
-If you have multiple binaries with different names, you can specify which one to
-run with the `BINARY` environment variable. The path of the binary is relative to
-the attached volume.
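For example, a minimal sketch of such an invocation, assuming a linux/amd64 build
named `tendermint_my_test_version` (the name used in the wrapper's own error
message) has been copied into `./build`; `wrapper.sh` resolves `BINARY` relative
to the attached `/tendermint` volume:

```
# Run the localnode image with a custom binary name.
# Assumes ./build/tendermint_my_test_version exists.
docker run -e BINARY=tendermint_my_test_version -e LOG="stdout" \
  -v `pwd`/build:/tendermint tendermint/localnode
```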
- diff --git a/networks/local/localnode/Dockerfile b/networks/local/localnode/Dockerfile deleted file mode 100644 index 3942cecd..00000000 --- a/networks/local/localnode/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM alpine:3.7 -MAINTAINER Greg Szabo - -RUN apk update && \ - apk upgrade && \ - apk --no-cache add curl jq file - -VOLUME [ /tendermint ] -WORKDIR /tendermint -EXPOSE 26656 26657 -ENTRYPOINT ["/usr/bin/wrapper.sh"] -CMD ["node", "--proxy_app", "kvstore"] -STOPSIGNAL SIGTERM - -COPY wrapper.sh /usr/bin/wrapper.sh - diff --git a/networks/local/localnode/wrapper.sh b/networks/local/localnode/wrapper.sh deleted file mode 100755 index fe8031e6..00000000 --- a/networks/local/localnode/wrapper.sh +++ /dev/null @@ -1,35 +0,0 @@ -#!/usr/bin/env sh - -## -## Input parameters -## -BINARY=/tendermint/${BINARY:-tendermint} -ID=${ID:-0} -LOG=${LOG:-tendermint.log} - -## -## Assert linux binary -## -if ! [ -f "${BINARY}" ]; then - echo "The binary $(basename "${BINARY}") cannot be found. Please add the binary to the shared folder. Please use the BINARY environment variable if the name of the binary is not 'tendermint' E.g.: -e BINARY=tendermint_my_test_version" - exit 1 -fi -BINARY_CHECK="$(file "$BINARY" | grep 'ELF 64-bit LSB executable, x86-64')" -if [ -z "${BINARY_CHECK}" ]; then - echo "Binary needs to be OS linux, ARCH amd64" - exit 1 -fi - -## -## Run binary with all parameters -## -export TMHOME="/tendermint/node${ID}" - -if [ -d "`dirname ${TMHOME}/${LOG}`" ]; then - "$BINARY" "$@" | tee "${TMHOME}/${LOG}" -else - "$BINARY" "$@" -fi - -chmod 777 -R /tendermint - diff --git a/networks/remote/README.md b/networks/remote/README.md deleted file mode 100644 index 090f6da1..00000000 --- a/networks/remote/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# Remote Cluster with Terraform and Ansible - -See the [docs](/docs/terraform-and-ansible.rst) diff --git a/networks/remote/ansible/.gitignore b/networks/remote/ansible/.gitignore deleted file mode 100644 index a8b42eb6..00000000 --- a/networks/remote/ansible/.gitignore +++ /dev/null @@ -1 +0,0 @@ -*.retry diff --git a/networks/remote/ansible/ansible.cfg b/networks/remote/ansible/ansible.cfg deleted file mode 100644 index 045c1ea6..00000000 --- a/networks/remote/ansible/ansible.cfg +++ /dev/null @@ -1,4 +0,0 @@ -[defaults] -retry_files_enabled = False -host_key_checking = False - diff --git a/networks/remote/ansible/config.yml b/networks/remote/ansible/config.yml deleted file mode 100644 index 7b772fb7..00000000 --- a/networks/remote/ansible/config.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- - -#Requires BINARY and CONFIGDIR variables set. -#N=4 hosts by default. - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: yes - vars: - - service: tendermint - - N: 4 - roles: - - stop - - config - - unsafe_reset - - start - diff --git a/networks/remote/ansible/install.yml b/networks/remote/ansible/install.yml deleted file mode 100644 index a57b4be4..00000000 --- a/networks/remote/ansible/install.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - roles: - - install - diff --git a/networks/remote/ansible/inventory/COPYING b/networks/remote/ansible/inventory/COPYING deleted file mode 100644 index 10926e87..00000000 --- a/networks/remote/ansible/inventory/COPYING +++ /dev/null @@ -1,675 +0,0 @@ - GNU GENERAL PUBLIC LICENSE - Version 3, 29 June 2007 - - Copyright (C) 2007 Free Software Foundation, Inc. 
- Everyone is permitted to copy and distribute verbatim copies - of this license document, but changing it is not allowed. - - Preamble - - The GNU General Public License is a free, copyleft license for -software and other kinds of works. - - The licenses for most software and other practical works are designed -to take away your freedom to share and change the works. By contrast, -the GNU General Public License is intended to guarantee your freedom to -share and change all versions of a program--to make sure it remains free -software for all its users. We, the Free Software Foundation, use the -GNU General Public License for most of our software; it applies also to -any other work released this way by its authors. You can apply it to -your programs, too. - - When we speak of free software, we are referring to freedom, not -price. Our General Public Licenses are designed to make sure that you -have the freedom to distribute copies of free software (and charge for -them if you wish), that you receive source code or can get it if you -want it, that you can change the software or use pieces of it in new -free programs, and that you know you can do these things. - - To protect your rights, we need to prevent others from denying you -these rights or asking you to surrender the rights. Therefore, you have -certain responsibilities if you distribute copies of the software, or if -you modify it: responsibilities to respect the freedom of others. - - For example, if you distribute copies of such a program, whether -gratis or for a fee, you must pass on to the recipients the same -freedoms that you received. You must make sure that they, too, receive -or can get the source code. And you must show them these terms so they -know their rights. - - Developers that use the GNU GPL protect your rights with two steps: -(1) assert copyright on the software, and (2) offer you this License -giving you legal permission to copy, distribute and/or modify it. - - For the developers' and authors' protection, the GPL clearly explains -that there is no warranty for this free software. For both users' and -authors' sake, the GPL requires that modified versions be marked as -changed, so that their problems will not be attributed erroneously to -authors of previous versions. - - Some devices are designed to deny users access to install or run -modified versions of the software inside them, although the manufacturer -can do so. This is fundamentally incompatible with the aim of -protecting users' freedom to change the software. The systematic -pattern of such abuse occurs in the area of products for individuals to -use, which is precisely where it is most unacceptable. Therefore, we -have designed this version of the GPL to prohibit the practice for those -products. If such problems arise substantially in other domains, we -stand ready to extend this provision to those domains in future versions -of the GPL, as needed to protect the freedom of users. - - Finally, every program is threatened constantly by software patents. -States should not allow patents to restrict development and use of -software on general-purpose computers, but in those that do, we wish to -avoid the special danger that patents applied to a free program could -make it effectively proprietary. To prevent this, the GPL assures that -patents cannot be used to render the program non-free. - - The precise terms and conditions for copying, distribution and -modification follow. - - TERMS AND CONDITIONS - - 0. Definitions. 
- - "This License" refers to version 3 of the GNU General Public License. - - "Copyright" also means copyright-like laws that apply to other kinds of -works, such as semiconductor masks. - - "The Program" refers to any copyrightable work licensed under this -License. Each licensee is addressed as "you". "Licensees" and -"recipients" may be individuals or organizations. - - To "modify" a work means to copy from or adapt all or part of the work -in a fashion requiring copyright permission, other than the making of an -exact copy. The resulting work is called a "modified version" of the -earlier work or a work "based on" the earlier work. - - A "covered work" means either the unmodified Program or a work based -on the Program. - - To "propagate" a work means to do anything with it that, without -permission, would make you directly or secondarily liable for -infringement under applicable copyright law, except executing it on a -computer or modifying a private copy. Propagation includes copying, -distribution (with or without modification), making available to the -public, and in some countries other activities as well. - - To "convey" a work means any kind of propagation that enables other -parties to make or receive copies. Mere interaction with a user through -a computer network, with no transfer of a copy, is not conveying. - - An interactive user interface displays "Appropriate Legal Notices" -to the extent that it includes a convenient and prominently visible -feature that (1) displays an appropriate copyright notice, and (2) -tells the user that there is no warranty for the work (except to the -extent that warranties are provided), that licensees may convey the -work under this License, and how to view a copy of this License. If -the interface presents a list of user commands or options, such as a -menu, a prominent item in the list meets this criterion. - - 1. Source Code. - - The "source code" for a work means the preferred form of the work -for making modifications to it. "Object code" means any non-source -form of a work. - - A "Standard Interface" means an interface that either is an official -standard defined by a recognized standards body, or, in the case of -interfaces specified for a particular programming language, one that -is widely used among developers working in that language. - - The "System Libraries" of an executable work include anything, other -than the work as a whole, that (a) is included in the normal form of -packaging a Major Component, but which is not part of that Major -Component, and (b) serves only to enable use of the work with that -Major Component, or to implement a Standard Interface for which an -implementation is available to the public in source code form. A -"Major Component", in this context, means a major essential component -(kernel, window system, and so on) of the specific operating system -(if any) on which the executable work runs, or a compiler used to -produce the work, or an object code interpreter used to run it. - - The "Corresponding Source" for a work in object code form means all -the source code needed to generate, install, and (for an executable -work) run the object code and to modify the work, including scripts to -control those activities. However, it does not include the work's -System Libraries, or general-purpose tools or generally available free -programs which are used unmodified in performing those activities but -which are not part of the work. 
For example, Corresponding Source -includes interface definition files associated with source files for -the work, and the source code for shared libraries and dynamically -linked subprograms that the work is specifically designed to require, -such as by intimate data communication or control flow between those -subprograms and other parts of the work. - - The Corresponding Source need not include anything that users -can regenerate automatically from other parts of the Corresponding -Source. - - The Corresponding Source for a work in source code form is that -same work. - - 2. Basic Permissions. - - All rights granted under this License are granted for the term of -copyright on the Program, and are irrevocable provided the stated -conditions are met. This License explicitly affirms your unlimited -permission to run the unmodified Program. The output from running a -covered work is covered by this License only if the output, given its -content, constitutes a covered work. This License acknowledges your -rights of fair use or other equivalent, as provided by copyright law. - - You may make, run and propagate covered works that you do not -convey, without conditions so long as your license otherwise remains -in force. You may convey covered works to others for the sole purpose -of having them make modifications exclusively for you, or provide you -with facilities for running those works, provided that you comply with -the terms of this License in conveying all material for which you do -not control copyright. Those thus making or running the covered works -for you must do so exclusively on your behalf, under your direction -and control, on terms that prohibit them from making any copies of -your copyrighted material outside their relationship with you. - - Conveying under any other circumstances is permitted solely under -the conditions stated below. Sublicensing is not allowed; section 10 -makes it unnecessary. - - 3. Protecting Users' Legal Rights From Anti-Circumvention Law. - - No covered work shall be deemed part of an effective technological -measure under any applicable law fulfilling obligations under article -11 of the WIPO copyright treaty adopted on 20 December 1996, or -similar laws prohibiting or restricting circumvention of such -measures. - - When you convey a covered work, you waive any legal power to forbid -circumvention of technological measures to the extent such circumvention -is effected by exercising rights under this License with respect to -the covered work, and you disclaim any intention to limit operation or -modification of the work as a means of enforcing, against the work's -users, your or third parties' legal rights to forbid circumvention of -technological measures. - - 4. Conveying Verbatim Copies. - - You may convey verbatim copies of the Program's source code as you -receive it, in any medium, provided that you conspicuously and -appropriately publish on each copy an appropriate copyright notice; -keep intact all notices stating that this License and any -non-permissive terms added in accord with section 7 apply to the code; -keep intact all notices of the absence of any warranty; and give all -recipients a copy of this License along with the Program. - - You may charge any price or no price for each copy that you convey, -and you may offer support or warranty protection for a fee. - - 5. Conveying Modified Source Versions. 
- - You may convey a work based on the Program, or the modifications to -produce it from the Program, in the form of source code under the -terms of section 4, provided that you also meet all of these conditions: - - a) The work must carry prominent notices stating that you modified - it, and giving a relevant date. - - b) The work must carry prominent notices stating that it is - released under this License and any conditions added under section - 7. This requirement modifies the requirement in section 4 to - "keep intact all notices". - - c) You must license the entire work, as a whole, under this - License to anyone who comes into possession of a copy. This - License will therefore apply, along with any applicable section 7 - additional terms, to the whole of the work, and all its parts, - regardless of how they are packaged. This License gives no - permission to license the work in any other way, but it does not - invalidate such permission if you have separately received it. - - d) If the work has interactive user interfaces, each must display - Appropriate Legal Notices; however, if the Program has interactive - interfaces that do not display Appropriate Legal Notices, your - work need not make them do so. - - A compilation of a covered work with other separate and independent -works, which are not by their nature extensions of the covered work, -and which are not combined with it such as to form a larger program, -in or on a volume of a storage or distribution medium, is called an -"aggregate" if the compilation and its resulting copyright are not -used to limit the access or legal rights of the compilation's users -beyond what the individual works permit. Inclusion of a covered work -in an aggregate does not cause this License to apply to the other -parts of the aggregate. - - 6. Conveying Non-Source Forms. - - You may convey a covered work in object code form under the terms -of sections 4 and 5, provided that you also convey the -machine-readable Corresponding Source under the terms of this License, -in one of these ways: - - a) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by the - Corresponding Source fixed on a durable physical medium - customarily used for software interchange. - - b) Convey the object code in, or embodied in, a physical product - (including a physical distribution medium), accompanied by a - written offer, valid for at least three years and valid for as - long as you offer spare parts or customer support for that product - model, to give anyone who possesses the object code either (1) a - copy of the Corresponding Source for all the software in the - product that is covered by this License, on a durable physical - medium customarily used for software interchange, for a price no - more than your reasonable cost of physically performing this - conveying of source, or (2) access to copy the - Corresponding Source from a network server at no charge. - - c) Convey individual copies of the object code with a copy of the - written offer to provide the Corresponding Source. This - alternative is allowed only occasionally and noncommercially, and - only if you received the object code with such an offer, in accord - with subsection 6b. - - d) Convey the object code by offering access from a designated - place (gratis or for a charge), and offer equivalent access to the - Corresponding Source in the same way through the same place at no - further charge. 
You need not require recipients to copy the - Corresponding Source along with the object code. If the place to - copy the object code is a network server, the Corresponding Source - may be on a different server (operated by you or a third party) - that supports equivalent copying facilities, provided you maintain - clear directions next to the object code saying where to find the - Corresponding Source. Regardless of what server hosts the - Corresponding Source, you remain obligated to ensure that it is - available for as long as needed to satisfy these requirements. - - e) Convey the object code using peer-to-peer transmission, provided - you inform other peers where the object code and Corresponding - Source of the work are being offered to the general public at no - charge under subsection 6d. - - A separable portion of the object code, whose source code is excluded -from the Corresponding Source as a System Library, need not be -included in conveying the object code work. - - A "User Product" is either (1) a "consumer product", which means any -tangible personal property which is normally used for personal, family, -or household purposes, or (2) anything designed or sold for incorporation -into a dwelling. In determining whether a product is a consumer product, -doubtful cases shall be resolved in favor of coverage. For a particular -product received by a particular user, "normally used" refers to a -typical or common use of that class of product, regardless of the status -of the particular user or of the way in which the particular user -actually uses, or expects or is expected to use, the product. A product -is a consumer product regardless of whether the product has substantial -commercial, industrial or non-consumer uses, unless such uses represent -the only significant mode of use of the product. - - "Installation Information" for a User Product means any methods, -procedures, authorization keys, or other information required to install -and execute modified versions of a covered work in that User Product from -a modified version of its Corresponding Source. The information must -suffice to ensure that the continued functioning of the modified object -code is in no case prevented or interfered with solely because -modification has been made. - - If you convey an object code work under this section in, or with, or -specifically for use in, a User Product, and the conveying occurs as -part of a transaction in which the right of possession and use of the -User Product is transferred to the recipient in perpetuity or for a -fixed term (regardless of how the transaction is characterized), the -Corresponding Source conveyed under this section must be accompanied -by the Installation Information. But this requirement does not apply -if neither you nor any third party retains the ability to install -modified object code on the User Product (for example, the work has -been installed in ROM). - - The requirement to provide Installation Information does not include a -requirement to continue to provide support service, warranty, or updates -for a work that has been modified or installed by the recipient, or for -the User Product in which it has been modified or installed. Access to a -network may be denied when the modification itself materially and -adversely affects the operation of the network or violates the rules and -protocols for communication across the network. 
- - Corresponding Source conveyed, and Installation Information provided, -in accord with this section must be in a format that is publicly -documented (and with an implementation available to the public in -source code form), and must require no special password or key for -unpacking, reading or copying. - - 7. Additional Terms. - - "Additional permissions" are terms that supplement the terms of this -License by making exceptions from one or more of its conditions. -Additional permissions that are applicable to the entire Program shall -be treated as though they were included in this License, to the extent -that they are valid under applicable law. If additional permissions -apply only to part of the Program, that part may be used separately -under those permissions, but the entire Program remains governed by -this License without regard to the additional permissions. - - When you convey a copy of a covered work, you may at your option -remove any additional permissions from that copy, or from any part of -it. (Additional permissions may be written to require their own -removal in certain cases when you modify the work.) You may place -additional permissions on material, added by you to a covered work, -for which you have or can give appropriate copyright permission. - - Notwithstanding any other provision of this License, for material you -add to a covered work, you may (if authorized by the copyright holders of -that material) supplement the terms of this License with terms: - - a) Disclaiming warranty or limiting liability differently from the - terms of sections 15 and 16 of this License; or - - b) Requiring preservation of specified reasonable legal notices or - author attributions in that material or in the Appropriate Legal - Notices displayed by works containing it; or - - c) Prohibiting misrepresentation of the origin of that material, or - requiring that modified versions of such material be marked in - reasonable ways as different from the original version; or - - d) Limiting the use for publicity purposes of names of licensors or - authors of the material; or - - e) Declining to grant rights under trademark law for use of some - trade names, trademarks, or service marks; or - - f) Requiring indemnification of licensors and authors of that - material by anyone who conveys the material (or modified versions of - it) with contractual assumptions of liability to the recipient, for - any liability that these contractual assumptions directly impose on - those licensors and authors. - - All other non-permissive additional terms are considered "further -restrictions" within the meaning of section 10. If the Program as you -received it, or any part of it, contains a notice stating that it is -governed by this License along with a term that is a further -restriction, you may remove that term. If a license document contains -a further restriction but permits relicensing or conveying under this -License, you may add to a covered work material governed by the terms -of that license document, provided that the further restriction does -not survive such relicensing or conveying. - - If you add terms to a covered work in accord with this section, you -must place, in the relevant source files, a statement of the -additional terms that apply to those files, or a notice indicating -where to find the applicable terms. - - Additional terms, permissive or non-permissive, may be stated in the -form of a separately written license, or stated as exceptions; -the above requirements apply either way. 
- - 8. Termination. - - You may not propagate or modify a covered work except as expressly -provided under this License. Any attempt otherwise to propagate or -modify it is void, and will automatically terminate your rights under -this License (including any patent licenses granted under the third -paragraph of section 11). - - However, if you cease all violation of this License, then your -license from a particular copyright holder is reinstated (a) -provisionally, unless and until the copyright holder explicitly and -finally terminates your license, and (b) permanently, if the copyright -holder fails to notify you of the violation by some reasonable means -prior to 60 days after the cessation. - - Moreover, your license from a particular copyright holder is -reinstated permanently if the copyright holder notifies you of the -violation by some reasonable means, this is the first time you have -received notice of violation of this License (for any work) from that -copyright holder, and you cure the violation prior to 30 days after -your receipt of the notice. - - Termination of your rights under this section does not terminate the -licenses of parties who have received copies or rights from you under -this License. If your rights have been terminated and not permanently -reinstated, you do not qualify to receive new licenses for the same -material under section 10. - - 9. Acceptance Not Required for Having Copies. - - You are not required to accept this License in order to receive or -run a copy of the Program. Ancillary propagation of a covered work -occurring solely as a consequence of using peer-to-peer transmission -to receive a copy likewise does not require acceptance. However, -nothing other than this License grants you permission to propagate or -modify any covered work. These actions infringe copyright if you do -not accept this License. Therefore, by modifying or propagating a -covered work, you indicate your acceptance of this License to do so. - - 10. Automatic Licensing of Downstream Recipients. - - Each time you convey a covered work, the recipient automatically -receives a license from the original licensors, to run, modify and -propagate that work, subject to this License. You are not responsible -for enforcing compliance by third parties with this License. - - An "entity transaction" is a transaction transferring control of an -organization, or substantially all assets of one, or subdividing an -organization, or merging organizations. If propagation of a covered -work results from an entity transaction, each party to that -transaction who receives a copy of the work also receives whatever -licenses to the work the party's predecessor in interest had or could -give under the previous paragraph, plus a right to possession of the -Corresponding Source of the work from the predecessor in interest, if -the predecessor has it or can get it with reasonable efforts. - - You may not impose any further restrictions on the exercise of the -rights granted or affirmed under this License. For example, you may -not impose a license fee, royalty, or other charge for exercise of -rights granted under this License, and you may not initiate litigation -(including a cross-claim or counterclaim in a lawsuit) alleging that -any patent claim is infringed by making, using, selling, offering for -sale, or importing the Program or any portion of it. - - 11. Patents. 
- - A "contributor" is a copyright holder who authorizes use under this -License of the Program or a work on which the Program is based. The -work thus licensed is called the contributor's "contributor version". - - A contributor's "essential patent claims" are all patent claims -owned or controlled by the contributor, whether already acquired or -hereafter acquired, that would be infringed by some manner, permitted -by this License, of making, using, or selling its contributor version, -but do not include claims that would be infringed only as a -consequence of further modification of the contributor version. For -purposes of this definition, "control" includes the right to grant -patent sublicenses in a manner consistent with the requirements of -this License. - - Each contributor grants you a non-exclusive, worldwide, royalty-free -patent license under the contributor's essential patent claims, to -make, use, sell, offer for sale, import and otherwise run, modify and -propagate the contents of its contributor version. - - In the following three paragraphs, a "patent license" is any express -agreement or commitment, however denominated, not to enforce a patent -(such as an express permission to practice a patent or covenant not to -sue for patent infringement). To "grant" such a patent license to a -party means to make such an agreement or commitment not to enforce a -patent against the party. - - If you convey a covered work, knowingly relying on a patent license, -and the Corresponding Source of the work is not available for anyone -to copy, free of charge and under the terms of this License, through a -publicly available network server or other readily accessible means, -then you must either (1) cause the Corresponding Source to be so -available, or (2) arrange to deprive yourself of the benefit of the -patent license for this particular work, or (3) arrange, in a manner -consistent with the requirements of this License, to extend the patent -license to downstream recipients. "Knowingly relying" means you have -actual knowledge that, but for the patent license, your conveying the -covered work in a country, or your recipient's use of the covered work -in a country, would infringe one or more identifiable patents in that -country that you have reason to believe are valid. - - If, pursuant to or in connection with a single transaction or -arrangement, you convey, or propagate by procuring conveyance of, a -covered work, and grant a patent license to some of the parties -receiving the covered work authorizing them to use, propagate, modify -or convey a specific copy of the covered work, then the patent license -you grant is automatically extended to all recipients of the covered -work and works based on it. - - A patent license is "discriminatory" if it does not include within -the scope of its coverage, prohibits the exercise of, or is -conditioned on the non-exercise of one or more of the rights that are -specifically granted under this License. 
You may not convey a covered -work if you are a party to an arrangement with a third party that is -in the business of distributing software, under which you make payment -to the third party based on the extent of your activity of conveying -the work, and under which the third party grants, to any of the -parties who would receive the covered work from you, a discriminatory -patent license (a) in connection with copies of the covered work -conveyed by you (or copies made from those copies), or (b) primarily -for and in connection with specific products or compilations that -contain the covered work, unless you entered into that arrangement, -or that patent license was granted, prior to 28 March 2007. - - Nothing in this License shall be construed as excluding or limiting -any implied license or other defenses to infringement that may -otherwise be available to you under applicable patent law. - - 12. No Surrender of Others' Freedom. - - If conditions are imposed on you (whether by court order, agreement or -otherwise) that contradict the conditions of this License, they do not -excuse you from the conditions of this License. If you cannot convey a -covered work so as to satisfy simultaneously your obligations under this -License and any other pertinent obligations, then as a consequence you may -not convey it at all. For example, if you agree to terms that obligate you -to collect a royalty for further conveying from those to whom you convey -the Program, the only way you could satisfy both those terms and this -License would be to refrain entirely from conveying the Program. - - 13. Use with the GNU Affero General Public License. - - Notwithstanding any other provision of this License, you have -permission to link or combine any covered work with a work licensed -under version 3 of the GNU Affero General Public License into a single -combined work, and to convey the resulting work. The terms of this -License will continue to apply to the part which is the covered work, -but the special requirements of the GNU Affero General Public License, -section 13, concerning interaction through a network will apply to the -combination as such. - - 14. Revised Versions of this License. - - The Free Software Foundation may publish revised and/or new versions of -the GNU General Public License from time to time. Such new versions will -be similar in spirit to the present version, but may differ in detail to -address new problems or concerns. - - Each version is given a distinguishing version number. If the -Program specifies that a certain numbered version of the GNU General -Public License "or any later version" applies to it, you have the -option of following the terms and conditions either of that numbered -version or of any later version published by the Free Software -Foundation. If the Program does not specify a version number of the -GNU General Public License, you may choose any version ever published -by the Free Software Foundation. - - If the Program specifies that a proxy can decide which future -versions of the GNU General Public License can be used, that proxy's -public statement of acceptance of a version permanently authorizes you -to choose that version for the Program. - - Later license versions may give you additional or different -permissions. However, no additional obligations are imposed on any -author or copyright holder as a result of your choosing to follow a -later version. - - 15. Disclaimer of Warranty. 
- - THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY -APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT -HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY -OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, -THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM -IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF -ALL NECESSARY SERVICING, REPAIR OR CORRECTION. - - 16. Limitation of Liability. - - IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING -WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS -THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY -GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE -USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF -DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD -PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), -EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF -SUCH DAMAGES. - - 17. Interpretation of Sections 15 and 16. - - If the disclaimer of warranty and limitation of liability provided -above cannot be given local legal effect according to their terms, -reviewing courts shall apply local law that most closely approximates -an absolute waiver of all civil liability in connection with the -Program, unless a warranty or assumption of liability accompanies a -copy of the Program in return for a fee. - - END OF TERMS AND CONDITIONS - - How to Apply These Terms to Your New Programs - - If you develop a new program, and you want it to be of the greatest -possible use to the public, the best way to achieve this is to make it -free software which everyone can redistribute and change under these terms. - - To do so, attach the following notices to the program. It is safest -to attach them to the start of each source file to most effectively -state the exclusion of warranty; and each file should have at least -the "copyright" line and a pointer to where the full notice is found. - - - <one line to give the program's name and a brief idea of what it does.> - Copyright (C) <year> <name of author> - - This program is free software: you can redistribute it and/or modify - it under the terms of the GNU General Public License as published by - the Free Software Foundation, either version 3 of the License, or - (at your option) any later version. - - This program is distributed in the hope that it will be useful, - but WITHOUT ANY WARRANTY; without even the implied warranty of - MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - GNU General Public License for more details. - - You should have received a copy of the GNU General Public License - along with this program. If not, see <https://www.gnu.org/licenses/>. - -Also add information on how to contact you by electronic and paper mail. - - If the program does terminal interaction, make it output a short -notice like this when it starts in an interactive mode: - - <program> Copyright (C) <year> <name of author> - This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. - This is free software, and you are welcome to redistribute it - under certain conditions; type `show c' for details. - -The hypothetical commands `show w' and `show c' should show the appropriate -parts of the General Public License. Of course, your program's commands -might be different; for a GUI interface, you would use an "about box".
- - You should also get your employer (if you work as a programmer) or school, -if any, to sign a "copyright disclaimer" for the program, if necessary. -For more information on this, and how to apply and follow the GNU GPL, see -<https://www.gnu.org/licenses/>. - - The GNU General Public License does not permit incorporating your program -into proprietary programs. If your program is a subroutine library, you -may consider it more useful to permit linking proprietary applications with -the library. If this is what you want to do, use the GNU Lesser General -Public License instead of this License. But first, please read -<https://www.gnu.org/licenses/why-not-lgpl.html>. - diff --git a/networks/remote/ansible/inventory/digital_ocean.ini b/networks/remote/ansible/inventory/digital_ocean.ini deleted file mode 100644 index b809554b..00000000 --- a/networks/remote/ansible/inventory/digital_ocean.ini +++ /dev/null @@ -1,34 +0,0 @@ -# Ansible DigitalOcean external inventory script settings -# - -[digital_ocean] - -# The module needs your DigitalOcean API Token. -# It may also be specified on the command line via --api-token -# or via the environment variables DO_API_TOKEN or DO_API_KEY -# -#api_token = 123456abcdefg - - -# API calls to DigitalOcean may be slow. For this reason, we cache the results -# of an API call. Set this to the path you want cache files to be written to. -# One file will be written to this directory: -# - ansible-digital_ocean.cache -# -cache_path = /tmp - - -# The number of seconds a cache file is considered valid. After this many -# seconds, a new API call will be made, and the cache file will be updated. -# -cache_max_age = 300 - -# Use the private network IP address instead of the public when available. -# -use_private_network = False - -# Pass variables to every group, e.g.: -# -# group_variables = { 'ansible_user': 'root' } -# -group_variables = {} diff --git a/networks/remote/ansible/inventory/digital_ocean.py b/networks/remote/ansible/inventory/digital_ocean.py deleted file mode 100755 index 24ba6437..00000000 --- a/networks/remote/ansible/inventory/digital_ocean.py +++ /dev/null @@ -1,471 +0,0 @@ -#!/usr/bin/env python - -''' -DigitalOcean external inventory script -====================================== - -Generates Ansible inventory of DigitalOcean Droplets. - -In addition to the --list and --host options used by Ansible, there are options -for generating JSON of other DigitalOcean data. This is useful when creating -droplets. For example, --regions will return all the DigitalOcean Regions. -This information can also be easily found in the cache file, whose default -location is /tmp/ansible-digital_ocean.cache. - -The --pretty (-p) option pretty-prints the output for better human readability. - ----- -Although the cache stores all the information received from DigitalOcean, -the cache is not used for current droplet information (in --list, --host, ---all, and --droplets). This is so that accurate droplet information is always -found. You can force this script to use the cache with --force-cache. - ----- -Configuration is read from `digital_ocean.ini`, then from environment variables, -and then from command-line arguments. - -Most notably, the DigitalOcean API Token must be specified. It can be specified -in the INI file or with the following environment variables: - export DO_API_TOKEN='abc123' or - export DO_API_KEY='abc123' - -Alternatively, it can be passed on the command-line with --api-token.
- -If you specify DigitalOcean credentials in the INI file, a handy way to -get them into your environment (e.g., to use the digital_ocean module) -is to use the output of the --env option with export: - export $(digital_ocean.py --env) - ----- -The following groups are generated from --list: - - ID (droplet ID) - - NAME (droplet NAME) - - image_ID - - image_NAME - - distro_NAME (distribution NAME from image) - - region_NAME - - size_NAME - - status_STATUS - -For each host, the following variables are registered: - - do_backup_ids - - do_created_at - - do_disk - - do_features - list - - do_id - - do_image - object - - do_ip_address - - do_private_ip_address - - do_kernel - object - - do_locked - - do_memory - - do_name - - do_networks - object - - do_next_backup_window - - do_region - object - - do_size - object - - do_size_slug - - do_snapshot_ids - list - - do_status - - do_tags - - do_vcpus - - do_volume_ids - ------ -``` -usage: digital_ocean.py [-h] [--list] [--host HOST] [--all] - [--droplets] [--regions] [--images] [--sizes] - [--ssh-keys] [--domains] [--pretty] - [--cache-path CACHE_PATH] - [--cache-max_age CACHE_MAX_AGE] - [--force-cache] - [--refresh-cache] - [--api-token API_TOKEN] - -Produce an Ansible Inventory file based on DigitalOcean credentials - -optional arguments: - -h, --help show this help message and exit - --list List all active Droplets as Ansible inventory - (default: True) - --host HOST Get all Ansible inventory variables about a specific - Droplet - --all List all DigitalOcean information as JSON - --droplets List Droplets as JSON - --regions List Regions as JSON - --images List Images as JSON - --sizes List Sizes as JSON - --ssh-keys List SSH keys as JSON - --domains List Domains as JSON - --pretty, -p Pretty-print results - --cache-path CACHE_PATH - Path to the cache files (default: .) - --cache-max_age CACHE_MAX_AGE - Maximum age of the cached items (default: 0) - --force-cache Only use data from the cache - --refresh-cache Force refresh of cache by making API requests to - DigitalOcean (default: False - use cache files) - --api-token API_TOKEN, -a API_TOKEN - DigitalOcean API Token -``` - -''' - -# (c) 2013, Evan Wies -# -# Inspired by the EC2 inventory plugin: -# https://github.com/ansible/ansible/blob/devel/contrib/inventory/ec2.py -# -# This file is part of Ansible, -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
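The docstring above lists the inventory groups and the `do_`-prefixed host variables this script emits. As a quick orientation, here is a minimal sketch (not part of the deleted file; the droplet data is made up) of the `--list` output shape Ansible expects from any dynamic inventory script: top-level groups carrying `hosts` and `vars`, plus per-host variables under `_meta.hostvars`:

```
import json

# Hypothetical droplet record, trimmed to a few of the fields the real API returns.
droplet = {
    "id": 42,
    "name": "node0",
    "ip_address": "203.0.113.10",
    "region": {"slug": "ams3"},
    "status": "active",
}

dest = droplet["ip_address"]

# Namespace every key under "do_", as the script's do_namespace() helper does.
hostvars = {"do_" + k: v for k, v in droplet.items()}

inventory = {
    "all": {"hosts": [dest], "vars": {}},
    "region_" + droplet["region"]["slug"]: {"hosts": [dest], "vars": {}},
    "status_" + droplet["status"]: {"hosts": [dest], "vars": {}},
    "_meta": {"hostvars": {dest: hostvars}},
}

print(json.dumps(inventory, indent=2, sort_keys=True))
```

Running it prints a valid, if tiny, inventory document; the real script fills the same shape from live API data.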
- -###################################################################### - -import os -import sys -import re -import argparse -from time import time -import ConfigParser -import ast - -try: - import json -except ImportError: - import simplejson as json - -try: - from dopy.manager import DoManager -except ImportError as e: - sys.exit("failed=True msg='`dopy` library required for this script'") - - -class DigitalOceanInventory(object): - - ########################################################################### - # Main execution path - ########################################################################### - - def __init__(self): - ''' Main execution path ''' - - # DigitalOceanInventory data - self.data = {} # All DigitalOcean data - self.inventory = {} # Ansible Inventory - - # Define defaults - self.cache_path = '.' - self.cache_max_age = 0 - self.use_private_network = False - self.group_variables = {} - - # Read settings, environment variables, and CLI arguments - self.read_settings() - self.read_environment() - self.read_cli_args() - - # Verify credentials were set - if not hasattr(self, 'api_token'): - sys.stderr.write('''Could not find values for DigitalOcean api_token. -They must be specified via either ini file, command line argument (--api-token), -or environment variables (DO_API_TOKEN)\n''') - sys.exit(-1) - - # env command, show DigitalOcean credentials - if self.args.env: - print("DO_API_TOKEN=%s" % self.api_token) - sys.exit(0) - - # Manage cache - self.cache_filename = self.cache_path + "/ansible-digital_ocean.cache" - self.cache_refreshed = False - - if self.is_cache_valid(): - self.load_from_cache() - if len(self.data) == 0: - if self.args.force_cache: - sys.stderr.write('''Cache is empty and --force-cache was specified\n''') - sys.exit(-1) - - self.manager = DoManager(None, self.api_token, api_version=2) - - # Pick the json_data to print based on the CLI command - if self.args.droplets: - self.load_from_digital_ocean('droplets') - json_data = {'droplets': self.data['droplets']} - elif self.args.regions: - self.load_from_digital_ocean('regions') - json_data = {'regions': self.data['regions']} - elif self.args.images: - self.load_from_digital_ocean('images') - json_data = {'images': self.data['images']} - elif self.args.sizes: - self.load_from_digital_ocean('sizes') - json_data = {'sizes': self.data['sizes']} - elif self.args.ssh_keys: - self.load_from_digital_ocean('ssh_keys') - json_data = {'ssh_keys': self.data['ssh_keys']} - elif self.args.domains: - self.load_from_digital_ocean('domains') - json_data = {'domains': self.data['domains']} - elif self.args.all: - self.load_from_digital_ocean() - json_data = self.data - elif self.args.host: - json_data = self.load_droplet_variables_for_host() - else: # '--list' this is last to make it default - self.load_from_digital_ocean('droplets') - self.build_inventory() - json_data = self.inventory - - if self.cache_refreshed: - self.write_to_cache() - - if self.args.pretty: - print(json.dumps(json_data, sort_keys=True, indent=2)) - else: - print(json.dumps(json_data)) - # That's all she wrote... 
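The constructor above layers its configuration sources in a fixed order: the INI file is read first, then environment variables, then command-line arguments, so each later source overrides the earlier ones. Here is a minimal standalone sketch of that precedence for the API token (the function name and defaults are hypothetical, not code from this script):

```
import os

def resolve_api_token(ini_value=None, cli_value=None):
    # 1. Lowest precedence: the INI file.
    token = ini_value
    # 2. Environment variables override the INI file; DO_API_KEY is read
    #    after DO_API_TOKEN, so it wins when both are set.
    token = os.getenv("DO_API_TOKEN", token)
    token = os.getenv("DO_API_KEY", token)
    # 3. Highest precedence: the --api-token command-line argument.
    if cli_value:
        token = cli_value
    return token

print(resolve_api_token(ini_value="token-from-ini"))
```

The actual script applies the same idea across read_settings(), read_environment(), and read_cli_args() below.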
- - ########################################################################### - # Script configuration - ########################################################################### - - def read_settings(self): - ''' Reads the settings from the digital_ocean.ini file ''' - config = ConfigParser.SafeConfigParser() - config.read(os.path.dirname(os.path.realpath(__file__)) + '/digital_ocean.ini') - - # Credentials - if config.has_option('digital_ocean', 'api_token'): - self.api_token = config.get('digital_ocean', 'api_token') - - # Cache related - if config.has_option('digital_ocean', 'cache_path'): - self.cache_path = config.get('digital_ocean', 'cache_path') - if config.has_option('digital_ocean', 'cache_max_age'): - self.cache_max_age = config.getint('digital_ocean', 'cache_max_age') - - # Private IP Address - if config.has_option('digital_ocean', 'use_private_network'): - self.use_private_network = config.getboolean('digital_ocean', 'use_private_network') - - # Group variables - if config.has_option('digital_ocean', 'group_variables'): - self.group_variables = ast.literal_eval(config.get('digital_ocean', 'group_variables')) - - def read_environment(self): - ''' Reads the settings from environment variables ''' - # Setup credentials - if os.getenv("DO_API_TOKEN"): - self.api_token = os.getenv("DO_API_TOKEN") - if os.getenv("DO_API_KEY"): - self.api_token = os.getenv("DO_API_KEY") - - def read_cli_args(self): - ''' Command line argument processing ''' - parser = argparse.ArgumentParser(description='Produce an Ansible Inventory file based on DigitalOcean credentials') - - parser.add_argument('--list', action='store_true', help='List all active Droplets as Ansible inventory (default: True)') - parser.add_argument('--host', action='store', help='Get all Ansible inventory variables about a specific Droplet') - - parser.add_argument('--all', action='store_true', help='List all DigitalOcean information as JSON') - parser.add_argument('--droplets', '-d', action='store_true', help='List Droplets as JSON') - parser.add_argument('--regions', action='store_true', help='List Regions as JSON') - parser.add_argument('--images', action='store_true', help='List Images as JSON') - parser.add_argument('--sizes', action='store_true', help='List Sizes as JSON') - parser.add_argument('--ssh-keys', action='store_true', help='List SSH keys as JSON') - parser.add_argument('--domains', action='store_true', help='List Domains as JSON') - - parser.add_argument('--pretty', '-p', action='store_true', help='Pretty-print results') - - parser.add_argument('--cache-path', action='store', help='Path to the cache files (default: .)') - parser.add_argument('--cache-max_age', action='store', help='Maximum age of the cached items (default: 0)') - parser.add_argument('--force-cache', action='store_true', default=False, help='Only use data from the cache') - parser.add_argument('--refresh-cache', '-r', action='store_true', default=False, - help='Force refresh of cache by making API requests to DigitalOcean (default: False - use cache files)') - - parser.add_argument('--env', '-e', action='store_true', help='Display DO_API_TOKEN') - parser.add_argument('--api-token', '-a', action='store', help='DigitalOcean API Token') - - self.args = parser.parse_args() - - if self.args.api_token: - self.api_token = self.args.api_token - - # Make --list default if none of the other commands are specified - if (not self.args.droplets and not self.args.regions and - not self.args.images and not self.args.sizes and - not self.args.ssh_keys and not 
self.args.domains and - not self.args.all and not self.args.host): - self.args.list = True - - ########################################################################### - # Data Management - ########################################################################### - - def load_from_digital_ocean(self, resource=None): - '''Get JSON from DigitalOcean API''' - if self.args.force_cache and os.path.isfile(self.cache_filename): - return - # We always get fresh droplets - if self.is_cache_valid() and not (resource == 'droplets' or resource is None): - return - if self.args.refresh_cache: - resource = None - - if resource == 'droplets' or resource is None: - self.data['droplets'] = self.manager.all_active_droplets() - self.cache_refreshed = True - if resource == 'regions' or resource is None: - self.data['regions'] = self.manager.all_regions() - self.cache_refreshed = True - if resource == 'images' or resource is None: - self.data['images'] = self.manager.all_images(filter=None) - self.cache_refreshed = True - if resource == 'sizes' or resource is None: - self.data['sizes'] = self.manager.sizes() - self.cache_refreshed = True - if resource == 'ssh_keys' or resource is None: - self.data['ssh_keys'] = self.manager.all_ssh_keys() - self.cache_refreshed = True - if resource == 'domains' or resource is None: - self.data['domains'] = self.manager.all_domains() - self.cache_refreshed = True - - def build_inventory(self): - '''Build Ansible inventory of droplets''' - self.inventory = { - 'all': { - 'hosts': [], - 'vars': self.group_variables - }, - '_meta': {'hostvars': {}} - } - - # add all droplets by id and name - for droplet in self.data['droplets']: - # when using private_networking, the API reports the private one in "ip_address". - if 'private_networking' in droplet['features'] and not self.use_private_network: - for net in droplet['networks']['v4']: - if net['type'] == 'public': - dest = net['ip_address'] - else: - continue - else: - dest = droplet['ip_address'] - - self.inventory['all']['hosts'].append(dest) - - self.inventory[droplet['id']] = [dest] - self.inventory[droplet['name']] = [dest] - - # groups that are always present - for group in ('region_' + droplet['region']['slug'], - 'image_' + str(droplet['image']['id']), - 'size_' + droplet['size']['slug'], - 'distro_' + self.to_safe(droplet['image']['distribution']), - 'status_' + droplet['status']): - if group not in self.inventory: - self.inventory[group] = {'hosts': [], 'vars': {}} - self.inventory[group]['hosts'].append(dest) - - # groups that are not always present - for group in (droplet['image']['slug'], - droplet['image']['name']): - if group: - image = 'image_' + self.to_safe(group) - if image not in self.inventory: - self.inventory[image] = {'hosts': [], 'vars': {}} - self.inventory[image]['hosts'].append(dest) - - if droplet['tags']: - for tag in droplet['tags']: - if tag not in self.inventory: - self.inventory[tag] = {'hosts': [], 'vars': {}} - self.inventory[tag]['hosts'].append(dest) - - # hostvars - info = self.do_namespace(droplet) - self.inventory['_meta']['hostvars'][dest] = info - - def load_droplet_variables_for_host(self): - '''Generate a JSON response to a --host call''' - host = int(self.args.host) - droplet = self.manager.show_droplet(host) - info = self.do_namespace(droplet) - return {'droplet': info} - - ########################################################################### - # Cache Management - ########################################################################### - - def is_cache_valid(self): - ''' 
Determines whether the cache file has expired or is still valid ''' - if os.path.isfile(self.cache_filename): - mod_time = os.path.getmtime(self.cache_filename) - current_time = time() - if (mod_time + self.cache_max_age) > current_time: - return True - return False - - def load_from_cache(self): - ''' Reads the data from the cache file and assigns it to member variables as Python objects''' - try: - cache = open(self.cache_filename, 'r') - json_data = cache.read() - cache.close() - data = json.loads(json_data) - except IOError: - data = {'data': {}, 'inventory': {}} - - self.data = data['data'] - self.inventory = data['inventory'] - - def write_to_cache(self): - ''' Writes data in JSON format to a file ''' - data = {'data': self.data, 'inventory': self.inventory} - json_data = json.dumps(data, sort_keys=True, indent=2) - - cache = open(self.cache_filename, 'w') - cache.write(json_data) - cache.close() - - ########################################################################### - # Utilities - ########################################################################### - - def push(self, my_dict, key, element): - ''' Pushes an element onto an array that may not have been defined in the dict ''' - if key in my_dict: - my_dict[key].append(element) - else: - my_dict[key] = [element] - - def to_safe(self, word): - ''' Converts 'bad' characters in a string to underscores so they can be used as Ansible groups ''' - return re.sub(r"[^A-Za-z0-9\-\.]", "_", word) - - def do_namespace(self, data): - ''' Returns a copy of the dictionary with all the keys put in a 'do_' namespace ''' - info = {} - for k, v in data.items(): - info['do_' + k] = v - return info - - -########################################################################### -# Run the script -DigitalOceanInventory() diff --git a/networks/remote/ansible/logzio.yml b/networks/remote/ansible/logzio.yml deleted file mode 100644 index 53f637f2..00000000 --- a/networks/remote/ansible/logzio.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -#Note: You need to add the LOGZIO_TOKEN variable with your API key.
Like this: ansible-playbook -e LOGZIO_TOKEN=ABCXYZ123456 - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - - JOURNALBEAT_BINARY: "{{lookup('env', 'GOPATH')}}/bin/journalbeat" - roles: - - logzio - diff --git a/networks/remote/ansible/reset.yml b/networks/remote/ansible/reset.yml deleted file mode 100644 index 63b1733c..00000000 --- a/networks/remote/ansible/reset.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - roles: - - stop - - unsafe_reset - - start - - diff --git a/networks/remote/ansible/restart.yml b/networks/remote/ansible/restart.yml deleted file mode 100644 index 71d4bc66..00000000 --- a/networks/remote/ansible/restart.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - roles: - - stop - - start - diff --git a/networks/remote/ansible/roles/config/tasks/main.yml b/networks/remote/ansible/roles/config/tasks/main.yml deleted file mode 100644 index a51098ca..00000000 --- a/networks/remote/ansible/roles/config/tasks/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- - -- name: Copy binary - copy: - src: "{{BINARY}}" - dest: /usr/bin - mode: 0755 - -- name: Copy config - when: item <= N and ansible_hostname == 'sentrynet-node' ~ item - copy: - src: "{{CONFIGDIR}}/node{{item}}/" - dest: "/home/{{service}}/.{{service}}/" - owner: "{{service}}" - group: "{{service}}" - loop: [ 0, 1, 2, 3, 4, 5, 6, 7 ] - diff --git a/networks/remote/ansible/roles/install/handlers/main.yml b/networks/remote/ansible/roles/install/handlers/main.yml deleted file mode 100644 index 16afbb61..00000000 --- a/networks/remote/ansible/roles/install/handlers/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: reload services - systemd: "name={{service}} daemon_reload=yes enabled=yes" - diff --git a/networks/remote/ansible/roles/install/tasks/main.yml b/networks/remote/ansible/roles/install/tasks/main.yml deleted file mode 100644 index 9e5a7524..00000000 --- a/networks/remote/ansible/roles/install/tasks/main.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- - -- name: Create service group - group: "name={{service}}" - -- name: Create service user - user: "name={{service}} group={{service}} home=/home/{{service}}" - -- name: Change user folder to be more permissive - file: "path=/home/{{service}} mode=0755" - -- name: Create service - template: "src=systemd.service.j2 dest=/etc/systemd/system/{{service}}.service" - notify: reload services - diff --git a/networks/remote/ansible/roles/install/templates/systemd.service.j2 b/networks/remote/ansible/roles/install/templates/systemd.service.j2 deleted file mode 100644 index 17b3de4d..00000000 --- a/networks/remote/ansible/roles/install/templates/systemd.service.j2 +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description={{service}} -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User={{service}} -Group={{service}} -PermissionsStartOnly=true -ExecStart=/usr/bin/tendermint node --proxy_app=kvstore -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/networks/remote/ansible/roles/logzio/files/journalbeat.service b/networks/remote/ansible/roles/logzio/files/journalbeat.service deleted file mode 100644 index 3cb66a45..00000000 --- a/networks/remote/ansible/roles/logzio/files/journalbeat.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit]
-Description=journalbeat -# Propagates activation, deactivation and activation failures. -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -ExecStart=/usr/bin/journalbeat -c /etc/journalbeat/journalbeat.yml -path.home /usr/share/journalbeat -path.config /etc/journalbeat -path.data /var/lib/journalbeat -path.logs /var/log/journalbeat - -[Install] -WantedBy=multi-user.target - - diff --git a/networks/remote/ansible/roles/logzio/handlers/main.yml b/networks/remote/ansible/roles/logzio/handlers/main.yml deleted file mode 100644 index 0b371fc5..00000000 --- a/networks/remote/ansible/roles/logzio/handlers/main.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- - -- name: reload daemon - command: "systemctl daemon-reload" - -- name: restart journalbeat - service: name=journalbeat state=restarted - diff --git a/networks/remote/ansible/roles/logzio/tasks/main.yml b/networks/remote/ansible/roles/logzio/tasks/main.yml deleted file mode 100644 index ab3976f2..00000000 --- a/networks/remote/ansible/roles/logzio/tasks/main.yml +++ /dev/null @@ -1,27 +0,0 @@ ---- - -- name: Copy journalbeat binary - copy: src="{{JOURNALBEAT_BINARY}}" dest=/usr/bin/journalbeat mode=0755 - notify: restart journalbeat - -- name: Create folders - file: "path={{item}} state=directory recurse=yes" - with_items: - - /etc/journalbeat - - /etc/pki/tls/certs - - /usr/share/journalbeat - - /var/log/journalbeat - -- name: Copy journalbeat config - template: src=journalbeat.yml.j2 dest=/etc/journalbeat/journalbeat.yml mode=0600 - notify: restart journalbeat - -- name: Get server certificate for Logz.io - get_url: "url=https://raw.githubusercontent.com/logzio/public-certificates/master/COMODORSADomainValidationSecureServerCA.crt force=yes dest=/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt" - -- name: Copy journalbeat service config - copy: src=journalbeat.service dest=/etc/systemd/system/journalbeat.service - notify: - - reload daemon - - restart journalbeat - diff --git a/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 b/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 deleted file mode 100644 index a421ec8a..00000000 --- a/networks/remote/ansible/roles/logzio/templates/journalbeat.yml.j2 +++ /dev/null @@ -1,342 +0,0 @@ -#======================== Journalbeat Configuration ============================ - -journalbeat: - # What position in journald to seek to at startup - # options: cursor, tail, head (defaults to tail) - #seek_position: tail - - # If seek_position is set to cursor and seeking to cursor fails - # fall back to this method. If set to none it will exit - # options: tail, head, none (defaults to tail) - #cursor_seek_fallback: tail - - # Store the cursor of the successfully published events - #write_cursor_state: true - - # Path to the file to store the cursor (defaults to ".journalbeat-cursor-state") - #cursor_state_file: .journalbeat-cursor-state - - # How frequently should we save the cursor to disk (defaults to 5s) - #cursor_flush_period: 5s - - # Path to the file to store the queue of events pending (defaults to ".journalbeat-pending-queue") - #pending_queue.file: .journalbeat-pending-queue - - # How frequently should we save the queue to disk (defaults to 1s). - # Pending queue represents the WAL of events queued to be published - # or being published and waiting for acknowledgement. In case of a - # regular restart of journalbeat all the events not yet acknowledged - # will be flushed to disk during the shutdown.
- # In case of disaster journalbeat most probably won't get a chance to shut - # itself down gracefully, so this flush period option effectively serves as a - # backup creation frequency setting. - #pending_queue.flush_period: 1s - - # Lowercase and remove leading underscores, e.g. "_MESSAGE" -> "message" - # (defaults to false) - #clean_field_names: false - - # All journal entries are strings by default. You can try to convert them to numbers. - # (defaults to false) - #convert_to_numbers: false - - # Store all the fields of the Systemd Journal entry under this field - # Can be almost any string suitable to be a field name of an Elasticsearch document. - # Dots can be used to create nested fields. - # Two exceptions: - # - no repeated dots; - # - no trailing dots, e.g. "journal..field_name." will fail - # (defaults to "", hence fields are stored at the top level of the event) - #move_metadata_to_field: "" - - # Specific units to monitor. - units: ["{{service}}.service"] - - # Specify Journal paths to open. You can pass an array of Systemd Journal paths. - # If you want to open a Journal from a directory, just pass an array consisting of one element - # representing the path. See: https://www.freedesktop.org/software/systemd/man/sd_journal_open.html - # By default this setting is empty, so journalbeat will attempt to find all journal files automatically - #journal_paths: ["/var/log/journal"] - - #default_type: journal - -#================================ General ====================================== - -# The name of the shipper that publishes the network data. It can be used to group -# all the transactions sent by a single shipper in the web interface. -# If this option is not defined, the hostname is used. -#name: journalbeat - -# The tags of the shipper are included in their own field with each -# transaction published. Tags make it easy to group servers by different -# logical properties. -tags: ["{{service}}"] - -# Optional fields that you can specify to add additional information to the -# output. Fields can be scalar values, arrays, dictionaries, or any nested -# combination of these. -fields: - logzio_codec: plain - token: {{LOGZIO_TOKEN}} - -# If this option is set to true, the custom fields are stored as top-level -# fields in the output document instead of being grouped under a fields -# sub-dictionary. Default is false. -fields_under_root: true - -# Internal queue size for single events in processing pipeline -#queue_size: 1000 - -# The internal queue size for bulk events in the processing pipeline. -# Do not modify this value. -#bulk_queue_size: 0 - -# Sets the maximum number of CPUs that can be executing simultaneously. The -# default is the number of logical CPUs available in the system. -#max_procs: - -#================================ Processors =================================== - -# Processors are used to reduce the number of fields in the exported event or to -# enhance the event with external metadata. This section defines a list of -# processors that are applied one by one and the first one receives the initial -# event: -# -# event -> filter1 -> event1 -> filter2 -> event2 ... -# -# The supported processors are drop_fields, drop_event, include_fields, and -# add_cloud_metadata.
-# -# For example, you can use the following processors to keep the fields that -# contain CPU load percentages, but remove the fields that contain CPU ticks -# values: -# -processors: -#- include_fields: -# fields: ["cpu"] -- drop_fields: - fields: ["beat.name", "beat.version", "logzio_codec", "SYSLOG_IDENTIFIER", "SYSLOG_FACILITY", "PRIORITY"] -# -# The following example drops the events that have the HTTP response code 200: -# -#processors: -#- drop_event: -# when: -# equals: -# http.code: 200 -# -# The following example enriches each event with metadata from the cloud -# provider about the host machine. It works on EC2, GCE, and DigitalOcean. -# -#processors: -#- add_cloud_metadata: -# - -#================================ Outputs ====================================== - -# Configure what outputs to use when sending the data collected by the beat. -# Multiple outputs may be used. - -#----------------------------- Logstash output --------------------------------- -output.logstash: - # Boolean flag to enable or disable the output module. - enabled: true - - # The Logstash hosts - hosts: ["listener.logz.io:5015"] - - # Number of workers per Logstash host. - #worker: 1 - - # Set gzip compression level. - #compression_level: 3 - - # Optionally load balance the events between the Logstash hosts - #loadbalance: true - - # Number of batches to be sent asynchronously to logstash while processing - # new batches. - #pipelining: 0 - - # Optional index name. The default index name is set to the name of the beat - # in all lowercase. - #index: 'beatname' - - # SOCKS5 proxy server URL - #proxy_url: socks5://user:password@socks5-server:2233 - - # Resolve names locally when using a proxy server. Defaults to false. - #proxy_use_local_resolver: false - - # Enable SSL support. SSL is automatically enabled if any SSL setting is set. - ssl.enabled: true - - # Configure SSL verification mode. If `none` is configured, all server hosts - # and certificates will be accepted. In this mode, SSL based connections are - # susceptible to man-in-the-middle attacks. Use only for testing. Default is - # `full`. - ssl.verification_mode: full - - # List of supported/valid TLS versions. By default all TLS versions 1.0 up to - # 1.2 are enabled. - #ssl.supported_protocols: [TLSv1.0, TLSv1.1, TLSv1.2] - - # Optional SSL configuration options. SSL is off by default. - # List of root certificates for HTTPS server verifications - ssl.certificate_authorities: ["/etc/pki/tls/certs/COMODORSADomainValidationSecureServerCA.crt"] - - # Certificate for SSL client authentication - #ssl.certificate: "/etc/pki/client/cert.pem" - - # Client Certificate Key - #ssl.key: "/etc/pki/client/cert.key" - - # Optional passphrase for decrypting the Certificate Key. - #ssl.key_passphrase: '' - - # Configure cipher suites to be used for SSL connections - #ssl.cipher_suites: [] - - # Configure curve types for ECDHE based cipher suites - #ssl.curve_types: [] - -#------------------------------- File output ----------------------------------- -#output.file: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Path to the directory where to save the generated files. The option is - # mandatory. - #path: "/tmp/beatname" - - # Name of the generated files. The default is `beatname` and it generates - # files: `beatname`, `beatname.1`, `beatname.2`, etc. - #filename: beatname - - # Maximum size in kilobytes of each file. When this size is reached, and on - # every beatname restart, the files are rotated.
The default value is 10240 - # kB. - #rotate_every_kb: 10000 - - # Maximum number of files under path. When this number of files is reached, - # the oldest file is deleted and the rest are shifted from last to first. The - # default is 7 files. - #number_of_files: 7 - - -#----------------------------- Console output --------------------------------- -#output.console: - # Boolean flag to enable or disable the output module. - #enabled: true - - # Pretty print json event - #pretty: false - -#================================= Paths ====================================== - -# The home path for the beatname installation. This is the default base path -# for all other path settings and for miscellaneous files that come with the -# distribution (for example, the sample dashboards). -# If not set by a CLI flag or in the configuration file, the default for the -# home path is the location of the binary. -#path.home: - -# The configuration path for the beatname installation. This is the default -# base path for configuration files, including the main YAML configuration file -# and the Elasticsearch template file. If not set by a CLI flag or in the -# configuration file, the default for the configuration path is the home path. -#path.config: ${path.home} - -# The data path for the beatname installation. This is the default base path -# for all the files in which beatname needs to store its data. If not set by a -# CLI flag or in the configuration file, the default for the data path is a data -# subdirectory inside the home path. -#path.data: ${path.home}/data - -# The logs path for a beatname installation. This is the default location for -# the Beat's log files. If not set by a CLI flag or in the configuration file, -# the default for the logs path is a logs subdirectory inside the home path. -#path.logs: ${path.home}/logs - -#============================== Dashboards ===================================== -# These settings control loading the sample dashboards to the Kibana index. Loading -# the dashboards is disabled by default and can be enabled either by setting the -# options here, or by using the `-setup` CLI flag. -#dashboards.enabled: false - -# The URL from where to download the dashboards archive. By default this URL -# has a value which is computed based on the Beat name and version. For released -# versions, this URL points to the dashboard archive on the artifacts.elastic.co -# website. -#dashboards.url: - -# The directory from where to read the dashboards. It is used instead of the URL -# when it has a value. -#dashboards.directory: - -# The file archive (zip file) from where to read the dashboards. It is used instead -# of the URL when it has a value. -#dashboards.file: - -# If this option is enabled, the snapshot URL is used instead of the default URL. -#dashboards.snapshot: false - -# The URL from where to download the snapshot version of the dashboards. By default -# this has a value which is computed based on the Beat name and version. -#dashboards.snapshot_url - -# In case the archive contains the dashboards from multiple Beats, this lets you -# select which one to load. You can load all the dashboards in the archive by -# setting this to the empty string. -#dashboards.beat: beatname - -# The name of the Kibana index to use for setting the configuration. Default is ".kibana" -#dashboards.kibana_index: .kibana - -# The Elasticsearch index name. This overwrites the index name defined in the -# dashboards and index pattern. 
Example: testbeat-* -#dashboards.index: - -#================================ Logging ====================================== -# There are three options for the log output: syslog, file, stderr. -# Under Windows, the log files are sent to the file output by default; -# on all other systems they go to syslog by default. - -# Sets log level. The default log level is info. -# Available log levels are: critical, error, warning, info, debug -#logging.level: info - -# Enable debug output for selected components. To enable all selectors use ["*"] -# Other available selectors are "beat", "publish", "service" -# Multiple selectors can be chained. -#logging.selectors: [ ] - -# Send all logging output to syslog. The default is false. -#logging.to_syslog: true - -# If enabled, beatname periodically logs its internal metrics that have changed -# in the last period. For each metric that changed, the delta from the value at -# the beginning of the period is logged. Also, the total values for -# all non-zero internal metrics are logged on shutdown. The default is true. -#logging.metrics.enabled: true - -# The period after which to log the internal metrics. The default is 30s. -#logging.metrics.period: 30s - -# Logging to rotating files. Set logging.to_files to false to disable logging to -# files. -logging.to_files: true -logging.files: - # Configure the path where the logs are written. The default is the logs directory - # under the home path (the binary location). - #path: /var/log/beatname - - # The name of the files where the logs are written to. - #name: beatname - - # Configure the log file size limit. If the limit is reached, the log file - # will be rotated automatically - #rotateeverybytes: 10485760 # = 10MB - - # Number of rotated log files to keep. Oldest files will be deleted first.
- #keepfiles: 7 diff --git a/networks/remote/ansible/roles/start/tasks/main.yml b/networks/remote/ansible/roles/start/tasks/main.yml deleted file mode 100644 index 6bc611c9..00000000 --- a/networks/remote/ansible/roles/start/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: start service - service: "name={{service}} state=started" - diff --git a/networks/remote/ansible/roles/status/tasks/main.yml b/networks/remote/ansible/roles/status/tasks/main.yml deleted file mode 100644 index 50170c74..00000000 --- a/networks/remote/ansible/roles/status/tasks/main.yml +++ /dev/null @@ -1,10 +0,0 @@ ---- - -- name: application service status - command: "service {{service}} status" - changed_when: false - register: status - -- name: Result - debug: var=status.stdout_lines - diff --git a/networks/remote/ansible/roles/stop/tasks/main.yml b/networks/remote/ansible/roles/stop/tasks/main.yml deleted file mode 100644 index 7db356f2..00000000 --- a/networks/remote/ansible/roles/stop/tasks/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- - -- name: stop service - service: "name={{service}} state=stopped" - diff --git a/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml b/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml deleted file mode 100644 index 6ac1ec55..00000000 --- a/networks/remote/ansible/roles/unsafe_reset/tasks/main.yml +++ /dev/null @@ -1,4 +0,0 @@ -- command: "{{service}} unsafe_reset_all {{ (service != 'tendermint') | ternary('node','') }} --home /home/{{service}}/.{{service}}" - become_user: "{{service}}" - become: yes - diff --git a/networks/remote/ansible/start.yml b/networks/remote/ansible/start.yml deleted file mode 100644 index 2be07dc7..00000000 --- a/networks/remote/ansible/start.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - roles: - - start - diff --git a/networks/remote/ansible/status.yml b/networks/remote/ansible/status.yml deleted file mode 100644 index a1721b87..00000000 --- a/networks/remote/ansible/status.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - roles: - - status - diff --git a/networks/remote/ansible/stop.yml b/networks/remote/ansible/stop.yml deleted file mode 100644 index abc6031d..00000000 --- a/networks/remote/ansible/stop.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- hosts: all - user: root - any_errors_fatal: true - gather_facts: no - vars: - - service: tendermint - roles: - - stop - diff --git a/networks/remote/integration.sh b/networks/remote/integration.sh deleted file mode 100644 index 1624711f..00000000 --- a/networks/remote/integration.sh +++ /dev/null @@ -1,132 +0,0 @@ -#!/usr/bin/env bash - -# XXX: this script is intended to be run from a fresh Digital Ocean droplet - -# NOTE: you must set this manually now -echo "export DO_API_TOKEN=\"yourToken\"" >> ~/.profile - -sudo apt-get update -y -sudo apt-get upgrade -y -sudo apt-get install -y jq unzip python-pip software-properties-common make - -# get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz -tar -xvf go1.10.linux-amd64.tar.gz - -## move binary and add to path -mv go /usr/local -echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile - -## create the goApps directory, set GOPATH, and put it on PATH -mkdir goApps -echo "export GOPATH=/root/goApps" >> ~/.profile -echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile - -source ~/.profile - -## get the 
code and move into repo -REPO=github.com/tendermint/tendermint -go get $REPO -cd $GOPATH/src/$REPO - -## build -git checkout zach/ansible -make get_tools -make get_vendor_deps -make build - -# generate an ssh key -ssh-keygen -f $HOME/.ssh/id_rsa -t rsa -N '' -echo "export SSH_KEY_FILE=\"\$HOME/.ssh/id_rsa.pub\"" >> ~/.profile -source ~/.profile - -# install terraform -wget https://releases.hashicorp.com/terraform/0.11.7/terraform_0.11.7_linux_amd64.zip -unzip terraform_0.11.7_linux_amd64.zip -d /usr/bin/ - -# install ansible -sudo apt-get update -y -sudo apt-add-repository ppa:ansible/ansible -y -sudo apt-get update -y -sudo apt-get install ansible -y - -# required by ansible -pip install dopy - -# the next two commands are directory sensitive -cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/terraform - -terraform init -terraform apply -var DO_API_TOKEN="$DO_API_TOKEN" -var SSH_KEY_FILE="$SSH_KEY_FILE" -auto-approve - -# let the droplets boot -sleep 60 - -# get the IPs -ip0=`terraform output -json public_ips | jq '.value[0]'` -ip1=`terraform output -json public_ips | jq '.value[1]'` -ip2=`terraform output -json public_ips | jq '.value[2]'` -ip3=`terraform output -json public_ips | jq '.value[3]'` - -# to remove quotes -strip() { - opt=$1 - temp="${opt%\"}" - temp="${temp#\"}" - echo $temp -} - -ip0=$(strip $ip0) -ip1=$(strip $ip1) -ip2=$(strip $ip2) -ip3=$(strip $ip3) - -# all the ansible commands are also directory specific -cd $GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible - -ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml -ansible-playbook -i inventory/digital_ocean.py -l sentrynet config.yml -e BINARY=$GOPATH/src/github.com/tendermint/tendermint/build/tendermint -e CONFIGDIR=$GOPATH/src/github.com/tendermint/tendermint/docs/examples - -sleep 10 - -# get each node's ID, then populate the ansible file -id0=`curl $ip0:26657/status | jq .result.node_info.id` -id1=`curl $ip1:26657/status | jq .result.node_info.id` -id2=`curl $ip2:26657/status | jq .result.node_info.id` -id3=`curl $ip3:26657/status | jq .result.node_info.id` - -id0=$(strip $id0) -id1=$(strip $id1) -id2=$(strip $id2) -id3=$(strip $id3) - -# remove the file we'll re-write with the new info -old_ansible_file=$GOPATH/src/github.com/tendermint/tendermint/networks/remote/ansible/roles/install/templates/systemd.service.j2 -rm $old_ansible_file - -# need to populate the `--p2p.persistent_peers` flag -echo "[Unit] -Description={{service}} -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User={{service}} -Group={{service}} -PermissionsStartOnly=true -ExecStart=/usr/bin/tendermint node --proxy_app=kvstore --p2p.persistent_peers=$id0@$ip0:26656,$id1@$ip1:26656,$id2@$ip2:26656,$id3@$ip3:26656 -ExecReload=/bin/kill -HUP \$MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target -" >> $old_ansible_file - -# now, we can re-run the install command -ansible-playbook -i inventory/digital_ocean.py -l sentrynet install.yml - -# and finally restart it all -ansible-playbook -i inventory/digital_ocean.py -l sentrynet restart.yml - -echo "congratulations, your testnet is now running :)" diff --git a/networks/remote/terraform/.gitignore b/networks/remote/terraform/.gitignore deleted file mode 100644 index 0cc2d499..00000000 --- a/networks/remote/terraform/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -.terraform -terraform.tfstate -terraform.tfstate.backup -terraform.tfstate.d diff --git a/networks/remote/terraform/cluster/main.tf
b/networks/remote/terraform/cluster/main.tf deleted file mode 100644 index 98ab37ce..00000000 --- a/networks/remote/terraform/cluster/main.tf +++ /dev/null @@ -1,28 +0,0 @@ -resource "digitalocean_tag" "cluster" { - name = "${var.name}" -} - -resource "digitalocean_ssh_key" "cluster" { - name = "${var.name}" - public_key = "${file(var.ssh_key)}" -} - -resource "digitalocean_droplet" "cluster" { - name = "${var.name}-node${count.index}" - image = "centos-7-x64" - size = "${var.instance_size}" - region = "${element(var.regions, count.index)}" - ssh_keys = ["${digitalocean_ssh_key.cluster.id}"] - count = "${var.servers}" - tags = ["${digitalocean_tag.cluster.id}"] - - lifecycle = { - prevent_destroy = false - } - - connection { - timeout = "30s" - } - -} - diff --git a/networks/remote/terraform/cluster/outputs.tf b/networks/remote/terraform/cluster/outputs.tf deleted file mode 100644 index 78291b6a..00000000 --- a/networks/remote/terraform/cluster/outputs.tf +++ /dev/null @@ -1,15 +0,0 @@ -// The cluster name -output "name" { - value = "${var.name}" -} - -// The list of cluster instance IDs -output "instances" { - value = ["${digitalocean_droplet.cluster.*.id}"] -} - -// The list of cluster instance public IPs -output "public_ips" { - value = ["${digitalocean_droplet.cluster.*.ipv4_address}"] -} - diff --git a/networks/remote/terraform/cluster/variables.tf b/networks/remote/terraform/cluster/variables.tf deleted file mode 100644 index 1b6a7007..00000000 --- a/networks/remote/terraform/cluster/variables.tf +++ /dev/null @@ -1,25 +0,0 @@ -variable "name" { - description = "The cluster name, e.g. cdn" -} - -variable "regions" { - description = "Regions to launch in" - type = "list" - default = ["AMS3", "FRA1", "LON1", "NYC3", "SFO2", "SGP1", "TOR1"] -} - -variable "ssh_key" { - description = "SSH key filename to copy to the nodes" - type = "string" -} - -variable "instance_size" { - description = "The instance size to use" - default = "2gb" -} - -variable "servers" { - description = "Desired instance count" - default = 4 -} - diff --git a/networks/remote/terraform/main.tf b/networks/remote/terraform/main.tf deleted file mode 100644 index a768ee13..00000000 --- a/networks/remote/terraform/main.tf +++ /dev/null @@ -1,37 +0,0 @@ -# Terraform Configuration - -variable "DO_API_TOKEN" { - description = "DigitalOcean Access Token" -} - -variable "TESTNET_NAME" { - description = "Name of the testnet" - default = "sentrynet" -} - -variable "SSH_KEY_FILE" { - description = "SSH public key file to be used on the nodes" - type = "string" -} - -variable "SERVERS" { - description = "Number of nodes in testnet" - default = "4" -} - -provider "digitalocean" { - token = "${var.DO_API_TOKEN}" -} - -module "cluster" { - source = "./cluster" - name = "${var.TESTNET_NAME}" - ssh_key = "${var.SSH_KEY_FILE}" - servers = "${var.SERVERS}" -} - - -output "public_ips" { - value = "${module.cluster.public_ips}" -} - diff --git a/node/id.go b/node/id.go deleted file mode 100644 index fa391f94..00000000 --- a/node/id.go +++ /dev/null @@ -1,35 +0,0 @@ -package node - -import ( - "time" - - "github.com/tendermint/go-crypto" -) - -type NodeID struct { - Name string - PubKey crypto.PubKey -} - -type PrivNodeID struct { - NodeID - PrivKey crypto.PrivKey -} - -type NodeGreeting struct { - NodeID - Version string - ChainID string - Message string - Time time.Time -} - -type SignedNodeGreeting struct { - NodeGreeting - Signature crypto.Signature -} - -func (pnid *PrivNodeID) SignGreeting() *SignedNodeGreeting { - //greeting :=
NodeGreeting{} - return nil -} diff --git a/node/node.go b/node/node.go deleted file mode 100644 index dc79cff7..00000000 --- a/node/node.go +++ /dev/null @@ -1,730 +0,0 @@ -package node - -import ( - "bytes" - "context" - "errors" - "fmt" - "net" - "net/http" - - "github.com/prometheus/client_golang/prometheus/promhttp" - - abci "github.com/tendermint/abci/types" - amino "github.com/tendermint/go-amino" - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - bc "github.com/tendermint/tendermint/blockchain" - cfg "github.com/tendermint/tendermint/config" - cs "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/evidence" - mempl "github.com/tendermint/tendermint/mempool" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/p2p/pex" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - rpccore "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - grpccore "github.com/tendermint/tendermint/rpc/grpc" - rpc "github.com/tendermint/tendermint/rpc/lib" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/state/txindex/kv" - "github.com/tendermint/tendermint/state/txindex/null" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" - - _ "net/http/pprof" -) - -//------------------------------------------------------------------------------ - -// DBContext specifies config information for loading a new DB. -type DBContext struct { - ID string - Config *cfg.Config -} - -// DBProvider takes a DBContext and returns an instantiated DB. -type DBProvider func(*DBContext) (dbm.DB, error) - -// DefaultDBProvider returns a database using the DBBackend and DBDir -// specified in the ctx.Config. -func DefaultDBProvider(ctx *DBContext) (dbm.DB, error) { - dbType := dbm.DBBackendType(ctx.Config.DBBackend) - return dbm.NewDB(ctx.ID, dbType, ctx.Config.DBDir()), nil -} - -// GenesisDocProvider returns a GenesisDoc. -// It allows the GenesisDoc to be pulled from sources other than the -// filesystem, for instance from a distributed key-value store cluster. -type GenesisDocProvider func() (*types.GenesisDoc, error) - -// DefaultGenesisDocProviderFunc returns a GenesisDocProvider that loads -// the GenesisDoc from the config.GenesisFile() on the filesystem. -func DefaultGenesisDocProviderFunc(config *cfg.Config) GenesisDocProvider { - return func() (*types.GenesisDoc, error) { - return types.GenesisDocFromFile(config.GenesisFile()) - } -} - -// NodeProvider takes a config and a logger and returns a ready to go Node. -type NodeProvider func(*cfg.Config, log.Logger) (*Node, error) - -// DefaultNewNode returns a Tendermint node with default settings for the -// PrivValidator, ClientCreator, GenesisDoc, and DBProvider. -// It implements NodeProvider. -func DefaultNewNode(config *cfg.Config, logger log.Logger) (*Node, error) { - return NewNode(config, - privval.LoadOrGenFilePV(config.PrivValidatorFile()), - proxy.DefaultClientCreator(config.ProxyApp, config.ABCI, config.DBDir()), - DefaultGenesisDocProviderFunc(config), - DefaultDBProvider, - DefaultMetricsProvider, - logger, - ) -} - -// MetricsProvider returns a consensus, p2p and mempool Metrics. 
-type MetricsProvider func() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) - -// DefaultMetricsProvider returns consensus, p2p and mempool Metrics built -// using the Prometheus client library. -func DefaultMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { - return cs.PrometheusMetrics(), p2p.PrometheusMetrics(), mempl.PrometheusMetrics() -} - -// NopMetricsProvider returns consensus, p2p and mempool Metrics as no-op. -func NopMetricsProvider() (*cs.Metrics, *p2p.Metrics, *mempl.Metrics) { - return cs.NopMetrics(), p2p.NopMetrics(), mempl.NopMetrics() -} - -//------------------------------------------------------------------------------ - -// Node is the highest level interface to a full Tendermint node. -// It includes all configuration information and running services. -type Node struct { - cmn.BaseService - - // config - config *cfg.Config - genesisDoc *types.GenesisDoc // initial validator set - privValidator types.PrivValidator // local node's validator key - - // network - sw *p2p.Switch // p2p connections - addrBook pex.AddrBook // known peers - - // services - eventBus *types.EventBus // pub/sub for services - stateDB dbm.DB - blockStore *bc.BlockStore // store the blockchain to disk - bcReactor *bc.BlockchainReactor // for fast-syncing - mempoolReactor *mempl.MempoolReactor // for gossiping transactions - consensusState *cs.ConsensusState // latest consensus state - consensusReactor *cs.ConsensusReactor // for participating in the consensus - evidencePool *evidence.EvidencePool // tracking evidence - proxyApp proxy.AppConns // connection to the application - rpcListeners []net.Listener // rpc servers - txIndexer txindex.TxIndexer - indexerService *txindex.IndexerService - prometheusSrv *http.Server -} - -// NewNode returns a new, ready to go, Tendermint Node. -func NewNode(config *cfg.Config, - privValidator types.PrivValidator, - clientCreator proxy.ClientCreator, - genesisDocProvider GenesisDocProvider, - dbProvider DBProvider, - metricsProvider MetricsProvider, - logger log.Logger) (*Node, error) { - - // Get BlockStore - blockStoreDB, err := dbProvider(&DBContext{"blockstore", config}) - if err != nil { - return nil, err - } - blockStore := bc.NewBlockStore(blockStoreDB) - - // Get State - stateDB, err := dbProvider(&DBContext{"state", config}) - if err != nil { - return nil, err - } - - // Get genesis doc - // TODO: move to state package? - genDoc, err := loadGenesisDoc(stateDB) - if err != nil { - genDoc, err = genesisDocProvider() - if err != nil { - return nil, err - } - // save genesis doc to prevent a certain class of user errors (e.g. when it - // was changed, accidentally or not). Also good for audit trail.
- saveGenesisDoc(stateDB, genDoc) - } - - state, err := sm.LoadStateFromDBOrGenesisDoc(stateDB, genDoc) - if err != nil { - return nil, err - } - - // Create the proxyApp, which manages connections (consensus, mempool, query) - // and sync tendermint and the app by performing a handshake - // and replaying any necessary blocks - consensusLogger := logger.With("module", "consensus") - handshaker := cs.NewHandshaker(stateDB, state, blockStore, genDoc) - handshaker.SetLogger(consensusLogger) - proxyApp := proxy.NewAppConns(clientCreator, handshaker) - proxyApp.SetLogger(logger.With("module", "proxy")) - if err := proxyApp.Start(); err != nil { - return nil, fmt.Errorf("Error starting proxy app connections: %v", err) - } - - // reload the state (it may have been updated by the handshake) - state = sm.LoadState(stateDB) - - // If an address is provided, listen on the socket for a - // connection from an external signing process. - if config.PrivValidatorListenAddr != "" { - var ( - // TODO: persist this key so external signer - // can actually authenticate us - privKey = crypto.GenPrivKeyEd25519() - pvsc = privval.NewSocketPV( - logger.With("module", "privval"), - config.PrivValidatorListenAddr, - privKey, - ) - ) - - if err := pvsc.Start(); err != nil { - return nil, fmt.Errorf("Error starting private validator client: %v", err) - } - - privValidator = pvsc - } - - // Decide whether to fast-sync or not - // We don't fast-sync when the only validator is us. - fastSync := config.FastSync - if state.Validators.Size() == 1 { - addr, _ := state.Validators.GetByIndex(0) - if bytes.Equal(privValidator.GetAddress(), addr) { - fastSync = false - } - } - - // Log whether this node is a validator or an observer - if state.Validators.HasAddress(privValidator.GetAddress()) { - consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey()) - } else { - consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey()) - } - - // metrics - var ( - csMetrics *cs.Metrics - p2pMetrics *p2p.Metrics - memplMetrics *mempl.Metrics - ) - if config.Instrumentation.Prometheus { - csMetrics, p2pMetrics, memplMetrics = metricsProvider() - } else { - csMetrics, p2pMetrics, memplMetrics = NopMetricsProvider() - } - - // Make MempoolReactor - mempoolLogger := logger.With("module", "mempool") - mempool := mempl.NewMempool( - config.Mempool, - proxyApp.Mempool(), - state.LastBlockHeight, - mempl.WithMetrics(memplMetrics), - ) - mempool.SetLogger(mempoolLogger) - mempool.InitWAL() // no need to have the mempool wal during tests - mempoolReactor := mempl.NewMempoolReactor(config.Mempool, mempool) - mempoolReactor.SetLogger(mempoolLogger) - - if config.Consensus.WaitForTxs() { - mempool.EnableTxsAvailable() - } - - // Make Evidence Reactor - evidenceDB, err := dbProvider(&DBContext{"evidence", config}) - if err != nil { - return nil, err - } - evidenceLogger := logger.With("module", "evidence") - evidenceStore := evidence.NewEvidenceStore(evidenceDB) - evidencePool := evidence.NewEvidencePool(stateDB, evidenceStore) - evidencePool.SetLogger(evidenceLogger) - evidenceReactor := evidence.NewEvidenceReactor(evidencePool) - evidenceReactor.SetLogger(evidenceLogger) - - blockExecLogger := logger.With("module", "state") - // make block executor for consensus and blockchain reactors to execute blocks - blockExec := sm.NewBlockExecutor(stateDB, blockExecLogger, proxyApp.Consensus(), mempool, evidencePool) - - // Make 
BlockchainReactor
-	bcReactor := bc.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync)
-	bcReactor.SetLogger(logger.With("module", "blockchain"))
-
-	// Make ConsensusReactor
-	consensusState := cs.NewConsensusState(
-		config.Consensus,
-		state.Copy(),
-		blockExec,
-		blockStore,
-		mempool,
-		evidencePool,
-		cs.WithMetrics(csMetrics),
-	)
-	consensusState.SetLogger(consensusLogger)
-	if privValidator != nil {
-		consensusState.SetPrivValidator(privValidator)
-	}
-	consensusReactor := cs.NewConsensusReactor(consensusState, fastSync)
-	consensusReactor.SetLogger(consensusLogger)
-
-	p2pLogger := logger.With("module", "p2p")
-
-	sw := p2p.NewSwitch(config.P2P, p2p.WithMetrics(p2pMetrics))
-	sw.SetLogger(p2pLogger)
-	sw.AddReactor("MEMPOOL", mempoolReactor)
-	sw.AddReactor("BLOCKCHAIN", bcReactor)
-	sw.AddReactor("CONSENSUS", consensusReactor)
-	sw.AddReactor("EVIDENCE", evidenceReactor)
-
-	// Optionally, start the pex reactor
-	//
-	// TODO:
-	//
-	// We need to set Seeds and PersistentPeers on the switch,
-	// since it needs to be able to use these (and their DNS names)
-	// even if the PEX is off. We can include the DNS name in the NetAddress,
-	// but it would still be nice to have a clear list of the current "PersistentPeers"
-	// somewhere that we can return with net_info.
-	//
-	// If PEX is on, it should handle dialing the seeds. Otherwise the switch does it.
-	// Note we currently use the addrBook regardless, at least for AddOurAddress.
-	addrBook := pex.NewAddrBook(config.P2P.AddrBookFile(), config.P2P.AddrBookStrict)
-	addrBook.SetLogger(p2pLogger.With("book", config.P2P.AddrBookFile()))
-	if config.P2P.PexReactor {
-		// TODO: persistent peers? so we can have their DNS addrs saved
-		pexReactor := pex.NewPEXReactor(addrBook,
-			&pex.PEXReactorConfig{
-				Seeds:          cmn.SplitAndTrim(config.P2P.Seeds, ",", " "),
-				SeedMode:       config.P2P.SeedMode,
-				PrivatePeerIDs: cmn.SplitAndTrim(config.P2P.PrivatePeerIDs, ",", " ")})
-		pexReactor.SetLogger(p2pLogger)
-		sw.AddReactor("PEX", pexReactor)
-	}
-
-	sw.SetAddrBook(addrBook)
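When peer filtering is enabled, the switch defers admission control to the ABCI application: each new peer triggers a Query on the paths registered below, and any non-OK response code rejects the peer. A sketch of the application's side of the contract (`MyApp` and `isBanned` are hypothetical names, shown only to illustrate the path format):

	// Inside the ABCI application's Query handler. Tendermint sends
	// paths like /p2p/filter/addr/<ip:port> and /p2p/filter/id/<node-id>;
	// returning a non-zero code rejects the peer.
	func (app *MyApp) Query(req abci.RequestQuery) abci.ResponseQuery {
		if strings.HasPrefix(req.Path, "/p2p/filter/") {
			if app.isBanned(req.Path) {
				return abci.ResponseQuery{Code: 1, Log: "peer rejected"}
			}
			return abci.ResponseQuery{Code: abci.CodeTypeOK}
		}
		// ... handle regular queries ...
		return abci.ResponseQuery{}
	}

-
-	// Filter peers by addr or pubkey with an ABCI query.
-	// If the query return code is OK, add peer.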
- // XXX: Query format subject to change - if config.FilterPeers { - // NOTE: addr is ip:port - sw.SetAddrFilter(func(addr net.Addr) error { - resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/addr/%s", addr.String())}) - if err != nil { - return err - } - if resQuery.IsErr() { - return fmt.Errorf("Error querying abci app: %v", resQuery) - } - return nil - }) - sw.SetIDFilter(func(id p2p.ID) error { - resQuery, err := proxyApp.Query().QuerySync(abci.RequestQuery{Path: cmn.Fmt("/p2p/filter/id/%s", id)}) - if err != nil { - return err - } - if resQuery.IsErr() { - return fmt.Errorf("Error querying abci app: %v", resQuery) - } - return nil - }) - } - - eventBus := types.NewEventBus() - eventBus.SetLogger(logger.With("module", "events")) - - // services which will be publishing and/or subscribing for messages (events) - // consensusReactor will set it on consensusState and blockExecutor - consensusReactor.SetEventBus(eventBus) - - // Transaction indexing - var txIndexer txindex.TxIndexer - switch config.TxIndex.Indexer { - case "kv": - store, err := dbProvider(&DBContext{"tx_index", config}) - if err != nil { - return nil, err - } - if config.TxIndex.IndexTags != "" { - txIndexer = kv.NewTxIndex(store, kv.IndexTags(cmn.SplitAndTrim(config.TxIndex.IndexTags, ",", " "))) - } else if config.TxIndex.IndexAllTags { - txIndexer = kv.NewTxIndex(store, kv.IndexAllTags()) - } else { - txIndexer = kv.NewTxIndex(store) - } - default: - txIndexer = &null.TxIndex{} - } - - indexerService := txindex.NewIndexerService(txIndexer, eventBus) - indexerService.SetLogger(logger.With("module", "txindex")) - - // run the profile server - profileHost := config.ProfListenAddress - if profileHost != "" { - go func() { - logger.Error("Profile server", "err", http.ListenAndServe(profileHost, nil)) - }() - } - - node := &Node{ - config: config, - genesisDoc: genDoc, - privValidator: privValidator, - - sw: sw, - addrBook: addrBook, - - stateDB: stateDB, - blockStore: blockStore, - bcReactor: bcReactor, - mempoolReactor: mempoolReactor, - consensusState: consensusState, - consensusReactor: consensusReactor, - evidencePool: evidencePool, - proxyApp: proxyApp, - txIndexer: txIndexer, - indexerService: indexerService, - eventBus: eventBus, - } - node.BaseService = *cmn.NewBaseService(logger, "Node", node) - return node, nil -} - -// OnStart starts the Node. It implements cmn.Service. -func (n *Node) OnStart() error { - err := n.eventBus.Start() - if err != nil { - return err - } - - // Create & add listener - protocol, address := cmn.ProtocolAndAddress(n.config.P2P.ListenAddress) - l := p2p.NewDefaultListener(protocol, address, n.config.P2P.SkipUPNP, n.Logger.With("module", "p2p")) - n.sw.AddListener(l) - - // Generate node PrivKey - // TODO: pass in like privValidator - nodeKey, err := p2p.LoadOrGenNodeKey(n.config.NodeKeyFile()) - if err != nil { - return err - } - n.Logger.Info("P2P Node ID", "ID", nodeKey.ID(), "file", n.config.NodeKeyFile()) - - nodeInfo := n.makeNodeInfo(nodeKey.ID()) - n.sw.SetNodeInfo(nodeInfo) - n.sw.SetNodeKey(nodeKey) - - // Add ourselves to addrbook to prevent dialing ourselves - n.addrBook.AddOurAddress(nodeInfo.NetAddress()) - - // Start the RPC server before the P2P server - // so we can eg. 
receive txs for the first block - if n.config.RPC.ListenAddress != "" { - listeners, err := n.startRPC() - if err != nil { - return err - } - n.rpcListeners = listeners - } - - if n.config.Instrumentation.Prometheus { - n.prometheusSrv = n.startPrometheusServer(n.config.Instrumentation.PrometheusListenAddr) - } - - // Start the switch (the P2P server). - err = n.sw.Start() - if err != nil { - return err - } - - // Always connect to persistent peers - if n.config.P2P.PersistentPeers != "" { - err = n.sw.DialPeersAsync(n.addrBook, cmn.SplitAndTrim(n.config.P2P.PersistentPeers, ",", " "), true) - if err != nil { - return err - } - } - - // start tx indexer - return n.indexerService.Start() -} - -// OnStop stops the Node. It implements cmn.Service. -func (n *Node) OnStop() { - n.BaseService.OnStop() - - n.Logger.Info("Stopping Node") - // TODO: gracefully disconnect from peers. - n.sw.Stop() - - for _, l := range n.rpcListeners { - n.Logger.Info("Closing rpc listener", "listener", l) - if err := l.Close(); err != nil { - n.Logger.Error("Error closing listener", "listener", l, "err", err) - } - } - - n.eventBus.Stop() - n.indexerService.Stop() - - if pvsc, ok := n.privValidator.(*privval.SocketPV); ok { - if err := pvsc.Stop(); err != nil { - n.Logger.Error("Error stopping priv validator socket client", "err", err) - } - } - - if n.prometheusSrv != nil { - if err := n.prometheusSrv.Shutdown(context.Background()); err != nil { - // Error from closing listeners, or context timeout: - n.Logger.Error("Prometheus HTTP server Shutdown", "err", err) - } - } -} - -// RunForever waits for an interrupt signal and stops the node. -func (n *Node) RunForever() { - // Sleep forever and then... - cmn.TrapSignal(func() { - n.Stop() - }) -} - -// AddListener adds a listener to accept inbound peer connections. -// It should be called before starting the Node. 
-// The first listener is the primary listener (in NodeInfo)
-func (n *Node) AddListener(l p2p.Listener) {
-	n.sw.AddListener(l)
-}
-
-// ConfigureRPC sets all variables in rpccore so they will serve
-// RPC calls from this node.
-func (n *Node) ConfigureRPC() {
-	rpccore.SetStateDB(n.stateDB)
-	rpccore.SetBlockStore(n.blockStore)
-	rpccore.SetConsensusState(n.consensusState)
-	rpccore.SetMempool(n.mempoolReactor.Mempool)
-	rpccore.SetEvidencePool(n.evidencePool)
-	rpccore.SetSwitch(n.sw)
-	rpccore.SetPubKey(n.privValidator.GetPubKey())
-	rpccore.SetGenesisDoc(n.genesisDoc)
-	rpccore.SetAddrBook(n.addrBook)
-	rpccore.SetProxyAppQuery(n.proxyApp.Query())
-	rpccore.SetTxIndexer(n.txIndexer)
-	rpccore.SetConsensusReactor(n.consensusReactor)
-	rpccore.SetEventBus(n.eventBus)
-	rpccore.SetLogger(n.Logger.With("module", "rpc"))
-}
-
-func (n *Node) startRPC() ([]net.Listener, error) {
-	n.ConfigureRPC()
-	listenAddrs := cmn.SplitAndTrim(n.config.RPC.ListenAddress, ",", " ")
-	coreCodec := amino.NewCodec()
-	ctypes.RegisterAmino(coreCodec)
-
-	if n.config.RPC.Unsafe {
-		rpccore.AddUnsafeRoutes()
-	}
-
-	// we may expose the rpc over both a unix and tcp socket
-	listeners := make([]net.Listener, len(listenAddrs))
-	for i, listenAddr := range listenAddrs {
-		mux := http.NewServeMux()
-		rpcLogger := n.Logger.With("module", "rpc-server")
-		wm := rpcserver.NewWebsocketManager(rpccore.Routes, coreCodec, rpcserver.EventSubscriber(n.eventBus))
-		wm.SetLogger(rpcLogger.With("protocol", "websocket"))
-		mux.HandleFunc("/websocket", wm.WebsocketHandler)
-		rpcserver.RegisterRPCFuncs(mux, rpccore.Routes, coreCodec, rpcLogger)
-		listener, err := rpcserver.StartHTTPServer(listenAddr, mux, rpcLogger)
-		if err != nil {
-			return nil, err
-		}
-		listeners[i] = listener
-	}
-
-	// we expose a simplified api over grpc for convenience to app devs
-	grpcListenAddr := n.config.RPC.GRPCListenAddress
-	if grpcListenAddr != "" {
-		listener, err := grpccore.StartGRPCServer(grpcListenAddr)
-		if err != nil {
-			return nil, err
-		}
-		listeners = append(listeners, listener)
-	}
-
-	return listeners, nil
-}
-
-// startPrometheusServer starts a Prometheus HTTP server, listening for metrics
-// collectors on addr.
-func (n *Node) startPrometheusServer(addr string) *http.Server {
-	srv := &http.Server{
-		Addr:    addr,
-		Handler: promhttp.Handler(),
-	}
-	go func() {
-		if err := srv.ListenAndServe(); err != http.ErrServerClosed {
-			// Error starting or closing listener:
-			n.Logger.Error("Prometheus HTTP server ListenAndServe", "err", err)
-		}
-	}()
-	return srv
-}
-
-// Switch returns the Node's Switch.
-func (n *Node) Switch() *p2p.Switch {
-	return n.sw
-}
-
-// BlockStore returns the Node's BlockStore.
-func (n *Node) BlockStore() *bc.BlockStore {
-	return n.blockStore
-}
-
-// ConsensusState returns the Node's ConsensusState.
-func (n *Node) ConsensusState() *cs.ConsensusState {
-	return n.consensusState
-}
-
-// ConsensusReactor returns the Node's ConsensusReactor.
-func (n *Node) ConsensusReactor() *cs.ConsensusReactor {
-	return n.consensusReactor
-}
-
-// MempoolReactor returns the Node's MempoolReactor.
-func (n *Node) MempoolReactor() *mempl.MempoolReactor {
-	return n.mempoolReactor
-}
-
-// EvidencePool returns the Node's EvidencePool.
-func (n *Node) EvidencePool() *evidence.EvidencePool {
-	return n.evidencePool
-}
-
-// EventBus returns the Node's EventBus.
-func (n *Node) EventBus() *types.EventBus {
-	return n.eventBus
-}
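The event bus is also the integration point for external consumers: anything holding a Node can subscribe to consensus events the same way the RPC and indexer services do. A minimal sketch (a hypothetical helper; the subscriber name and buffer size are illustrative):

	// watchBlocks subscribes to new-block events on the node's event bus,
	// mirroring what the websocket RPC does internally. Sketch only.
	func watchBlocks(ctx context.Context, n *Node) (<-chan interface{}, error) {
		blockCh := make(chan interface{}, 1)
		if err := n.EventBus().Subscribe(ctx, "example-subscriber", types.EventQueryNewBlock, blockCh); err != nil {
			return nil, err
		}
		return blockCh, nil
	}

-
-// PrivValidator returns the Node's PrivValidator.
-// XXX: for convenience only!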
-func (n *Node) PrivValidator() types.PrivValidator { - return n.privValidator -} - -// GenesisDoc returns the Node's GenesisDoc. -func (n *Node) GenesisDoc() *types.GenesisDoc { - return n.genesisDoc -} - -// ProxyApp returns the Node's AppConns, representing its connections to the ABCI application. -func (n *Node) ProxyApp() proxy.AppConns { - return n.proxyApp -} - -func (n *Node) makeNodeInfo(nodeID p2p.ID) p2p.NodeInfo { - txIndexerStatus := "on" - if _, ok := n.txIndexer.(*null.TxIndex); ok { - txIndexerStatus = "off" - } - nodeInfo := p2p.NodeInfo{ - ID: nodeID, - Network: n.genesisDoc.ChainID, - Version: version.Version, - Channels: []byte{ - bc.BlockchainChannel, - cs.StateChannel, cs.DataChannel, cs.VoteChannel, cs.VoteSetBitsChannel, - mempl.MempoolChannel, - evidence.EvidenceChannel, - }, - Moniker: n.config.Moniker, - Other: []string{ - cmn.Fmt("amino_version=%v", amino.Version), - cmn.Fmt("p2p_version=%v", p2p.Version), - cmn.Fmt("consensus_version=%v", cs.Version), - cmn.Fmt("rpc_version=%v/%v", rpc.Version, rpccore.Version), - cmn.Fmt("tx_index=%v", txIndexerStatus), - }, - } - - if n.config.P2P.PexReactor { - nodeInfo.Channels = append(nodeInfo.Channels, pex.PexChannel) - } - - rpcListenAddr := n.config.RPC.ListenAddress - nodeInfo.Other = append(nodeInfo.Other, cmn.Fmt("rpc_addr=%v", rpcListenAddr)) - - if !n.sw.IsListening() { - return nodeInfo - } - - p2pListener := n.sw.Listeners()[0] - p2pHost := p2pListener.ExternalAddress().IP.String() - p2pPort := p2pListener.ExternalAddress().Port - nodeInfo.ListenAddr = cmn.Fmt("%v:%v", p2pHost, p2pPort) - - return nodeInfo -} - -//------------------------------------------------------------------------------ - -// NodeInfo returns the Node's Info from the Switch. -func (n *Node) NodeInfo() p2p.NodeInfo { - return n.sw.NodeInfo() -} - -//------------------------------------------------------------------------------ - -var ( - genesisDocKey = []byte("genesisDoc") -) - -// panics if failed to unmarshal bytes -func loadGenesisDoc(db dbm.DB) (*types.GenesisDoc, error) { - bytes := db.Get(genesisDocKey) - if len(bytes) == 0 { - return nil, errors.New("Genesis doc not found") - } - var genDoc *types.GenesisDoc - err := cdc.UnmarshalJSON(bytes, &genDoc) - if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to load genesis doc due to unmarshaling error: %v (bytes: %X)", err, bytes)) - } - return genDoc, nil -} - -// panics if failed to marshal the given genesis document -func saveGenesisDoc(db dbm.DB, genDoc *types.GenesisDoc) { - bytes, err := cdc.MarshalJSON(genDoc) - if err != nil { - cmn.PanicCrisis(fmt.Sprintf("Failed to save genesis doc due to marshaling error: %v", err)) - } - db.SetSync(genesisDocKey, bytes) -} diff --git a/node/node_test.go b/node/node_test.go deleted file mode 100644 index cdabdbb3..00000000 --- a/node/node_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package node - -import ( - "context" - "testing" - "time" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tmlibs/log" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/types" -) - -func TestNodeStartStop(t *testing.T) { - config := cfg.ResetTestRoot("node_node_test") - - // create & start node - n, err := DefaultNewNode(config, log.TestingLogger()) - assert.NoError(t, err, "expected no err on DefaultNewNode") - err1 := n.Start() - if err1 != nil { - t.Error(err1) - } - t.Logf("Started node %v", n.sw.NodeInfo()) - - // wait for the node to produce a block - blockCh := make(chan interface{}) - err = 
n.EventBus().Subscribe(context.Background(), "node_test", types.EventQueryNewBlock, blockCh) - assert.NoError(t, err) - select { - case <-blockCh: - case <-time.After(10 * time.Second): - t.Fatal("timed out waiting for the node to produce a block") - } - - // stop the node - go func() { - n.Stop() - }() - - select { - case <-n.Quit(): - case <-time.After(5 * time.Second): - t.Fatal("timed out waiting for shutdown") - } -} diff --git a/node/wire.go b/node/wire.go deleted file mode 100644 index a0d7677d..00000000 --- a/node/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package node - -import ( - amino "github.com/tendermint/go-amino" - crypto "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/p2p/README.md b/p2p/README.md deleted file mode 100644 index 819a5056..00000000 --- a/p2p/README.md +++ /dev/null @@ -1,11 +0,0 @@ -# p2p - -The p2p package provides an abstraction around peer-to-peer communication. - -Docs: - -- [Connection](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/connection.md) for details on how connections and multiplexing work -- [Peer](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/peer.md) for details on peer ID, handshakes, and peer exchange -- [Node](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/node.md) for details about different types of nodes and how they should work -- [Pex](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/reactors/pex/pex.md) for details on peer discovery and exchange -- [Config](https://github.com/tendermint/tendermint/blob/master/docs/spec/docs/spec/p2p/config.md) for details on some config option diff --git a/p2p/base_reactor.go b/p2p/base_reactor.go deleted file mode 100644 index 83c8efa4..00000000 --- a/p2p/base_reactor.go +++ /dev/null @@ -1,53 +0,0 @@ -package p2p - -import ( - "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" -) - -type Reactor interface { - cmn.Service // Start, Stop - - // SetSwitch allows setting a switch. - SetSwitch(*Switch) - - // GetChannels returns the list of channel descriptors. - GetChannels() []*conn.ChannelDescriptor - - // AddPeer is called by the switch when a new peer is added. - AddPeer(peer Peer) - - // RemovePeer is called by the switch when the peer is stopped (due to error - // or other reason). - RemovePeer(peer Peer, reason interface{}) - - // Receive is called when msgBytes is received from peer. - // - // NOTE reactor can not keep msgBytes around after Receive completes without - // copying. - // - // CONTRACT: msgBytes are not nil. 
- Receive(chID byte, peer Peer, msgBytes []byte) -} - -//-------------------------------------- - -type BaseReactor struct { - cmn.BaseService // Provides Start, Stop, .Quit - Switch *Switch -} - -func NewBaseReactor(name string, impl Reactor) *BaseReactor { - return &BaseReactor{ - BaseService: *cmn.NewBaseService(nil, name, impl), - Switch: nil, - } -} - -func (br *BaseReactor) SetSwitch(sw *Switch) { - br.Switch = sw -} -func (*BaseReactor) GetChannels() []*conn.ChannelDescriptor { return nil } -func (*BaseReactor) AddPeer(peer Peer) {} -func (*BaseReactor) RemovePeer(peer Peer, reason interface{}) {} -func (*BaseReactor) Receive(chID byte, peer Peer, msgBytes []byte) {} diff --git a/p2p/conn/conn_go110.go b/p2p/conn/conn_go110.go deleted file mode 100644 index 68218810..00000000 --- a/p2p/conn/conn_go110.go +++ /dev/null @@ -1,15 +0,0 @@ -// +build go1.10 - -package conn - -// Go1.10 has a proper net.Conn implementation that -// has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 -// lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 - -import "net" - -func NetPipe() (net.Conn, net.Conn) { - return net.Pipe() -} diff --git a/p2p/conn/conn_notgo110.go b/p2p/conn/conn_notgo110.go deleted file mode 100644 index ed642eb5..00000000 --- a/p2p/conn/conn_notgo110.go +++ /dev/null @@ -1,32 +0,0 @@ -// +build !go1.10 - -package conn - -import ( - "net" - "time" -) - -// Only Go1.10 has a proper net.Conn implementation that -// has the SetDeadline method implemented as per -// https://github.com/golang/go/commit/e2dd8ca946be884bb877e074a21727f1a685a706 -// lest we run into problems like -// https://github.com/tendermint/tendermint/issues/851 -// so for go versions < Go1.10 use our custom net.Conn creator -// that doesn't return an `Unimplemented error` for net.Conn. -// Before https://github.com/tendermint/tendermint/commit/49faa79bdce5663894b3febbf4955fb1d172df04 -// we hadn't cared about errors from SetDeadline so swallow them up anyways. -type pipe struct { - net.Conn -} - -func (p *pipe) SetDeadline(t time.Time) error { - return nil -} - -func NetPipe() (net.Conn, net.Conn) { - p1, p2 := net.Pipe() - return &pipe{p1}, &pipe{p2} -} - -var _ net.Conn = (*pipe)(nil) diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go deleted file mode 100644 index 5c7f19cf..00000000 --- a/p2p/conn/connection.go +++ /dev/null @@ -1,784 +0,0 @@ -package conn - -import ( - "bufio" - "errors" - "fmt" - "io" - "math" - "net" - "reflect" - "sync/atomic" - "time" - - amino "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - flow "github.com/tendermint/tmlibs/flowrate" - "github.com/tendermint/tmlibs/log" -) - -const ( - maxPacketMsgPayloadSizeDefault = 1024 // NOTE: Must be below 16,384 bytes for 14 below. - maxPacketMsgOverheadSize = 14 // NOTE: See connection_test for derivation. 
-
-	numBatchPacketMsgs = 10
-	minReadBufferSize  = 1024
-	minWriteBufferSize = 65536
-	updateStats        = 2 * time.Second
-
-	// some of these defaults are written in the user config
-	// flushThrottle, sendRate, recvRate
-	// TODO: remove values present in config
-	defaultFlushThrottle = 100 * time.Millisecond
-
-	defaultSendQueueCapacity   = 1
-	defaultRecvBufferCapacity  = 4096
-	defaultRecvMessageCapacity = 22020096      // 21MB
-	defaultSendRate            = int64(512000) // 500KB/s
-	defaultRecvRate            = int64(512000) // 500KB/s
-	defaultSendTimeout         = 10 * time.Second
-	defaultPingInterval        = 60 * time.Second
-	defaultPongTimeout         = 45 * time.Second
-)
-
-type receiveCbFunc func(chID byte, msgBytes []byte)
-type errorCbFunc func(interface{})
-
-/*
-Each peer has one `MConnection` (multiplex connection) instance.
-
-__multiplex__ *noun* a system or signal involving simultaneous transmission of
-several messages along a single channel of communication.
-
-Each `MConnection` handles message transmission on multiple abstract communication
-`Channel`s. Each channel has a globally unique byte id.
-The byte id and the relative priorities of each `Channel` are configured upon
-initialization of the connection.
-
-There are two methods for sending messages:
-	func (m MConnection) Send(chID byte, msgBytes []byte) bool {}
-	func (m MConnection) TrySend(chID byte, msgBytes []byte) bool {}
-
-`Send(chID, msgBytes)` is a blocking call that waits until `msg` is
-successfully queued for the channel with the given id byte `chID`, or until the
-request times out. The message `msg` is serialized using Go-Amino.
-
-`TrySend(chID, msgBytes)` is a nonblocking call that returns false if the
-channel's queue is full.
-
-Inbound message bytes are handled with an onReceive callback function.
-*/
-type MConnection struct {
-	cmn.BaseService
-
-	conn          net.Conn
-	bufConnReader *bufio.Reader
-	bufConnWriter *bufio.Writer
-	sendMonitor   *flow.Monitor
-	recvMonitor   *flow.Monitor
-	send          chan struct{}
-	pong          chan struct{}
-	channels      []*Channel
-	channelsIdx   map[byte]*Channel
-	onReceive     receiveCbFunc
-	onError       errorCbFunc
-	errored       uint32
-	config        MConnConfig
-
-	quit       chan struct{}
-	flushTimer *cmn.ThrottleTimer // flush writes as necessary but throttled.
-	pingTimer  *cmn.RepeatTimer   // send pings periodically
-
-	// close conn if pong is not received in pongTimeout
-	pongTimer     *time.Timer
-	pongTimeoutCh chan bool // true - timeout, false - peer sent pong
-
-	chStatsTimer *cmn.RepeatTimer // update channel stats periodically
-
-	created time.Time // time of creation
-}
-
-// MConnConfig is an MConnection configuration.
-type MConnConfig struct {
-	SendRate int64 `mapstructure:"send_rate"`
-	RecvRate int64 `mapstructure:"recv_rate"`
-
-	// Maximum payload size
-	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
-
-	// Interval to flush writes (throttled)
-	FlushThrottle time.Duration `mapstructure:"flush_throttle"`
-
-	// Interval to send pings
-	PingInterval time.Duration `mapstructure:"ping_interval"`
-
-	// Maximum wait time for pongs
-	PongTimeout time.Duration `mapstructure:"pong_timeout"`
-}
-
-func (cfg *MConnConfig) maxPacketMsgTotalSize() int {
-	return cfg.MaxPacketMsgPayloadSize + maxPacketMsgOverheadSize
-}
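The one invariant NewMConnectionWithConfig enforces (see below) is `PongTimeout < PingInterval`: otherwise the next ping would keep resetting the pong timer and a dead peer would never be detected. A sketch of deriving a tuned config from the defaults (an illustrative helper; the values are examples, not recommendations):

	// customMConnConfig doubles the rate limits and shortens the keepalive
	// cycle while preserving the PongTimeout < PingInterval invariant.
	// Not part of the original file.
	func customMConnConfig() MConnConfig {
		cfg := DefaultMConnConfig()
		cfg.SendRate = 2 * defaultSendRate // ~1000KB/s
		cfg.RecvRate = 2 * defaultRecvRate
		cfg.PingInterval = 30 * time.Second
		cfg.PongTimeout = 15 * time.Second // must stay below PingInterval
		return cfg
	}

-
-// DefaultMConnConfig returns the default config.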
-func DefaultMConnConfig() MConnConfig {
-	return MConnConfig{
-		SendRate:                defaultSendRate,
-		RecvRate:                defaultRecvRate,
-		MaxPacketMsgPayloadSize: maxPacketMsgPayloadSizeDefault,
-		FlushThrottle:           defaultFlushThrottle,
-		PingInterval:            defaultPingInterval,
-		PongTimeout:             defaultPongTimeout,
-	}
-}
-
-// NewMConnection wraps net.Conn and creates a multiplex connection.
-func NewMConnection(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc) *MConnection {
-	return NewMConnectionWithConfig(
-		conn,
-		chDescs,
-		onReceive,
-		onError,
-		DefaultMConnConfig())
-}
-
-// NewMConnectionWithConfig wraps net.Conn and creates a multiplex connection with a config.
-func NewMConnectionWithConfig(conn net.Conn, chDescs []*ChannelDescriptor, onReceive receiveCbFunc, onError errorCbFunc, config MConnConfig) *MConnection {
-	if config.PongTimeout >= config.PingInterval {
-		panic("pongTimeout must be less than pingInterval (otherwise, next ping will reset pong timer)")
-	}
-
-	mconn := &MConnection{
-		conn:          conn,
-		bufConnReader: bufio.NewReaderSize(conn, minReadBufferSize),
-		bufConnWriter: bufio.NewWriterSize(conn, minWriteBufferSize),
-		sendMonitor:   flow.New(0, 0),
-		recvMonitor:   flow.New(0, 0),
-		send:          make(chan struct{}, 1),
-		pong:          make(chan struct{}, 1),
-		onReceive:     onReceive,
-		onError:       onError,
-		config:        config,
-	}
-
-	// Create channels
-	var channelsIdx = map[byte]*Channel{}
-	var channels = []*Channel{}
-
-	for _, desc := range chDescs {
-		channel := newChannel(mconn, *desc)
-		channelsIdx[channel.desc.ID] = channel
-		channels = append(channels, channel)
-	}
-	mconn.channels = channels
-	mconn.channelsIdx = channelsIdx
-
-	mconn.BaseService = *cmn.NewBaseService(nil, "MConnection", mconn)
-
-	return mconn
-}
-
-func (c *MConnection) SetLogger(l log.Logger) {
-	c.BaseService.SetLogger(l)
-	for _, ch := range c.channels {
-		ch.SetLogger(l)
-	}
-}
-
-// OnStart implements BaseService
-func (c *MConnection) OnStart() error {
-	if err := c.BaseService.OnStart(); err != nil {
-		return err
-	}
-	c.quit = make(chan struct{})
-	c.flushTimer = cmn.NewThrottleTimer("flush", c.config.FlushThrottle)
-	c.pingTimer = cmn.NewRepeatTimer("ping", c.config.PingInterval)
-	c.pongTimeoutCh = make(chan bool, 1)
-	c.chStatsTimer = cmn.NewRepeatTimer("chStats", updateStats)
-	go c.sendRoutine()
-	go c.recvRoutine()
-	return nil
-}
-
-// OnStop implements BaseService
-func (c *MConnection) OnStop() {
-	c.BaseService.OnStop()
-	c.flushTimer.Stop()
-	c.pingTimer.Stop()
-	c.chStatsTimer.Stop()
-	if c.quit != nil {
-		close(c.quit)
-	}
-	c.conn.Close() // nolint: errcheck
-
-	// We can't close pong safely here because
-	// recvRoutine may write to it after we've stopped.
-	// Though it doesn't need to get closed at all,
-	// we close it @ recvRoutine.
-}
-
-func (c *MConnection) String() string {
-	return fmt.Sprintf("MConn{%v}", c.conn.RemoteAddr())
-}
-
-func (c *MConnection) flush() {
-	c.Logger.Debug("Flush", "conn", c)
-	err := c.bufConnWriter.Flush()
-	if err != nil {
-		c.Logger.Error("MConnection flush failed", "err", err)
-	}
-}
-
-// Catch panics, usually caused by remote disconnects.
-func (c *MConnection) _recover() {
-	if r := recover(); r != nil {
-		err := cmn.ErrorWrap(r, "recovered panic in MConnection")
-		c.stopForError(err)
-	}
-}
-
-func (c *MConnection) stopForError(r interface{}) {
-	c.Stop()
-	if atomic.CompareAndSwapUint32(&c.errored, 0, 1) {
-		if c.onError != nil {
-			c.onError(r)
-		}
-	}
-}
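Callers choose between the two send paths below based on whether they can tolerate blocking: Send waits up to defaultSendTimeout for queue space, while TrySend gives up immediately. A sketch of the non-blocking pattern (a hypothetical helper; the gossip loops in the reactors use essentially this shape):

	// trySendOrSkip forwards a message without ever blocking the caller:
	// if the channel queue is full the message is skipped, to be retried
	// on the caller's next pass. Illustrative helper only.
	func trySendOrSkip(c *MConnection, chID byte, msgBytes []byte) bool {
		if !c.CanSend(chID) { // heuristic; the queue can still fill up
			return false
		}
		return c.TrySend(chID, msgBytes)
	}

-
-// Queues a message to be sent to channel.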
-func (c *MConnection) Send(chID byte, msgBytes []byte) bool { - if !c.IsRunning() { - return false - } - - c.Logger.Debug("Send", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) - - // Send message to channel. - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) - return false - } - - success := channel.sendBytes(msgBytes) - if success { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } else { - c.Logger.Error("Send failed", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) - } - return success -} - -// Queues a message to be sent to channel. -// Nonblocking, returns true if successful. -func (c *MConnection) TrySend(chID byte, msgBytes []byte) bool { - if !c.IsRunning() { - return false - } - - c.Logger.Debug("TrySend", "channel", chID, "conn", c, "msgBytes", fmt.Sprintf("%X", msgBytes)) - - // Send message to channel. - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(cmn.Fmt("Cannot send bytes, unknown channel %X", chID)) - return false - } - - ok = channel.trySendBytes(msgBytes) - if ok { - // Wake up sendRoutine if necessary - select { - case c.send <- struct{}{}: - default: - } - } - - return ok -} - -// CanSend returns true if you can send more data onto the chID, false -// otherwise. Use only as a heuristic. -func (c *MConnection) CanSend(chID byte) bool { - if !c.IsRunning() { - return false - } - - channel, ok := c.channelsIdx[chID] - if !ok { - c.Logger.Error(cmn.Fmt("Unknown channel %X", chID)) - return false - } - return channel.canSend() -} - -// sendRoutine polls for packets to send from channels. -func (c *MConnection) sendRoutine() { - defer c._recover() - -FOR_LOOP: - for { - var _n int64 - var err error - SELECTION: - select { - case <-c.flushTimer.Ch: - // NOTE: flushTimer.Set() must be called every time - // something is written to .bufConnWriter. - c.flush() - case <-c.chStatsTimer.Chan(): - for _, channel := range c.channels { - channel.updateStats() - } - case <-c.pingTimer.Chan(): - c.Logger.Debug("Send Ping") - _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPing{}) - if err != nil { - break SELECTION - } - c.sendMonitor.Update(int(_n)) - c.Logger.Debug("Starting pong timer", "dur", c.config.PongTimeout) - c.pongTimer = time.AfterFunc(c.config.PongTimeout, func() { - select { - case c.pongTimeoutCh <- true: - default: - } - }) - c.flush() - case timeout := <-c.pongTimeoutCh: - if timeout { - c.Logger.Debug("Pong timeout") - err = errors.New("pong timeout") - } else { - c.stopPongTimer() - } - case <-c.pong: - c.Logger.Debug("Send Pong") - _n, err = cdc.MarshalBinaryWriter(c.bufConnWriter, PacketPong{}) - if err != nil { - break SELECTION - } - c.sendMonitor.Update(int(_n)) - c.flush() - case <-c.quit: - break FOR_LOOP - case <-c.send: - // Send some PacketMsgs - eof := c.sendSomePacketMsgs() - if !eof { - // Keep sendRoutine awake. - select { - case c.send <- struct{}{}: - default: - } - } - } - - if !c.IsRunning() { - break FOR_LOOP - } - if err != nil { - c.Logger.Error("Connection failed @ sendRoutine", "conn", c, "err", err) - c.stopForError(err) - break FOR_LOOP - } - } - - // Cleanup - c.stopPongTimer() -} - -// Returns true if messages from channels were exhausted. -// Blocks in accordance to .sendMonitor throttling. -func (c *MConnection) sendSomePacketMsgs() bool { - // Block until .sendMonitor says we can write. 
- // Once we're ready we send more than we asked for, - // but amortized it should even out. - c.sendMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.SendRate), true) - - // Now send some PacketMsgs. - for i := 0; i < numBatchPacketMsgs; i++ { - if c.sendPacketMsg() { - return true - } - } - return false -} - -// Returns true if messages from channels were exhausted. -func (c *MConnection) sendPacketMsg() bool { - // Choose a channel to create a PacketMsg from. - // The chosen channel will be the one whose recentlySent/priority is the least. - var leastRatio float32 = math.MaxFloat32 - var leastChannel *Channel - for _, channel := range c.channels { - // If nothing to send, skip this channel - if !channel.isSendPending() { - continue - } - // Get ratio, and keep track of lowest ratio. - ratio := float32(channel.recentlySent) / float32(channel.desc.Priority) - if ratio < leastRatio { - leastRatio = ratio - leastChannel = channel - } - } - - // Nothing to send? - if leastChannel == nil { - return true - } - // c.Logger.Info("Found a msgPacket to send") - - // Make & send a PacketMsg from this channel - _n, err := leastChannel.writePacketMsgTo(c.bufConnWriter) - if err != nil { - c.Logger.Error("Failed to write PacketMsg", "err", err) - c.stopForError(err) - return true - } - c.sendMonitor.Update(int(_n)) - c.flushTimer.Set() - return false -} - -// recvRoutine reads PacketMsgs and reconstructs the message using the channels' "recving" buffer. -// After a whole message has been assembled, it's pushed to onReceive(). -// Blocks depending on how the connection is throttled. -// Otherwise, it never blocks. -func (c *MConnection) recvRoutine() { - defer c._recover() - -FOR_LOOP: - for { - // Block until .recvMonitor says we can read. - c.recvMonitor.Limit(c.config.maxPacketMsgTotalSize(), atomic.LoadInt64(&c.config.RecvRate), true) - - // Peek into bufConnReader for debugging - /* - if numBytes := c.bufConnReader.Buffered(); numBytes > 0 { - bz, err := c.bufConnReader.Peek(cmn.MinInt(numBytes, 100)) - if err == nil { - // return - } else { - c.Logger.Debug("Error peeking connection buffer", "err", err) - // return nil - } - c.Logger.Info("Peek connection buffer", "numBytes", numBytes, "bz", bz) - } - */ - - // Read packet type - var packet Packet - var _n int64 - var err error - _n, err = cdc.UnmarshalBinaryReader(c.bufConnReader, &packet, int64(c.config.maxPacketMsgTotalSize())) - c.recvMonitor.Update(int(_n)) - if err != nil { - if c.IsRunning() { - c.Logger.Error("Connection failed @ recvRoutine (reading byte)", "conn", c, "err", err) - c.stopForError(err) - } - break FOR_LOOP - } - - // Read more depending on packet type. - switch pkt := packet.(type) { - case PacketPing: - // TODO: prevent abuse, as they cause flush()'s. 
- // https://github.com/tendermint/tendermint/issues/1190 - c.Logger.Debug("Receive Ping") - select { - case c.pong <- struct{}{}: - default: - // never block - } - case PacketPong: - c.Logger.Debug("Receive Pong") - select { - case c.pongTimeoutCh <- false: - default: - // never block - } - case PacketMsg: - channel, ok := c.channelsIdx[pkt.ChannelID] - if !ok || channel == nil { - err := fmt.Errorf("Unknown channel %X", pkt.ChannelID) - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) - break FOR_LOOP - } - - msgBytes, err := channel.recvPacketMsg(pkt) - if err != nil { - if c.IsRunning() { - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) - } - break FOR_LOOP - } - if msgBytes != nil { - c.Logger.Debug("Received bytes", "chID", pkt.ChannelID, "msgBytes", fmt.Sprintf("%X", msgBytes)) - // NOTE: This means the reactor.Receive runs in the same thread as the p2p recv routine - c.onReceive(pkt.ChannelID, msgBytes) - } - default: - err := fmt.Errorf("Unknown message type %v", reflect.TypeOf(packet)) - c.Logger.Error("Connection failed @ recvRoutine", "conn", c, "err", err) - c.stopForError(err) - break FOR_LOOP - } - } - - // Cleanup - close(c.pong) - for range c.pong { - // Drain - } -} - -// not goroutine-safe -func (c *MConnection) stopPongTimer() { - if c.pongTimer != nil { - _ = c.pongTimer.Stop() - c.pongTimer = nil - } -} - -type ConnectionStatus struct { - Duration time.Duration - SendMonitor flow.Status - RecvMonitor flow.Status - Channels []ChannelStatus -} - -type ChannelStatus struct { - ID byte - SendQueueCapacity int - SendQueueSize int - Priority int - RecentlySent int64 -} - -func (c *MConnection) Status() ConnectionStatus { - var status ConnectionStatus - status.Duration = time.Since(c.created) - status.SendMonitor = c.sendMonitor.Status() - status.RecvMonitor = c.recvMonitor.Status() - status.Channels = make([]ChannelStatus, len(c.channels)) - for i, channel := range c.channels { - status.Channels[i] = ChannelStatus{ - ID: channel.desc.ID, - SendQueueCapacity: cap(channel.sendQueue), - SendQueueSize: int(channel.sendQueueSize), // TODO use atomic - Priority: channel.desc.Priority, - RecentlySent: channel.recentlySent, - } - } - return status -} - -//----------------------------------------------------------------------------- - -type ChannelDescriptor struct { - ID byte - Priority int - SendQueueCapacity int - RecvBufferCapacity int - RecvMessageCapacity int -} - -func (chDesc ChannelDescriptor) FillDefaults() (filled ChannelDescriptor) { - if chDesc.SendQueueCapacity == 0 { - chDesc.SendQueueCapacity = defaultSendQueueCapacity - } - if chDesc.RecvBufferCapacity == 0 { - chDesc.RecvBufferCapacity = defaultRecvBufferCapacity - } - if chDesc.RecvMessageCapacity == 0 { - chDesc.RecvMessageCapacity = defaultRecvMessageCapacity - } - filled = chDesc - return -} - -// TODO: lowercase. -// NOTE: not goroutine-safe. -type Channel struct { - conn *MConnection - desc ChannelDescriptor - sendQueue chan []byte - sendQueueSize int32 // atomic. 
-	recving      []byte
-	sending      []byte
-	recentlySent int64 // exponential moving average
-
-	maxPacketMsgPayloadSize int
-
-	Logger log.Logger
-}
-
-func newChannel(conn *MConnection, desc ChannelDescriptor) *Channel {
-	desc = desc.FillDefaults()
-	if desc.Priority <= 0 {
-		cmn.PanicSanity("Channel default priority must be a positive integer")
-	}
-	return &Channel{
-		conn:                    conn,
-		desc:                    desc,
-		sendQueue:               make(chan []byte, desc.SendQueueCapacity),
-		recving:                 make([]byte, 0, desc.RecvBufferCapacity),
-		maxPacketMsgPayloadSize: conn.config.MaxPacketMsgPayloadSize,
-	}
-}
-
-func (ch *Channel) SetLogger(l log.Logger) {
-	ch.Logger = l
-}
-
-// Queues a message to send to this channel.
-// Goroutine-safe
-// Times out (and returns false) after defaultSendTimeout
-func (ch *Channel) sendBytes(bytes []byte) bool {
-	select {
-	case ch.sendQueue <- bytes:
-		atomic.AddInt32(&ch.sendQueueSize, 1)
-		return true
-	case <-time.After(defaultSendTimeout):
-		return false
-	}
-}
-
-// Queues a message to send to this channel.
-// Nonblocking, returns true if successful.
-// Goroutine-safe
-func (ch *Channel) trySendBytes(bytes []byte) bool {
-	select {
-	case ch.sendQueue <- bytes:
-		atomic.AddInt32(&ch.sendQueueSize, 1)
-		return true
-	default:
-		return false
-	}
-}
-
-// Goroutine-safe
-func (ch *Channel) loadSendQueueSize() (size int) {
-	return int(atomic.LoadInt32(&ch.sendQueueSize))
-}
-
-// Goroutine-safe
-// Use only as a heuristic.
-func (ch *Channel) canSend() bool {
-	return ch.loadSendQueueSize() < defaultSendQueueCapacity
-}
-
-// Returns true if any PacketMsgs are pending to be sent.
-// Call before calling nextPacketMsg()
-// Goroutine-safe
-func (ch *Channel) isSendPending() bool {
-	if len(ch.sending) == 0 {
-		if len(ch.sendQueue) == 0 {
-			return false
-		}
-		ch.sending = <-ch.sendQueue
-	}
-	return true
-}
-
-// Creates a new PacketMsg to send.
-// Not goroutine-safe
-func (ch *Channel) nextPacketMsg() PacketMsg {
-	packet := PacketMsg{}
-	packet.ChannelID = byte(ch.desc.ID)
-	maxSize := ch.maxPacketMsgPayloadSize
-	packet.Bytes = ch.sending[:cmn.MinInt(maxSize, len(ch.sending))]
-	if len(ch.sending) <= maxSize {
-		packet.EOF = byte(0x01)
-		ch.sending = nil
-		atomic.AddInt32(&ch.sendQueueSize, -1) // decrement sendQueueSize
-	} else {
-		packet.EOF = byte(0x00)
-		ch.sending = ch.sending[cmn.MinInt(maxSize, len(ch.sending)):]
-	}
-	return packet
-}
-
-// Writes next PacketMsg to w and updates c.recentlySent.
-// Not goroutine-safe
-func (ch *Channel) writePacketMsgTo(w io.Writer) (n int64, err error) {
-	var packet = ch.nextPacketMsg()
-	n, err = cdc.MarshalBinaryWriter(w, packet)
-	ch.recentlySent += n
-	return
-}
-
-// Handles incoming PacketMsgs. It returns the message bytes when the message
-// is complete. NOTE: the message bytes may change on the next call to recvPacketMsg.
-// Not goroutine-safe
-func (ch *Channel) recvPacketMsg(packet PacketMsg) ([]byte, error) {
-	ch.Logger.Debug("Read PacketMsg", "conn", ch.conn, "packet", packet)
-	var recvCap, recvReceived = ch.desc.RecvMessageCapacity, len(ch.recving) + len(packet.Bytes)
-	if recvCap < recvReceived {
-		return nil, fmt.Errorf("Received message exceeds available capacity: %v < %v", recvCap, recvReceived)
-	}
-	ch.recving = append(ch.recving, packet.Bytes...)
-	if packet.EOF == byte(0x01) {
-		msgBytes := ch.recving
-
-		// clear the slice without re-allocating.
- // http://stackoverflow.com/questions/16971741/how-do-you-clear-a-slice-in-go - // suggests this could be a memory leak, but we might as well keep the memory for the channel until it closes, - // at which point the recving slice stops being used and should be garbage collected - ch.recving = ch.recving[:0] // make([]byte, 0, ch.desc.RecvBufferCapacity) - return msgBytes, nil - } - return nil, nil -} - -// Call this periodically to update stats for throttling purposes. -// Not goroutine-safe -func (ch *Channel) updateStats() { - // Exponential decay of stats. - // TODO: optimize. - ch.recentlySent = int64(float64(ch.recentlySent) * 0.8) -} - -//---------------------------------------- -// Packet - -type Packet interface { - AssertIsPacket() -} - -func RegisterPacket(cdc *amino.Codec) { - cdc.RegisterInterface((*Packet)(nil), nil) - cdc.RegisterConcrete(PacketPing{}, "tendermint/p2p/PacketPing", nil) - cdc.RegisterConcrete(PacketPong{}, "tendermint/p2p/PacketPong", nil) - cdc.RegisterConcrete(PacketMsg{}, "tendermint/p2p/PacketMsg", nil) -} - -func (_ PacketPing) AssertIsPacket() {} -func (_ PacketPong) AssertIsPacket() {} -func (_ PacketMsg) AssertIsPacket() {} - -type PacketPing struct { -} - -type PacketPong struct { -} - -type PacketMsg struct { - ChannelID byte - EOF byte // 1 means message ends here. - Bytes []byte -} - -func (mp PacketMsg) String() string { - return fmt.Sprintf("PacketMsg{%X:%X T:%X}", mp.ChannelID, mp.Bytes, mp.EOF) -} diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go deleted file mode 100644 index ade8e8e9..00000000 --- a/p2p/conn/connection_test.go +++ /dev/null @@ -1,500 +0,0 @@ -package conn - -import ( - "bytes" - "net" - "testing" - "time" - - "github.com/fortytw2/leaktest" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - amino "github.com/tendermint/go-amino" - "github.com/tendermint/tmlibs/log" -) - -func createTestMConnection(conn net.Conn) *MConnection { - onReceive := func(chID byte, msgBytes []byte) { - } - onError := func(r interface{}) { - } - c := createMConnectionWithCallbacks(conn, onReceive, onError) - c.SetLogger(log.TestingLogger()) - return c -} - -func createMConnectionWithCallbacks(conn net.Conn, onReceive func(chID byte, msgBytes []byte), onError func(r interface{})) *MConnection { - cfg := DefaultMConnConfig() - cfg.PingInterval = 90 * time.Millisecond - cfg.PongTimeout = 45 * time.Millisecond - chDescs := []*ChannelDescriptor{&ChannelDescriptor{ID: 0x01, Priority: 1, SendQueueCapacity: 1}} - c := NewMConnectionWithConfig(conn, chDescs, onReceive, onError, cfg) - c.SetLogger(log.TestingLogger()) - return c -} - -func TestMConnectionSend(t *testing.T) { - server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck - - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - msg := []byte("Ant-Man") - assert.True(t, mconn.Send(0x01, msg)) - // Note: subsequent Send/TrySend calls could pass because we are reading from - // the send queue in a separate goroutine. 
- _, err = server.Read(make([]byte, len(msg))) - if err != nil { - t.Error(err) - } - assert.True(t, mconn.CanSend(0x01)) - - msg = []byte("Spider-Man") - assert.True(t, mconn.TrySend(0x01, msg)) - _, err = server.Read(make([]byte, len(msg))) - if err != nil { - t.Error(err) - } - - assert.False(t, mconn.CanSend(0x05), "CanSend should return false because channel is unknown") - assert.False(t, mconn.Send(0x05, []byte("Absorbing Man")), "Send should return false because channel is unknown") -} - -func TestMConnectionReceive(t *testing.T) { - server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck - - receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes - } - onError := func(r interface{}) { - errorsCh <- r - } - mconn1 := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn1.Start() - require.Nil(t, err) - defer mconn1.Stop() - - mconn2 := createTestMConnection(server) - err = mconn2.Start() - require.Nil(t, err) - defer mconn2.Stop() - - msg := []byte("Cyclops") - assert.True(t, mconn2.Send(0x01, msg)) - - select { - case receivedBytes := <-receivedCh: - assert.Equal(t, []byte(msg), receivedBytes) - case err := <-errorsCh: - t.Fatalf("Expected %s, got %+v", msg, err) - case <-time.After(500 * time.Millisecond): - t.Fatalf("Did not receive %s message in 500ms", msg) - } -} - -func TestMConnectionStatus(t *testing.T) { - server, client := NetPipe() - defer server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck - - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - status := mconn.Status() - assert.NotNil(t, status) - assert.Zero(t, status.Channels[0].SendQueueSize) -} - -func TestMConnectionPongTimeoutResultsInError(t *testing.T) { - server, client := net.Pipe() - defer server.Close() - defer client.Close() - - receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes - } - onError := func(r interface{}) { - errorsCh <- r - } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - serverGotPing := make(chan struct{}) - go func() { - // read ping - var pkt PacketPing - const maxPacketPingSize = 1024 - _, err = cdc.UnmarshalBinaryReader(server, &pkt, maxPacketPingSize) - assert.Nil(t, err) - serverGotPing <- struct{}{} - }() - <-serverGotPing - - pongTimerExpired := mconn.config.PongTimeout + 20*time.Millisecond - select { - case msgBytes := <-receivedCh: - t.Fatalf("Expected error, but got %v", msgBytes) - case err := <-errorsCh: - assert.NotNil(t, err) - case <-time.After(pongTimerExpired): - t.Fatalf("Expected to receive error after %v", pongTimerExpired) - } -} - -func TestMConnectionMultiplePongsInTheBeginning(t *testing.T) { - server, client := net.Pipe() - defer server.Close() - defer client.Close() - - receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes - } - onError := func(r interface{}) { - errorsCh <- r - } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - // sending 3 pongs in a row (abuse) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) - require.Nil(t, err) - _, err = 
server.Write(cdc.MustMarshalBinary(PacketPong{})) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) - require.Nil(t, err) - - serverGotPing := make(chan struct{}) - go func() { - // read ping (one byte) - var packet, err = Packet(nil), error(nil) - _, err = cdc.UnmarshalBinaryReader(server, &packet, 1024) - require.Nil(t, err) - serverGotPing <- struct{}{} - // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) - require.Nil(t, err) - }() - <-serverGotPing - - pongTimerExpired := mconn.config.PongTimeout + 20*time.Millisecond - select { - case msgBytes := <-receivedCh: - t.Fatalf("Expected no data, but got %v", msgBytes) - case err := <-errorsCh: - t.Fatalf("Expected no error, but got %v", err) - case <-time.After(pongTimerExpired): - assert.True(t, mconn.IsRunning()) - } -} - -func TestMConnectionMultiplePings(t *testing.T) { - server, client := net.Pipe() - defer server.Close() - defer client.Close() - - receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes - } - onError := func(r interface{}) { - errorsCh <- r - } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - // sending 3 pings in a row (abuse) - // see https://github.com/tendermint/tendermint/issues/1190 - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) - require.Nil(t, err) - var pkt PacketPong - _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) - require.Nil(t, err) - _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024) - require.Nil(t, err) - _, err = server.Write(cdc.MustMarshalBinary(PacketPing{})) - require.Nil(t, err) - _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024) - require.Nil(t, err) - - assert.True(t, mconn.IsRunning()) -} - -func TestMConnectionPingPongs(t *testing.T) { - // check that we are not leaking any go-routines - defer leaktest.CheckTimeout(t, 10*time.Second)() - - server, client := net.Pipe() - - defer server.Close() - defer client.Close() - - receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes - } - onError := func(r interface{}) { - errorsCh <- r - } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - serverGotPing := make(chan struct{}) - go func() { - // read ping - var pkt PacketPing - _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024) - require.Nil(t, err) - serverGotPing <- struct{}{} - // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) - require.Nil(t, err) - - time.Sleep(mconn.config.PingInterval) - - // read ping - _, err = cdc.UnmarshalBinaryReader(server, &pkt, 1024) - require.Nil(t, err) - // respond with pong - _, err = server.Write(cdc.MustMarshalBinary(PacketPong{})) - require.Nil(t, err) - }() - <-serverGotPing - - pongTimerExpired := (mconn.config.PongTimeout + 20*time.Millisecond) * 2 - select { - case msgBytes := <-receivedCh: - t.Fatalf("Expected no data, but got %v", msgBytes) - case err := <-errorsCh: - t.Fatalf("Expected no error, but got %v", err) - case <-time.After(2 * pongTimerExpired): - assert.True(t, mconn.IsRunning()) - } -} - -func TestMConnectionStopsAndReturnsError(t *testing.T) { - server, client := NetPipe() - defer 
server.Close() // nolint: errcheck - defer client.Close() // nolint: errcheck - - receivedCh := make(chan []byte) - errorsCh := make(chan interface{}) - onReceive := func(chID byte, msgBytes []byte) { - receivedCh <- msgBytes - } - onError := func(r interface{}) { - errorsCh <- r - } - mconn := createMConnectionWithCallbacks(client, onReceive, onError) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - if err := client.Close(); err != nil { - t.Error(err) - } - - select { - case receivedBytes := <-receivedCh: - t.Fatalf("Expected error, got %v", receivedBytes) - case err := <-errorsCh: - assert.NotNil(t, err) - assert.False(t, mconn.IsRunning()) - case <-time.After(500 * time.Millisecond): - t.Fatal("Did not receive error in 500ms") - } -} - -func newClientAndServerConnsForReadErrors(t *testing.T, chOnErr chan struct{}) (*MConnection, *MConnection) { - server, client := NetPipe() - - onReceive := func(chID byte, msgBytes []byte) {} - onError := func(r interface{}) {} - - // create client conn with two channels - chDescs := []*ChannelDescriptor{ - {ID: 0x01, Priority: 1, SendQueueCapacity: 1}, - {ID: 0x02, Priority: 1, SendQueueCapacity: 1}, - } - mconnClient := NewMConnection(client, chDescs, onReceive, onError) - mconnClient.SetLogger(log.TestingLogger().With("module", "client")) - err := mconnClient.Start() - require.Nil(t, err) - - // create server conn with 1 channel - // it fires on chOnErr when there's an error - serverLogger := log.TestingLogger().With("module", "server") - onError = func(r interface{}) { - chOnErr <- struct{}{} - } - mconnServer := createMConnectionWithCallbacks(server, onReceive, onError) - mconnServer.SetLogger(serverLogger) - err = mconnServer.Start() - require.Nil(t, err) - return mconnClient, mconnServer -} - -func expectSend(ch chan struct{}) bool { - after := time.After(time.Second * 5) - select { - case <-ch: - return true - case <-after: - return false - } -} - -func TestMConnectionReadErrorBadEncoding(t *testing.T) { - chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() - - client := mconnClient.conn - - // send badly encoded msgPacket - bz := cdc.MustMarshalBinary(PacketMsg{}) - bz[4] += 0x01 // Invalid prefix bytes. - - // Write it. - _, err := client.Write(bz) - assert.Nil(t, err) - assert.True(t, expectSend(chOnErr), "badly encoded msgPacket") -} - -func TestMConnectionReadErrorUnknownChannel(t *testing.T) { - chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() - - msg := []byte("Ant-Man") - - // fail to send msg on channel unknown by client - assert.False(t, mconnClient.Send(0x03, msg)) - - // send msg on channel unknown by the server. 
- // should cause an error - assert.True(t, mconnClient.Send(0x02, msg)) - assert.True(t, expectSend(chOnErr), "unknown channel") -} - -func TestMConnectionReadErrorLongMessage(t *testing.T) { - chOnErr := make(chan struct{}) - chOnRcv := make(chan struct{}) - - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() - - mconnServer.onReceive = func(chID byte, msgBytes []byte) { - chOnRcv <- struct{}{} - } - - client := mconnClient.conn - - // send msg thats just right - var err error - var buf = new(bytes.Buffer) - // - Uvarint length of MustMarshalBinary(packet) = 1 or 2 bytes - // (as long as it's less than 16,384 bytes) - // - Prefix bytes = 4 bytes - // - ChannelID field key + byte = 2 bytes - // - EOF field key + byte = 2 bytes - // - Bytes field key = 1 bytes - // - Uvarint length of MustMarshalBinary(bytes) = 1 or 2 bytes - // - Struct terminator = 1 byte - // = up to 14 bytes overhead for the packet. - var packet = PacketMsg{ - ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize), - } - _, err = cdc.MarshalBinaryWriter(buf, packet) - assert.Nil(t, err) - _, err = client.Write(buf.Bytes()) - assert.Nil(t, err) - assert.True(t, expectSend(chOnRcv), "msg just right") - assert.False(t, expectSend(chOnErr), "msg just right") - - // send msg thats too long - buf = new(bytes.Buffer) - packet = PacketMsg{ - ChannelID: 0x01, - EOF: 1, - Bytes: make([]byte, mconnClient.config.MaxPacketMsgPayloadSize+1), - } - _, err = cdc.MarshalBinaryWriter(buf, packet) - assert.Nil(t, err) - _, err = client.Write(buf.Bytes()) - assert.NotNil(t, err) - assert.False(t, expectSend(chOnRcv), "msg too long") - assert.True(t, expectSend(chOnErr), "msg too long") -} - -func TestMConnectionReadErrorUnknownMsgType(t *testing.T) { - chOnErr := make(chan struct{}) - mconnClient, mconnServer := newClientAndServerConnsForReadErrors(t, chOnErr) - defer mconnClient.Stop() - defer mconnServer.Stop() - - // send msg with unknown msg type - err := error(nil) - err = amino.EncodeUvarint(mconnClient.conn, 4) - assert.Nil(t, err) - _, err = mconnClient.conn.Write([]byte{0xFF, 0xFF, 0xFF, 0xFF}) - assert.Nil(t, err) - assert.True(t, expectSend(chOnErr), "unknown msg type") -} - -func TestMConnectionTrySend(t *testing.T) { - server, client := NetPipe() - defer server.Close() - defer client.Close() - - mconn := createTestMConnection(client) - err := mconn.Start() - require.Nil(t, err) - defer mconn.Stop() - - msg := []byte("Semicolon-Woman") - resultCh := make(chan string, 2) - assert.True(t, mconn.TrySend(0x01, msg)) - server.Read(make([]byte, len(msg))) - assert.True(t, mconn.CanSend(0x01)) - assert.True(t, mconn.TrySend(0x01, msg)) - assert.False(t, mconn.CanSend(0x01)) - go func() { - mconn.TrySend(0x01, msg) - resultCh <- "TrySend" - }() - assert.False(t, mconn.CanSend(0x01)) - assert.False(t, mconn.TrySend(0x01, msg)) - assert.Equal(t, "TrySend", <-resultCh) -} diff --git a/p2p/conn/secret_connection.go b/p2p/conn/secret_connection.go deleted file mode 100644 index 2a507f88..00000000 --- a/p2p/conn/secret_connection.go +++ /dev/null @@ -1,348 +0,0 @@ -// Uses nacl's secret_box to encrypt a net.Conn. -// It is (meant to be) an implementation of the STS protocol. -// Note we do not (yet) assume that a remote peer's pubkey -// is known ahead of time, and thus we are technically -// still vulnerable to MITM. (TODO!) 
-package conn
-
-import (
- "bytes"
- crand "crypto/rand"
- "crypto/sha256"
- "encoding/binary"
- "errors"
- "io"
- "net"
- "time"
-
- "golang.org/x/crypto/nacl/box"
- "golang.org/x/crypto/nacl/secretbox"
- "golang.org/x/crypto/ripemd160"
-
- "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-// 4 + 1024 == 1028 total frame size
-const dataLenSize = 4
-const dataMaxSize = 1024
-const totalFrameSize = dataMaxSize + dataLenSize
-const sealedFrameSize = totalFrameSize + secretbox.Overhead
-
-// Implements net.Conn
-type SecretConnection struct {
- conn io.ReadWriteCloser
- recvBuffer []byte
- recvNonce *[24]byte
- sendNonce *[24]byte
- remPubKey crypto.PubKey
- shrSecret *[32]byte // shared secret
-}
-
-// Performs the handshake and returns a new authenticated SecretConnection.
-// Returns an error if the handshake fails.
-// Caller should call conn.Close()
-// See docs/sts-final.pdf for more information.
-func MakeSecretConnection(conn io.ReadWriteCloser, locPrivKey crypto.PrivKey) (*SecretConnection, error) {
-
- locPubKey := locPrivKey.PubKey()
-
- // Generate ephemeral keys for perfect forward secrecy.
- locEphPub, locEphPriv := genEphKeys()
-
- // Write local ephemeral pubkey and receive one too.
- // NOTE: every 32-byte string is accepted as a Curve25519 public key
- // (see DJB's Curve25519 paper: http://cr.yp.to/ecdh/curve25519-20060209.pdf)
- remEphPub, err := shareEphPubKey(conn, locEphPub)
- if err != nil {
- return nil, err
- }
-
- // Compute common shared secret.
- shrSecret := computeSharedSecret(remEphPub, locEphPriv)
-
- // Sort by lexical order.
- loEphPub, hiEphPub := sort32(locEphPub, remEphPub)
-
- // Check if the local ephemeral public key
- // was the least, lexicographically sorted.
- locIsLeast := bytes.Equal(locEphPub[:], loEphPub[:])
-
- // Generate nonces to use for secretbox.
- recvNonce, sendNonce := genNonces(loEphPub, hiEphPub, locIsLeast)
-
- // Generate common challenge to sign.
- challenge := genChallenge(loEphPub, hiEphPub)
-
- // Construct SecretConnection.
- sc := &SecretConnection{
- conn: conn,
- recvBuffer: nil,
- recvNonce: recvNonce,
- sendNonce: sendNonce,
- shrSecret: shrSecret,
- }
-
- // Sign the challenge bytes for authentication.
- locSignature := signChallenge(challenge, locPrivKey)
-
- // Share (in secret) each other's pubkey & challenge signature
- authSigMsg, err := shareAuthSignature(sc, locPubKey, locSignature)
- if err != nil {
- return nil, err
- }
- remPubKey, remSignature := authSigMsg.Key, authSigMsg.Sig
- if !remPubKey.VerifyBytes(challenge[:], remSignature) {
- return nil, errors.New("Challenge verification failed")
- }
-
- // We've authenticated.
- sc.remPubKey = remPubKey
- return sc, nil
-}
-
-// Returns the authenticated remote pubkey
-func (sc *SecretConnection) RemotePubKey() crypto.PubKey {
- return sc.remPubKey
-}
-
-// Writes encrypted frames of `sealedFrameSize`
-// CONTRACT: data smaller than dataMaxSize is written atomically.
-func (sc *SecretConnection) Write(data []byte) (n int, err error) {
- for 0 < len(data) {
- var frame = make([]byte, totalFrameSize)
- var chunk []byte
- if dataMaxSize < len(data) {
- chunk = data[:dataMaxSize]
- data = data[dataMaxSize:]
- } else {
- chunk = data
- data = nil
- }
- chunkLength := len(chunk)
- binary.BigEndian.PutUint32(frame, uint32(chunkLength))
- copy(frame[dataLenSize:], chunk)
-
- // encrypt the frame
- var sealedFrame = make([]byte, sealedFrameSize)
- secretbox.Seal(sealedFrame[:0], frame, sc.sendNonce, sc.shrSecret)
- // fmt.Printf("secretbox.Seal(sealed:%X,sendNonce:%X,shrSecret:%X\n", sealedFrame, sc.sendNonce, sc.shrSecret)
- incr2Nonce(sc.sendNonce)
- // end encryption
-
- _, err := sc.conn.Write(sealedFrame)
- if err != nil {
- return n, err
- }
- n += len(chunk)
- }
- return
-}
-
-// CONTRACT: data smaller than dataMaxSize is read atomically.
-func (sc *SecretConnection) Read(data []byte) (n int, err error) {
- if 0 < len(sc.recvBuffer) {
- n = copy(data, sc.recvBuffer)
- sc.recvBuffer = sc.recvBuffer[n:]
- return
- }
-
- sealedFrame := make([]byte, sealedFrameSize)
- _, err = io.ReadFull(sc.conn, sealedFrame)
- if err != nil {
- return
- }
-
- // decrypt the frame
- var frame = make([]byte, totalFrameSize)
- // fmt.Printf("secretbox.Open(sealed:%X,recvNonce:%X,shrSecret:%X\n", sealedFrame, sc.recvNonce, sc.shrSecret)
- _, ok := secretbox.Open(frame[:0], sealedFrame, sc.recvNonce, sc.shrSecret)
- if !ok {
- return n, errors.New("Failed to decrypt SecretConnection")
- }
- incr2Nonce(sc.recvNonce)
- // end decryption
-
- var chunkLength = binary.BigEndian.Uint32(frame) // read the first four bytes
- if chunkLength > dataMaxSize {
- return 0, errors.New("chunkLength is greater than dataMaxSize")
- }
- var chunk = frame[dataLenSize : dataLenSize+chunkLength]
-
- n = copy(data, chunk)
- sc.recvBuffer = chunk[n:]
- return
-}
-
-// Implements net.Conn
-func (sc *SecretConnection) Close() error { return sc.conn.Close() }
-func (sc *SecretConnection) LocalAddr() net.Addr { return sc.conn.(net.Conn).LocalAddr() }
-func (sc *SecretConnection) RemoteAddr() net.Addr { return sc.conn.(net.Conn).RemoteAddr() }
-func (sc *SecretConnection) SetDeadline(t time.Time) error { return sc.conn.(net.Conn).SetDeadline(t) }
-func (sc *SecretConnection) SetReadDeadline(t time.Time) error {
- return sc.conn.(net.Conn).SetReadDeadline(t)
-}
-func (sc *SecretConnection) SetWriteDeadline(t time.Time) error {
- return sc.conn.(net.Conn).SetWriteDeadline(t)
-}
-
-func genEphKeys() (ephPub, ephPriv *[32]byte) {
- var err error
- ephPub, ephPriv, err = box.GenerateKey(crand.Reader)
- if err != nil {
- panic("Could not generate ephemeral keypairs")
- }
- return
-}
-
-func shareEphPubKey(conn io.ReadWriteCloser, locEphPub *[32]byte) (remEphPub *[32]byte, err error) {
-
- // Send our pubkey and receive theirs in tandem.
- var trs, _ = cmn.Parallel( - func(_ int) (val interface{}, err error, abort bool) { - var _, err1 = cdc.MarshalBinaryWriter(conn, locEphPub) - if err1 != nil { - return nil, err1, true // abort - } else { - return nil, nil, false - } - }, - func(_ int) (val interface{}, err error, abort bool) { - var _remEphPub [32]byte - var _, err2 = cdc.UnmarshalBinaryReader(conn, &_remEphPub, 1024*1024) // TODO - if err2 != nil { - return nil, err2, true // abort - } else { - return _remEphPub, nil, false - } - }, - ) - - // If error: - if trs.FirstError() != nil { - err = trs.FirstError() - return - } - - // Otherwise: - var _remEphPub = trs.FirstValue().([32]byte) - return &_remEphPub, nil -} - -func computeSharedSecret(remPubKey, locPrivKey *[32]byte) (shrSecret *[32]byte) { - shrSecret = new([32]byte) - box.Precompute(shrSecret, remPubKey, locPrivKey) - return -} - -func sort32(foo, bar *[32]byte) (lo, hi *[32]byte) { - if bytes.Compare(foo[:], bar[:]) < 0 { - lo = foo - hi = bar - } else { - lo = bar - hi = foo - } - return -} - -func genNonces(loPubKey, hiPubKey *[32]byte, locIsLo bool) (recvNonce, sendNonce *[24]byte) { - nonce1 := hash24(append(loPubKey[:], hiPubKey[:]...)) - nonce2 := new([24]byte) - copy(nonce2[:], nonce1[:]) - nonce2[len(nonce2)-1] ^= 0x01 - if locIsLo { - recvNonce = nonce1 - sendNonce = nonce2 - } else { - recvNonce = nonce2 - sendNonce = nonce1 - } - return -} - -func genChallenge(loPubKey, hiPubKey *[32]byte) (challenge *[32]byte) { - return hash32(append(loPubKey[:], hiPubKey[:]...)) -} - -func signChallenge(challenge *[32]byte, locPrivKey crypto.PrivKey) (signature crypto.Signature) { - signature = locPrivKey.Sign(challenge[:]) - return -} - -type authSigMessage struct { - Key crypto.PubKey - Sig crypto.Signature -} - -func shareAuthSignature(sc *SecretConnection, pubKey crypto.PubKey, signature crypto.Signature) (recvMsg authSigMessage, err error) { - - // Send our info and receive theirs in tandem. - var trs, _ = cmn.Parallel( - func(_ int) (val interface{}, err error, abort bool) { - var _, err1 = cdc.MarshalBinaryWriter(sc, authSigMessage{pubKey, signature}) - if err1 != nil { - return nil, err1, true // abort - } else { - return nil, nil, false - } - }, - func(_ int) (val interface{}, err error, abort bool) { - var _recvMsg authSigMessage - var _, err2 = cdc.UnmarshalBinaryReader(sc, &_recvMsg, 1024*1024) // TODO - if err2 != nil { - return nil, err2, true // abort - } else { - return _recvMsg, nil, false - } - }, - ) - - // If error: - if trs.FirstError() != nil { - err = trs.FirstError() - return - } - - var _recvMsg = trs.FirstValue().(authSigMessage) - return _recvMsg, nil -} - -//-------------------------------------------------------------------------------- - -// sha256 -func hash32(input []byte) (res *[32]byte) { - hasher := sha256.New() - hasher.Write(input) // nolint: errcheck, gas - resSlice := hasher.Sum(nil) - res = new([32]byte) - copy(res[:], resSlice) - return -} - -// We only fill in the first 20 bytes with ripemd160 -func hash24(input []byte) (res *[24]byte) { - hasher := ripemd160.New() - hasher.Write(input) // nolint: errcheck, gas - resSlice := hasher.Sum(nil) - res = new([24]byte) - copy(res[:], resSlice) - return -} - -// increment nonce big-endian by 2 with wraparound. -func incr2Nonce(nonce *[24]byte) { - incrNonce(nonce) - incrNonce(nonce) -} - -// increment nonce big-endian by 1 with wraparound. 
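// (Illustration only, not part of the original file: starting from a nonce
// whose low byte is 0xFF, the carry propagates into the next byte,
//
//	var n [24]byte
//	n[23] = 0xFF
//	incrNonce(&n) // now n[22] == 0x01 and n[23] == 0x00
//
// and because incr2Nonce always advances by two, the low-bit split made in
// genNonces is preserved, so each side's send and recv nonce streams stay
// disjoint.)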
-func incrNonce(nonce *[24]byte) {
- for i := 23; 0 <= i; i-- {
- nonce[i]++
- if nonce[i] != 0 {
- return
- }
- }
-}
diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go
deleted file mode 100644
index 8dfc2bef..00000000
--- a/p2p/conn/secret_connection_test.go
+++ /dev/null
@@ -1,250 +0,0 @@
-package conn
-
-import (
- "fmt"
- "io"
- "testing"
-
- "github.com/stretchr/testify/assert"
- "github.com/stretchr/testify/require"
- crypto "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-type kvstoreConn struct {
- *io.PipeReader
- *io.PipeWriter
-}
-
-func (drw kvstoreConn) Close() (err error) {
- err2 := drw.PipeWriter.CloseWithError(io.EOF)
- err1 := drw.PipeReader.Close()
- if err2 != nil {
- return err2
- }
- return err1
-}
-
-// Each returned ReadWriteCloser is akin to a net.Conn
-func makeKVStoreConnPair() (fooConn, barConn kvstoreConn) {
- barReader, fooWriter := io.Pipe()
- fooReader, barWriter := io.Pipe()
- return kvstoreConn{fooReader, fooWriter}, kvstoreConn{barReader, barWriter}
-}
-
-func makeSecretConnPair(tb testing.TB) (fooSecConn, barSecConn *SecretConnection) {
-
- var fooConn, barConn = makeKVStoreConnPair()
- var fooPrvKey = crypto.GenPrivKeyEd25519()
- var fooPubKey = fooPrvKey.PubKey()
- var barPrvKey = crypto.GenPrivKeyEd25519()
- var barPubKey = barPrvKey.PubKey()
-
- // Make connections from both sides in parallel.
- var trs, ok = cmn.Parallel(
- func(_ int) (val interface{}, err error, abort bool) {
- fooSecConn, err = MakeSecretConnection(fooConn, fooPrvKey)
- if err != nil {
- tb.Errorf("Failed to establish SecretConnection for foo: %v", err)
- return nil, err, true
- }
- remotePubBytes := fooSecConn.RemotePubKey()
- if !remotePubBytes.Equals(barPubKey) {
- err = fmt.Errorf("Unexpected fooSecConn.RemotePubKey. Expected %v, got %v",
- barPubKey, fooSecConn.RemotePubKey())
- tb.Error(err)
- return nil, err, false
- }
- return nil, nil, false
- },
- func(_ int) (val interface{}, err error, abort bool) {
- barSecConn, err = MakeSecretConnection(barConn, barPrvKey)
- if barSecConn == nil {
- tb.Errorf("Failed to establish SecretConnection for bar: %v", err)
- return nil, err, true
- }
- remotePubBytes := barSecConn.RemotePubKey()
- if !remotePubBytes.Equals(fooPubKey) {
- err = fmt.Errorf("Unexpected barSecConn.RemotePubKey. Expected %v, got %v",
- fooPubKey, barSecConn.RemotePubKey())
- tb.Error(err)
- return nil, err, false
- }
- return nil, nil, false
- },
- )
-
- require.Nil(tb, trs.FirstError())
- require.True(tb, ok, "Unexpected task abortion")
-
- return
-}
-
-func TestSecretConnectionHandshake(t *testing.T) {
- fooSecConn, barSecConn := makeSecretConnPair(t)
- if err := fooSecConn.Close(); err != nil {
- t.Error(err)
- }
- if err := barSecConn.Close(); err != nil {
- t.Error(err)
- }
-}
-
-func TestSecretConnectionReadWrite(t *testing.T) {
- fooConn, barConn := makeKVStoreConnPair()
- fooWrites, barWrites := []string{}, []string{}
- fooReads, barReads := []string{}, []string{}
-
- // Pre-generate the things to write (for foo & bar)
- for i := 0; i < 100; i++ {
- fooWrites = append(fooWrites, cmn.RandStr((cmn.RandInt()%(dataMaxSize*5))+1))
- barWrites = append(barWrites, cmn.RandStr((cmn.RandInt()%(dataMaxSize*5))+1))
- }
-
- // A helper that will run with (fooConn, fooWrites, fooReads) and vice versa
- genNodeRunner := func(id string, nodeConn kvstoreConn, nodeWrites []string, nodeReads *[]string) cmn.Task {
- return func(_ int) (interface{}, error, bool) {
- // Create a node private key and establish the secret connection through nodeConn.
- nodePrvKey := crypto.GenPrivKeyEd25519()
- nodeSecretConn, err := MakeSecretConnection(nodeConn, nodePrvKey)
- if err != nil {
- t.Errorf("Failed to establish SecretConnection for node: %v", err)
- return nil, err, true
- }
- // In parallel, handle some reads and writes.
- var trs, ok = cmn.Parallel(
- func(_ int) (interface{}, error, bool) {
- // Node writes:
- for _, nodeWrite := range nodeWrites {
- n, err := nodeSecretConn.Write([]byte(nodeWrite))
- if err != nil {
- t.Errorf("Failed to write to nodeSecretConn: %v", err)
- return nil, err, true
- }
- if n != len(nodeWrite) {
- err = fmt.Errorf("Failed to write all bytes. Expected %v, wrote %v", len(nodeWrite), n)
- t.Error(err)
- return nil, err, true
- }
- }
- if err := nodeConn.PipeWriter.Close(); err != nil {
- t.Error(err)
- return nil, err, true
- }
- return nil, nil, false
- },
- func(_ int) (interface{}, error, bool) {
- // Node reads:
- readBuffer := make([]byte, dataMaxSize)
- for {
- n, err := nodeSecretConn.Read(readBuffer)
- if err == io.EOF {
- // The writer side is done; close our reader before returning.
- if cerr := nodeConn.PipeReader.Close(); cerr != nil {
- t.Error(cerr)
- return nil, cerr, true
- }
- return nil, nil, false
- } else if err != nil {
- t.Errorf("Failed to read from nodeSecretConn: %v", err)
- return nil, err, true
- }
- *nodeReads = append(*nodeReads, string(readBuffer[:n]))
- }
- },
- )
- assert.True(t, ok, "Unexpected task abortion")
-
- // If error:
- if trs.FirstError() != nil {
- return nil, trs.FirstError(), true
- }
-
- // Otherwise:
- return nil, nil, false
- }
- }
-
- // Run foo & bar in parallel
- var trs, ok = cmn.Parallel(
- genNodeRunner("foo", fooConn, fooWrites, &fooReads),
- genNodeRunner("bar", barConn, barWrites, &barReads),
- )
- require.Nil(t, trs.FirstError())
- require.True(t, ok, "unexpected task abortion")
-
- // A helper to ensure that the writes and reads match.
- // Additionally, small writes (<= dataMaxSize) must be atomically read.
- compareWritesReads := func(writes []string, reads []string) {
- for {
- // Pop next write & corresponding reads
- var read, write string = "", writes[0]
- var readCount = 0
- for _, readChunk := range reads {
- read += readChunk
- readCount++
- if len(write) <= len(read) {
- break
- }
- if len(write) <= dataMaxSize {
- break // atomicity of small writes
- }
- }
- // Compare
- if write != read {
- t.Errorf("Expected to read %X, got %X", write, read)
- }
- // Iterate
- writes = writes[1:]
- reads = reads[readCount:]
- if len(writes) == 0 {
- break
- }
- }
- }
-
- compareWritesReads(fooWrites, barReads)
- compareWritesReads(barWrites, fooReads)
-
-}
-
-func BenchmarkSecretConnection(b *testing.B) {
- b.StopTimer()
- fooSecConn, barSecConn := makeSecretConnPair(b)
- fooWriteText := cmn.RandStr(dataMaxSize)
- // Consume reads from bar's reader
- go func() {
- readBuffer := make([]byte, dataMaxSize)
- for {
- _, err := barSecConn.Read(readBuffer)
- if err == io.EOF {
- return
- } else if err != nil {
- b.Fatalf("Failed to read from barSecConn: %v", err)
- }
- }
- }()
-
- b.StartTimer()
- for i := 0; i < b.N; i++ {
- _, err := fooSecConn.Write([]byte(fooWriteText))
- if err != nil {
- b.Fatalf("Failed to write to fooSecConn: %v", err)
- }
- }
- b.StopTimer()
-
- if err := fooSecConn.Close(); err != nil {
- b.Error(err)
- }
- //barSecConn.Close() race condition
-}
-
-func fingerprint(bz []byte) []byte {
- const fbsize = 40
- if len(bz) < fbsize {
- return bz
- }
- return bz[:fbsize]
-}
diff --git a/p2p/conn/wire.go b/p2p/conn/wire.go
deleted file mode 100644
index 02d67f6f..00000000
--- a/p2p/conn/wire.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package conn
-
-import (
- "github.com/tendermint/go-amino"
- "github.com/tendermint/go-crypto"
-)
-
-var cdc *amino.Codec = amino.NewCodec()
-
-func init() {
- crypto.RegisterAmino(cdc)
- RegisterPacket(cdc)
-}
diff --git a/p2p/dummy/peer.go b/p2p/dummy/peer.go
deleted file mode 100644
index fc224236..00000000
--- a/p2p/dummy/peer.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package dummy
-
-import (
- "net"
-
- p2p "github.com/tendermint/tendermint/p2p"
- tmconn "github.com/tendermint/tendermint/p2p/conn"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-type peer struct {
- cmn.BaseService
- kv map[string]interface{}
-}
-
-var _ p2p.Peer = (*peer)(nil)
-
-// NewPeer creates a new dummy peer.
-func NewPeer() *peer {
- p := &peer{
- kv: make(map[string]interface{}),
- }
- p.BaseService = *cmn.NewBaseService(nil, "peer", p)
-
- return p
-}
-
-// ID always returns dummy.
-func (p *peer) ID() p2p.ID {
- return p2p.ID("dummy")
-}
-
-// IsOutbound always returns false.
-func (p *peer) IsOutbound() bool {
- return false
-}
-
-// IsPersistent always returns false.
-func (p *peer) IsPersistent() bool {
- return false
-}
-
-// NodeInfo always returns empty node info.
-func (p *peer) NodeInfo() p2p.NodeInfo {
- return p2p.NodeInfo{}
-}
-
-// RemoteIP always returns localhost.
-func (p *peer) RemoteIP() net.IP {
- return net.ParseIP("127.0.0.1")
-}
-
-// Status always returns an empty connection status.
-func (p *peer) Status() tmconn.ConnectionStatus {
- return tmconn.ConnectionStatus{}
-}
-
-// Send does not do anything and just returns true.
-func (p *peer) Send(byte, []byte) bool {
- return true
-}
-
-// TrySend does not do anything and just returns true.
-func (p *peer) TrySend(byte, []byte) bool {
- return true
-}
-
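A hypothetical usage sketch for the dummy peer above: everything is a stub except the key-value store, which is enough for reactor tests that tag peers with state.

package main

import (
	"fmt"

	"github.com/tendermint/tendermint/p2p/dummy"
)

func main() {
	p := dummy.NewPeer()
	// Only Set/Get do real work; Send/TrySend just report success.
	p.Set("height", int64(10))
	if h, ok := p.Get("height").(int64); ok {
		fmt.Println(h) // 10
	}
}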
-// Set records the value under the given key in the map.
-func (p *peer) Set(key string, value interface{}) {
- p.kv[key] = value
-}
-
-// Get returns a value associated with the key. Nil is returned if no value
-// is found.
-func (p *peer) Get(key string) interface{} {
- if value, ok := p.kv[key]; ok {
- return value
- }
- return nil
-}
diff --git a/p2p/errors.go b/p2p/errors.go
deleted file mode 100644
index fc477d1c..00000000
--- a/p2p/errors.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package p2p
-
-import (
- "fmt"
- "net"
-)
-
-// ErrSwitchDuplicatePeerID is raised when a peer is connecting with a known
-// ID.
-type ErrSwitchDuplicatePeerID struct {
- ID ID
-}
-
-func (e ErrSwitchDuplicatePeerID) Error() string {
- return fmt.Sprintf("Duplicate peer ID %v", e.ID)
-}
-
-// ErrSwitchDuplicatePeerIP is raised when a peer is connecting with a known
-// IP.
-type ErrSwitchDuplicatePeerIP struct {
- IP net.IP
-}
-
-func (e ErrSwitchDuplicatePeerIP) Error() string {
- return fmt.Sprintf("Duplicate peer IP %v", e.IP.String())
-}
-
-// ErrSwitchConnectToSelf is raised when trying to connect to itself.
-type ErrSwitchConnectToSelf struct {
- Addr *NetAddress
-}
-
-func (e ErrSwitchConnectToSelf) Error() string {
- return fmt.Sprintf("Connect to self: %v", e.Addr)
-}
-
-type ErrSwitchAuthenticationFailure struct {
- Dialed *NetAddress
- Got ID
-}
-
-func (e ErrSwitchAuthenticationFailure) Error() string {
- return fmt.Sprintf(
- "Failed to authenticate peer. Dialed %v, but got peer with ID %s",
- e.Dialed,
- e.Got,
- )
-}
-
-//-------------------------------------------------------------------
-
-type ErrNetAddressNoID struct {
- Addr string
-}
-
-func (e ErrNetAddressNoID) Error() string {
- return fmt.Sprintf("Address (%s) does not contain ID", e.Addr)
-}
-
-type ErrNetAddressInvalid struct {
- Addr string
- Err error
-}
-
-func (e ErrNetAddressInvalid) Error() string {
- return fmt.Sprintf("Invalid address (%s): %v", e.Addr, e.Err)
-}
-
-type ErrNetAddressLookup struct {
- Addr string
- Err error
-}
-
-func (e ErrNetAddressLookup) Error() string {
- return fmt.Sprintf("Error looking up host (%s): %v", e.Addr, e.Err)
-}
diff --git a/p2p/fuzz.go b/p2p/fuzz.go
deleted file mode 100644
index 8d00ba40..00000000
--- a/p2p/fuzz.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package p2p
-
-import (
- "net"
- "sync"
- "time"
-
- "github.com/tendermint/tendermint/config"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-// FuzzedConnection wraps any net.Conn and depending on the mode either delays
-// reads/writes or randomly drops reads/writes/connections.
-type FuzzedConnection struct {
- conn net.Conn
-
- mtx sync.Mutex
- start <-chan time.Time
- active bool
-
- config *config.FuzzConnConfig
-}
-
-// FuzzConn creates a new FuzzedConnection. Fuzzing starts immediately.
-func FuzzConn(conn net.Conn) net.Conn {
- return FuzzConnFromConfig(conn, config.DefaultFuzzConnConfig())
-}
-
-// FuzzConnFromConfig creates a new FuzzedConnection from a config. Fuzzing
-// starts immediately.
-func FuzzConnFromConfig(conn net.Conn, config *config.FuzzConnConfig) net.Conn {
- return &FuzzedConnection{
- conn: conn,
- start: make(<-chan time.Time),
- active: true,
- config: config,
- }
-}
-
-// FuzzConnAfter creates a new FuzzedConnection. Fuzzing starts when the
-// duration elapses.
-func FuzzConnAfter(conn net.Conn, d time.Duration) net.Conn {
- return FuzzConnAfterFromConfig(conn, d, config.DefaultFuzzConnConfig())
-}
-
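As a sketch of how the constructors above combine with the config package (wrapWithDelay is an invented helper; the field names follow the FuzzConnConfig usage in this file):

package example

import (
	"net"
	"time"

	"github.com/tendermint/tendermint/config"
	"github.com/tendermint/tendermint/p2p"
)

// wrapWithDelay is a hypothetical helper: delay-only fuzzing, so every
// read and write sleeps up to MaxDelay instead of dropping data.
func wrapWithDelay(conn net.Conn) net.Conn {
	cfg := config.DefaultFuzzConnConfig()
	cfg.Mode = config.FuzzModeDelay
	cfg.MaxDelay = 200 * time.Millisecond
	return p2p.FuzzConnFromConfig(conn, cfg)
}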
-// FuzzConnAfterFromConfig creates a new FuzzedConnection from a config.
-// Fuzzing starts when the duration elapses.
-func FuzzConnAfterFromConfig(
- conn net.Conn,
- d time.Duration,
- config *config.FuzzConnConfig,
-) net.Conn {
- return &FuzzedConnection{
- conn: conn,
- start: time.After(d),
- active: false,
- config: config,
- }
-}
-
-// Config returns the connection's config.
-func (fc *FuzzedConnection) Config() *config.FuzzConnConfig {
- return fc.config
-}
-
-// Read implements net.Conn.
-func (fc *FuzzedConnection) Read(data []byte) (n int, err error) {
- if fc.fuzz() {
- return 0, nil
- }
- return fc.conn.Read(data)
-}
-
-// Write implements net.Conn.
-func (fc *FuzzedConnection) Write(data []byte) (n int, err error) {
- if fc.fuzz() {
- return 0, nil
- }
- return fc.conn.Write(data)
-}
-
-// Close implements net.Conn.
-func (fc *FuzzedConnection) Close() error { return fc.conn.Close() }
-
-// LocalAddr implements net.Conn.
-func (fc *FuzzedConnection) LocalAddr() net.Addr { return fc.conn.LocalAddr() }
-
-// RemoteAddr implements net.Conn.
-func (fc *FuzzedConnection) RemoteAddr() net.Addr { return fc.conn.RemoteAddr() }
-
-// SetDeadline implements net.Conn.
-func (fc *FuzzedConnection) SetDeadline(t time.Time) error { return fc.conn.SetDeadline(t) }
-
-// SetReadDeadline implements net.Conn.
-func (fc *FuzzedConnection) SetReadDeadline(t time.Time) error {
- return fc.conn.SetReadDeadline(t)
-}
-
-// SetWriteDeadline implements net.Conn.
-func (fc *FuzzedConnection) SetWriteDeadline(t time.Time) error {
- return fc.conn.SetWriteDeadline(t)
-}
-
-func (fc *FuzzedConnection) randomDuration() time.Duration {
- maxDelayMillis := int(fc.config.MaxDelay.Nanoseconds() / 1000)
- return time.Millisecond * time.Duration(cmn.RandInt()%maxDelayMillis) // nolint: gas
-}
-
-// fuzz implements the fuzzing (delay, or kill the conn)
-// and returns whether the read/write should be ignored.
-func (fc *FuzzedConnection) fuzz() bool {
- if !fc.shouldFuzz() {
- return false
- }
-
- switch fc.config.Mode {
- case config.FuzzModeDrop:
- // randomly drop the r/w, drop the conn, or sleep
- r := cmn.RandFloat64()
- if r <= fc.config.ProbDropRW {
- return true
- } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn {
- // XXX: can't this fail because machine precision?
- // XXX: do we need an error?
- fc.Close() // nolint: errcheck, gas
- return true
- } else if r < fc.config.ProbDropRW+fc.config.ProbDropConn+fc.config.ProbSleep {
- time.Sleep(fc.randomDuration())
- }
- case config.FuzzModeDelay:
- // sleep a bit
- time.Sleep(fc.randomDuration())
- }
- return false
-}
-
-func (fc *FuzzedConnection) shouldFuzz() bool {
- if fc.active {
- return true
- }
-
- fc.mtx.Lock()
- defer fc.mtx.Unlock()
-
- select {
- case <-fc.start:
- fc.active = true
- return true
- default:
- return false
- }
-}
diff --git a/p2p/key.go b/p2p/key.go
deleted file mode 100644
index 73103ebd..00000000
--- a/p2p/key.go
+++ /dev/null
@@ -1,111 +0,0 @@
-package p2p
-
-import (
- "bytes"
- "encoding/hex"
- "fmt"
- "io/ioutil"
-
- crypto "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-// ID is a hex-encoded crypto.Address
-type ID string
-
-// IDByteLength is the length of a crypto.Address. Currently only 20.
-// TODO: support other length addresses ?
-const IDByteLength = 20
-
-//------------------------------------------------------------------------------
-// Persistent peer ID
-// TODO: encrypt on disk
-
-// NodeKey is the persistent peer key.
-// It contains the node's private key for authentication.
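// (A usage sketch, not in the original file; the path below is hypothetical:
//
//	nodeKey, err := p2p.LoadOrGenNodeKey("config/node_key.json")
//	if err != nil {
//		return err
//	}
//	id := nodeKey.ID() // hex-encoded address of the pubkey
//
// LoadOrGenNodeKey, defined below, loads the JSON file when it exists and
// otherwise generates a fresh Ed25519 key and saves it with mode 0600.)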
-type NodeKey struct { - PrivKey crypto.PrivKey `json:"priv_key"` // our priv key -} - -// ID returns the peer's canonical ID - the hash of its public key. -func (nodeKey *NodeKey) ID() ID { - return PubKeyToID(nodeKey.PubKey()) -} - -// PubKey returns the peer's PubKey -func (nodeKey *NodeKey) PubKey() crypto.PubKey { - return nodeKey.PrivKey.PubKey() -} - -// PubKeyToID returns the ID corresponding to the given PubKey. -// It's the hex-encoding of the pubKey.Address(). -func PubKeyToID(pubKey crypto.PubKey) ID { - return ID(hex.EncodeToString(pubKey.Address())) -} - -// LoadOrGenNodeKey attempts to load the NodeKey from the given filePath. -// If the file does not exist, it generates and saves a new NodeKey. -func LoadOrGenNodeKey(filePath string) (*NodeKey, error) { - if cmn.FileExists(filePath) { - nodeKey, err := LoadNodeKey(filePath) - if err != nil { - return nil, err - } - return nodeKey, nil - } - return genNodeKey(filePath) -} - -func LoadNodeKey(filePath string) (*NodeKey, error) { - jsonBytes, err := ioutil.ReadFile(filePath) - if err != nil { - return nil, err - } - nodeKey := new(NodeKey) - err = cdc.UnmarshalJSON(jsonBytes, nodeKey) - if err != nil { - return nil, fmt.Errorf("Error reading NodeKey from %v: %v", filePath, err) - } - return nodeKey, nil -} - -func genNodeKey(filePath string) (*NodeKey, error) { - privKey := crypto.GenPrivKeyEd25519() - nodeKey := &NodeKey{ - PrivKey: privKey, - } - - jsonBytes, err := cdc.MarshalJSON(nodeKey) - if err != nil { - return nil, err - } - err = ioutil.WriteFile(filePath, jsonBytes, 0600) - if err != nil { - return nil, err - } - return nodeKey, nil -} - -//------------------------------------------------------------------------------ - -// MakePoWTarget returns the big-endian encoding of 2^(targetBits - difficulty) - 1. -// It can be used as a Proof of Work target. -// NOTE: targetBits must be a multiple of 8 and difficulty must be less than targetBits. -func MakePoWTarget(difficulty, targetBits uint) []byte { - if targetBits%8 != 0 { - panic(fmt.Sprintf("targetBits (%d) not a multiple of 8", targetBits)) - } - if difficulty >= targetBits { - panic(fmt.Sprintf("difficulty (%d) >= targetBits (%d)", difficulty, targetBits)) - } - targetBytes := targetBits / 8 - zeroPrefixLen := (int(difficulty) / 8) - prefix := bytes.Repeat([]byte{0}, zeroPrefixLen) - mod := (difficulty % 8) - if mod > 0 { - nonZeroPrefix := byte(1<<(8-mod) - 1) - prefix = append(prefix, nonZeroPrefix) - } - tailLen := int(targetBytes) - len(prefix) - return append(prefix, bytes.Repeat([]byte{0xFF}, tailLen)...) -} diff --git a/p2p/key_test.go b/p2p/key_test.go deleted file mode 100644 index c2e1f3e0..00000000 --- a/p2p/key_test.go +++ /dev/null @@ -1,50 +0,0 @@ -package p2p - -import ( - "bytes" - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/assert" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestLoadOrGenNodeKey(t *testing.T) { - filePath := filepath.Join(os.TempDir(), cmn.RandStr(12)+"_peer_id.json") - - nodeKey, err := LoadOrGenNodeKey(filePath) - assert.Nil(t, err) - - nodeKey2, err := LoadOrGenNodeKey(filePath) - assert.Nil(t, err) - - assert.Equal(t, nodeKey, nodeKey2) -} - -//---------------------------------------------------------- - -func padBytes(bz []byte, targetBytes int) []byte { - return append(bz, bytes.Repeat([]byte{0xFF}, targetBytes-len(bz))...) 
-} - -func TestPoWTarget(t *testing.T) { - - targetBytes := 20 - cases := []struct { - difficulty uint - target []byte - }{ - {0, padBytes([]byte{}, targetBytes)}, - {1, padBytes([]byte{127}, targetBytes)}, - {8, padBytes([]byte{0}, targetBytes)}, - {9, padBytes([]byte{0, 127}, targetBytes)}, - {10, padBytes([]byte{0, 63}, targetBytes)}, - {16, padBytes([]byte{0, 0}, targetBytes)}, - {17, padBytes([]byte{0, 0, 127}, targetBytes)}, - } - - for _, c := range cases { - assert.Equal(t, MakePoWTarget(c.difficulty, 20*8), c.target) - } -} diff --git a/p2p/listener.go b/p2p/listener.go deleted file mode 100644 index e698765c..00000000 --- a/p2p/listener.go +++ /dev/null @@ -1,226 +0,0 @@ -package p2p - -import ( - "fmt" - "net" - "strconv" - "time" - - "github.com/tendermint/tendermint/p2p/upnp" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -type Listener interface { - Connections() <-chan net.Conn - InternalAddress() *NetAddress - ExternalAddress() *NetAddress - String() string - Stop() error -} - -// Implements Listener -type DefaultListener struct { - cmn.BaseService - - listener net.Listener - intAddr *NetAddress - extAddr *NetAddress - connections chan net.Conn -} - -const ( - numBufferedConnections = 10 - defaultExternalPort = 8770 - tryListenSeconds = 5 -) - -func splitHostPort(addr string) (host string, port int) { - host, portStr, err := net.SplitHostPort(addr) - if err != nil { - panic(err) - } - port, err = strconv.Atoi(portStr) - if err != nil { - panic(err) - } - return host, port -} - -// skipUPNP: If true, does not try getUPNPExternalAddress() -func NewDefaultListener(protocol string, lAddr string, skipUPNP bool, logger log.Logger) Listener { - // Local listen IP & port - lAddrIP, lAddrPort := splitHostPort(lAddr) - - // Create listener - var listener net.Listener - var err error - for i := 0; i < tryListenSeconds; i++ { - listener, err = net.Listen(protocol, lAddr) - if err == nil { - break - } else if i < tryListenSeconds-1 { - time.Sleep(time.Second * 1) - } - } - if err != nil { - panic(err) - } - // Actual listener local IP & port - listenerIP, listenerPort := splitHostPort(listener.Addr().String()) - logger.Info("Local listener", "ip", listenerIP, "port", listenerPort) - - // Determine internal address... - var intAddr *NetAddress - intAddr, err = NewNetAddressStringWithOptionalID(lAddr) - if err != nil { - panic(err) - } - - // Determine external address... - var extAddr *NetAddress - if !skipUPNP { - // If the lAddrIP is INADDR_ANY, try UPnP - if lAddrIP == "" || lAddrIP == "0.0.0.0" { - extAddr = getUPNPExternalAddress(lAddrPort, listenerPort, logger) - } - } - // Otherwise just use the local address... 
- if extAddr == nil { - extAddr = getNaiveExternalAddress(listenerPort, false, logger) - } - if extAddr == nil { - panic("Could not determine external address!") - } - - dl := &DefaultListener{ - listener: listener, - intAddr: intAddr, - extAddr: extAddr, - connections: make(chan net.Conn, numBufferedConnections), - } - dl.BaseService = *cmn.NewBaseService(logger, "DefaultListener", dl) - err = dl.Start() // Started upon construction - if err != nil { - logger.Error("Error starting base service", "err", err) - } - return dl -} - -func (l *DefaultListener) OnStart() error { - if err := l.BaseService.OnStart(); err != nil { - return err - } - go l.listenRoutine() - return nil -} - -func (l *DefaultListener) OnStop() { - l.BaseService.OnStop() - l.listener.Close() // nolint: errcheck -} - -// Accept connections and pass on the channel -func (l *DefaultListener) listenRoutine() { - for { - conn, err := l.listener.Accept() - - if !l.IsRunning() { - break // Go to cleanup - } - - // listener wasn't stopped, - // yet we encountered an error. - if err != nil { - panic(err) - } - - l.connections <- conn - } - - // Cleanup - close(l.connections) - for range l.connections { - // Drain - } -} - -// A channel of inbound connections. -// It gets closed when the listener closes. -func (l *DefaultListener) Connections() <-chan net.Conn { - return l.connections -} - -func (l *DefaultListener) InternalAddress() *NetAddress { - return l.intAddr -} - -func (l *DefaultListener) ExternalAddress() *NetAddress { - return l.extAddr -} - -// NOTE: The returned listener is already Accept()'ing. -// So it's not suitable to pass into http.Serve(). -func (l *DefaultListener) NetListener() net.Listener { - return l.listener -} - -func (l *DefaultListener) String() string { - return fmt.Sprintf("Listener(@%v)", l.extAddr) -} - -/* external address helpers */ - -// UPNP external address discovery & port mapping -func getUPNPExternalAddress(externalPort, internalPort int, logger log.Logger) *NetAddress { - logger.Info("Getting UPNP external address") - nat, err := upnp.Discover() - if err != nil { - logger.Info("Could not perform UPNP discover", "err", err) - return nil - } - - ext, err := nat.GetExternalAddress() - if err != nil { - logger.Info("Could not get UPNP external address", "err", err) - return nil - } - - // UPnP can't seem to get the external port, so let's just be explicit. - if externalPort == 0 { - externalPort = defaultExternalPort - } - - externalPort, err = nat.AddPortMapping("tcp", externalPort, internalPort, "tendermint", 0) - if err != nil { - logger.Info("Could not add UPNP port mapping", "err", err) - return nil - } - - logger.Info("Got UPNP external address", "address", ext) - return NewNetAddressIPPort(ext, uint16(externalPort)) -} - -// TODO: use syscalls: see issue #712 -func getNaiveExternalAddress(port int, settleForLocal bool, logger log.Logger) *NetAddress { - addrs, err := net.InterfaceAddrs() - if err != nil { - panic(cmn.Fmt("Could not fetch interface addresses: %v", err)) - } - - for _, a := range addrs { - ipnet, ok := a.(*net.IPNet) - if !ok { - continue - } - v4 := ipnet.IP.To4() - if v4 == nil || (!settleForLocal && v4[0] == 127) { - continue - } // loopback - return NewNetAddressIPPort(ipnet.IP, uint16(port)) - } - - // try again, but settle for local - logger.Info("Node may not be connected to internet. 
Settling for local address") - return getNaiveExternalAddress(port, true, logger) -} diff --git a/p2p/listener_test.go b/p2p/listener_test.go deleted file mode 100644 index 92018e0a..00000000 --- a/p2p/listener_test.go +++ /dev/null @@ -1,47 +0,0 @@ -package p2p - -import ( - "bytes" - "testing" - - "github.com/tendermint/tmlibs/log" -) - -func TestListener(t *testing.T) { - // Create a listener - l := NewDefaultListener("tcp", ":8001", true, log.TestingLogger()) - - // Dial the listener - lAddr := l.ExternalAddress() - connOut, err := lAddr.Dial() - if err != nil { - t.Fatalf("Could not connect to listener address %v", lAddr) - } else { - t.Logf("Created a connection to listener address %v", lAddr) - } - connIn, ok := <-l.Connections() - if !ok { - t.Fatalf("Could not get inbound connection from listener") - } - - msg := []byte("hi!") - go func() { - _, err := connIn.Write(msg) - if err != nil { - t.Error(err) - } - }() - b := make([]byte, 32) - n, err := connOut.Read(b) - if err != nil { - t.Fatalf("Error reading off connection: %v", err) - } - - b = b[:n] - if !bytes.Equal(msg, b) { - t.Fatalf("Got %s, expected %s", b, msg) - } - - // Close the server, no longer needed. - l.Stop() -} diff --git a/p2p/metrics.go b/p2p/metrics.go deleted file mode 100644 index ab876ee7..00000000 --- a/p2p/metrics.go +++ /dev/null @@ -1,33 +0,0 @@ -package p2p - -import ( - "github.com/go-kit/kit/metrics" - "github.com/go-kit/kit/metrics/discard" - - prometheus "github.com/go-kit/kit/metrics/prometheus" - stdprometheus "github.com/prometheus/client_golang/prometheus" -) - -// Metrics contains metrics exposed by this package. -type Metrics struct { - // Number of peers. - Peers metrics.Gauge -} - -// PrometheusMetrics returns Metrics build using Prometheus client library. -func PrometheusMetrics() *Metrics { - return &Metrics{ - Peers: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ - Subsystem: "p2p", - Name: "peers", - Help: "Number of peers.", - }, []string{}), - } -} - -// NopMetrics returns no-op Metrics. -func NopMetrics() *Metrics { - return &Metrics{ - Peers: discard.NewGauge(), - } -} diff --git a/p2p/netaddress.go b/p2p/netaddress.go deleted file mode 100644 index 3e0d99d6..00000000 --- a/p2p/netaddress.go +++ /dev/null @@ -1,317 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package p2p - -import ( - "encoding/hex" - "flag" - "fmt" - "net" - "strconv" - "strings" - "time" - - cmn "github.com/tendermint/tmlibs/common" -) - -// NetAddress defines information about a peer on the network -// including its ID, IP address, and port. -type NetAddress struct { - ID ID `json:"id"` - IP net.IP `json:"ip"` - Port uint16 `json:"port"` - - // TODO: - // Name string `json:"name"` // optional DNS name - - // memoize .String() - str string -} - -// IDAddressString returns id@hostPort. -func IDAddressString(id ID, hostPort string) string { - return fmt.Sprintf("%s@%s", id, hostPort) -} - -// NewNetAddress returns a new NetAddress using the provided TCP -// address. When testing, other net.Addr (except TCP) will result in -// using 0.0.0.0:0. When normal run, other net.Addr (except TCP) will -// panic. -// TODO: socks proxies? -func NewNetAddress(id ID, addr net.Addr) *NetAddress { - tcpAddr, ok := addr.(*net.TCPAddr) - if !ok { - if flag.Lookup("test.v") == nil { // normal run - cmn.PanicSanity(cmn.Fmt("Only TCPAddrs are supported. 
Got: %v", addr)) - } else { // in testing - netAddr := NewNetAddressIPPort(net.IP("0.0.0.0"), 0) - netAddr.ID = id - return netAddr - } - } - ip := tcpAddr.IP - port := uint16(tcpAddr.Port) - na := NewNetAddressIPPort(ip, port) - na.ID = id - return na -} - -// NewNetAddressString returns a new NetAddress using the provided address in -// the form of "ID@IP:Port". -// Also resolves the host if host is not an IP. -// Errors are of type ErrNetAddressXxx where Xxx is in (NoID, Invalid, Lookup) -func NewNetAddressString(addr string) (*NetAddress, error) { - spl := strings.Split(addr, "@") - if len(spl) < 2 { - return nil, ErrNetAddressNoID{addr} - } - return NewNetAddressStringWithOptionalID(addr) -} - -// NewNetAddressStringWithOptionalID returns a new NetAddress using the -// provided address in the form of "ID@IP:Port", where the ID is optional. -// Also resolves the host if host is not an IP. -func NewNetAddressStringWithOptionalID(addr string) (*NetAddress, error) { - addrWithoutProtocol := removeProtocolIfDefined(addr) - - var id ID - spl := strings.Split(addrWithoutProtocol, "@") - if len(spl) == 2 { - idStr := spl[0] - idBytes, err := hex.DecodeString(idStr) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - if len(idBytes) != IDByteLength { - return nil, ErrNetAddressInvalid{ - addrWithoutProtocol, - fmt.Errorf("invalid hex length - got %d, expected %d", len(idBytes), IDByteLength)} - } - - id, addrWithoutProtocol = ID(idStr), spl[1] - } - - host, portStr, err := net.SplitHostPort(addrWithoutProtocol) - if err != nil { - return nil, ErrNetAddressInvalid{addrWithoutProtocol, err} - } - - ip := net.ParseIP(host) - if ip == nil { - if len(host) > 0 { - ips, err := net.LookupIP(host) - if err != nil { - return nil, ErrNetAddressLookup{host, err} - } - ip = ips[0] - } - } - - port, err := strconv.ParseUint(portStr, 10, 16) - if err != nil { - return nil, ErrNetAddressInvalid{portStr, err} - } - - na := NewNetAddressIPPort(ip, uint16(port)) - na.ID = id - return na, nil -} - -// NewNetAddressStrings returns an array of NetAddress'es build using -// the provided strings. -func NewNetAddressStrings(addrs []string) ([]*NetAddress, []error) { - netAddrs := make([]*NetAddress, 0) - errs := make([]error, 0) - for _, addr := range addrs { - netAddr, err := NewNetAddressString(addr) - if err != nil { - errs = append(errs, err) - } else { - netAddrs = append(netAddrs, netAddr) - } - } - return netAddrs, errs -} - -// NewNetAddressIPPort returns a new NetAddress using the provided IP -// and port number. -func NewNetAddressIPPort(ip net.IP, port uint16) *NetAddress { - return &NetAddress{ - IP: ip, - Port: port, - } -} - -// Equals reports whether na and other are the same addresses, -// including their ID, IP, and Port. -func (na *NetAddress) Equals(other interface{}) bool { - if o, ok := other.(*NetAddress); ok { - return na.String() == o.String() - } - return false -} - -// Same returns true is na has the same non-empty ID or DialString as other. 
-// Same returns true if na has the same non-empty ID or DialString as other.
-func (na *NetAddress) Same(other interface{}) bool {
- if o, ok := other.(*NetAddress); ok {
- if na.DialString() == o.DialString() {
- return true
- }
- if na.ID != "" && na.ID == o.ID {
- return true
- }
- }
- return false
-}
-
-// String representation: ID@IP:Port
-func (na *NetAddress) String() string {
- if na.str == "" {
- addrStr := na.DialString()
- if na.ID != "" {
- addrStr = IDAddressString(na.ID, addrStr)
- }
- na.str = addrStr
- }
- return na.str
-}
-
-func (na *NetAddress) DialString() string {
- return net.JoinHostPort(
- na.IP.String(),
- strconv.FormatUint(uint64(na.Port), 10),
- )
-}
-
-// Dial calls net.Dial on the address.
-func (na *NetAddress) Dial() (net.Conn, error) {
- conn, err := net.Dial("tcp", na.DialString())
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-// DialTimeout calls net.DialTimeout on the address.
-func (na *NetAddress) DialTimeout(timeout time.Duration) (net.Conn, error) {
- conn, err := net.DialTimeout("tcp", na.DialString(), timeout)
- if err != nil {
- return nil, err
- }
- return conn, nil
-}
-
-// Routable returns true if the address is routable.
-func (na *NetAddress) Routable() bool {
- // TODO(oga) bitcoind doesn't include RFC3849 here, but should we?
- return na.Valid() && !(na.RFC1918() || na.RFC3927() || na.RFC4862() ||
- na.RFC4193() || na.RFC4843() || na.Local())
-}
-
-// For IPv4 these are either a 0 or all bits set address. For IPv6 a zero
-// address or one that matches the RFC3849 documentation address format.
-func (na *NetAddress) Valid() bool {
- return na.IP != nil && !(na.IP.IsUnspecified() || na.RFC3849() ||
- na.IP.Equal(net.IPv4bcast))
-}
-
-// Local returns true if it is a local address.
-func (na *NetAddress) Local() bool {
- return na.IP.IsLoopback() || zero4.Contains(na.IP)
-}
-
-// ReachabilityTo checks whether o can be reached from na.
-func (na *NetAddress) ReachabilityTo(o *NetAddress) int {
- const (
- Unreachable = 0
- Default = iota
- Teredo
- Ipv6_weak
- Ipv4
- Ipv6_strong
- )
- if !na.Routable() {
- return Unreachable
- } else if na.RFC4380() {
- if !o.Routable() {
- return Default
- } else if o.RFC4380() {
- return Teredo
- } else if o.IP.To4() != nil {
- return Ipv4
- } else { // ipv6
- return Ipv6_weak
- }
- } else if na.IP.To4() != nil {
- if o.Routable() && o.IP.To4() != nil {
- return Ipv4
- }
- return Default
- } else /* ipv6 */ {
- var tunnelled bool
- // Is our v6 tunnelled?
- if o.RFC3964() || o.RFC6052() || o.RFC6145() {
- tunnelled = true
- }
- if !o.Routable() {
- return Default
- } else if o.RFC4380() {
- return Teredo
- } else if o.IP.To4() != nil {
- return Ipv4
- } else if tunnelled {
- // only prioritise ipv6 if we aren't tunnelling it.
- return Ipv6_weak - } - return Ipv6_strong - } -} - -// RFC1918: IPv4 Private networks (10.0.0.0/8, 192.168.0.0/16, 172.16.0.0/12) -// RFC3849: IPv6 Documentation address (2001:0DB8::/32) -// RFC3927: IPv4 Autoconfig (169.254.0.0/16) -// RFC3964: IPv6 6to4 (2002::/16) -// RFC4193: IPv6 unique local (FC00::/7) -// RFC4380: IPv6 Teredo tunneling (2001::/32) -// RFC4843: IPv6 ORCHID: (2001:10::/28) -// RFC4862: IPv6 Autoconfig (FE80::/64) -// RFC6052: IPv6 well known prefix (64:FF9B::/96) -// RFC6145: IPv6 IPv4 translated address ::FFFF:0:0:0/96 -var rfc1918_10 = net.IPNet{IP: net.ParseIP("10.0.0.0"), Mask: net.CIDRMask(8, 32)} -var rfc1918_192 = net.IPNet{IP: net.ParseIP("192.168.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc1918_172 = net.IPNet{IP: net.ParseIP("172.16.0.0"), Mask: net.CIDRMask(12, 32)} -var rfc3849 = net.IPNet{IP: net.ParseIP("2001:0DB8::"), Mask: net.CIDRMask(32, 128)} -var rfc3927 = net.IPNet{IP: net.ParseIP("169.254.0.0"), Mask: net.CIDRMask(16, 32)} -var rfc3964 = net.IPNet{IP: net.ParseIP("2002::"), Mask: net.CIDRMask(16, 128)} -var rfc4193 = net.IPNet{IP: net.ParseIP("FC00::"), Mask: net.CIDRMask(7, 128)} -var rfc4380 = net.IPNet{IP: net.ParseIP("2001::"), Mask: net.CIDRMask(32, 128)} -var rfc4843 = net.IPNet{IP: net.ParseIP("2001:10::"), Mask: net.CIDRMask(28, 128)} -var rfc4862 = net.IPNet{IP: net.ParseIP("FE80::"), Mask: net.CIDRMask(64, 128)} -var rfc6052 = net.IPNet{IP: net.ParseIP("64:FF9B::"), Mask: net.CIDRMask(96, 128)} -var rfc6145 = net.IPNet{IP: net.ParseIP("::FFFF:0:0:0"), Mask: net.CIDRMask(96, 128)} -var zero4 = net.IPNet{IP: net.ParseIP("0.0.0.0"), Mask: net.CIDRMask(8, 32)} - -func (na *NetAddress) RFC1918() bool { - return rfc1918_10.Contains(na.IP) || - rfc1918_192.Contains(na.IP) || - rfc1918_172.Contains(na.IP) -} -func (na *NetAddress) RFC3849() bool { return rfc3849.Contains(na.IP) } -func (na *NetAddress) RFC3927() bool { return rfc3927.Contains(na.IP) } -func (na *NetAddress) RFC3964() bool { return rfc3964.Contains(na.IP) } -func (na *NetAddress) RFC4193() bool { return rfc4193.Contains(na.IP) } -func (na *NetAddress) RFC4380() bool { return rfc4380.Contains(na.IP) } -func (na *NetAddress) RFC4843() bool { return rfc4843.Contains(na.IP) } -func (na *NetAddress) RFC4862() bool { return rfc4862.Contains(na.IP) } -func (na *NetAddress) RFC6052() bool { return rfc6052.Contains(na.IP) } -func (na *NetAddress) RFC6145() bool { return rfc6145.Contains(na.IP) } - -func removeProtocolIfDefined(addr string) string { - if strings.Contains(addr, "://") { - return strings.Split(addr, "://")[1] - } - return addr - -} diff --git a/p2p/netaddress_test.go b/p2p/netaddress_test.go deleted file mode 100644 index 653b436a..00000000 --- a/p2p/netaddress_test.go +++ /dev/null @@ -1,148 +0,0 @@ -package p2p - -import ( - "net" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestNewNetAddress(t *testing.T) { - tcpAddr, err := net.ResolveTCPAddr("tcp", "127.0.0.1:8080") - require.Nil(t, err) - addr := NewNetAddress("", tcpAddr) - - assert.Equal(t, "127.0.0.1:8080", addr.String()) - - assert.NotPanics(t, func() { - NewNetAddress("", &net.UDPAddr{IP: net.ParseIP("127.0.0.1"), Port: 8000}) - }, "Calling NewNetAddress with UDPAddr should not panic in testing") -} - -func TestNewNetAddressStringWithOptionalID(t *testing.T) { - testCases := []struct { - addr string - expected string - correct bool - }{ - {"127.0.0.1:8080", "127.0.0.1:8080", true}, - {"tcp://127.0.0.1:8080", "127.0.0.1:8080", true}, - 
{"udp://127.0.0.1:8080", "127.0.0.1:8080", true}, - {"udp//127.0.0.1:8080", "", false}, - // {"127.0.0:8080", false}, - {"notahost", "", false}, - {"127.0.0.1:notapath", "", false}, - {"notahost:8080", "", false}, - {"8082", "", false}, - {"127.0.0:8080000", "", false}, - - {"deadbeef@127.0.0.1:8080", "", false}, - {"this-isnot-hex@127.0.0.1:8080", "", false}, - {"xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - - {"tcp://deadbeef@127.0.0.1:8080", "", false}, - {"tcp://this-isnot-hex@127.0.0.1:8080", "", false}, - {"tcp://xxxxbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "", false}, - {"tcp://deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - - {"tcp://@127.0.0.1:8080", "", false}, - {"tcp://@", "", false}, - {"", "", false}, - {"@", "", false}, - {" @", "", false}, - {" @ ", "", false}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - } -} - -func TestNewNetAddressString(t *testing.T) { - testCases := []struct { - addr string - expected string - correct bool - }{ - {"127.0.0.1:8080", "127.0.0.1:8080", false}, - {"deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressString(tc.addr) - if tc.correct { - if assert.Nil(t, err, tc.addr) { - assert.Equal(t, tc.expected, addr.String()) - } - } else { - assert.NotNil(t, err, tc.addr) - } - } -} - -func TestNewNetAddressStrings(t *testing.T) { - addrs, errs := NewNetAddressStrings([]string{ - "127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef@127.0.0.1:8080", - "deadbeefdeadbeefdeadbeefdeadbeefdeadbeed@127.0.0.2:8080"}) - assert.Len(t, errs, 1) - assert.Equal(t, 2, len(addrs)) -} - -func TestNewNetAddressIPPort(t *testing.T) { - addr := NewNetAddressIPPort(net.ParseIP("127.0.0.1"), 8080) - assert.Equal(t, "127.0.0.1:8080", addr.String()) -} - -func TestNetAddressProperties(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - valid bool - local bool - routable bool - }{ - {"127.0.0.1:8080", true, true, false}, - {"ya.ru:80", true, false, true}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) - require.Nil(t, err) - - assert.Equal(t, tc.valid, addr.Valid()) - assert.Equal(t, tc.local, addr.Local()) - assert.Equal(t, tc.routable, addr.Routable()) - } -} - -func TestNetAddressReachabilityTo(t *testing.T) { - // TODO add more test cases - testCases := []struct { - addr string - other string - reachability int - }{ - {"127.0.0.1:8080", "127.0.0.1:8081", 0}, - {"ya.ru:80", "127.0.0.1:8080", 1}, - } - - for _, tc := range testCases { - addr, err := NewNetAddressStringWithOptionalID(tc.addr) - require.Nil(t, err) - - other, err := NewNetAddressStringWithOptionalID(tc.other) - require.Nil(t, err) - - assert.Equal(t, tc.reachability, addr.ReachabilityTo(other)) - } -} diff --git a/p2p/node_info.go b/p2p/node_info.go deleted file mode 100644 index 60383bc5..00000000 --- a/p2p/node_info.go +++ /dev/null @@ -1,164 +0,0 @@ -package p2p - -import ( - "fmt" - cmn "github.com/tendermint/tmlibs/common" - 
"strings" -) - -const ( - maxNodeInfoSize = 10240 // 10Kb - maxNumChannels = 16 // plenty of room for upgrades, for now -) - -// Max size of the NodeInfo struct -func MaxNodeInfoSize() int { - return maxNodeInfoSize -} - -// NodeInfo is the basic node information exchanged -// between two peers during the Tendermint P2P handshake. -type NodeInfo struct { - // Authenticate - // TODO: replace with NetAddress - ID ID `json:"id"` // authenticated identifier - ListenAddr string `json:"listen_addr"` // accepting incoming - - // Check compatibility. - // Channels are HexBytes so easier to read as JSON - Network string `json:"network"` // network/chain ID - Version string `json:"version"` // major.minor.revision - Channels cmn.HexBytes `json:"channels"` // channels this node knows about - - // ASCIIText fields - Moniker string `json:"moniker"` // arbitrary moniker - Other []string `json:"other"` // other application specific data -} - -// Validate checks the self-reported NodeInfo is safe. -// It returns an error if there -// are too many Channels, if there are any duplicate Channels, -// if the ListenAddr is malformed, or if the ListenAddr is a host name -// that can not be resolved to some IP. -// TODO: constraints for Moniker/Other? Or is that for the UI ? -// JAE: It needs to be done on the client, but to prevent ambiguous -// unicode characters, maybe it's worth sanitizing it here. -// In the future we might want to validate these, once we have a -// name-resolution system up. -// International clients could then use punycode (or we could use -// url-encoding), and we just need to be careful with how we handle that in our -// clients. (e.g. off by default). -func (info NodeInfo) Validate() error { - if len(info.Channels) > maxNumChannels { - return fmt.Errorf("info.Channels is too long (%v). Max is %v", len(info.Channels), maxNumChannels) - } - - // Sanitize ASCII text fields. - if !cmn.IsASCIIText(info.Moniker) || cmn.ASCIITrim(info.Moniker) == "" { - return fmt.Errorf("info.Moniker must be valid non-empty ASCII text without tabs, but got %v.", info.Moniker) - } - for i, s := range info.Other { - if !cmn.IsASCIIText(s) || cmn.ASCIITrim(s) == "" { - return fmt.Errorf("info.Other[%v] must be valid non-empty ASCII text without tabs, but got %v.", i, s) - } - } - - channels := make(map[byte]struct{}) - for _, ch := range info.Channels { - _, ok := channels[ch] - if ok { - return fmt.Errorf("info.Channels contains duplicate channel id %v", ch) - } - channels[ch] = struct{}{} - } - - // ensure ListenAddr is good - _, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr)) - return err -} - -// CompatibleWith checks if two NodeInfo are compatible with eachother. -// CONTRACT: two nodes are compatible if the major version matches and network match -// and they have at least one channel in common. -func (info NodeInfo) CompatibleWith(other NodeInfo) error { - iMajor, iMinor, _, iErr := splitVersion(info.Version) - oMajor, oMinor, _, oErr := splitVersion(other.Version) - - // if our own version number is not formatted right, we messed up - if iErr != nil { - return iErr - } - - // version number must be formatted correctly ("x.x.x") - if oErr != nil { - return oErr - } - - // major version must match - if iMajor != oMajor { - return fmt.Errorf("Peer is on a different major version. 
Got %v, expected %v", oMajor, iMajor) - } - - // minor version can differ - if iMinor != oMinor { - // ok - } - - // nodes must be on the same network - if info.Network != other.Network { - return fmt.Errorf("Peer is on a different network. Got %v, expected %v", other.Network, info.Network) - } - - // if we have no channels, we're just testing - if len(info.Channels) == 0 { - return nil - } - - // for each of our channels, check if they have it - found := false -OUTER_LOOP: - for _, ch1 := range info.Channels { - for _, ch2 := range other.Channels { - if ch1 == ch2 { - found = true - break OUTER_LOOP // only need one - } - } - } - if !found { - return fmt.Errorf("Peer has no common channels. Our channels: %v ; Peer channels: %v", info.Channels, other.Channels) - } - return nil -} - -// NetAddress returns a NetAddress derived from the NodeInfo - -// it includes the authenticated peer ID and the self-reported -// ListenAddr. Note that the ListenAddr is not authenticated and -// may not match that address actually dialed if its an outbound peer. -func (info NodeInfo) NetAddress() *NetAddress { - netAddr, err := NewNetAddressString(IDAddressString(info.ID, info.ListenAddr)) - if err != nil { - switch err.(type) { - case ErrNetAddressLookup: - // XXX If the peer provided a host name and the lookup fails here - // we're out of luck. - // TODO: use a NetAddress in NodeInfo - default: - panic(err) // everything should be well formed by now - } - } - return netAddr -} - -func (info NodeInfo) String() string { - return fmt.Sprintf("NodeInfo{id: %v, moniker: %v, network: %v [listen %v], version: %v (%v)}", - info.ID, info.Moniker, info.Network, info.ListenAddr, info.Version, info.Other) -} - -func splitVersion(version string) (string, string, string, error) { - spl := strings.Split(version, ".") - if len(spl) != 3 { - return "", "", "", fmt.Errorf("Invalid version format %v", version) - } - return spl[0], spl[1], spl[2], nil -} diff --git a/p2p/peer.go b/p2p/peer.go deleted file mode 100644 index da69fe74..00000000 --- a/p2p/peer.go +++ /dev/null @@ -1,431 +0,0 @@ -package p2p - -import ( - "fmt" - "net" - "sync/atomic" - "time" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/config" - tmconn "github.com/tendermint/tendermint/p2p/conn" -) - -var testIPSuffix uint32 - -// Peer is an interface representing a peer connected on a reactor. -type Peer interface { - cmn.Service - - ID() ID // peer's cryptographic ID - RemoteIP() net.IP // remote IP of the connection - IsOutbound() bool // did we dial the peer - IsPersistent() bool // do we redial this peer when we disconnect - NodeInfo() NodeInfo // peer's info - Status() tmconn.ConnectionStatus - - Send(byte, []byte) bool - TrySend(byte, []byte) bool - - Set(string, interface{}) - Get(string) interface{} -} - -//---------------------------------------------------------- - -// peerConn contains the raw connection and its config. -type peerConn struct { - outbound bool - persistent bool - config *config.P2PConfig - conn net.Conn // source connection - ip net.IP -} - -// ID only exists for SecretConnection. -// NOTE: Will panic if conn is not *SecretConnection. 
-func (pc peerConn) ID() ID { - return PubKeyToID(pc.conn.(*tmconn.SecretConnection).RemotePubKey()) -} - -// Return the IP from the connection RemoteAddr -func (pc peerConn) RemoteIP() net.IP { - if pc.ip != nil { - return pc.ip - } - - // In test cases a conn could not be present at all or be an in-memory - // implementation where we want to return a fake ip. - if pc.conn == nil || pc.conn.RemoteAddr().String() == "pipe" { - pc.ip = net.IP{172, 16, 0, byte(atomic.AddUint32(&testIPSuffix, 1))} - - return pc.ip - } - - host, _, err := net.SplitHostPort(pc.conn.RemoteAddr().String()) - if err != nil { - panic(err) - } - - ips, err := net.LookupIP(host) - if err != nil { - panic(err) - } - - pc.ip = ips[0] - - return pc.ip -} - -// peer implements Peer. -// -// Before using a peer, you will need to perform a handshake on connection. -type peer struct { - cmn.BaseService - - // raw peerConn and the multiplex connection - peerConn - mconn *tmconn.MConnection - - // peer's node info and the channel it knows about - // channels = nodeInfo.Channels - // cached to avoid copying nodeInfo in hasChannel - nodeInfo NodeInfo - channels []byte - - // User data - Data *cmn.CMap -} - -func newPeer( - pc peerConn, - mConfig tmconn.MConnConfig, - nodeInfo NodeInfo, - reactorsByCh map[byte]Reactor, - chDescs []*tmconn.ChannelDescriptor, - onPeerError func(Peer, interface{}), -) *peer { - p := &peer{ - peerConn: pc, - nodeInfo: nodeInfo, - channels: nodeInfo.Channels, - Data: cmn.NewCMap(), - } - - p.mconn = createMConnection( - pc.conn, - p, - reactorsByCh, - chDescs, - onPeerError, - mConfig, - ) - p.BaseService = *cmn.NewBaseService(nil, "Peer", p) - - return p -} - -func newOutboundPeerConn( - addr *NetAddress, - config *config.P2PConfig, - persistent bool, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - conn, err := dial(addr, config) - if err != nil { - return peerConn{}, cmn.ErrorWrap(err, "Error creating peer") - } - - pc, err := newPeerConn(conn, config, true, persistent, ourNodePrivKey) - if err != nil { - if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) - } - return peerConn{}, err - } - - // ensure dialed ID matches connection ID - if addr.ID != pc.ID() { - if cerr := conn.Close(); cerr != nil { - return peerConn{}, cmn.ErrorWrap(err, cerr.Error()) - } - return peerConn{}, ErrSwitchAuthenticationFailure{addr, pc.ID()} - } - - return pc, nil -} - -func newInboundPeerConn( - conn net.Conn, - config *config.P2PConfig, - ourNodePrivKey crypto.PrivKey, -) (peerConn, error) { - - // TODO: issue PoW challenge - - return newPeerConn(conn, config, false, false, ourNodePrivKey) -} - -func newPeerConn( - rawConn net.Conn, - cfg *config.P2PConfig, - outbound, persistent bool, - ourNodePrivKey crypto.PrivKey, -) (pc peerConn, err error) { - conn := rawConn - - // Fuzz connection - if cfg.TestFuzz { - // so we have time to do peer handshakes and get set up - conn = FuzzConnAfterFromConfig(conn, 10*time.Second, cfg.TestFuzzConfig) - } - - // Set deadline for secret handshake - dl := time.Now().Add(cfg.HandshakeTimeout) - if err := conn.SetDeadline(dl); err != nil { - return pc, cmn.ErrorWrap( - err, - "Error setting deadline while encrypting connection", - ) - } - - // Encrypt connection - conn, err = tmconn.MakeSecretConnection(conn, ourNodePrivKey) - if err != nil { - return pc, cmn.ErrorWrap(err, "Error creating peer") - } - - // Only the information we already have - return peerConn{ - config: cfg, - outbound: outbound, - persistent: persistent, - conn: 
conn,
- }, nil
-}
-
-//---------------------------------------------------
-// Implements cmn.Service

-// SetLogger implements BaseService.
-func (p *peer) SetLogger(l log.Logger) {
- p.Logger = l
- p.mconn.SetLogger(l)
-}
-
-// OnStart implements BaseService.
-func (p *peer) OnStart() error {
- if err := p.BaseService.OnStart(); err != nil {
- return err
- }
- err := p.mconn.Start()
- return err
-}
-
-// OnStop implements BaseService.
-func (p *peer) OnStop() {
- p.BaseService.OnStop()
- p.mconn.Stop() // stop everything and close the conn
-}
-
-//---------------------------------------------------
-// Implements Peer
-
-// ID returns the peer's ID - the hex-encoded hash of its pubkey.
-func (p *peer) ID() ID {
- return p.nodeInfo.ID
-}
-
-// IsOutbound returns true if the connection is outbound, false otherwise.
-func (p *peer) IsOutbound() bool {
- return p.peerConn.outbound
-}
-
-// IsPersistent returns true if the peer is persistent, false otherwise.
-func (p *peer) IsPersistent() bool {
- return p.peerConn.persistent
-}
-
-// NodeInfo returns a copy of the peer's NodeInfo.
-func (p *peer) NodeInfo() NodeInfo {
- return p.nodeInfo
-}
-
-// Status returns the peer's ConnectionStatus.
-func (p *peer) Status() tmconn.ConnectionStatus {
- return p.mconn.Status()
-}
-
-// Send sends msg bytes to the channel identified by chID. It returns false if
-// the send queue is full after the timeout specified by MConnection.
-func (p *peer) Send(chID byte, msgBytes []byte) bool {
- if !p.IsRunning() {
- // see Switch#Broadcast, where we fetch the list of peers and loop over
- // them - while we're looping, one peer may be removed and stopped.
- return false
- } else if !p.hasChannel(chID) {
- return false
- }
- return p.mconn.Send(chID, msgBytes)
-}
-
-// TrySend sends msg bytes to the channel identified by chID. It immediately
-// returns false if the send queue is full.
-func (p *peer) TrySend(chID byte, msgBytes []byte) bool {
- if !p.IsRunning() {
- return false
- } else if !p.hasChannel(chID) {
- return false
- }
- return p.mconn.TrySend(chID, msgBytes)
-}
-
-// Get returns the data for a given key.
-func (p *peer) Get(key string) interface{} {
- return p.Data.Get(key)
-}
-
-// Set sets the data for the given key.
-func (p *peer) Set(key string, data interface{}) {
- p.Data.Set(key, data)
-}
-
-// hasChannel returns true if the peer reported
-// knowing about the given chID.
-func (p *peer) hasChannel(chID byte) bool {
- for _, ch := range p.channels {
- if ch == chID {
- return true
- }
- }
- // NOTE: probably will want to remove this
- // but could be helpful while the feature is new
- p.Logger.Debug(
- "Unknown channel for peer",
- "channel",
- chID,
- "channels",
- p.channels,
- )
- return false
-}
-
-//---------------------------------------------------
-// methods used by the Switch
-
-// CloseConn should be called by the Switch if the peer was created but never
-// started.
-func (pc *peerConn) CloseConn() {
- pc.conn.Close() // nolint: errcheck
-}
-
-// HandshakeTimeout performs the Tendermint P2P handshake between a given node
-// and the peer by exchanging their NodeInfo. It sets the received nodeInfo on
-// the peer. 
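//
// A minimal call-site sketch (editor's illustration, not part of the original
// file; it assumes ourNodeInfo and a timeout value are in scope):
//
//	theirNodeInfo, err := pc.HandshakeTimeout(ourNodeInfo, 20*time.Second)
//	if err != nil {
//		return err // handshake failed or timed out
//	}
//	// Note the returned NodeInfo is self-reported; callers are expected to
//	// compare its ID against the authenticated pc.ID() before trusting it.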
-// NOTE: blocking -func (pc *peerConn) HandshakeTimeout( - ourNodeInfo NodeInfo, - timeout time.Duration, -) (peerNodeInfo NodeInfo, err error) { - // Set deadline for handshake so we don't block forever on conn.ReadFull - if err := pc.conn.SetDeadline(time.Now().Add(timeout)); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error setting deadline") - } - - var trs, _ = cmn.Parallel( - func(_ int) (val interface{}, err error, abort bool) { - _, err = cdc.MarshalBinaryWriter(pc.conn, ourNodeInfo) - return - }, - func(_ int) (val interface{}, err error, abort bool) { - _, err = cdc.UnmarshalBinaryReader( - pc.conn, - &peerNodeInfo, - int64(MaxNodeInfoSize()), - ) - return - }, - ) - if err := trs.FirstError(); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error during handshake") - } - - // Remove deadline - if err := pc.conn.SetDeadline(time.Time{}); err != nil { - return peerNodeInfo, cmn.ErrorWrap(err, "Error removing deadline") - } - - return peerNodeInfo, nil -} - -// Addr returns peer's remote network address. -func (p *peer) Addr() net.Addr { - return p.peerConn.conn.RemoteAddr() -} - -// CanSend returns true if the send queue is not full, false otherwise. -func (p *peer) CanSend(chID byte) bool { - if !p.IsRunning() { - return false - } - return p.mconn.CanSend(chID) -} - -// String representation. -func (p *peer) String() string { - if p.outbound { - return fmt.Sprintf("Peer{%v %v out}", p.mconn, p.ID()) - } - - return fmt.Sprintf("Peer{%v %v in}", p.mconn, p.ID()) -} - -//------------------------------------------------------------------ -// helper funcs - -func dial(addr *NetAddress, cfg *config.P2PConfig) (net.Conn, error) { - if cfg.TestDialFail { - return nil, fmt.Errorf("dial err (peerConfig.DialFail == true)") - } - - conn, err := addr.DialTimeout(cfg.DialTimeout) - if err != nil { - return nil, err - } - return conn, nil -} - -func createMConnection( - conn net.Conn, - p *peer, - reactorsByCh map[byte]Reactor, - chDescs []*tmconn.ChannelDescriptor, - onPeerError func(Peer, interface{}), - config tmconn.MConnConfig, -) *tmconn.MConnection { - - onReceive := func(chID byte, msgBytes []byte) { - reactor := reactorsByCh[chID] - if reactor == nil { - // Note that its ok to panic here as it's caught in the conn._recover, - // which does onPeerError. - panic(cmn.Fmt("Unknown channel %X", chID)) - } - reactor.Receive(chID, p, msgBytes) - } - - onError := func(r interface{}) { - onPeerError(p, r) - } - - return tmconn.NewMConnectionWithConfig( - conn, - chDescs, - onReceive, - onError, - config, - ) -} diff --git a/p2p/peer_set.go b/p2p/peer_set.go deleted file mode 100644 index e048cf4e..00000000 --- a/p2p/peer_set.go +++ /dev/null @@ -1,143 +0,0 @@ -package p2p - -import ( - "net" - "sync" -) - -// IPeerSet has a (immutable) subset of the methods of PeerSet. -type IPeerSet interface { - Has(key ID) bool - HasIP(ip net.IP) bool - Get(key ID) Peer - List() []Peer - Size() int -} - -//----------------------------------------------------------------------------- - -// PeerSet is a special structure for keeping a table of peers. -// Iteration over the peers is super fast and thread-safe. -type PeerSet struct { - mtx sync.Mutex - lookup map[ID]*peerSetItem - list []Peer -} - -type peerSetItem struct { - peer Peer - index int -} - -// NewPeerSet creates a new peerSet with a list of initial capacity of 256 items. 
-func NewPeerSet() *PeerSet { - return &PeerSet{ - lookup: make(map[ID]*peerSetItem), - list: make([]Peer, 0, 256), - } -} - -// Add adds the peer to the PeerSet. -// It returns an error carrying the reason, if the peer is already present. -func (ps *PeerSet) Add(peer Peer) error { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - if ps.lookup[peer.ID()] != nil { - return ErrSwitchDuplicatePeerID{peer.ID()} - } - - index := len(ps.list) - // Appending is safe even with other goroutines - // iterating over the ps.list slice. - ps.list = append(ps.list, peer) - ps.lookup[peer.ID()] = &peerSetItem{peer, index} - return nil -} - -// Has returns true iff the PeerSet contains -// the peer referred to by this peerKey. -func (ps *PeerSet) Has(peerKey ID) bool { - ps.mtx.Lock() - _, ok := ps.lookup[peerKey] - ps.mtx.Unlock() - return ok -} - -// HasIP returns true if the PeerSet contains the peer referred to by this IP -// address. -func (ps *PeerSet) HasIP(peerIP net.IP) bool { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return ps.hasIP(peerIP) -} - -// hasIP does not acquire a lock so it can be used in public methods which -// already lock. -func (ps *PeerSet) hasIP(peerIP net.IP) bool { - for _, item := range ps.lookup { - if item.peer.RemoteIP().Equal(peerIP) { - return true - } - } - - return false -} - -// Get looks up a peer by the provided peerKey. -func (ps *PeerSet) Get(peerKey ID) Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - item, ok := ps.lookup[peerKey] - if ok { - return item.peer - } - return nil -} - -// Remove discards peer by its Key, if the peer was previously memoized. -func (ps *PeerSet) Remove(peer Peer) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - item := ps.lookup[peer.ID()] - if item == nil { - return - } - - index := item.index - // Create a new copy of the list but with one less item. - // (we must copy because we'll be mutating the list). - newList := make([]Peer, len(ps.list)-1) - copy(newList, ps.list) - // If it's the last peer, that's an easy special case. - if index == len(ps.list)-1 { - ps.list = newList - delete(ps.lookup, peer.ID()) - return - } - - // Replace the popped item with the last item in the old list. - lastPeer := ps.list[len(ps.list)-1] - lastPeerKey := lastPeer.ID() - lastPeerItem := ps.lookup[lastPeerKey] - newList[index] = lastPeer - lastPeerItem.index = index - ps.list = newList - delete(ps.lookup, peer.ID()) -} - -// Size returns the number of unique items in the peerSet. -func (ps *PeerSet) Size() int { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return len(ps.list) -} - -// List returns the threadsafe list of peers. 
-func (ps *PeerSet) List() []Peer { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.list -} diff --git a/p2p/peer_set_test.go b/p2p/peer_set_test.go deleted file mode 100644 index 1675812f..00000000 --- a/p2p/peer_set_test.go +++ /dev/null @@ -1,172 +0,0 @@ -package p2p - -import ( - "math/rand" - "net" - "sync" - "testing" - - "github.com/stretchr/testify/assert" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" -) - -// Returns an empty kvstore peer -func randPeer(ip net.IP) *peer { - if ip == nil { - ip = net.IP{127, 0, 0, 1} - } - - nodeKey := NodeKey{PrivKey: crypto.GenPrivKeyEd25519()} - p := &peer{ - nodeInfo: NodeInfo{ - ID: nodeKey.ID(), - ListenAddr: cmn.Fmt("%v.%v.%v.%v:26656", rand.Int()%256, rand.Int()%256, rand.Int()%256, rand.Int()%256), - }, - } - - p.ip = ip - - return p -} - -func TestPeerSetAddRemoveOne(t *testing.T) { - t.Parallel() - - peerSet := NewPeerSet() - - var peerList []Peer - for i := 0; i < 5; i++ { - p := randPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(p); err != nil { - t.Error(err) - } - peerList = append(peerList, p) - } - - n := len(peerList) - // 1. Test removing from the front - for i, peerAtFront := range peerList { - peerSet.Remove(peerAtFront) - wantSize := n - i - 1 - for j := 0; j < 2; j++ { - assert.Equal(t, false, peerSet.Has(peerAtFront.ID()), "#%d Run #%d: failed to remove peer", i, j) - assert.Equal(t, wantSize, peerSet.Size(), "#%d Run #%d: failed to remove peer and decrement size", i, j) - // Test the route of removing the now non-existent element - peerSet.Remove(peerAtFront) - } - } - - // 2. Next we are testing removing the peer at the end - // a) Replenish the peerSet - for _, peer := range peerList { - if err := peerSet.Add(peer); err != nil { - t.Error(err) - } - } - - // b) In reverse, remove each element - for i := n - 1; i >= 0; i-- { - peerAtEnd := peerList[i] - peerSet.Remove(peerAtEnd) - assert.Equal(t, false, peerSet.Has(peerAtEnd.ID()), "#%d: failed to remove item at end", i) - assert.Equal(t, i, peerSet.Size(), "#%d: differing sizes after peerSet.Remove(atEndPeer)", i) - } -} - -func TestPeerSetAddRemoveMany(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - - peers := []Peer{} - N := 100 - for i := 0; i < N; i++ { - peer := randPeer(net.IP{127, 0, 0, byte(i)}) - if err := peerSet.Add(peer); err != nil { - t.Errorf("Failed to add new peer") - } - if peerSet.Size() != i+1 { - t.Errorf("Failed to add new peer and increment size") - } - peers = append(peers, peer) - } - - for i, peer := range peers { - peerSet.Remove(peer) - if peerSet.Has(peer.ID()) { - t.Errorf("Failed to remove peer") - } - if peerSet.Size() != len(peers)-i-1 { - t.Errorf("Failed to remove peer and decrement size") - } - } -} - -func TestPeerSetAddDuplicate(t *testing.T) { - t.Parallel() - peerSet := NewPeerSet() - peer := randPeer(nil) - - n := 20 - errsChan := make(chan error) - // Add the same asynchronously to test the - // concurrent guarantees of our APIs, and - // our expectation in the end is that only - // one addition succeeded, but the rest are - // instances of ErrSwitchDuplicatePeer. 
- for i := 0; i < n; i++ { - go func() { - errsChan <- peerSet.Add(peer) - }() - } - - // Now collect and tally the results - errsTally := make(map[string]int) - for i := 0; i < n; i++ { - err := <-errsChan - - switch err.(type) { - case ErrSwitchDuplicatePeerID: - errsTally["duplicateID"]++ - default: - errsTally["other"]++ - } - } - - // Our next procedure is to ensure that only one addition - // succeeded and that the rest are each ErrSwitchDuplicatePeer. - wantErrCount, gotErrCount := n-1, errsTally["duplicateID"] - assert.Equal(t, wantErrCount, gotErrCount, "invalid ErrSwitchDuplicatePeer count") - - wantNilErrCount, gotNilErrCount := 1, errsTally["other"] - assert.Equal(t, wantNilErrCount, gotNilErrCount, "invalid nil errCount") -} - -func TestPeerSetGet(t *testing.T) { - t.Parallel() - - var ( - peerSet = NewPeerSet() - peer = randPeer(nil) - ) - - assert.Nil(t, peerSet.Get(peer.ID()), "expecting a nil lookup, before .Add") - - if err := peerSet.Add(peer); err != nil { - t.Fatalf("Failed to add new peer: %v", err) - } - - var wg sync.WaitGroup - for i := 0; i < 10; i++ { - // Add them asynchronously to test the - // concurrent guarantees of our APIs. - wg.Add(1) - go func(i int) { - defer wg.Done() - have, want := peerSet.Get(peer.ID()), peer - assert.Equal(t, have, want, "%d: have %v, want %v", i, have, want) - }(i) - } - wg.Wait() -} diff --git a/p2p/peer_test.go b/p2p/peer_test.go deleted file mode 100644 index 3a477199..00000000 --- a/p2p/peer_test.go +++ /dev/null @@ -1,175 +0,0 @@ -package p2p - -import ( - golog "log" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/config" - tmconn "github.com/tendermint/tendermint/p2p/conn" -) - -const testCh = 0x01 - -func TestPeerBasic(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // simulate remote peer - rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg} - rp.Start() - defer rp.Stop() - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), cfg, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - defer p.Stop() - - assert.True(p.IsRunning()) - assert.True(p.IsOutbound()) - assert.False(p.IsPersistent()) - p.persistent = true - assert.True(p.IsPersistent()) - assert.Equal(rp.Addr().DialString(), p.Addr().String()) - assert.Equal(rp.ID(), p.ID()) -} - -func TestPeerSend(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - config := cfg - - // simulate remote peer - rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: config} - rp.Start() - defer rp.Stop() - - p, err := createOutboundPeerAndPerformHandshake(rp.Addr(), config, tmconn.DefaultMConnConfig()) - require.Nil(err) - - err = p.Start() - require.Nil(err) - - defer p.Stop() - - assert.True(p.CanSend(testCh)) - assert.True(p.Send(testCh, []byte("Asylum"))) -} - -func createOutboundPeerAndPerformHandshake( - addr *NetAddress, - config *config.P2PConfig, - mConfig tmconn.MConnConfig, -) (*peer, error) { - chDescs := []*tmconn.ChannelDescriptor{ - {ID: testCh, Priority: 1}, - } - reactorsByCh := map[byte]Reactor{testCh: NewTestReactor(chDescs, true)} - pk := crypto.GenPrivKeyEd25519() - pc, err := newOutboundPeerConn(addr, config, false, pk) - if err != nil { - return nil, err - } - nodeInfo, err := pc.HandshakeTimeout(NodeInfo{ - ID: addr.ID, - Moniker: 
"host_peer", - Network: "testing", - Version: "123.123.123", - Channels: []byte{testCh}, - }, 1*time.Second) - if err != nil { - return nil, err - } - - p := newPeer(pc, mConfig, nodeInfo, reactorsByCh, chDescs, func(p Peer, r interface{}) {}) - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p, nil -} - -type remotePeer struct { - PrivKey crypto.PrivKey - Config *config.P2PConfig - addr *NetAddress - quit chan struct{} - channels cmn.HexBytes - listenAddr string -} - -func (rp *remotePeer) Addr() *NetAddress { - return rp.addr -} - -func (rp *remotePeer) ID() ID { - return PubKeyToID(rp.PrivKey.PubKey()) -} - -func (rp *remotePeer) Start() { - if rp.listenAddr == "" { - rp.listenAddr = "127.0.0.1:0" - } - - l, e := net.Listen("tcp", rp.listenAddr) // any available address - if e != nil { - golog.Fatalf("net.Listen tcp :0: %+v", e) - } - rp.addr = NewNetAddress(PubKeyToID(rp.PrivKey.PubKey()), l.Addr()) - rp.quit = make(chan struct{}) - if rp.channels == nil { - rp.channels = []byte{testCh} - } - go rp.accept(l) -} - -func (rp *remotePeer) Stop() { - close(rp.quit) -} - -func (rp *remotePeer) accept(l net.Listener) { - conns := []net.Conn{} - - for { - conn, err := l.Accept() - if err != nil { - golog.Fatalf("Failed to accept conn: %+v", err) - } - - pc, err := newInboundPeerConn(conn, rp.Config, rp.PrivKey) - if err != nil { - golog.Fatalf("Failed to create a peer: %+v", err) - } - - _, err = pc.HandshakeTimeout(NodeInfo{ - ID: rp.Addr().ID, - Moniker: "remote_peer", - Network: "testing", - Version: "123.123.123", - ListenAddr: l.Addr().String(), - Channels: rp.channels, - }, 1*time.Second) - if err != nil { - golog.Fatalf("Failed to perform handshake: %+v", err) - } - - conns = append(conns, conn) - - select { - case <-rp.quit: - for _, conn := range conns { - if err := conn.Close(); err != nil { - golog.Fatal(err) - } - } - return - default: - } - } -} diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go deleted file mode 100644 index dc51761f..00000000 --- a/p2p/pex/addrbook.go +++ /dev/null @@ -1,813 +0,0 @@ -// Modified for Tendermint -// Originally Copyright (c) 2013-2014 Conformal Systems LLC. -// https://github.com/conformal/btcd/blob/master/LICENSE - -package pex - -import ( - "crypto/sha256" - "encoding/binary" - "math" - "net" - "sync" - "time" - - crypto "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" -) - -const ( - bucketTypeNew = 0x01 - bucketTypeOld = 0x02 -) - -// AddrBook is an address book used for tracking peers -// so we can gossip about them to others and select -// peers to dial. -// TODO: break this up? -type AddrBook interface { - cmn.Service - - // Add our own addresses so we don't later add ourselves - AddOurAddress(*p2p.NetAddress) - // Check if it is our address - OurAddress(*p2p.NetAddress) bool - - // Add and remove an address - AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error - RemoveAddress(*p2p.NetAddress) - - // Check if the address is in the book - HasAddress(*p2p.NetAddress) bool - - // Do we need more peers? 
- NeedMoreAddrs() bool - - // Pick an address to dial - PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress - - // Mark address - MarkGood(*p2p.NetAddress) - MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress) - - IsGood(*p2p.NetAddress) bool - - // Send a selection of addresses to peers - GetSelection() []*p2p.NetAddress - // Send a selection of addresses with bias - GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress - - // TODO: remove - ListOfKnownAddresses() []*knownAddress - - // Persist to disk - Save() -} - -var _ AddrBook = (*addrBook)(nil) - -// addrBook - concurrency safe peer address manager. -// Implements AddrBook. -type addrBook struct { - cmn.BaseService - - // immutable after creation - filePath string - routabilityStrict bool - key string // random prefix for bucket placement - - // accessed concurrently - mtx sync.Mutex - rand *cmn.Rand - ourAddrs map[string]struct{} - addrLookup map[p2p.ID]*knownAddress // new & old - bucketsOld []map[string]*knownAddress - bucketsNew []map[string]*knownAddress - nOld int - nNew int - - wg sync.WaitGroup -} - -// NewAddrBook creates a new address book. -// Use Start to begin processing asynchronous address updates. -func NewAddrBook(filePath string, routabilityStrict bool) *addrBook { - am := &addrBook{ - rand: cmn.NewRand(), - ourAddrs: make(map[string]struct{}), - addrLookup: make(map[p2p.ID]*knownAddress), - filePath: filePath, - routabilityStrict: routabilityStrict, - } - am.init() - am.BaseService = *cmn.NewBaseService(nil, "AddrBook", am) - return am -} - -// Initialize the buckets. -// When modifying this, don't forget to update loadFromFile() -func (a *addrBook) init() { - a.key = crypto.CRandHex(24) // 24/2 * 8 = 96 bits - // New addr buckets - a.bucketsNew = make([]map[string]*knownAddress, newBucketCount) - for i := range a.bucketsNew { - a.bucketsNew[i] = make(map[string]*knownAddress) - } - // Old addr buckets - a.bucketsOld = make([]map[string]*knownAddress, oldBucketCount) - for i := range a.bucketsOld { - a.bucketsOld[i] = make(map[string]*knownAddress) - } -} - -// OnStart implements Service. -func (a *addrBook) OnStart() error { - if err := a.BaseService.OnStart(); err != nil { - return err - } - a.loadFromFile(a.filePath) - - // wg.Add to ensure that any invocation of .Wait() - // later on will wait for saveRoutine to terminate. - a.wg.Add(1) - go a.saveRoutine() - - return nil -} - -// OnStop implements Service. -func (a *addrBook) OnStop() { - a.BaseService.OnStop() -} - -func (a *addrBook) Wait() { - a.wg.Wait() -} - -func (a *addrBook) FilePath() string { - return a.filePath -} - -//------------------------------------------------------- - -// AddOurAddress one of our addresses. -func (a *addrBook) AddOurAddress(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - a.Logger.Info("Add our address to book", "addr", addr) - a.ourAddrs[addr.String()] = struct{}{} -} - -// OurAddress returns true if it is our address. -func (a *addrBook) OurAddress(addr *p2p.NetAddress) bool { - a.mtx.Lock() - _, ok := a.ourAddrs[addr.String()] - a.mtx.Unlock() - return ok -} - -// AddAddress implements AddrBook -// Add address to a "new" bucket. If it's already in one, only add it probabilistically. -// Returns error if the addr is non-routable. Does not add self. 
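// (Editor's note) "Probabilistically" is made concrete in addAddress below:
// an address already referenced by k new buckets is added to another bucket
// only with probability 1/(2k), and never beyond maxNewBucketsPerAddress
// buckets in total.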
-// NOTE: addr must not be nil
-func (a *addrBook) AddAddress(addr *p2p.NetAddress, src *p2p.NetAddress) error {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- return a.addAddress(addr, src)
-}
-
-// RemoveAddress implements AddrBook - removes the address from the book.
-func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- ka := a.addrLookup[addr.ID]
- if ka == nil {
- return
- }
- a.Logger.Info("Remove address from book", "addr", ka.Addr, "ID", ka.ID())
- a.removeFromAllBuckets(ka)
-}
-
-// IsGood returns true if the peer was ever marked as good and has not
-// done anything wrong since then.
-func (a *addrBook) IsGood(addr *p2p.NetAddress) bool {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- // guard against a nil lookup: an address we have never seen is not good
- ka := a.addrLookup[addr.ID]
- return ka != nil && ka.isOld()
-}
-
-// HasAddress returns true if the address is in the book.
-func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- ka := a.addrLookup[addr.ID]
- return ka != nil
-}
-
-// NeedMoreAddrs implements AddrBook - returns true if the book does not have enough addresses.
-func (a *addrBook) NeedMoreAddrs() bool {
- return a.Size() < needAddressThreshold
-}
-
-// PickAddress implements AddrBook. It picks an address to connect to.
-// The address is picked randomly from an old or new bucket according
-// to the biasTowardsNewAddrs argument, which must be between 0 and 100 (or else it is truncated to that range)
-// and determines how biased we are to pick an address from a new bucket.
-// PickAddress returns nil if the AddrBook is empty or if we try to pick
-// from an empty bucket.
-func (a *addrBook) PickAddress(biasTowardsNewAddrs int) *p2p.NetAddress {
- a.mtx.Lock()
- defer a.mtx.Unlock()
-
- bookSize := a.size()
- if bookSize <= 0 {
- if bookSize < 0 {
- a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld)
- }
- return nil
- }
- if biasTowardsNewAddrs > 100 {
- biasTowardsNewAddrs = 100
- }
- if biasTowardsNewAddrs < 0 {
- biasTowardsNewAddrs = 0
- }
-
- // Bias between new and old addresses.
- oldCorrelation := math.Sqrt(float64(a.nOld)) * (100.0 - float64(biasTowardsNewAddrs))
- newCorrelation := math.Sqrt(float64(a.nNew)) * float64(biasTowardsNewAddrs)
-
- // pick a random peer from a random bucket
- var bucket map[string]*knownAddress
- pickFromOldBucket := (newCorrelation+oldCorrelation)*a.rand.Float64() < oldCorrelation
- if (pickFromOldBucket && a.nOld == 0) ||
- (!pickFromOldBucket && a.nNew == 0) {
- return nil
- }
- // loop until we pick a random non-empty bucket
- for len(bucket) == 0 {
- if pickFromOldBucket {
- bucket = a.bucketsOld[a.rand.Intn(len(a.bucketsOld))]
- } else {
- bucket = a.bucketsNew[a.rand.Intn(len(a.bucketsNew))]
- }
- }
- // pick a random index and loop over the map to return that index
- randIndex := a.rand.Intn(len(bucket))
- for _, ka := range bucket {
- if randIndex == 0 {
- return ka.Addr
- }
- randIndex--
- }
- return nil
-}
-
-// MarkGood implements AddrBook - it marks the peer as good and
-// moves it into an "old" bucket.
-func (a *addrBook) MarkGood(addr *p2p.NetAddress) {
- a.mtx.Lock()
- defer a.mtx.Unlock()
- ka := a.addrLookup[addr.ID]
- if ka == nil {
- return
- }
- ka.markGood()
- if ka.isNew() {
- a.moveToOld(ka)
- }
-}
-
-// MarkAttempt implements AddrBook - it marks that an attempt was made to connect to the address. 
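// A typical marking lifecycle (editor's sketch, not part of the original
// file; addr is a previously added *p2p.NetAddress):
//
//	book.MarkAttempt(addr) // before dialing
//	book.MarkGood(addr)    // on success; may promote addr to an old bucket
//	book.MarkBad(addr)     // on misbehaviour; currently just removes addr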
-func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { - a.mtx.Lock() - defer a.mtx.Unlock() - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - ka.markAttempt() -} - -// MarkBad implements AddrBook. Currently it just ejects the address. -// TODO: black list for some amount of time -func (a *addrBook) MarkBad(addr *p2p.NetAddress) { - a.RemoveAddress(addr) -} - -// GetSelection implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -func (a *addrBook) GetSelection() []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) - } - return nil - } - - numAddresses := cmn.MaxInt( - cmn.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = cmn.MinInt(maxGetSelection, numAddresses) - - // XXX: instead of making a list of all addresses, shuffling, and slicing a random chunk, - // could we just select a random numAddresses of indexes? - allAddr := make([]*p2p.NetAddress, bookSize) - i := 0 - for _, ka := range a.addrLookup { - allAddr[i] = ka.Addr - i++ - } - - // Fisher-Yates shuffle the array. We only need to do the first - // `numAddresses' since we are throwing the rest. - for i := 0; i < numAddresses; i++ { - // pick a number between current index and the end - j := cmn.RandIntn(len(allAddr)-i) + i - allAddr[i], allAddr[j] = allAddr[j], allAddr[i] - } - - // slice off the limit we are willing to share. - return allAddr[:numAddresses] -} - -// GetSelectionWithBias implements AddrBook. -// It randomly selects some addresses (old & new). Suitable for peer-exchange protocols. -// Must never return a nil address. -// -// Each address is picked randomly from an old or new bucket according to the -// biasTowardsNewAddrs argument, which must be between [0, 100] (or else is truncated to -// that range) and determines how biased we are to pick an address from a new -// bucket. 
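// (Editor's note) Worked example: with biasTowardsNewAddrs = 30 and 100 slots
// to fill, slots whose running position is below 30% (slots 0-29) draw from
// new buckets and the remaining 70 draw from old buckets, giving roughly a
// 30/70 new/old split - subject to the fallbacks below when either side is
// empty.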
-func (a *addrBook) GetSelectionWithBias(biasTowardsNewAddrs int) []*p2p.NetAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - bookSize := a.size() - if bookSize <= 0 { - if bookSize < 0 { - a.Logger.Error("Addrbook size less than 0", "nNew", a.nNew, "nOld", a.nOld) - } - return nil - } - - if biasTowardsNewAddrs > 100 { - biasTowardsNewAddrs = 100 - } - if biasTowardsNewAddrs < 0 { - biasTowardsNewAddrs = 0 - } - - numAddresses := cmn.MaxInt( - cmn.MinInt(minGetSelection, bookSize), - bookSize*getSelectionPercent/100) - numAddresses = cmn.MinInt(maxGetSelection, numAddresses) - - selection := make([]*p2p.NetAddress, numAddresses) - - oldBucketToAddrsMap := make(map[int]map[string]struct{}) - var oldIndex int - newBucketToAddrsMap := make(map[int]map[string]struct{}) - var newIndex int - - selectionIndex := 0 -ADDRS_LOOP: - for selectionIndex < numAddresses { - pickFromOldBucket := int((float64(selectionIndex)/float64(numAddresses))*100) >= biasTowardsNewAddrs - pickFromOldBucket = (pickFromOldBucket && a.nOld > 0) || a.nNew == 0 - bucket := make(map[string]*knownAddress) - - // loop until we pick a random non-empty bucket - for len(bucket) == 0 { - if pickFromOldBucket { - oldIndex = a.rand.Intn(len(a.bucketsOld)) - bucket = a.bucketsOld[oldIndex] - } else { - newIndex = a.rand.Intn(len(a.bucketsNew)) - bucket = a.bucketsNew[newIndex] - } - } - - // pick a random index - randIndex := a.rand.Intn(len(bucket)) - - // loop over the map to return that index - var selectedAddr *p2p.NetAddress - for _, ka := range bucket { - if randIndex == 0 { - selectedAddr = ka.Addr - break - } - randIndex-- - } - - // if we have selected the address before, restart the loop - // otherwise, record it and continue - if pickFromOldBucket { - if addrsMap, ok := oldBucketToAddrsMap[oldIndex]; ok { - if _, ok = addrsMap[selectedAddr.String()]; ok { - continue ADDRS_LOOP - } - } else { - oldBucketToAddrsMap[oldIndex] = make(map[string]struct{}) - } - oldBucketToAddrsMap[oldIndex][selectedAddr.String()] = struct{}{} - } else { - if addrsMap, ok := newBucketToAddrsMap[newIndex]; ok { - if _, ok = addrsMap[selectedAddr.String()]; ok { - continue ADDRS_LOOP - } - } else { - newBucketToAddrsMap[newIndex] = make(map[string]struct{}) - } - newBucketToAddrsMap[newIndex][selectedAddr.String()] = struct{}{} - } - - selection[selectionIndex] = selectedAddr - selectionIndex++ - } - - return selection -} - -// ListOfKnownAddresses returns the new and old addresses. -func (a *addrBook) ListOfKnownAddresses() []*knownAddress { - a.mtx.Lock() - defer a.mtx.Unlock() - - addrs := []*knownAddress{} - for _, addr := range a.addrLookup { - addrs = append(addrs, addr.copy()) - } - return addrs -} - -//------------------------------------------------ - -// Size returns the number of addresses in the book. -func (a *addrBook) Size() int { - a.mtx.Lock() - defer a.mtx.Unlock() - return a.size() -} - -func (a *addrBook) size() int { - return a.nNew + a.nOld -} - -//---------------------------------------------------------- - -// Save persists the address book to disk. 
-func (a *addrBook) Save() {
- a.saveToFile(a.filePath) // thread safe
-}
-
-func (a *addrBook) saveRoutine() {
- defer a.wg.Done()
-
- saveFileTicker := time.NewTicker(dumpAddressInterval)
-out:
- for {
- select {
- case <-saveFileTicker.C:
- a.saveToFile(a.filePath)
- case <-a.Quit():
- break out
- }
- }
- saveFileTicker.Stop()
- a.saveToFile(a.filePath)
- a.Logger.Info("Address handler done")
-}
-
-//----------------------------------------------------------
-
-func (a *addrBook) getBucket(bucketType byte, bucketIdx int) map[string]*knownAddress {
- switch bucketType {
- case bucketTypeNew:
- return a.bucketsNew[bucketIdx]
- case bucketTypeOld:
- return a.bucketsOld[bucketIdx]
- default:
- cmn.PanicSanity("Should not happen")
- return nil
- }
-}
-
-// Adds ka to a new bucket. If the bucket is full, it first makes room by
-// expiring entries, so the addition itself never fails - which is why this
-// function has no return value.
-func (a *addrBook) addToNewBucket(ka *knownAddress, bucketIdx int) {
- // Sanity check
- if ka.isOld() {
- a.Logger.Error("Failed Sanity Check! Can't add old address to new bucket", "ka", ka, "bucket", bucketIdx)
- return
- }
-
- addrStr := ka.Addr.String()
- bucket := a.getBucket(bucketTypeNew, bucketIdx)
-
- // Already exists?
- if _, ok := bucket[addrStr]; ok {
- return
- }
-
- // Enforce max addresses.
- if len(bucket) > newBucketSize {
- a.Logger.Info("new bucket is full, expiring new")
- a.expireNew(bucketIdx)
- }
-
- // Add to bucket.
- bucket[addrStr] = ka
- // increment nNew if the peer doesn't already exist in a bucket
- if ka.addBucketRef(bucketIdx) == 1 {
- a.nNew++
- }
-
- // Add it to addrLookup
- a.addrLookup[ka.ID()] = ka
-}
-
-// Adds ka to the old bucket. Returns false if the move is invalid or the
-// bucket is full.
-func (a *addrBook) addToOldBucket(ka *knownAddress, bucketIdx int) bool {
- // Sanity check
- if ka.isNew() {
- a.Logger.Error(cmn.Fmt("Cannot add new address to old bucket: %v", ka))
- return false
- }
- if len(ka.Buckets) != 0 {
- a.Logger.Error(cmn.Fmt("Cannot add already old address to another old bucket: %v", ka))
- return false
- }
-
- addrStr := ka.Addr.String()
- bucket := a.getBucket(bucketTypeOld, bucketIdx)
-
- // Already exists?
- if _, ok := bucket[addrStr]; ok {
- return true
- }
-
- // Enforce max addresses.
- if len(bucket) > oldBucketSize {
- return false
- }
-
- // Add to bucket. 
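	// (Editor's note, not in the original file) addBucketRef below reports how
	// many buckets now reference ka; a result of 1 means this is the address's
	// first bucket reference, so nOld is incremented exactly once per address
	// rather than once per bucket.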
- bucket[addrStr] = ka - if ka.addBucketRef(bucketIdx) == 1 { - a.nOld++ - } - - // Ensure in addrLookup - a.addrLookup[ka.ID()] = ka - - return true -} - -func (a *addrBook) removeFromBucket(ka *knownAddress, bucketType byte, bucketIdx int) { - if ka.BucketType != bucketType { - a.Logger.Error(cmn.Fmt("Bucket type mismatch: %v", ka)) - return - } - bucket := a.getBucket(bucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - if ka.removeBucketRef(bucketIdx) == 0 { - if bucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) - } -} - -func (a *addrBook) removeFromAllBuckets(ka *knownAddress) { - for _, bucketIdx := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIdx) - delete(bucket, ka.Addr.String()) - } - ka.Buckets = nil - if ka.BucketType == bucketTypeNew { - a.nNew-- - } else { - a.nOld-- - } - delete(a.addrLookup, ka.ID()) -} - -//---------------------------------------------------------- - -func (a *addrBook) pickOldest(bucketType byte, bucketIdx int) *knownAddress { - bucket := a.getBucket(bucketType, bucketIdx) - var oldest *knownAddress - for _, ka := range bucket { - if oldest == nil || ka.LastAttempt.Before(oldest.LastAttempt) { - oldest = ka - } - } - return oldest -} - -// adds the address to a "new" bucket. if its already in one, -// it only adds it probabilistically -func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { - if addr == nil || src == nil { - return ErrAddrBookNilAddr{addr, src} - } - - if a.routabilityStrict && !addr.Routable() { - return ErrAddrBookNonRoutable{addr} - } - // TODO: we should track ourAddrs by ID and by IP:PORT and refuse both. - if _, ok := a.ourAddrs[addr.String()]; ok { - return ErrAddrBookSelf{addr} - } - - ka := a.addrLookup[addr.ID] - if ka != nil { - // If its already old and the addr is the same, ignore it. - if ka.isOld() && ka.Addr.Equals(addr) { - return nil - } - // Already in max new buckets. - if len(ka.Buckets) == maxNewBucketsPerAddress { - return nil - } - // The more entries we have, the less likely we are to add more. - factor := int32(2 * len(ka.Buckets)) - if a.rand.Int31n(factor) != 0 { - return nil - } - } else { - ka = newKnownAddress(addr, src) - } - - bucket := a.calcNewBucket(addr, src) - a.addToNewBucket(ka, bucket) - return nil -} - -// Make space in the new buckets by expiring the really bad entries. -// If no bad entries are available we remove the oldest. -func (a *addrBook) expireNew(bucketIdx int) { - for addrStr, ka := range a.bucketsNew[bucketIdx] { - // If an entry is bad, throw it away - if ka.isBad() { - a.Logger.Info(cmn.Fmt("expiring bad address %v", addrStr)) - a.removeFromBucket(ka, bucketTypeNew, bucketIdx) - return - } - } - - // If we haven't thrown out a bad entry, throw out the oldest entry - oldest := a.pickOldest(bucketTypeNew, bucketIdx) - a.removeFromBucket(oldest, bucketTypeNew, bucketIdx) -} - -// Promotes an address from new to old. If the destination bucket is full, -// demote the oldest one to a "new" bucket. -// TODO: Demote more probabilistically? -func (a *addrBook) moveToOld(ka *knownAddress) { - // Sanity check - if ka.isOld() { - a.Logger.Error(cmn.Fmt("Cannot promote address that is already old %v", ka)) - return - } - if len(ka.Buckets) == 0 { - a.Logger.Error(cmn.Fmt("Cannot promote address that isn't in any new buckets %v", ka)) - return - } - - // Remove from all (new) buckets. - a.removeFromAllBuckets(ka) - // It's officially old now. 
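	// (Editor's note, not in the original file) If the destination old bucket
	// below turns out to be full, the oldest entry of that bucket is demoted
	// back to a new bucket to make room, and the promotion is then retried
	// once.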
- ka.BucketType = bucketTypeOld - - // Try to add it to its oldBucket destination. - oldBucketIdx := a.calcOldBucket(ka.Addr) - added := a.addToOldBucket(ka, oldBucketIdx) - if !added { - // No room; move the oldest to a new bucket - oldest := a.pickOldest(bucketTypeOld, oldBucketIdx) - a.removeFromBucket(oldest, bucketTypeOld, oldBucketIdx) - newBucketIdx := a.calcNewBucket(oldest.Addr, oldest.Src) - a.addToNewBucket(oldest, newBucketIdx) - - // Finally, add our ka to old bucket again. - added = a.addToOldBucket(ka, oldBucketIdx) - if !added { - a.Logger.Error(cmn.Fmt("Could not re-add ka %v to oldBucketIdx %v", ka, oldBucketIdx)) - } - } -} - -//--------------------------------------------------------------------- -// calculate bucket placements - -// doublesha256( key + sourcegroup + -// int64(doublesha256(key + group + sourcegroup))%bucket_per_group ) % num_new_buckets -func (a *addrBook) calcNewBucket(addr, src *p2p.NetAddress) int { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(a.groupKey(addr))...) - data1 = append(data1, []byte(a.groupKey(src))...) - hash1 := doubleSha256(data1) - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= newBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(src)...) - data2 = append(data2, hashbuf[:]...) - - hash2 := doubleSha256(data2) - return int(binary.BigEndian.Uint64(hash2) % newBucketCount) -} - -// doublesha256( key + group + -// int64(doublesha256(key + addr))%buckets_per_group ) % num_old_buckets -func (a *addrBook) calcOldBucket(addr *p2p.NetAddress) int { - data1 := []byte{} - data1 = append(data1, []byte(a.key)...) - data1 = append(data1, []byte(addr.String())...) - hash1 := doubleSha256(data1) - hash64 := binary.BigEndian.Uint64(hash1) - hash64 %= oldBucketsPerGroup - var hashbuf [8]byte - binary.BigEndian.PutUint64(hashbuf[:], hash64) - data2 := []byte{} - data2 = append(data2, []byte(a.key)...) - data2 = append(data2, a.groupKey(addr)...) - data2 = append(data2, hashbuf[:]...) - - hash2 := doubleSha256(data2) - return int(binary.BigEndian.Uint64(hash2) % oldBucketCount) -} - -// Return a string representing the network group of this address. -// This is the /16 for IPv4, the /32 (/36 for he.net) for IPv6, the string -// "local" for a local address and the string "unroutable" for an unroutable -// address. -func (a *addrBook) groupKey(na *p2p.NetAddress) string { - if a.routabilityStrict && na.Local() { - return "local" - } - if a.routabilityStrict && !na.Routable() { - return "unroutable" - } - - if ipv4 := na.IP.To4(); ipv4 != nil { - return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(16, 32)}).String() - } - if na.RFC6145() || na.RFC6052() { - // last four bytes are the ip address - ip := net.IP(na.IP[12:16]) - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - } - - if na.RFC3964() { - ip := net.IP(na.IP[2:7]) - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - - } - if na.RFC4380() { - // teredo tunnels have the last 4 bytes as the v4 address XOR - // 0xff. - ip := net.IP(make([]byte, 4)) - for i, byte := range na.IP[12:16] { - ip[i] = byte ^ 0xff - } - return (&net.IPNet{IP: ip, Mask: net.CIDRMask(16, 32)}).String() - } - - // OK, so now we know ourselves to be a IPv6 address. - // bitcoind uses /32 for everything, except for Hurricane Electric's - // (he.net) IP range, which it uses /36 for. 
- bits := 32 - heNet := &net.IPNet{IP: net.ParseIP("2001:470::"), - Mask: net.CIDRMask(32, 128)} - if heNet.Contains(na.IP) { - bits = 36 - } - - return (&net.IPNet{IP: na.IP, Mask: net.CIDRMask(bits, 128)}).String() -} - -// doubleSha256 calculates sha256(sha256(b)) and returns the resulting bytes. -func doubleSha256(b []byte) []byte { - hasher := sha256.New() - hasher.Write(b) // nolint: errcheck, gas - sum := hasher.Sum(nil) - hasher.Reset() - hasher.Write(sum) // nolint: errcheck, gas - return hasher.Sum(nil) -} diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go deleted file mode 100644 index 2e260428..00000000 --- a/p2p/pex/addrbook_test.go +++ /dev/null @@ -1,356 +0,0 @@ -package pex - -import ( - "encoding/hex" - "fmt" - "io/ioutil" - "math/rand" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/p2p" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -func createTempFileName(prefix string) string { - f, err := ioutil.TempFile("", prefix) - if err != nil { - panic(err) - } - fname := f.Name() - err = f.Close() - if err != nil { - panic(err) - } - return fname -} - -func deleteTempFile(fname string) { - err := os.Remove(fname) - if err != nil { - panic(err) - } -} - -func TestAddrBookPickAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - // 0 addresses - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - assert.Zero(t, book.Size()) - - addr := book.PickAddress(50) - assert.Nil(t, addr, "expected no address") - - randAddrs := randNetAddressPairs(t, 1) - addrSrc := randAddrs[0] - book.AddAddress(addrSrc.addr, addrSrc.src) - - // pick an address when we only have new address - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(100) - assert.NotNil(t, addr, "expected an address") - - // pick an address when we only have old address - book.MarkGood(addrSrc.addr) - addr = book.PickAddress(0) - assert.NotNil(t, addr, "expected an address") - addr = book.PickAddress(50) - assert.NotNil(t, addr, "expected an address") - - // in this case, nNew==0 but we biased 100% to new, so we return nil - addr = book.PickAddress(100) - assert.Nil(t, addr, "did not expected an address") -} - -func TestAddrBookSaveLoad(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - // 0 addresses - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.saveToFile(fname) - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.loadFromFile(fname) - - assert.Zero(t, book.Size()) - - // 100 addresses - randAddrs := randNetAddressPairs(t, 100) - - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - } - - assert.Equal(t, 100, book.Size()) - book.saveToFile(fname) - - book = NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - book.loadFromFile(fname) - - assert.Equal(t, 100, book.Size()) -} - -func TestAddrBookLookup(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - addr := addrSrc.addr - src := addrSrc.src - book.AddAddress(addr, src) - - ka := book.addrLookup[addr.ID] - assert.NotNil(t, ka, 
"Expected to find KnownAddress %v but wasn't there.", addr) - - if !(ka.Addr.Equals(addr) && ka.Src.Equals(src)) { - t.Fatalf("KnownAddress doesn't match addr & src") - } - } -} - -func TestAddrBookPromoteToOld(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - randAddrs := randNetAddressPairs(t, 100) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - } - - // Attempt all addresses. - for _, addrSrc := range randAddrs { - book.MarkAttempt(addrSrc.addr) - } - - // Promote half of them - for i, addrSrc := range randAddrs { - if i%2 == 0 { - book.MarkGood(addrSrc.addr) - } - } - - // TODO: do more testing :) - - selection := book.GetSelection() - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection could not be bigger than the book") - } - - selection = book.GetSelectionWithBias(30) - t.Logf("selection: %v", selection) - - if len(selection) > book.Size() { - t.Errorf("selection with bias could not be bigger than the book") - } - - assert.Equal(t, book.Size(), 100, "expecting book size to be 100") -} - -func TestAddrBookHandlesDuplicates(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - randAddrs := randNetAddressPairs(t, 100) - - differentSrc := randIPv4Address(t) - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - book.AddAddress(addrSrc.addr, addrSrc.src) // duplicate - book.AddAddress(addrSrc.addr, differentSrc) // different src - } - - assert.Equal(t, 100, book.Size()) -} - -type netAddressPair struct { - addr *p2p.NetAddress - src *p2p.NetAddress -} - -func randNetAddressPairs(t *testing.T, n int) []netAddressPair { - randAddrs := make([]netAddressPair, n) - for i := 0; i < n; i++ { - randAddrs[i] = netAddressPair{addr: randIPv4Address(t), src: randIPv4Address(t)} - } - return randAddrs -} - -func randIPv4Address(t *testing.T) *p2p.NetAddress { - for { - ip := fmt.Sprintf("%v.%v.%v.%v", - rand.Intn(254)+1, - rand.Intn(255), - rand.Intn(255), - rand.Intn(255), - ) - port := rand.Intn(65535-1) + 1 - id := p2p.ID(hex.EncodeToString(cmn.RandBytes(p2p.IDByteLength))) - idAddr := p2p.IDAddressString(id, fmt.Sprintf("%v:%v", ip, port)) - addr, err := p2p.NewNetAddressString(idAddr) - assert.Nil(t, err, "error generating rand network address") - if addr.Routable() { - return addr - } - } -} - -func TestAddrBookRemoveAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - addr := randIPv4Address(t) - book.AddAddress(addr, addr) - assert.Equal(t, 1, book.Size()) - - book.RemoveAddress(addr) - assert.Equal(t, 0, book.Size()) - - nonExistingAddr := randIPv4Address(t) - book.RemoveAddress(nonExistingAddr) - assert.Equal(t, 0, book.Size()) -} - -func TestAddrBookGetSelection(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - assert.Empty(t, book.GetSelection()) - - // 2) add one address - addr := randIPv4Address(t) - book.AddAddress(addr, addr) - - assert.Equal(t, 1, len(book.GetSelection())) - assert.Equal(t, addr, book.GetSelection()[0]) - - // 3) add a bunch of addresses - randAddrs := 
randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection := book.GetSelection() - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Errorf("selection %v could not be bigger than the book", selection) - } -} - -func TestAddrBookGetSelectionWithBias(t *testing.T) { - const biasTowardsNewAddrs = 30 - - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - - // 1) empty book - selection := book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Empty(t, selection) - - // 2) add one address - addr := randIPv4Address(t) - book.AddAddress(addr, addr) - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - assert.Equal(t, 1, len(selection)) - assert.Equal(t, addr, selection[0]) - - // 3) add a bunch of addresses - randAddrs := randNetAddressPairs(t, 100) - for _, addrSrc := range randAddrs { - book.AddAddress(addrSrc.addr, addrSrc.src) - } - - // check there is no duplicates - addrs := make(map[string]*p2p.NetAddress) - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - for _, addr := range selection { - if dup, ok := addrs[addr.String()]; ok { - t.Fatalf("selection %v contains duplicates %v", selection, dup) - } - addrs[addr.String()] = addr - } - - if len(selection) > book.Size() { - t.Fatalf("selection %v could not be bigger than the book", selection) - } - - // 4) mark 80% of the addresses as good - randAddrsLen := len(randAddrs) - for i, addrSrc := range randAddrs { - if int((float64(i)/float64(randAddrsLen))*100) >= 20 { - book.MarkGood(addrSrc.addr) - } - } - - selection = book.GetSelectionWithBias(biasTowardsNewAddrs) - - // check that ~70% of addresses returned are good - good := 0 - for _, addr := range selection { - if book.IsGood(addr) { - good++ - } - } - got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs) - if got >= expected { - t.Fatalf("expected more good peers (%% got: %d, %% expected: %d, number of good addrs: %d, total: %d)", got, expected, good, len(selection)) - } -} - -func TestAddrBookHasAddress(t *testing.T) { - fname := createTempFileName("addrbook_test") - defer deleteTempFile(fname) - - book := NewAddrBook(fname, true) - book.SetLogger(log.TestingLogger()) - addr := randIPv4Address(t) - book.AddAddress(addr, addr) - - assert.True(t, book.HasAddress(addr)) - - book.RemoveAddress(addr) - - assert.False(t, book.HasAddress(addr)) -} diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go deleted file mode 100644 index 0b8bf471..00000000 --- a/p2p/pex/errors.go +++ /dev/null @@ -1,32 +0,0 @@ -package pex - -import ( - "fmt" - - "github.com/tendermint/tendermint/p2p" -) - -type ErrAddrBookNonRoutable struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookNonRoutable) Error() string { - return fmt.Sprintf("Cannot add non-routable address %v", err.Addr) -} - -type ErrAddrBookSelf struct { - Addr *p2p.NetAddress -} - -func (err ErrAddrBookSelf) Error() string { - return fmt.Sprintf("Cannot add ourselves with address %v", err.Addr) -} - -type ErrAddrBookNilAddr struct { - Addr *p2p.NetAddress - Src *p2p.NetAddress -} - -func (err ErrAddrBookNilAddr) Error() string { - return fmt.Sprintf("Cannot add a nil 
address. Got (addr, src) = (%v, %v)", err.Addr, err.Src) -} diff --git a/p2p/pex/file.go b/p2p/pex/file.go deleted file mode 100644 index 38142dd9..00000000 --- a/p2p/pex/file.go +++ /dev/null @@ -1,83 +0,0 @@ -package pex - -import ( - "encoding/json" - "os" - - cmn "github.com/tendermint/tmlibs/common" -) - -/* Loading & Saving */ - -type addrBookJSON struct { - Key string `json:"key"` - Addrs []*knownAddress `json:"addrs"` -} - -func (a *addrBook) saveToFile(filePath string) { - a.Logger.Info("Saving AddrBook to file", "size", a.Size()) - - a.mtx.Lock() - defer a.mtx.Unlock() - // Compile Addrs - addrs := []*knownAddress{} - for _, ka := range a.addrLookup { - addrs = append(addrs, ka) - } - - aJSON := &addrBookJSON{ - Key: a.key, - Addrs: addrs, - } - - jsonBytes, err := json.MarshalIndent(aJSON, "", "\t") - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "err", err) - return - } - err = cmn.WriteFileAtomic(filePath, jsonBytes, 0644) - if err != nil { - a.Logger.Error("Failed to save AddrBook to file", "file", filePath, "err", err) - } -} - -// Returns false if file does not exist. -// cmn.Panics if file is corrupt. -func (a *addrBook) loadFromFile(filePath string) bool { - // If doesn't exist, do nothing. - _, err := os.Stat(filePath) - if os.IsNotExist(err) { - return false - } - - // Load addrBookJSON{} - r, err := os.Open(filePath) - if err != nil { - cmn.PanicCrisis(cmn.Fmt("Error opening file %s: %v", filePath, err)) - } - defer r.Close() // nolint: errcheck - aJSON := &addrBookJSON{} - dec := json.NewDecoder(r) - err = dec.Decode(aJSON) - if err != nil { - cmn.PanicCrisis(cmn.Fmt("Error reading file %s: %v", filePath, err)) - } - - // Restore all the fields... - // Restore the key - a.key = aJSON.Key - // Restore .bucketsNew & .bucketsOld - for _, ka := range aJSON.Addrs { - for _, bucketIndex := range ka.Buckets { - bucket := a.getBucket(ka.BucketType, bucketIndex) - bucket[ka.Addr.String()] = ka - } - a.addrLookup[ka.ID()] = ka - if ka.BucketType == bucketTypeNew { - a.nNew++ - } else { - a.nOld++ - } - } - return true -} diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go deleted file mode 100644 index 5673dec1..00000000 --- a/p2p/pex/known_address.go +++ /dev/null @@ -1,141 +0,0 @@ -package pex - -import ( - "time" - - "github.com/tendermint/tendermint/p2p" -) - -// knownAddress tracks information about a known network address -// that is used to determine how viable an address is. 
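// An illustrative serialized form, matching the json tags below (editor's
// sketch with made-up values; the nested NetAddress fields are elided):
//
//	{"addr": {...}, "src": {...}, "attempts": 2,
//	 "last_attempt": "2018-06-01T12:00:00Z",
//	 "last_success": "0001-01-01T00:00:00Z",
//	 "bucket_type": 1, "buckets": [17]}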
-type knownAddress struct { - Addr *p2p.NetAddress `json:"addr"` - Src *p2p.NetAddress `json:"src"` - Attempts int32 `json:"attempts"` - LastAttempt time.Time `json:"last_attempt"` - LastSuccess time.Time `json:"last_success"` - BucketType byte `json:"bucket_type"` - Buckets []int `json:"buckets"` -} - -func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { - return &knownAddress{ - Addr: addr, - Src: src, - Attempts: 0, - LastAttempt: time.Now(), - BucketType: bucketTypeNew, - Buckets: nil, - } -} - -func (ka *knownAddress) ID() p2p.ID { - return ka.Addr.ID -} - -func (ka *knownAddress) copy() *knownAddress { - return &knownAddress{ - Addr: ka.Addr, - Src: ka.Src, - Attempts: ka.Attempts, - LastAttempt: ka.LastAttempt, - LastSuccess: ka.LastSuccess, - BucketType: ka.BucketType, - Buckets: ka.Buckets, - } -} - -func (ka *knownAddress) isOld() bool { - return ka.BucketType == bucketTypeOld -} - -func (ka *knownAddress) isNew() bool { - return ka.BucketType == bucketTypeNew -} - -func (ka *knownAddress) markAttempt() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts++ -} - -func (ka *knownAddress) markGood() { - now := time.Now() - ka.LastAttempt = now - ka.Attempts = 0 - ka.LastSuccess = now -} - -func (ka *knownAddress) addBucketRef(bucketIdx int) int { - for _, bucket := range ka.Buckets { - if bucket == bucketIdx { - // TODO refactor to return error? - // log.Warn(Fmt("Bucket already exists in ka.Buckets: %v", ka)) - return -1 - } - } - ka.Buckets = append(ka.Buckets, bucketIdx) - return len(ka.Buckets) -} - -func (ka *knownAddress) removeBucketRef(bucketIdx int) int { - buckets := []int{} - for _, bucket := range ka.Buckets { - if bucket != bucketIdx { - buckets = append(buckets, bucket) - } - } - if len(buckets) != len(ka.Buckets)-1 { - // TODO refactor to return error? - // log.Warn(Fmt("bucketIdx not found in ka.Buckets: %v", ka)) - return -1 - } - ka.Buckets = buckets - return len(ka.Buckets) -} - -/* - An address is bad if the address in question is a New address, has not been tried in the last - minute, and meets one of the following criteria: - - 1) It claims to be from the future - 2) It hasn't been seen in over a week - 3) It has failed at least three times and never succeeded - 4) It has failed ten times in the last week - - All addresses that meet these criteria are assumed to be worthless and not - worth keeping hold of. - -*/ -func (ka *knownAddress) isBad() bool { - // Is Old --> good - if ka.BucketType == bucketTypeOld { - return false - } - - // Has been attempted in the last minute --> good - if ka.LastAttempt.After(time.Now().Add(-1 * time.Minute)) { - return false - } - - // TODO: From the future? - - // Too old? - // TODO: should be a timestamp of last seen, not just last attempt - if ka.LastAttempt.Before(time.Now().Add(-1 * numMissingDays * time.Hour * 24)) { - return true - } - - // Never succeeded? - if ka.LastSuccess.IsZero() && ka.Attempts >= numRetries { - return true - } - - // Hasn't succeeded in too long? - if ka.LastSuccess.Before(time.Now().Add(-1*minBadDays*time.Hour*24)) && - ka.Attempts >= maxFailures { - return true - } - - return false -} diff --git a/p2p/pex/params.go b/p2p/pex/params.go deleted file mode 100644 index 29b4d45a..00000000 --- a/p2p/pex/params.go +++ /dev/null @@ -1,55 +0,0 @@ -package pex - -import "time" - -const ( - // addresses under which the address manager will claim to need more addresses. - needAddressThreshold = 1000 - - // interval used to dump the address cache to disk for future use. 
-	dumpAddressInterval = time.Minute * 2
-
-	// max addresses in each old address bucket.
-	oldBucketSize = 64
-
-	// buckets we split old addresses over.
-	oldBucketCount = 64
-
-	// max addresses in each new address bucket.
-	newBucketSize = 64
-
-	// buckets that we spread new addresses over.
-	newBucketCount = 256
-
-	// old buckets over which an address group will be spread.
-	oldBucketsPerGroup = 4
-
-	// new buckets over which a source address group will be spread.
-	newBucketsPerGroup = 32
-
-	// buckets a frequently seen new address may end up in.
-	maxNewBucketsPerAddress = 4
-
-	// days before which we assume an address has vanished
-	// if we have not seen it announced in that long.
-	numMissingDays = 7
-
-	// tries without a single success before we assume an address is bad.
-	numRetries = 3
-
-	// max failures we will accept without a success before considering an address bad.
-	maxFailures = 10 // ?
-
-	// days since the last success before we will consider evicting an address.
-	minBadDays = 7
-
-	// % of total addresses known returned by GetSelection.
-	getSelectionPercent = 23
-
-	// min addresses that must be returned by GetSelection. Useful for bootstrapping.
-	minGetSelection = 32
-
-	// max addresses returned by GetSelection
-	// NOTE: this must match "maxMsgSize"
-	maxGetSelection = 250
-)
diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go
deleted file mode 100644
index 27ed422c..00000000
--- a/p2p/pex/pex_reactor.go
+++ /dev/null
@@ -1,700 +0,0 @@
-package pex
-
-import (
-	"fmt"
-	"reflect"
-	"sort"
-	"sync"
-	"time"
-
-	amino "github.com/tendermint/go-amino"
-	cmn "github.com/tendermint/tmlibs/common"
-
-	"github.com/tendermint/tendermint/p2p"
-	"github.com/tendermint/tendermint/p2p/conn"
-)
-
-type Peer = p2p.Peer
-
-const (
-	// PexChannel is a channel for PEX messages
-	PexChannel = byte(0x00)
-
-	// over-estimate of max NetAddress size
-	// hexID (40) + IP (16) + Port (2) + Name (100) ...
-	// NOTE: don't use massive DNS names ..
-	maxAddressSize = 256
-
-	// NOTE: amplification factor!
-	// a small request results in up to a maxMsgSize response
-	maxMsgSize = maxAddressSize * maxGetSelection
-
-	// ensure we have enough peers
-	defaultEnsurePeersPeriod   = 30 * time.Second
-	defaultMinNumOutboundPeers = p2p.DefaultMinNumOutboundPeers
-
-	// Seed/Crawler constants
-
-	// We want seeds to only advertise good peers. Therefore they should wait at
-	// least as long as we expect it to take for a peer to become good before
-	// disconnecting.
-	// see consensus/reactor.go: blocksToContributeToBecomeGoodPeer
-	// 10000 blocks assuming 1s blocks ~ 2.7 hours.
-	defaultSeedDisconnectWaitPeriod = 3 * time.Hour
-
-	defaultCrawlPeerInterval = 2 * time.Minute // don't redial within this interval. TODO: add back-off.
-
-	defaultCrawlPeersPeriod = 30 * time.Second // check some peers every period
-
-	maxAttemptsToDial = 16 // ~35h in total (last attempt ~18h)
-
-	// if a node connects to a seed, it does not have any trusted peers yet.
-	// Especially in the beginning, the node should have more trusted peers than
-	// untrusted.
-	biasToSelectNewPeers = 30 // 70 to select good peers
-)
-
-// PEXReactor handles PEX (peer exchange) and ensures that an
-// adequate number of peers are connected to the switch.
-//
-// It uses `AddrBook` (address book) to store `NetAddress`es of the peers.
-//
-// ## Preventing abuse
-//
-// Only accept pexAddrsMsg from peers we sent a corresponding pexRequestMsg to.
-// Only accept one pexRequestMsg every ~defaultEnsurePeersPeriod.
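The amplification note in the const block above is easy to quantify: a pexRequestMessage of a few bytes can elicit a response of up to maxMsgSize bytes, which is why requests are rate-limited and responses size-capped. A quick sketch of the bound, using the constants copied from above:

```go
package main

import "fmt"

func main() {
	const (
		maxAddressSize  = 256 // over-estimate of one serialized NetAddress
		maxGetSelection = 250 // max addresses returned by GetSelection
	)
	// A tiny request can trigger a response of up to this many bytes.
	fmt.Println(maxAddressSize * maxGetSelection) // 64000 (~64kB)
}
```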
-type PEXReactor struct {
-	p2p.BaseReactor
-
-	book              AddrBook
-	config            *PEXReactorConfig
-	ensurePeersPeriod time.Duration // TODO: should go in the config
-
-	// maps to prevent abuse
-	requestsSent         *cmn.CMap // ID->struct{}: unanswered send requests
-	lastReceivedRequests *cmn.CMap // ID->time.Time: last time peer requested from us
-
-	attemptsToDial sync.Map // address (string) -> {number of attempts (int), last time dialed (time.Time)}
-}
-
-func (pexR *PEXReactor) minReceiveRequestInterval() time.Duration {
-	// NOTE: must be less than ensurePeersPeriod, otherwise we'll request
-	// peers too quickly from others and they'll think we're bad!
-	return pexR.ensurePeersPeriod / 3
-}
-
-// PEXReactorConfig holds reactor specific configuration data.
-type PEXReactorConfig struct {
-	// Seed/Crawler mode
-	SeedMode bool
-
-	// Seeds is a list of addresses the reactor may use
-	// if it can't connect to peers in the addrbook.
-	Seeds []string
-
-	// PrivatePeerIDs is a list of peer IDs, which must not be gossiped to other
-	// peers.
-	PrivatePeerIDs []string
-}
-
-type _attemptsToDial struct {
-	number     int
-	lastDialed time.Time
-}
-
-// NewPEXReactor creates a new PEX reactor.
-func NewPEXReactor(b AddrBook, config *PEXReactorConfig) *PEXReactor {
-	r := &PEXReactor{
-		book:                 b,
-		config:               config,
-		ensurePeersPeriod:    defaultEnsurePeersPeriod,
-		requestsSent:         cmn.NewCMap(),
-		lastReceivedRequests: cmn.NewCMap(),
-	}
-	r.BaseReactor = *p2p.NewBaseReactor("PEXReactor", r)
-	return r
-}
-
-// OnStart implements BaseService
-func (r *PEXReactor) OnStart() error {
-	if err := r.BaseReactor.OnStart(); err != nil {
-		return err
-	}
-	err := r.book.Start()
-	if err != nil && err != cmn.ErrAlreadyStarted {
-		return err
-	}
-
-	// return err if the user provided a bad seed address
-	// or a host name that we can't resolve
-	if err := r.checkSeeds(); err != nil {
-		return err
-	}
-
-	// Check if this node should run
-	// in seed/crawler mode
-	if r.config.SeedMode {
-		go r.crawlPeersRoutine()
-	} else {
-		go r.ensurePeersRoutine()
-	}
-	return nil
-}
-
-// OnStop implements BaseService
-func (r *PEXReactor) OnStop() {
-	r.BaseReactor.OnStop()
-	r.book.Stop()
-}
-
-// GetChannels implements Reactor
-func (r *PEXReactor) GetChannels() []*conn.ChannelDescriptor {
-	return []*conn.ChannelDescriptor{
-		{
-			ID:                PexChannel,
-			Priority:          1,
-			SendQueueCapacity: 10,
-		},
-	}
-}
-
-// AddPeer implements Reactor by adding the peer to the address book (if inbound)
-// or by requesting more addresses (if outbound).
-func (r *PEXReactor) AddPeer(p Peer) {
-	if p.IsOutbound() {
-		// For outbound peers, the address is already in the books -
-		// either via DialPeersAsync or r.Receive.
-		// Ask it for more peers if we need them.
-		if r.book.NeedMoreAddrs() {
-			r.RequestAddrs(p)
-		}
-	} else {
-		// the inbound peer is its own source
-		addr := p.NodeInfo().NetAddress()
-		src := addr
-
-		// ignore private addrs
-		if isAddrPrivate(addr, r.config.PrivatePeerIDs) {
-			return
-		}
-
-		// add to book. don't RequestAddrs right away because
-		// we don't trust inbound as much - let ensurePeersRoutine handle it.
-		err := r.book.AddAddress(addr, src)
-		r.logErrAddrBook(err)
-	}
-}
-
-func (r *PEXReactor) logErrAddrBook(err error) {
-	if err != nil {
-		switch err.(type) {
-		case ErrAddrBookNilAddr:
-			r.Logger.Error("Failed to add new address", "err", err)
-		default:
-			// non-routable, self, full book, etc.
-			r.Logger.Debug("Failed to add new address", "err", err)
-		}
-	}
-}
-
-// RemovePeer implements Reactor.
-func (r *PEXReactor) RemovePeer(p Peer, reason interface{}) { - id := string(p.ID()) - r.requestsSent.Delete(id) - r.lastReceivedRequests.Delete(id) -} - -// Receive implements Reactor by handling incoming PEX messages. -func (r *PEXReactor) Receive(chID byte, src Peer, msgBytes []byte) { - msg, err := DecodeMessage(msgBytes) - if err != nil { - r.Logger.Error("Error decoding message", "src", src, "chId", chID, "msg", msg, "err", err, "bytes", msgBytes) - r.Switch.StopPeerForError(src, err) - return - } - r.Logger.Debug("Received message", "src", src, "chId", chID, "msg", msg) - - switch msg := msg.(type) { - case *pexRequestMessage: - // Check we're not receiving too many requests - if err := r.receiveRequest(src); err != nil { - r.Switch.StopPeerForError(src, err) - return - } - - // Seeds disconnect after sending a batch of addrs - // NOTE: this is a prime candidate for amplification attacks - // so it's important we - // 1) restrict how frequently peers can request - // 2) limit the output size - if r.config.SeedMode { - r.SendAddrs(src, r.book.GetSelectionWithBias(biasToSelectNewPeers)) - r.Switch.StopPeerGracefully(src) - } else { - r.SendAddrs(src, r.book.GetSelection()) - } - - case *pexAddrsMessage: - // If we asked for addresses, add them to the book - if err := r.ReceiveAddrs(msg.Addrs, src); err != nil { - r.Switch.StopPeerForError(src, err) - return - } - default: - r.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) - } -} - -// enforces a minimum amount of time between requests -func (r *PEXReactor) receiveRequest(src Peer) error { - id := string(src.ID()) - v := r.lastReceivedRequests.Get(id) - if v == nil { - // initialize with empty time - lastReceived := time.Time{} - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - lastReceived := v.(time.Time) - if lastReceived.Equal(time.Time{}) { - // first time gets a free pass. then we start tracking the time - lastReceived = time.Now() - r.lastReceivedRequests.Set(id, lastReceived) - return nil - } - - now := time.Now() - minInterval := r.minReceiveRequestInterval() - if now.Sub(lastReceived) < minInterval { - return fmt.Errorf("Peer (%v) sent next PEX request too soon. lastReceived: %v, now: %v, minInterval: %v. Disconnecting", - src.ID(), - lastReceived, - now, - minInterval, - ) - } - r.lastReceivedRequests.Set(id, now) - return nil -} - -// RequestAddrs asks peer for more addresses if we do not already -// have a request out for this peer. -func (r *PEXReactor) RequestAddrs(p Peer) { - r.Logger.Debug("Request addrs", "from", p) - id := string(p.ID()) - if r.requestsSent.Has(id) { - return - } - r.requestsSent.Set(id, struct{}{}) - p.Send(PexChannel, cdc.MustMarshalBinary(&pexRequestMessage{})) -} - -// ReceiveAddrs adds the given addrs to the addrbook if theres an open -// request for this peer and deletes the open request. -// If there's no open request for the src peer, it returns an error. -func (r *PEXReactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { - - id := string(src.ID()) - if !r.requestsSent.Has(id) { - return cmn.NewError("Received unsolicited pexAddrsMessage") - } - r.requestsSent.Delete(id) - - srcAddr := src.NodeInfo().NetAddress() - for _, netAddr := range addrs { - // NOTE: GetSelection methods should never return nil addrs - if netAddr == nil { - return cmn.NewError("received nil addr") - } - - // ignore private peers - // TODO: give private peers to AddrBook so it can enforce this on AddAddress. 
-		// We'd then have to check for ErrPrivatePeer on AddAddress here, which is
-		// an error we just ignore (maybe the peer is probing us for our private peers :P)
-		if isAddrPrivate(netAddr, r.config.PrivatePeerIDs) {
-			continue
-		}
-
-		err := r.book.AddAddress(netAddr, srcAddr)
-		r.logErrAddrBook(err)
-	}
-	return nil
-}
-
-// SendAddrs sends addrs to the peer.
-func (r *PEXReactor) SendAddrs(p Peer, netAddrs []*p2p.NetAddress) {
-	p.Send(PexChannel, cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: netAddrs}))
-}
-
-// SetEnsurePeersPeriod sets the period used to ensure peers are connected.
-func (r *PEXReactor) SetEnsurePeersPeriod(d time.Duration) {
-	r.ensurePeersPeriod = d
-}
-
-// Ensures that sufficient peers are connected. (continuous)
-func (r *PEXReactor) ensurePeersRoutine() {
-	var (
-		seed   = cmn.NewRand()
-		jitter = seed.Int63n(r.ensurePeersPeriod.Nanoseconds())
-	)
-
-	// Randomize the first round of communication to avoid a thundering herd.
-	// If no potential peers are present, start connecting directly so we guarantee
-	// swift setup with the help of configured seeds.
-	if r.hasPotentialPeers() {
-		time.Sleep(time.Duration(jitter))
-	}
-
-	// fire once immediately.
-	// ensures we dial the seeds right away if the book is empty
-	r.ensurePeers()
-
-	// fire periodically
-	ticker := time.NewTicker(r.ensurePeersPeriod)
-	for {
-		select {
-		case <-ticker.C:
-			r.ensurePeers()
-		case <-r.Quit():
-			ticker.Stop()
-			return
-		}
-	}
-}
-
-// ensurePeers ensures that sufficient peers are connected. (once)
-//
-// It relies on an address-selection heuristic that we haven't perfected yet, and
-// the book may be manually edited by the node operator. The book should not be
-// used to compute what addresses are already connected or not.
-func (r *PEXReactor) ensurePeers() {
-	var (
-		out, in, dial = r.Switch.NumPeers()
-		numToDial     = defaultMinNumOutboundPeers - (out + dial)
-	)
-	r.Logger.Info(
-		"Ensure peers",
-		"numOutPeers", out,
-		"numInPeers", in,
-		"numDialing", dial,
-		"numToDial", numToDial,
-	)
-
-	if numToDial <= 0 {
-		return
-	}
-
-	// bias to prefer more vetted peers when we have fewer connections.
-	// not perfect, but somewhat ensures that we prioritize connecting to more-vetted peers.
-	// NOTE: range here is [10, 90]. Too high?
-	newBias := cmn.MinInt(out, 8)*10 + 10
-
-	toDial := make(map[p2p.ID]*p2p.NetAddress)
-	// Try maxAttempts times to pick numToDial addresses to dial
-	maxAttempts := numToDial * 3
-
-	for i := 0; i < maxAttempts && len(toDial) < numToDial; i++ {
-		try := r.book.PickAddress(newBias)
-		if try == nil {
-			continue
-		}
-		if _, selected := toDial[try.ID]; selected {
-			continue
-		}
-		if dialing := r.Switch.IsDialing(try.ID); dialing {
-			continue
-		}
-		if connected := r.Switch.Peers().Has(try.ID); connected {
-			continue
-		}
-		// TODO: consider moving some checks from toDial into here
-		// so we don't even consider dialing peers that we want to wait
-		// before dialing again, or have dialed too many times already
-		r.Logger.Info("Will dial address", "addr", try)
-		toDial[try.ID] = try
-	}
-
-	// Dial picked addresses
-	for _, addr := range toDial {
-		go r.dialPeer(addr)
-	}
-
-	// If we need more addresses, pick a random peer and ask for more.
-	if r.book.NeedMoreAddrs() {
-		peers := r.Switch.Peers().List()
-		peersCount := len(peers)
-		if peersCount > 0 {
-			peer := peers[cmn.RandInt()%peersCount] // nolint: gas
-			r.Logger.Info("We need more addresses. Sending pexRequest to random peer", "peer", peer)
-			r.RequestAddrs(peer)
-		}
-	}
-
-	// If we are not connected to nor dialing anybody, fall back to dialing a seed.
- if out+in+dial+len(toDial) == 0 { - r.Logger.Info("No addresses to dial nor connected peers. Falling back to seeds") - r.dialSeeds() - } -} - -func (r *PEXReactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDialed time.Time) { - _attempts, ok := r.attemptsToDial.Load(addr.DialString()) - if !ok { - return - } - atd := _attempts.(_attemptsToDial) - return atd.number, atd.lastDialed -} - -func (r *PEXReactor) dialPeer(addr *p2p.NetAddress) { - attempts, lastDialed := r.dialAttemptsInfo(addr) - - if attempts > maxAttemptsToDial { - r.Logger.Error("Reached max attempts to dial", "addr", addr, "attempts", attempts) - r.book.MarkBad(addr) - return - } - - // exponential backoff if it's not our first attempt to dial given address - if attempts > 0 { - jitterSeconds := time.Duration(cmn.RandFloat64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second) - sinceLastDialed := time.Since(lastDialed) - if sinceLastDialed < backoffDuration { - r.Logger.Debug("Too early to dial", "addr", addr, "backoff_duration", backoffDuration, "last_dialed", lastDialed, "time_since", sinceLastDialed) - return - } - } - - err := r.Switch.DialPeerWithAddress(addr, false) - if err != nil { - r.Logger.Error("Dialing failed", "addr", addr, "err", err, "attempts", attempts) - // TODO: detect more "bad peer" scenarios - if _, ok := err.(p2p.ErrSwitchAuthenticationFailure); ok { - r.book.MarkBad(addr) - r.attemptsToDial.Delete(addr.DialString()) - } else { - r.book.MarkAttempt(addr) - // FIXME: if the addr is going to be removed from the addrbook (hard to - // tell at this point), we need to Delete it from attemptsToDial, not - // record another attempt. - // record attempt - r.attemptsToDial.Store(addr.DialString(), _attemptsToDial{attempts + 1, time.Now()}) - } - } else { - // cleanup any history - r.attemptsToDial.Delete(addr.DialString()) - } -} - -// check seed addresses are well formed -func (r *PEXReactor) checkSeeds() error { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return nil - } - _, errs := p2p.NewNetAddressStrings(r.config.Seeds) - for _, err := range errs { - if err != nil { - return err - } - } - return nil -} - -// randomly dial seeds until we connect to one or exhaust them -func (r *PEXReactor) dialSeeds() { - lSeeds := len(r.config.Seeds) - if lSeeds == 0 { - return - } - seedAddrs, _ := p2p.NewNetAddressStrings(r.config.Seeds) - - perm := cmn.RandPerm(lSeeds) - // perm := r.Switch.rng.Perm(lSeeds) - for _, i := range perm { - // dial a random seed - seedAddr := seedAddrs[i] - err := r.Switch.DialPeerWithAddress(seedAddr, false) - if err == nil { - return - } - r.Switch.Logger.Error("Error dialing seed", "err", err, "seed", seedAddr) - } - r.Switch.Logger.Error("Couldn't connect to any seeds") -} - -// AttemptsToDial returns the number of attempts to dial specific address. It -// returns 0 if never attempted or successfully connected. -func (r *PEXReactor) AttemptsToDial(addr *p2p.NetAddress) int { - lAttempts, attempted := r.attemptsToDial.Load(addr.DialString()) - if attempted { - return lAttempts.(_attemptsToDial).number - } - return 0 -} - -//---------------------------------------------------------- - -// Explores the network searching for more peers. (continuous) -// Seed/Crawler Mode causes this node to quickly disconnect -// from peers, except other seed nodes. 
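The "~35h in total (last attempt ~18h)" estimate attached to maxAttemptsToDial follows directly from the `1 << attempts` backoff in dialPeer above; a quick standalone sketch that reproduces the schedule (jitter omitted):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	const maxAttemptsToDial = 16
	var total time.Duration
	for attempts := 1; attempts <= maxAttemptsToDial; attempts++ {
		// mirrors dialPeer: backoff = jitter + (1 << attempts) seconds
		backoff := time.Duration(1<<uint(attempts)) * time.Second
		total += backoff
	}
	fmt.Println(time.Duration(1<<16) * time.Second) // final wait: 65536s ~ 18.2h
	fmt.Println(total)                              // sum: 131070s ~ 36.4h, i.e. "~35h in total"
}
```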
-func (r *PEXReactor) crawlPeersRoutine() { - // Do an initial crawl - r.crawlPeers() - - // Fire periodically - ticker := time.NewTicker(defaultCrawlPeersPeriod) - - for { - select { - case <-ticker.C: - r.attemptDisconnects() - r.crawlPeers() - case <-r.Quit(): - return - } - } -} - -// hasPotentialPeers indicates if there is a potential peer to connect to, by -// consulting the Switch as well as the AddrBook. -func (r *PEXReactor) hasPotentialPeers() bool { - out, in, dial := r.Switch.NumPeers() - - return out+in+dial > 0 && len(r.book.ListOfKnownAddresses()) > 0 -} - -// crawlPeerInfo handles temporary data needed for the -// network crawling performed during seed/crawler mode. -type crawlPeerInfo struct { - // The listening address of a potential peer we learned about - Addr *p2p.NetAddress - - // The last time we attempt to reach this address - LastAttempt time.Time - - // The last time we successfully reached this address - LastSuccess time.Time -} - -// oldestFirst implements sort.Interface for []crawlPeerInfo -// based on the LastAttempt field. -type oldestFirst []crawlPeerInfo - -func (of oldestFirst) Len() int { return len(of) } -func (of oldestFirst) Swap(i, j int) { of[i], of[j] = of[j], of[i] } -func (of oldestFirst) Less(i, j int) bool { return of[i].LastAttempt.Before(of[j].LastAttempt) } - -// getPeersToCrawl returns addresses of potential peers that we wish to validate. -// NOTE: The status information is ordered as described above. -func (r *PEXReactor) getPeersToCrawl() []crawlPeerInfo { - var of oldestFirst - - // TODO: be more selective - addrs := r.book.ListOfKnownAddresses() - for _, addr := range addrs { - if len(addr.ID()) == 0 { - continue // dont use peers without id - } - - of = append(of, crawlPeerInfo{ - Addr: addr.Addr, - LastAttempt: addr.LastAttempt, - LastSuccess: addr.LastSuccess, - }) - } - sort.Sort(of) - return of -} - -// crawlPeers will crawl the network looking for new peer addresses. (once) -func (r *PEXReactor) crawlPeers() { - peerInfos := r.getPeersToCrawl() - - now := time.Now() - // Use addresses we know of to reach additional peers - for _, pi := range peerInfos { - // Do not attempt to connect with peers we recently dialed - if now.Sub(pi.LastAttempt) < defaultCrawlPeerInterval { - continue - } - // Otherwise, attempt to connect with the known address - err := r.Switch.DialPeerWithAddress(pi.Addr, false) - if err != nil { - r.book.MarkAttempt(pi.Addr) - continue - } - // Ask for more addresses - peer := r.Switch.Peers().Get(pi.Addr.ID) - r.RequestAddrs(peer) - } -} - -// attemptDisconnects checks if we've been with each peer long enough to disconnect -func (r *PEXReactor) attemptDisconnects() { - for _, peer := range r.Switch.Peers().List() { - if peer.Status().Duration < defaultSeedDisconnectWaitPeriod { - continue - } - if peer.IsPersistent() { - continue - } - r.Switch.StopPeerGracefully(peer) - } -} - -// isAddrPrivate returns true if addr.ID is a private ID. -func isAddrPrivate(addr *p2p.NetAddress, privatePeerIDs []string) bool { - for _, id := range privatePeerIDs { - if string(addr.ID) == id { - return true - } - } - return false -} - -//----------------------------------------------------------------------------- -// Messages - -// PexMessage is a primary type for PEX messages. Underneath, it could contain -// either pexRequestMessage, or pexAddrsMessage messages. 
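The `oldestFirst` ordering above is a plain `sort.Interface`; a minimal sketch of the pattern `getPeersToCrawl` relies on (addresses and times are made up):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

type info struct {
	addr        string
	lastAttempt time.Time
}

// oldestFirst-style ordering: least recently attempted addresses sort first,
// so the crawler revisits the stalest peers before fresher ones.
type oldestFirst []info

func (of oldestFirst) Len() int           { return len(of) }
func (of oldestFirst) Swap(i, j int)      { of[i], of[j] = of[j], of[i] }
func (of oldestFirst) Less(i, j int) bool { return of[i].lastAttempt.Before(of[j].lastAttempt) }

func main() {
	now := time.Now()
	peers := oldestFirst{
		{"peerA", now.Add(-1 * time.Minute)},
		{"peerB", now.Add(-3 * time.Hour)},
	}
	sort.Sort(peers)
	fmt.Println(peers[0].addr) // peerB: attempted longest ago, crawled first
}
```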
-type PexMessage interface{} - -func RegisterPexMessage(cdc *amino.Codec) { - cdc.RegisterInterface((*PexMessage)(nil), nil) - cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil) - cdc.RegisterConcrete(&pexAddrsMessage{}, "tendermint/p2p/PexAddrsMessage", nil) -} - -// DecodeMessage implements interface registered above. -func DecodeMessage(bz []byte) (msg PexMessage, err error) { - if len(bz) > maxMsgSize { - return msg, fmt.Errorf("Msg exceeds max size (%d > %d)", - len(bz), maxMsgSize) - } - err = cdc.UnmarshalBinary(bz, &msg) - return -} - -/* -A pexRequestMessage requests additional peer addresses. -*/ -type pexRequestMessage struct { -} - -func (m *pexRequestMessage) String() string { - return "[pexRequest]" -} - -/* -A message with announced peer addresses. -*/ -type pexAddrsMessage struct { - Addrs []*p2p.NetAddress -} - -func (m *pexAddrsMessage) String() string { - return fmt.Sprintf("[pexAddrs %v]", m.Addrs) -} diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go deleted file mode 100644 index 645e9317..00000000 --- a/p2p/pex/pex_reactor_test.go +++ /dev/null @@ -1,459 +0,0 @@ -package pex - -import ( - "fmt" - "io/ioutil" - "net" - "os" - "path/filepath" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/p2p/conn" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -func TestPEXReactorBasic(t *testing.T) { - r, book := createReactor(&PEXReactorConfig{}) - defer teardownReactor(book) - - assert.NotNil(t, r) - assert.NotEmpty(t, r.GetChannels()) -} - -func TestPEXReactorAddRemovePeer(t *testing.T) { - r, book := createReactor(&PEXReactorConfig{}) - defer teardownReactor(book) - - size := book.Size() - peer := p2p.CreateRandomPeer(false) - - r.AddPeer(peer) - assert.Equal(t, size+1, book.Size()) - - r.RemovePeer(peer, "peer not available") - - outboundPeer := p2p.CreateRandomPeer(true) - - r.AddPeer(outboundPeer) - assert.Equal(t, size+1, book.Size(), "outbound peers should not be added to the address book") - - r.RemovePeer(outboundPeer, "peer not available") -} - -// --- FAIL: TestPEXReactorRunning (11.10s) -// pex_reactor_test.go:411: expected all switches to be connected to at -// least one peer (switches: 0 => {outbound: 1, inbound: 0}, 1 => -// {outbound: 0, inbound: 1}, 2 => {outbound: 0, inbound: 0}, ) -// -// EXPLANATION: peers are getting rejected because in switch#addPeer we check -// if any peer (who we already connected to) has the same IP. Even though local -// peers have different IP addresses, they all have the same underlying remote -// IP: 127.0.0.1. 
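The wire helpers above are amino plumbing; a sketch of the encode/decode round trip using the same registration calls (assuming the go-amino version vendored here, with minimal stand-in types):

```go
package main

import (
	"fmt"

	amino "github.com/tendermint/go-amino"
)

// Minimal stand-ins for the PEX message types registered above.
type PexMessage interface{}
type pexRequestMessage struct{}

func main() {
	cdc := amino.NewCodec()
	// same registration pattern as RegisterPexMessage above
	cdc.RegisterInterface((*PexMessage)(nil), nil)
	cdc.RegisterConcrete(&pexRequestMessage{}, "tendermint/p2p/PexRequestMessage", nil)

	bz := cdc.MustMarshalBinary(&pexRequestMessage{}) // payload sent on PexChannel

	var msg PexMessage
	if err := cdc.UnmarshalBinary(bz, &msg); err != nil {
		panic(err)
	}
	fmt.Printf("%T\n", msg) // *main.pexRequestMessage
}
```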
-// -func TestPEXReactorRunning(t *testing.T) { - N := 3 - switches := make([]*p2p.Switch, N) - - // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck - - books := make([]*addrBook, N) - logger := log.TestingLogger() - - // create switches - for i := 0; i < N; i++ { - switches[i] = p2p.MakeSwitch(cfg, i, "testing", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { - books[i] = NewAddrBook(filepath.Join(dir, fmt.Sprintf("addrbook%d.json", i)), false) - books[i].SetLogger(logger.With("pex", i)) - sw.SetAddrBook(books[i]) - - sw.SetLogger(logger.With("pex", i)) - - r := NewPEXReactor(books[i], &PEXReactorConfig{}) - r.SetLogger(logger.With("pex", i)) - r.SetEnsurePeersPeriod(250 * time.Millisecond) - sw.AddReactor("pex", r) - - return sw - }) - } - - addOtherNodeAddrToAddrBook := func(switchIndex, otherSwitchIndex int) { - addr := switches[otherSwitchIndex].NodeInfo().NetAddress() - books[switchIndex].AddAddress(addr, addr) - } - - addOtherNodeAddrToAddrBook(0, 1) - addOtherNodeAddrToAddrBook(1, 0) - addOtherNodeAddrToAddrBook(2, 1) - - for i, sw := range switches { - sw.AddListener(p2p.NewDefaultListener("tcp", sw.NodeInfo().ListenAddr, true, logger.With("pex", i))) - - err := sw.Start() // start switch and reactors - require.Nil(t, err) - } - - assertPeersWithTimeout(t, switches, 10*time.Millisecond, 10*time.Second, N-1) - - // stop them - for _, s := range switches { - s.Stop() - } -} - -func TestPEXReactorReceive(t *testing.T) { - r, book := createReactor(&PEXReactorConfig{}) - defer teardownReactor(book) - - peer := p2p.CreateRandomPeer(false) - - // we have to send a request to receive responses - r.RequestAddrs(peer) - - size := book.Size() - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} - msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs}) - r.Receive(PexChannel, peer, msg) - assert.Equal(t, size+1, book.Size()) - - msg = cdc.MustMarshalBinary(&pexRequestMessage{}) - r.Receive(PexChannel, peer, msg) // should not panic. 
-} - -func TestPEXReactorRequestMessageAbuse(t *testing.T) { - r, book := createReactor(&PEXReactorConfig{}) - defer teardownReactor(book) - - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := newMockPeer() - p2p.AddPeerToSwitch(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - msg := cdc.MustMarshalBinary(&pexRequestMessage{}) - - // first time creates the entry - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // next time sets the last time value - r.Receive(PexChannel, peer, msg) - assert.True(t, r.lastReceivedRequests.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // third time is too many too soon - peer is removed - r.Receive(PexChannel, peer, msg) - assert.False(t, r.lastReceivedRequests.Has(id)) - assert.False(t, sw.Peers().Has(peer.ID())) -} - -func TestPEXReactorAddrsMessageAbuse(t *testing.T) { - r, book := createReactor(&PEXReactorConfig{}) - defer teardownReactor(book) - - sw := createSwitchAndAddReactors(r) - sw.SetAddrBook(book) - - peer := newMockPeer() - p2p.AddPeerToSwitch(sw, peer) - assert.True(t, sw.Peers().Has(peer.ID())) - - id := string(peer.ID()) - - // request addrs from the peer - r.RequestAddrs(peer) - assert.True(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} - msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs}) - - // receive some addrs. should clear the request - r.Receive(PexChannel, peer, msg) - assert.False(t, r.requestsSent.Has(id)) - assert.True(t, sw.Peers().Has(peer.ID())) - - // receiving more addrs causes a disconnect - r.Receive(PexChannel, peer, msg) - assert.False(t, sw.Peers().Has(peer.ID())) -} - -func TestPEXReactorUsesSeedsIfNeeded(t *testing.T) { - // directory to store address books - dir, err := ioutil.TempDir("", "pex_reactor") - require.Nil(t, err) - defer os.RemoveAll(dir) // nolint: errcheck - - // 1. create seed - seed := p2p.MakeSwitch( - cfg, - 0, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbook0.json"), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewPEXReactor(book, &PEXReactorConfig{}) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - ) - seed.AddListener( - p2p.NewDefaultListener( - "tcp", - seed.NodeInfo().ListenAddr, - true, - log.TestingLogger(), - ), - ) - require.Nil(t, seed.Start()) - defer seed.Stop() - - // 2. create usual peer with only seed configured. - peer := p2p.MakeSwitch( - cfg, - 1, - "127.0.0.1", - "123.123.123", - func(i int, sw *p2p.Switch) *p2p.Switch { - book := NewAddrBook(filepath.Join(dir, "addrbook1.json"), false) - book.SetLogger(log.TestingLogger()) - sw.SetAddrBook(book) - - sw.SetLogger(log.TestingLogger()) - - r := NewPEXReactor( - book, - &PEXReactorConfig{ - Seeds: []string{seed.NodeInfo().NetAddress().String()}, - }, - ) - r.SetLogger(log.TestingLogger()) - sw.AddReactor("pex", r) - return sw - }, - ) - require.Nil(t, peer.Start()) - defer peer.Stop() - - // 3. 
check that the peer connects to seed immediately - assertPeersWithTimeout(t, []*p2p.Switch{peer}, 10*time.Millisecond, 3*time.Second, 1) -} - -func TestPEXReactorCrawlStatus(t *testing.T) { - pexR, book := createReactor(&PEXReactorConfig{SeedMode: true}) - defer teardownReactor(book) - - // Seed/Crawler mode uses data from the Switch - sw := createSwitchAndAddReactors(pexR) - sw.SetAddrBook(book) - - // Create a peer, add it to the peer set and the addrbook. - peer := p2p.CreateRandomPeer(false) - p2p.AddPeerToSwitch(pexR.Switch, peer) - addr1 := peer.NodeInfo().NetAddress() - pexR.book.AddAddress(addr1, addr1) - - // Add a non-connected address to the book. - _, addr2 := p2p.CreateRoutableAddr() - pexR.book.AddAddress(addr2, addr1) - - // Get some peerInfos to crawl - peerInfos := pexR.getPeersToCrawl() - - // Make sure it has the proper number of elements - assert.Equal(t, 2, len(peerInfos)) - - // TODO: test -} - -func TestPEXReactorDoesNotAddPrivatePeersToAddrBook(t *testing.T) { - peer := p2p.CreateRandomPeer(false) - - pexR, book := createReactor(&PEXReactorConfig{PrivatePeerIDs: []string{string(peer.NodeInfo().ID)}}) - defer teardownReactor(book) - - // we have to send a request to receive responses - pexR.RequestAddrs(peer) - - size := book.Size() - addrs := []*p2p.NetAddress{peer.NodeInfo().NetAddress()} - msg := cdc.MustMarshalBinary(&pexAddrsMessage{Addrs: addrs}) - pexR.Receive(PexChannel, peer, msg) - assert.Equal(t, size, book.Size()) - - pexR.AddPeer(peer) - assert.Equal(t, size, book.Size()) -} - -func TestPEXReactorDialPeer(t *testing.T) { - pexR, book := createReactor(&PEXReactorConfig{}) - defer teardownReactor(book) - - sw := createSwitchAndAddReactors(pexR) - sw.SetAddrBook(book) - - peer := newMockPeer() - addr := peer.NodeInfo().NetAddress() - - assert.Equal(t, 0, pexR.AttemptsToDial(addr)) - - // 1st unsuccessful attempt - pexR.dialPeer(addr) - - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - // 2nd unsuccessful attempt - pexR.dialPeer(addr) - - // must be skipped because it is too early - assert.Equal(t, 1, pexR.AttemptsToDial(addr)) - - if !testing.Short() { - time.Sleep(3 * time.Second) - - // 3rd attempt - pexR.dialPeer(addr) - - assert.Equal(t, 2, pexR.AttemptsToDial(addr)) - } -} - -type mockPeer struct { - *cmn.BaseService - pubKey crypto.PubKey - addr *p2p.NetAddress - outbound, persistent bool -} - -func newMockPeer() mockPeer { - _, netAddr := p2p.CreateRoutableAddr() - mp := mockPeer{ - addr: netAddr, - pubKey: crypto.GenPrivKeyEd25519().PubKey(), - } - mp.BaseService = cmn.NewBaseService(nil, "MockPeer", mp) - mp.Start() - return mp -} - -func (mp mockPeer) ID() p2p.ID { return mp.addr.ID } -func (mp mockPeer) IsOutbound() bool { return mp.outbound } -func (mp mockPeer) IsPersistent() bool { return mp.persistent } -func (mp mockPeer) NodeInfo() p2p.NodeInfo { - return p2p.NodeInfo{ - ID: mp.addr.ID, - ListenAddr: mp.addr.DialString(), - } -} -func (mp mockPeer) RemoteIP() net.IP { return net.ParseIP("127.0.0.1") } -func (mp mockPeer) Status() conn.ConnectionStatus { return conn.ConnectionStatus{} } -func (mp mockPeer) Send(byte, []byte) bool { return false } -func (mp mockPeer) TrySend(byte, []byte) bool { return false } -func (mp mockPeer) Set(string, interface{}) {} -func (mp mockPeer) Get(string) interface{} { return nil } - -func assertPeersWithTimeout( - t *testing.T, - switches []*p2p.Switch, - checkPeriod, timeout time.Duration, - nPeers int, -) { - var ( - ticker = time.NewTicker(checkPeriod) - remaining = timeout - ) - - for { - select { - 
case <-ticker.C: - // check peers are connected - allGood := true - for _, s := range switches { - outbound, inbound, _ := s.NumPeers() - if outbound+inbound < nPeers { - allGood = false - } - } - remaining -= checkPeriod - if remaining < 0 { - remaining = 0 - } - if allGood { - return - } - case <-time.After(remaining): - numPeersStr := "" - for i, s := range switches { - outbound, inbound, _ := s.NumPeers() - numPeersStr += fmt.Sprintf("%d => {outbound: %d, inbound: %d}, ", i, outbound, inbound) - } - t.Errorf( - "expected all switches to be connected to at least one peer (switches: %s)", - numPeersStr, - ) - return - } - } -} - -func createReactor(conf *PEXReactorConfig) (r *PEXReactor, book *addrBook) { - // directory to store address book - dir, err := ioutil.TempDir("", "pex_reactor") - if err != nil { - panic(err) - } - book = NewAddrBook(filepath.Join(dir, "addrbook.json"), true) - book.SetLogger(log.TestingLogger()) - - r = NewPEXReactor(book, conf) - r.SetLogger(log.TestingLogger()) - return -} - -func teardownReactor(book *addrBook) { - err := os.RemoveAll(filepath.Dir(book.FilePath())) - if err != nil { - panic(err) - } -} - -func createSwitchAndAddReactors(reactors ...p2p.Reactor) *p2p.Switch { - sw := p2p.MakeSwitch(cfg, 0, "127.0.0.1", "123.123.123", func(i int, sw *p2p.Switch) *p2p.Switch { return sw }) - sw.SetLogger(log.TestingLogger()) - for _, r := range reactors { - sw.AddReactor(r.String(), r) - r.SetSwitch(sw) - } - return sw -} diff --git a/p2p/pex/wire.go b/p2p/pex/wire.go deleted file mode 100644 index 57fc9385..00000000 --- a/p2p/pex/wire.go +++ /dev/null @@ -1,11 +0,0 @@ -package pex - -import ( - "github.com/tendermint/go-amino" -) - -var cdc *amino.Codec = amino.NewCodec() - -func init() { - RegisterPexMessage(cdc) -} diff --git a/p2p/switch.go b/p2p/switch.go deleted file mode 100644 index bf5f9747..00000000 --- a/p2p/switch.go +++ /dev/null @@ -1,662 +0,0 @@ -package p2p - -import ( - "fmt" - "math" - "net" - "sync" - "time" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p/conn" - cmn "github.com/tendermint/tmlibs/common" -) - -const ( - // wait a random amount of time from this interval - // before dialing peers or reconnecting to help prevent DoS - dialRandomizerIntervalMilliseconds = 3000 - - // repeatedly try to reconnect for a few minutes - // ie. 5 * 20 = 100s - reconnectAttempts = 20 - reconnectInterval = 5 * time.Second - - // then move into exponential backoff mode for ~1day - // ie. 3**10 = 16hrs - reconnectBackOffAttempts = 10 - reconnectBackOffBaseSeconds = 3 - - // keep at least this many outbound peers - // TODO: move to config - DefaultMinNumOutboundPeers = 10 -) - -//----------------------------------------------------------------------------- - -// An AddrBook represents an address book from the pex package, which is used -// to store peer addresses. -type AddrBook interface { - AddAddress(addr *NetAddress, src *NetAddress) error - AddOurAddress(*NetAddress) - OurAddress(*NetAddress) bool - MarkGood(*NetAddress) - RemoveAddress(*NetAddress) - HasAddress(*NetAddress) bool - Save() -} - -//----------------------------------------------------------------------------- - -// Switch handles peer connections and exposes an API to receive incoming messages -// on `Reactors`. Each `Reactor` is responsible for handling incoming messages of one -// or more `Channels`. So while sending outgoing messages is typically performed on the peer, -// incoming messages are received on the reactor. 
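As a concrete illustration of the reactor/channel split described above, a minimal reactor claims its channels via GetChannels and receives raw bytes per channel. This is a sketch against the interfaces in this package (the `EchoReactor` name and channel ID 0x42 are invented for illustration), following the same pattern as the PEX reactor:

```go
package p2psketch

import (
	"github.com/tendermint/tendermint/p2p"
	"github.com/tendermint/tendermint/p2p/conn"
)

// EchoReactor is a toy reactor: it claims one channel and echoes
// every inbound message back to its sender on the same channel.
type EchoReactor struct {
	p2p.BaseReactor
}

func NewEchoReactor() *EchoReactor {
	r := &EchoReactor{}
	r.BaseReactor = *p2p.NewBaseReactor("EchoReactor", r)
	return r
}

func (r *EchoReactor) GetChannels() []*conn.ChannelDescriptor {
	return []*conn.ChannelDescriptor{
		// channel IDs must be unique across all reactors on a Switch
		{ID: byte(0x42), Priority: 1, SendQueueCapacity: 10},
	}
}

func (r *EchoReactor) AddPeer(p p2p.Peer)                        {}
func (r *EchoReactor) RemovePeer(p p2p.Peer, reason interface{}) {}

func (r *EchoReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) {
	src.Send(chID, msgBytes) // outbound sends go through the peer directly
}
```

Registering it mirrors the tests below: `sw.AddReactor("echo", NewEchoReactor())`.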
-type Switch struct { - cmn.BaseService - - config *config.P2PConfig - listeners []Listener - reactors map[string]Reactor - chDescs []*conn.ChannelDescriptor - reactorsByCh map[byte]Reactor - peers *PeerSet - dialing *cmn.CMap - reconnecting *cmn.CMap - nodeInfo NodeInfo // our node info - nodeKey *NodeKey // our node privkey - addrBook AddrBook - - filterConnByAddr func(net.Addr) error - filterConnByID func(ID) error - - mConfig conn.MConnConfig - - rng *cmn.Rand // seed for randomizing dial times and orders - - metrics *Metrics -} - -// SwitchOption sets an optional parameter on the Switch. -type SwitchOption func(*Switch) - -// NewSwitch creates a new Switch with the given config. -func NewSwitch(cfg *config.P2PConfig, options ...SwitchOption) *Switch { - sw := &Switch{ - config: cfg, - reactors: make(map[string]Reactor), - chDescs: make([]*conn.ChannelDescriptor, 0), - reactorsByCh: make(map[byte]Reactor), - peers: NewPeerSet(), - dialing: cmn.NewCMap(), - reconnecting: cmn.NewCMap(), - metrics: NopMetrics(), - } - - // Ensure we have a completely undeterministic PRNG. - sw.rng = cmn.NewRand() - - mConfig := conn.DefaultMConnConfig() - mConfig.FlushThrottle = time.Duration(cfg.FlushThrottleTimeout) * time.Millisecond - mConfig.SendRate = cfg.SendRate - mConfig.RecvRate = cfg.RecvRate - mConfig.MaxPacketMsgPayloadSize = cfg.MaxPacketMsgPayloadSize - - sw.mConfig = mConfig - - sw.BaseService = *cmn.NewBaseService(nil, "P2P Switch", sw) - - for _, option := range options { - option(sw) - } - - return sw -} - -// WithMetrics sets the metrics. -func WithMetrics(metrics *Metrics) SwitchOption { - return func(sw *Switch) { sw.metrics = metrics } -} - -//--------------------------------------------------------------------- -// Switch setup - -// AddReactor adds the given reactor to the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) AddReactor(name string, reactor Reactor) Reactor { - // Validate the reactor. - // No two reactors can share the same channel. - reactorChannels := reactor.GetChannels() - for _, chDesc := range reactorChannels { - chID := chDesc.ID - if sw.reactorsByCh[chID] != nil { - cmn.PanicSanity(fmt.Sprintf("Channel %X has multiple reactors %v & %v", chID, sw.reactorsByCh[chID], reactor)) - } - sw.chDescs = append(sw.chDescs, chDesc) - sw.reactorsByCh[chID] = reactor - } - sw.reactors[name] = reactor - reactor.SetSwitch(sw) - return reactor -} - -// Reactors returns a map of reactors registered on the switch. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactors() map[string]Reactor { - return sw.reactors -} - -// Reactor returns the reactor with the given name. -// NOTE: Not goroutine safe. -func (sw *Switch) Reactor(name string) Reactor { - return sw.reactors[name] -} - -// AddListener adds the given listener to the switch for listening to incoming peer connections. -// NOTE: Not goroutine safe. -func (sw *Switch) AddListener(l Listener) { - sw.listeners = append(sw.listeners, l) -} - -// Listeners returns the list of listeners the switch listens on. -// NOTE: Not goroutine safe. -func (sw *Switch) Listeners() []Listener { - return sw.listeners -} - -// IsListening returns true if the switch has at least one listener. -// NOTE: Not goroutine safe. -func (sw *Switch) IsListening() bool { - return len(sw.listeners) > 0 -} - -// SetNodeInfo sets the switch's NodeInfo for checking compatibility and handshaking with other nodes. -// NOTE: Not goroutine safe. 
-func (sw *Switch) SetNodeInfo(nodeInfo NodeInfo) { - sw.nodeInfo = nodeInfo -} - -// NodeInfo returns the switch's NodeInfo. -// NOTE: Not goroutine safe. -func (sw *Switch) NodeInfo() NodeInfo { - return sw.nodeInfo -} - -// SetNodeKey sets the switch's private key for authenticated encryption. -// NOTE: Not goroutine safe. -func (sw *Switch) SetNodeKey(nodeKey *NodeKey) { - sw.nodeKey = nodeKey -} - -//--------------------------------------------------------------------- -// Service start/stop - -// OnStart implements BaseService. It starts all the reactors, peers, and listeners. -func (sw *Switch) OnStart() error { - // Start reactors - for _, reactor := range sw.reactors { - err := reactor.Start() - if err != nil { - return cmn.ErrorWrap(err, "failed to start %v", reactor) - } - } - // Start listeners - for _, listener := range sw.listeners { - go sw.listenerRoutine(listener) - } - return nil -} - -// OnStop implements BaseService. It stops all listeners, peers, and reactors. -func (sw *Switch) OnStop() { - // Stop listeners - for _, listener := range sw.listeners { - listener.Stop() - } - sw.listeners = nil - // Stop peers - for _, peer := range sw.peers.List() { - peer.Stop() - sw.peers.Remove(peer) - } - // Stop reactors - sw.Logger.Debug("Switch: Stopping reactors") - for _, reactor := range sw.reactors { - reactor.Stop() - } -} - -//--------------------------------------------------------------------- -// Peers - -// Broadcast runs a go routine for each attempted send, which will block trying -// to send for defaultSendTimeoutSeconds. Returns a channel which receives -// success values for each attempted send (false if times out). Channel will be -// closed once msg bytes are sent to all peers (or time out). -// -// NOTE: Broadcast uses goroutines, so order of broadcast may not be preserved. -func (sw *Switch) Broadcast(chID byte, msgBytes []byte) chan bool { - successChan := make(chan bool, len(sw.peers.List())) - sw.Logger.Debug("Broadcast", "channel", chID, "msgBytes", fmt.Sprintf("%X", msgBytes)) - var wg sync.WaitGroup - for _, peer := range sw.peers.List() { - wg.Add(1) - go func(peer Peer) { - defer wg.Done() - success := peer.Send(chID, msgBytes) - successChan <- success - }(peer) - } - go func() { - wg.Wait() - close(successChan) - }() - return successChan -} - -// NumPeers returns the count of outbound/inbound and outbound-dialing peers. -func (sw *Switch) NumPeers() (outbound, inbound, dialing int) { - peers := sw.peers.List() - for _, peer := range peers { - if peer.IsOutbound() { - outbound++ - } else { - inbound++ - } - } - dialing = sw.dialing.Size() - return -} - -// Peers returns the set of peers that are connected to the switch. -func (sw *Switch) Peers() IPeerSet { - return sw.peers -} - -// StopPeerForError disconnects from a peer due to external error. -// If the peer is persistent, it will attempt to reconnect. -// TODO: make record depending on reason. -func (sw *Switch) StopPeerForError(peer Peer, reason interface{}) { - sw.Logger.Error("Stopping peer for error", "peer", peer, "err", reason) - sw.stopAndRemovePeer(peer, reason) - - if peer.IsPersistent() { - // NOTE: this is the self-reported addr, not the original we dialed - go sw.reconnectToPeer(peer.NodeInfo().NetAddress()) - } -} - -// StopPeerGracefully disconnects from a peer gracefully. -// TODO: handle graceful disconnects. 
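Since Broadcast above reports its results asynchronously, callers that care about delivery drain the returned channel, which closes once every per-peer send has succeeded or timed out. A sketch of a helper in the same package (the function name is invented; a running `*Switch` is assumed):

```go
// broadcastAndCount sends msg on chID to all connected peers and
// tallies how many per-peer sends succeeded vs. timed out.
func broadcastAndCount(sw *Switch, chID byte, msg []byte) (ok, failed int) {
	for success := range sw.Broadcast(chID, msg) {
		if success {
			ok++
		} else {
			failed++
		}
	}
	return ok, failed
}
```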
-func (sw *Switch) StopPeerGracefully(peer Peer) { - sw.Logger.Info("Stopping peer gracefully") - sw.stopAndRemovePeer(peer, nil) -} - -func (sw *Switch) stopAndRemovePeer(peer Peer, reason interface{}) { - sw.peers.Remove(peer) - sw.metrics.Peers.Add(float64(-1)) - peer.Stop() - for _, reactor := range sw.reactors { - reactor.RemovePeer(peer, reason) - } -} - -// reconnectToPeer tries to reconnect to the addr, first repeatedly -// with a fixed interval, then with exponential backoff. -// If no success after all that, it stops trying, and leaves it -// to the PEX/Addrbook to find the peer with the addr again -// NOTE: this will keep trying even if the handshake or auth fails. -// TODO: be more explicit with error types so we only retry on certain failures -// - ie. if we're getting ErrDuplicatePeer we can stop -// because the addrbook got us the peer back already -func (sw *Switch) reconnectToPeer(addr *NetAddress) { - if sw.reconnecting.Has(string(addr.ID)) { - return - } - sw.reconnecting.Set(string(addr.ID), addr) - defer sw.reconnecting.Delete(string(addr.ID)) - - start := time.Now() - sw.Logger.Info("Reconnecting to peer", "addr", addr) - for i := 0; i < reconnectAttempts; i++ { - if !sw.IsRunning() { - return - } - - err := sw.DialPeerWithAddress(addr, true) - if err == nil { - return // success - } - - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - // sleep a set amount - sw.randomSleep(reconnectInterval) - continue - } - - sw.Logger.Error("Failed to reconnect to peer. Beginning exponential backoff", - "addr", addr, "elapsed", time.Since(start)) - for i := 0; i < reconnectBackOffAttempts; i++ { - if !sw.IsRunning() { - return - } - - // sleep an exponentially increasing amount - sleepIntervalSeconds := math.Pow(reconnectBackOffBaseSeconds, float64(i)) - sw.randomSleep(time.Duration(sleepIntervalSeconds) * time.Second) - err := sw.DialPeerWithAddress(addr, true) - if err == nil { - return // success - } - sw.Logger.Info("Error reconnecting to peer. Trying again", "tries", i, "err", err, "addr", addr) - } - sw.Logger.Error("Failed to reconnect to peer. Giving up", "addr", addr, "elapsed", time.Since(start)) -} - -// SetAddrBook allows to set address book on Switch. -func (sw *Switch) SetAddrBook(addrBook AddrBook) { - sw.addrBook = addrBook -} - -// MarkPeerAsGood marks the given peer as good when it did something useful -// like contributed to consensus. -func (sw *Switch) MarkPeerAsGood(peer Peer) { - if sw.addrBook != nil { - sw.addrBook.MarkGood(peer.NodeInfo().NetAddress()) - } -} - -//--------------------------------------------------------------------- -// Dialing - -// IsDialing returns true if the switch is currently dialing the given ID. -func (sw *Switch) IsDialing(id ID) bool { - return sw.dialing.Has(string(id)) -} - -// DialPeersAsync dials a list of peers asynchronously in random order (optionally, making them persistent). -// Used to dial peers from config on startup or from unsafe-RPC (trusted sources). -// TODO: remove addrBook arg since it's now set on the switch -func (sw *Switch) DialPeersAsync(addrBook AddrBook, peers []string, persistent bool) error { - netAddrs, errs := NewNetAddressStrings(peers) - // only log errors, dial correct addresses - for _, err := range errs { - sw.Logger.Error("Error in peer's address", "err", err) - } - - ourAddr := sw.nodeInfo.NetAddress() - - // TODO: this code feels like it's in the wrong place. 
- // The integration tests depend on the addrBook being saved - // right away but maybe we can change that. Recall that - // the addrBook is only written to disk every 2min - if addrBook != nil { - // add peers to `addrBook` - for _, netAddr := range netAddrs { - // do not add our address or ID - if !netAddr.Same(ourAddr) { - if err := addrBook.AddAddress(netAddr, ourAddr); err != nil { - sw.Logger.Error("Can't add peer's address to addrbook", "err", err) - } - } - } - // Persist some peers to disk right away. - // NOTE: integration tests depend on this - addrBook.Save() - } - - // permute the list, dial them in random order. - perm := sw.rng.Perm(len(netAddrs)) - for i := 0; i < len(perm); i++ { - go func(i int) { - j := perm[i] - - addr := netAddrs[j] - // do not dial ourselves - if addr.Same(ourAddr) { - return - } - - sw.randomSleep(0) - err := sw.DialPeerWithAddress(addr, persistent) - if err != nil { - switch err.(type) { - case ErrSwitchConnectToSelf, ErrSwitchDuplicatePeerID: - sw.Logger.Debug("Error dialing peer", "err", err) - default: - sw.Logger.Error("Error dialing peer", "err", err) - } - } - }(i) - } - return nil -} - -// DialPeerWithAddress dials the given peer and runs sw.addPeer if it connects and authenticates successfully. -// If `persistent == true`, the switch will always try to reconnect to this peer if the connection ever fails. -func (sw *Switch) DialPeerWithAddress(addr *NetAddress, persistent bool) error { - sw.dialing.Set(string(addr.ID), addr) - defer sw.dialing.Delete(string(addr.ID)) - return sw.addOutboundPeerWithConfig(addr, sw.config, persistent) -} - -// sleep for interval plus some random amount of ms on [0, dialRandomizerIntervalMilliseconds] -func (sw *Switch) randomSleep(interval time.Duration) { - r := time.Duration(sw.rng.Int63n(dialRandomizerIntervalMilliseconds)) * time.Millisecond - time.Sleep(r + interval) -} - -//------------------------------------------------------------------------------------ -// Connection filtering - -// FilterConnByAddr returns an error if connecting to the given address is forbidden. -func (sw *Switch) FilterConnByAddr(addr net.Addr) error { - if sw.filterConnByAddr != nil { - return sw.filterConnByAddr(addr) - } - return nil -} - -// FilterConnByID returns an error if connecting to the given peer ID is forbidden. -func (sw *Switch) FilterConnByID(id ID) error { - if sw.filterConnByID != nil { - return sw.filterConnByID(id) - } - return nil - -} - -// SetAddrFilter sets the function for filtering connections by address. -func (sw *Switch) SetAddrFilter(f func(net.Addr) error) { - sw.filterConnByAddr = f -} - -// SetIDFilter sets the function for filtering connections by peer ID. -func (sw *Switch) SetIDFilter(f func(ID) error) { - sw.filterConnByID = f -} - -//------------------------------------------------------------------------------------ - -func (sw *Switch) listenerRoutine(l Listener) { - for { - inConn, ok := <-l.Connections() - if !ok { - break - } - - // ignore connection if we already have enough - // leave room for MinNumOutboundPeers - maxPeers := sw.config.MaxNumPeers - DefaultMinNumOutboundPeers - if maxPeers <= sw.peers.Size() { - sw.Logger.Info("Ignoring inbound connection: already have enough peers", "address", inConn.RemoteAddr().String(), "numPeers", sw.peers.Size(), "max", maxPeers) - continue - } - - // New inbound connection! 
-		err := sw.addInboundPeerWithConfig(inConn, sw.config)
-		if err != nil {
-			sw.Logger.Info("Ignoring inbound connection: error while adding peer", "address", inConn.RemoteAddr().String(), "err", err)
-			continue
-		}
-	}
-
-	// cleanup
-}
-
-func (sw *Switch) addInboundPeerWithConfig(
-	conn net.Conn,
-	config *config.P2PConfig,
-) error {
-	peerConn, err := newInboundPeerConn(conn, config, sw.nodeKey.PrivKey)
-	if err != nil {
-		conn.Close() // peer is nil
-		return err
-	}
-	if err = sw.addPeer(peerConn); err != nil {
-		peerConn.CloseConn()
-		return err
-	}
-
-	return nil
-}
-
-// dial the peer; make secret connection; authenticate against the dialed ID;
-// add the peer.
-// if dialing fails, start the reconnect loop. If the handshake fails, it's over.
-// If the peer is started successfully, reconnectLoop will start when
-// StopPeerForError is called.
-func (sw *Switch) addOutboundPeerWithConfig(
-	addr *NetAddress,
-	config *config.P2PConfig,
-	persistent bool,
-) error {
-	sw.Logger.Info("Dialing peer", "address", addr)
-	peerConn, err := newOutboundPeerConn(
-		addr,
-		config,
-		persistent,
-		sw.nodeKey.PrivKey,
-	)
-	if err != nil {
-		if persistent {
-			go sw.reconnectToPeer(addr)
-		}
-		return err
-	}
-
-	if err := sw.addPeer(peerConn); err != nil {
-		peerConn.CloseConn()
-		return err
-	}
-	return nil
-}
-
-// addPeer performs the Tendermint P2P handshake with a peer
-// that already has a SecretConnection. If all goes well,
-// it starts the peer and adds it to the switch.
-// NOTE: This performs a blocking handshake before the peer is added.
-// NOTE: If error is returned, caller is responsible for calling
-// peer.CloseConn()
-func (sw *Switch) addPeer(pc peerConn) error {
-
-	addr := pc.conn.RemoteAddr()
-	if err := sw.FilterConnByAddr(addr); err != nil {
-		return err
-	}
-
-	// Exchange NodeInfo on the conn
-	peerNodeInfo, err := pc.HandshakeTimeout(sw.nodeInfo, time.Duration(sw.config.HandshakeTimeout))
-	if err != nil {
-		return err
-	}
-
-	peerID := peerNodeInfo.ID
-
-	// ensure the connection key matches the self-reported key
-	connID := pc.ID()
-
-	if peerID != connID {
-		return fmt.Errorf(
-			"nodeInfo.ID() (%v) doesn't match conn.ID() (%v)",
-			peerID,
-			connID,
-		)
-	}
-
-	// Validate the peer's NodeInfo
-	if err := peerNodeInfo.Validate(); err != nil {
-		return err
-	}
-
-	// Avoid self
-	if sw.nodeKey.ID() == peerID {
-		addr := peerNodeInfo.NetAddress()
-		// remove the given address from the address book
-		// and add it to our addresses to avoid dialing again
-		sw.addrBook.RemoveAddress(addr)
-		sw.addrBook.AddOurAddress(addr)
-		return ErrSwitchConnectToSelf{addr}
-	}
-
-	// Avoid duplicate
-	if sw.peers.Has(peerID) {
-		return ErrSwitchDuplicatePeerID{peerID}
-	}
-
-	// Check for duplicate connection or peer info IP.
-	if !sw.config.AllowDuplicateIP &&
-		(sw.peers.HasIP(pc.RemoteIP()) ||
-			sw.peers.HasIP(peerNodeInfo.NetAddress().IP)) {
-		return ErrSwitchDuplicatePeerIP{pc.RemoteIP()}
-	}
-
-	// Filter peer against the ID whitelist
-	if err := sw.FilterConnByID(peerID); err != nil {
-		return err
-	}
-
-	// Check version, chain id
-	if err := sw.nodeInfo.CompatibleWith(peerNodeInfo); err != nil {
-		return err
-	}
-
-	peer := newPeer(pc, sw.mConfig, peerNodeInfo, sw.reactorsByCh, sw.chDescs, sw.StopPeerForError)
-	peer.SetLogger(sw.Logger.With("peer", addr))
-
-	peer.Logger.Info("Successful handshake with peer", "peerNodeInfo", peerNodeInfo)
-
-	// All good. Start peer
-	if sw.IsRunning() {
-		if err = sw.startInitPeer(peer); err != nil {
-			return err
-		}
-	}
-
-	// Add the peer to .peers.
- // We start it first so that a peer in the list is safe to Stop. - // It should not err since we already checked peers.Has(). - if err := sw.peers.Add(peer); err != nil { - return err - } - sw.metrics.Peers.Add(float64(1)) - - sw.Logger.Info("Added peer", "peer", peer) - return nil -} - -func (sw *Switch) startInitPeer(peer *peer) error { - err := peer.Start() // spawn send/recv routines - if err != nil { - // Should never happen - sw.Logger.Error("Error starting peer", "peer", peer, "err", err) - return err - } - - for _, reactor := range sw.reactors { - reactor.AddPeer(peer) - } - - return nil -} diff --git a/p2p/switch_test.go b/p2p/switch_test.go deleted file mode 100644 index 6157f45c..00000000 --- a/p2p/switch_test.go +++ /dev/null @@ -1,428 +0,0 @@ -package p2p - -import ( - "bytes" - "fmt" - "net" - "sync" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - crypto "github.com/tendermint/go-crypto" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p/conn" -) - -var ( - cfg *config.P2PConfig -) - -func init() { - cfg = config.DefaultP2PConfig() - cfg.PexReactor = true - cfg.AllowDuplicateIP = true -} - -type PeerMessage struct { - PeerID ID - Bytes []byte - Counter int -} - -type TestReactor struct { - BaseReactor - - mtx sync.Mutex - channels []*conn.ChannelDescriptor - logMessages bool - msgsCounter int - msgsReceived map[byte][]PeerMessage -} - -func NewTestReactor(channels []*conn.ChannelDescriptor, logMessages bool) *TestReactor { - tr := &TestReactor{ - channels: channels, - logMessages: logMessages, - msgsReceived: make(map[byte][]PeerMessage), - } - tr.BaseReactor = *NewBaseReactor("TestReactor", tr) - tr.SetLogger(log.TestingLogger()) - return tr -} - -func (tr *TestReactor) GetChannels() []*conn.ChannelDescriptor { - return tr.channels -} - -func (tr *TestReactor) AddPeer(peer Peer) {} - -func (tr *TestReactor) RemovePeer(peer Peer, reason interface{}) {} - -func (tr *TestReactor) Receive(chID byte, peer Peer, msgBytes []byte) { - if tr.logMessages { - tr.mtx.Lock() - defer tr.mtx.Unlock() - //fmt.Printf("Received: %X, %X\n", chID, msgBytes) - tr.msgsReceived[chID] = append(tr.msgsReceived[chID], PeerMessage{peer.ID(), msgBytes, tr.msgsCounter}) - tr.msgsCounter++ - } -} - -func (tr *TestReactor) getMsgs(chID byte) []PeerMessage { - tr.mtx.Lock() - defer tr.mtx.Unlock() - return tr.msgsReceived[chID] -} - -//----------------------------------------------------------------------------- - -// convenience method for creating two switches connected to each other. -// XXX: note this uses net.Pipe and not a proper TCP conn -func MakeSwitchPair(t testing.TB, initSwitch func(int, *Switch) *Switch) (*Switch, *Switch) { - // Create two switches that will be interconnected. 
-	switches := MakeConnectedSwitches(cfg, 2, initSwitch, Connect2Switches)
-	return switches[0], switches[1]
-}
-
-func initSwitchFunc(i int, sw *Switch) *Switch {
-	sw.SetAddrBook(&addrBookMock{
-		addrs:    make(map[string]struct{}),
-		ourAddrs: make(map[string]struct{})})
-
-	// Make two reactors of two channels each
-	sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
-		{ID: byte(0x00), Priority: 10},
-		{ID: byte(0x01), Priority: 10},
-	}, true))
-	sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
-		{ID: byte(0x02), Priority: 10},
-		{ID: byte(0x03), Priority: 10},
-	}, true))
-
-	return sw
-}
-
-func TestSwitches(t *testing.T) {
-	s1, s2 := MakeSwitchPair(t, initSwitchFunc)
-	defer s1.Stop()
-	defer s2.Stop()
-
-	if s1.Peers().Size() != 1 {
-		t.Errorf("Expected exactly 1 peer in s1, got %v", s1.Peers().Size())
-	}
-	if s2.Peers().Size() != 1 {
-		t.Errorf("Expected exactly 1 peer in s2, got %v", s2.Peers().Size())
-	}
-
-	// Let's send some messages
-	ch0Msg := []byte("channel zero")
-	ch1Msg := []byte("channel foo")
-	ch2Msg := []byte("channel bar")
-
-	s1.Broadcast(byte(0x00), ch0Msg)
-	s1.Broadcast(byte(0x01), ch1Msg)
-	s1.Broadcast(byte(0x02), ch2Msg)
-
-	assertMsgReceivedWithTimeout(t, ch0Msg, byte(0x00), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
-	assertMsgReceivedWithTimeout(t, ch1Msg, byte(0x01), s2.Reactor("foo").(*TestReactor), 10*time.Millisecond, 5*time.Second)
-	assertMsgReceivedWithTimeout(t, ch2Msg, byte(0x02), s2.Reactor("bar").(*TestReactor), 10*time.Millisecond, 5*time.Second)
-}
-
-func assertMsgReceivedWithTimeout(t *testing.T, msgBytes []byte, channel byte, reactor *TestReactor, checkPeriod, timeout time.Duration) {
-	ticker := time.NewTicker(checkPeriod)
-	for {
-		select {
-		case <-ticker.C:
-			msgs := reactor.getMsgs(channel)
-			if len(msgs) > 0 {
-				if !bytes.Equal(msgs[0].Bytes, msgBytes) {
-					t.Fatalf("Unexpected message bytes. Wanted: %X, Got: %X", msgBytes, msgs[0].Bytes)
-				}
-				return
-			}
-		case <-time.After(timeout):
-			t.Fatalf("Expected to have received 1 message in channel #%v, got zero", channel)
-		}
-	}
-}
-
-func TestConnAddrFilter(t *testing.T) {
-	s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	defer s1.Stop()
-	defer s2.Stop()
-
-	c1, c2 := conn.NetPipe()
-
-	s1.SetAddrFilter(func(addr net.Addr) error {
-		if addr.String() == c1.RemoteAddr().String() {
-			return fmt.Errorf("Error: pipe is blacklisted")
-		}
-		return nil
-	})
-
-	// connect to good peer
-	go func() {
-		err := s1.addPeerWithConnection(c1)
-		assert.NotNil(t, err, "expected err")
-	}()
-	go func() {
-		err := s2.addPeerWithConnection(c2)
-		assert.NotNil(t, err, "expected err")
-	}()
-
-	assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
-	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
-}
-
-func TestSwitchFiltersOutItself(t *testing.T) {
-	s1 := MakeSwitch(cfg, 1, "127.0.0.1", "123.123.123", initSwitchFunc)
-	// addr := s1.NodeInfo().NetAddress()
-
-	// // add ourselves like we do in node.go#427
-	// s1.addrBook.AddOurAddress(addr)
-
-	// simulate s1 having a public IP by creating a remote peer with the same ID
-	rp := &remotePeer{PrivKey: s1.nodeKey.PrivKey, Config: cfg}
-	rp.Start()
-
-	// addr should be rejected in addPeer based on the same ID
-	err := s1.DialPeerWithAddress(rp.Addr(), false)
-	if assert.Error(t, err) {
-		assert.Equal(t, ErrSwitchConnectToSelf{rp.Addr()}.Error(), err.Error())
-	}
-
-	assert.True(t, s1.addrBook.OurAddress(rp.Addr()))
-
-	assert.False(t, s1.addrBook.HasAddress(rp.Addr()))
-
-	rp.Stop()
-
-	assertNoPeersAfterTimeout(t, s1, 100*time.Millisecond)
-}
-
-func assertNoPeersAfterTimeout(t *testing.T, sw *Switch, timeout time.Duration) {
-	time.Sleep(timeout)
-	if sw.Peers().Size() != 0 {
-		t.Fatalf("Expected %v to not connect to any peers, got %d", sw, sw.Peers().Size())
-	}
-}
-
-func TestConnIDFilter(t *testing.T) {
-	s1 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	s2 := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	defer s1.Stop()
-	defer s2.Stop()
-
-	c1, c2 := conn.NetPipe()
-
-	s1.SetIDFilter(func(id ID) error {
-		if id == s2.nodeInfo.ID {
-			return fmt.Errorf("Error: pipe is blacklisted")
-		}
-		return nil
-	})
-
-	s2.SetIDFilter(func(id ID) error {
-		if id == s1.nodeInfo.ID {
-			return fmt.Errorf("Error: pipe is blacklisted")
-		}
-		return nil
-	})
-
-	go func() {
-		err := s1.addPeerWithConnection(c1)
-		assert.NotNil(t, err, "expected error")
-	}()
-	go func() {
-		err := s2.addPeerWithConnection(c2)
-		assert.NotNil(t, err, "expected error")
-	}()
-
-	assertNoPeersAfterTimeout(t, s1, 400*time.Millisecond)
-	assertNoPeersAfterTimeout(t, s2, 400*time.Millisecond)
-}
-
-func TestSwitchStopsNonPersistentPeerOnError(t *testing.T) {
-	assert, require := assert.New(t), require.New(t)
-
-	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	err := sw.Start()
-	if err != nil {
-		t.Error(err)
-	}
-	defer sw.Stop()
-
-	// simulate remote peer
-	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
-	rp.Start()
-	defer rp.Stop()
-
-	pc, err := newOutboundPeerConn(rp.Addr(), cfg, false, sw.nodeKey.PrivKey)
-	require.Nil(err)
-	err = sw.addPeer(pc)
-	require.Nil(err)
-
-	peer := sw.Peers().Get(rp.ID())
-	require.NotNil(peer)
-
-	// simulate failure by closing connection
-	pc.CloseConn()
-
-	assertNoPeersAfterTimeout(t, sw, 100*time.Millisecond)
-	assert.False(peer.IsRunning())
-}
-
-func TestSwitchReconnectsToPersistentPeer(t *testing.T) {
-	assert, require := assert.New(t), require.New(t)
-
-	sw := MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc)
-	err := sw.Start()
-	if err != nil {
-		t.Error(err)
-	}
-	defer sw.Stop()
-
-	// simulate remote peer
-	rp := &remotePeer{PrivKey: crypto.GenPrivKeyEd25519(), Config: cfg}
-	rp.Start()
-	defer rp.Stop()
-
-	pc, err := newOutboundPeerConn(rp.Addr(), cfg, true, sw.nodeKey.PrivKey)
-	// sw.reactorsByCh, sw.chDescs, sw.StopPeerForError, sw.nodeKey.PrivKey,
-	require.Nil(err)
-
-	require.Nil(sw.addPeer(pc))
-
-	peer := sw.Peers().Get(rp.ID())
-	require.NotNil(peer)
-
-	// simulate failure by closing connection
-	pc.CloseConn()
-
-	// TODO: remove sleep, detect the disconnection, wait for reconnect
-	npeers := sw.Peers().Size()
-	for i := 0; i < 20; i++ {
-		time.Sleep(250 * time.Millisecond)
-		npeers = sw.Peers().Size()
-		if npeers > 0 {
-			break
-		}
-	}
-	assert.NotZero(npeers)
-	assert.False(peer.IsRunning())
-
-	// simulate another remote peer
-	rp = &remotePeer{
-		PrivKey: crypto.GenPrivKeyEd25519(),
-		Config:  cfg,
-		// Use different interface to prevent duplicate IP filter, this will break
-		// beyond two peers.
-		listenAddr: "127.0.0.1:0",
-	}
-	rp.Start()
-	defer rp.Stop()
-
-	// simulate first time dial failure
-	conf := config.DefaultP2PConfig()
-	conf.TestDialFail = true
-	err = sw.addOutboundPeerWithConfig(rp.Addr(), conf, true)
-	require.NotNil(err)
-
-	// DialPeerWithAddress - sw.peerConfig resets the dialer
-
-	// TODO: same as above
-	for i := 0; i < 20; i++ {
-		time.Sleep(250 * time.Millisecond)
-		npeers = sw.Peers().Size()
-		if npeers > 1 {
-			break
-		}
-	}
-	assert.EqualValues(2, npeers)
-}
-
-func TestSwitchFullConnectivity(t *testing.T) {
-	switches := MakeConnectedSwitches(cfg, 3, initSwitchFunc, Connect2Switches)
-	defer func() {
-		for _, sw := range switches {
-			sw.Stop()
-		}
-	}()
-
-	for i, sw := range switches {
-		if sw.Peers().Size() != 2 {
-			t.Fatalf("Expected each switch to be connected to 2 others, but switch %d is only connected to %d", i, sw.Peers().Size())
-		}
-	}
-}
-
-func BenchmarkSwitchBroadcast(b *testing.B) {
-	s1, s2 := MakeSwitchPair(b, func(i int, sw *Switch) *Switch {
-		// Make two reactors of two channels each
-		sw.AddReactor("foo", NewTestReactor([]*conn.ChannelDescriptor{
-			{ID: byte(0x00), Priority: 10},
-			{ID: byte(0x01), Priority: 10},
-		}, false))
-		sw.AddReactor("bar", NewTestReactor([]*conn.ChannelDescriptor{
-			{ID: byte(0x02), Priority: 10},
-			{ID: byte(0x03), Priority: 10},
-		}, false))
-		return sw
-	})
-	defer s1.Stop()
-	defer s2.Stop()
-
-	// Allow time for goroutines to boot up
-	time.Sleep(1 * time.Second)
-
-	b.ResetTimer()
-
-	numSuccess, numFailure := 0, 0
-
-	// Send random message from foo channel to another
-	for i := 0; i < b.N; i++ {
-		chID := byte(i % 4)
-		successChan := s1.Broadcast(chID, []byte("test data"))
-		for s := range successChan {
-			if s {
-				numSuccess++
-			} else {
-				numFailure++
-			}
-		}
-	}
-
-	b.Logf("success: %v, failure: %v", numSuccess, numFailure)
-}
-
-type addrBookMock struct {
-	addrs    map[string]struct{}
-	ourAddrs map[string]struct{}
-}
-
-var _ AddrBook = (*addrBookMock)(nil)
-
-func (book *addrBookMock) AddAddress(addr *NetAddress, src *NetAddress) error {
-	book.addrs[addr.String()] = struct{}{}
-	return nil
-}
-func (book *addrBookMock) AddOurAddress(addr *NetAddress) { book.ourAddrs[addr.String()] = struct{}{} }
-func (book *addrBookMock) OurAddress(addr *NetAddress) bool {
-	_, ok :=
book.ourAddrs[addr.String()] - return ok -} -func (book *addrBookMock) MarkGood(*NetAddress) {} -func (book *addrBookMock) HasAddress(addr *NetAddress) bool { - _, ok := book.addrs[addr.String()] - return ok -} -func (book *addrBookMock) RemoveAddress(addr *NetAddress) { - delete(book.addrs, addr.String()) -} -func (book *addrBookMock) Save() {} diff --git a/p2p/test_util.go b/p2p/test_util.go deleted file mode 100644 index 0d2ba6c5..00000000 --- a/p2p/test_util.go +++ /dev/null @@ -1,156 +0,0 @@ -package p2p - -import ( - "fmt" - "net" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/p2p/conn" -) - -func AddPeerToSwitch(sw *Switch, peer Peer) { - sw.peers.Add(peer) -} - -func CreateRandomPeer(outbound bool) *peer { - addr, netAddr := CreateRoutableAddr() - p := &peer{ - peerConn: peerConn{ - outbound: outbound, - }, - nodeInfo: NodeInfo{ - ID: netAddr.ID, - ListenAddr: netAddr.DialString(), - }, - mconn: &conn.MConnection{}, - } - p.SetLogger(log.TestingLogger().With("peer", addr)) - return p -} - -func CreateRoutableAddr() (addr string, netAddr *NetAddress) { - for { - var err error - addr = cmn.Fmt("%X@%v.%v.%v.%v:26656", cmn.RandBytes(20), cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256, cmn.RandInt()%256) - netAddr, err = NewNetAddressString(addr) - if err != nil { - panic(err) - } - if netAddr.Routable() { - break - } - } - return -} - -//------------------------------------------------------------------ -// Connects switches via arbitrary net.Conn. Used for testing. - -const TEST_HOST = "localhost" - -// MakeConnectedSwitches returns n switches, connected according to the connect func. -// If connect==Connect2Switches, the switches will be fully connected. -// initSwitch defines how the i'th switch should be initialized (ie. with what reactors). -// NOTE: panics if any switch fails to start. -func MakeConnectedSwitches(cfg *config.P2PConfig, n int, initSwitch func(int, *Switch) *Switch, connect func([]*Switch, int, int)) []*Switch { - switches := make([]*Switch, n) - for i := 0; i < n; i++ { - switches[i] = MakeSwitch(cfg, i, TEST_HOST, "123.123.123", initSwitch) - } - - if err := StartSwitches(switches); err != nil { - panic(err) - } - - for i := 0; i < n; i++ { - for j := i + 1; j < n; j++ { - connect(switches, i, j) - } - } - - return switches -} - -// Connect2Switches will connect switches i and j via net.Pipe(). -// Blocks until a connection is established. -// NOTE: caller ensures i and j are within bounds. -func Connect2Switches(switches []*Switch, i, j int) { - switchI := switches[i] - switchJ := switches[j] - - c1, c2 := conn.NetPipe() - - doneCh := make(chan struct{}) - go func() { - err := switchI.addPeerWithConnection(c1) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - go func() { - err := switchJ.addPeerWithConnection(c2) - if err != nil { - panic(err) - } - doneCh <- struct{}{} - }() - <-doneCh - <-doneCh -} - -func (sw *Switch) addPeerWithConnection(conn net.Conn) error { - pc, err := newInboundPeerConn(conn, sw.config, sw.nodeKey.PrivKey) - if err != nil { - if err := conn.Close(); err != nil { - sw.Logger.Error("Error closing connection", "err", err) - } - return err - } - if err = sw.addPeer(pc); err != nil { - pc.CloseConn() - return err - } - - return nil -} - -// StartSwitches calls sw.Start() for each given switch. -// It returns the first encountered error. 
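//
// A minimal usage sketch (an editor's illustration, not part of the
// original file; values are hypothetical and mirror what
// MakeConnectedSwitches does internally):
//
//	switches := []*Switch{
//		MakeSwitch(cfg, 0, "testing", "123.123.123", initSwitchFunc),
//		MakeSwitch(cfg, 1, "testing", "123.123.123", initSwitchFunc),
//	}
//	if err := StartSwitches(switches); err != nil {
//		panic(err)
//	}
//	Connect2Switches(switches, 0, 1)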
-func StartSwitches(switches []*Switch) error {
-	for _, s := range switches {
-		err := s.Start() // start switch and reactors
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func MakeSwitch(cfg *config.P2PConfig, i int, network, version string, initSwitch func(int, *Switch) *Switch) *Switch {
-	// new switch, add reactors
-	// TODO: let the config be passed in?
-	nodeKey := &NodeKey{
-		PrivKey: crypto.GenPrivKeyEd25519(),
-	}
-	sw := NewSwitch(cfg)
-	sw.SetLogger(log.TestingLogger())
-	sw = initSwitch(i, sw)
-	ni := NodeInfo{
-		ID:         nodeKey.ID(),
-		Moniker:    cmn.Fmt("switch%d", i),
-		Network:    network,
-		Version:    version,
-		ListenAddr: fmt.Sprintf("127.0.0.1:%d", cmn.RandIntn(64512)+1023),
-	}
-	for ch := range sw.reactorsByCh {
-		ni.Channels = append(ni.Channels, ch)
-	}
-	sw.SetNodeInfo(ni)
-	sw.SetNodeKey(nodeKey)
-	return sw
-}
diff --git a/p2p/trust/config.go b/p2p/trust/config.go
deleted file mode 100644
index b20a8b2c..00000000
--- a/p2p/trust/config.go
+++ /dev/null
@@ -1,55 +0,0 @@
-package trust
-
-import "time"
-
-// TrustMetricConfig - Configures the weight functions and time intervals for the metric
-type TrustMetricConfig struct {
-	// Determines the percentage given to current behavior
-	ProportionalWeight float64
-
-	// Determines the percentage given to prior behavior
-	IntegralWeight float64
-
-	// The window of time that the trust metric will track events across.
-	// This can be set to cover many days without issue
-	TrackingWindow time.Duration
-
-	// Each interval should be short for adaptability.
-	// Less than 30 seconds is too sensitive,
-	// and greater than 5 minutes will make the metric numb
-	IntervalLength time.Duration
-}
-
-// DefaultConfig returns a config with values that have been tested and produce desirable results
-func DefaultConfig() TrustMetricConfig {
-	return TrustMetricConfig{
-		ProportionalWeight: 0.4,
-		IntegralWeight:     0.6,
-		TrackingWindow:     (time.Minute * 60 * 24) * 14, // 14 days.
-		IntervalLength:     1 * time.Minute,
-	}
-}
-
-// Ensures that all configuration elements have valid values
-func customConfig(tmc TrustMetricConfig) TrustMetricConfig {
-	config := DefaultConfig()
-
-	// Check the config for set values, and setup appropriately
-	if tmc.ProportionalWeight > 0 {
-		config.ProportionalWeight = tmc.ProportionalWeight
-	}
-
-	if tmc.IntegralWeight > 0 {
-		config.IntegralWeight = tmc.IntegralWeight
-	}
-
-	if tmc.IntervalLength > time.Duration(0) {
-		config.IntervalLength = tmc.IntervalLength
-	}
-
-	if tmc.TrackingWindow > time.Duration(0) &&
-		tmc.TrackingWindow >= config.IntervalLength {
-		config.TrackingWindow = tmc.TrackingWindow
-	}
-	return config
-}
diff --git a/p2p/trust/metric.go b/p2p/trust/metric.go
deleted file mode 100644
index 5770b420..00000000
--- a/p2p/trust/metric.go
+++ /dev/null
@@ -1,411 +0,0 @@
-// Copyright 2017 Tendermint. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
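//
// Editor's note (a reading of the code below, not part of the original
// file): the metric combines three components in the style of a PID
// controller,
//
//	TrustValue = a*P + b*I + w(D)*D
//
// where P is the fraction of good events in the current interval, I is
// the weighted history value, D = P - I, a and b are the proportional
// and integral weights (0.4 and 0.6 by default), and w(D) is
// defaultDerivativeGamma1 (0) when D >= 0 or defaultDerivativeGamma2
// (1.0) when D < 0. See docs/architecture/adr-006-trust-metric.md.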
- -package trust - -import ( - "math" - "sync" - "time" - - cmn "github.com/tendermint/tmlibs/common" -) - -//--------------------------------------------------------------------------------------- - -const ( - // The weight applied to the derivative when current behavior is >= previous behavior - defaultDerivativeGamma1 = 0 - - // The weight applied to the derivative when current behavior is less than previous behavior - defaultDerivativeGamma2 = 1.0 - - // The weight applied to history data values when calculating the history value - defaultHistoryDataWeight = 0.8 -) - -// MetricHistoryJSON - history data necessary to save the trust metric -type MetricHistoryJSON struct { - NumIntervals int `json:"intervals"` - History []float64 `json:"history"` -} - -// TrustMetric - keeps track of peer reliability -// See tendermint/docs/architecture/adr-006-trust-metric.md for details -type TrustMetric struct { - cmn.BaseService - - // Mutex that protects the metric from concurrent access - mtx sync.Mutex - - // Determines the percentage given to current behavior - proportionalWeight float64 - - // Determines the percentage given to prior behavior - integralWeight float64 - - // Count of how many time intervals this metric has been tracking - numIntervals int - - // Size of the time interval window for this trust metric - maxIntervals int - - // The time duration for a single time interval - intervalLen time.Duration - - // Stores the trust history data for this metric - history []float64 - - // Weights applied to the history data when calculating the history value - historyWeights []float64 - - // The sum of the history weights used when calculating the history value - historyWeightSum float64 - - // The current number of history data elements - historySize int - - // The maximum number of history data elements - historyMaxSize int - - // The calculated history value for the current time interval - historyValue float64 - - // The number of recorded good and bad events for the current time interval - bad, good float64 - - // While true, history data is not modified - paused bool - - // Used during testing in order to control the passing of time intervals - testTicker MetricTicker -} - -// NewMetric returns a trust metric with the default configuration. -// Use Start to begin tracking the quality of peer behavior over time -func NewMetric() *TrustMetric { - return NewMetricWithConfig(DefaultConfig()) -} - -// NewMetricWithConfig returns a trust metric with a custom configuration. 
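//
// A configuration sketch (editor's illustration; the values mirror
// those in metric_test.go and are otherwise arbitrary):
//
//	config := TrustMetricConfig{
//		TrackingWindow: time.Minute * 60 * 24 * 7, // 7 days
//		IntervalLength: 2 * time.Minute,
//	}
//	tm := NewMetricWithConfig(config)
//	tm.Start()
//	tm.GoodEvents(1)
//	score := tm.TrustScore() // 100 while the record is perfect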
-// Use Start to begin tracking the quality of peer behavior over time -func NewMetricWithConfig(tmc TrustMetricConfig) *TrustMetric { - tm := new(TrustMetric) - config := customConfig(tmc) - - // Setup using the configuration values - tm.proportionalWeight = config.ProportionalWeight - tm.integralWeight = config.IntegralWeight - tm.intervalLen = config.IntervalLength - // The maximum number of time intervals is the tracking window / interval length - tm.maxIntervals = int(config.TrackingWindow / tm.intervalLen) - // The history size will be determined by the maximum number of time intervals - tm.historyMaxSize = intervalToHistoryOffset(tm.maxIntervals) + 1 - // This metric has a perfect history so far - tm.historyValue = 1.0 - - tm.BaseService = *cmn.NewBaseService(nil, "TrustMetric", tm) - return tm -} - -// OnStart implements Service -func (tm *TrustMetric) OnStart() error { - if err := tm.BaseService.OnStart(); err != nil { - return err - } - go tm.processRequests() - return nil -} - -// OnStop implements Service -// Nothing to do since the goroutine shuts down by itself via BaseService.Quit() -func (tm *TrustMetric) OnStop() {} - -// Returns a snapshot of the trust metric history data -func (tm *TrustMetric) HistoryJSON() MetricHistoryJSON { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return MetricHistoryJSON{ - NumIntervals: tm.numIntervals, - History: tm.history, - } -} - -// Instantiates a trust metric by loading the history data for a single peer. -// This is called only once and only right after creation, which is why the -// lock is not held while accessing the trust metric struct members -func (tm *TrustMetric) Init(hist MetricHistoryJSON) { - // Restore the number of time intervals we have previously tracked - if hist.NumIntervals > tm.maxIntervals { - hist.NumIntervals = tm.maxIntervals - } - tm.numIntervals = hist.NumIntervals - // Restore the history and its current size - if len(hist.History) > tm.historyMaxSize { - // Keep the history no larger than historyMaxSize - last := len(hist.History) - tm.historyMaxSize - hist.History = hist.History[last:] - } - tm.history = hist.History - tm.historySize = len(tm.history) - // Create the history weight values and weight sum - for i := 1; i <= tm.numIntervals; i++ { - x := math.Pow(defaultHistoryDataWeight, float64(i)) // Optimistic weight - tm.historyWeights = append(tm.historyWeights, x) - } - - for _, v := range tm.historyWeights { - tm.historyWeightSum += v - } - // Calculate the history value based on the loaded history data - tm.historyValue = tm.calcHistoryValue() -} - -// Pause tells the metric to pause recording data over time intervals. 
-// All method calls that indicate events will unpause the metric -func (tm *TrustMetric) Pause() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - // Pause the metric for now - tm.paused = true -} - -// BadEvents indicates that an undesirable event(s) took place -func (tm *TrustMetric) BadEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.bad += float64(num) -} - -// GoodEvents indicates that a desirable event(s) took place -func (tm *TrustMetric) GoodEvents(num int) { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - tm.unpause() - tm.good += float64(num) -} - -// TrustValue gets the dependable trust value; always between 0 and 1 -func (tm *TrustMetric) TrustValue() float64 { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - return tm.calcTrustValue() -} - -// TrustScore gets a score based on the trust value always between 0 and 100 -func (tm *TrustMetric) TrustScore() int { - score := tm.TrustValue() * 100 - - return int(math.Floor(score)) -} - -// NextTimeInterval saves current time interval data and prepares for the following interval -func (tm *TrustMetric) NextTimeInterval() { - tm.mtx.Lock() - defer tm.mtx.Unlock() - - if tm.paused { - // Do not prepare for the next time interval while paused - return - } - - // Add the current trust value to the history data - newHist := tm.calcTrustValue() - tm.history = append(tm.history, newHist) - - // Update history and interval counters - if tm.historySize < tm.historyMaxSize { - tm.historySize++ - } else { - // Keep the history no larger than historyMaxSize - last := len(tm.history) - tm.historyMaxSize - tm.history = tm.history[last:] - } - - if tm.numIntervals < tm.maxIntervals { - tm.numIntervals++ - // Add the optimistic weight for the new time interval - wk := math.Pow(defaultHistoryDataWeight, float64(tm.numIntervals)) - tm.historyWeights = append(tm.historyWeights, wk) - tm.historyWeightSum += wk - } - - // Update the history data using Faded Memories - tm.updateFadedMemory() - // Calculate the history value for the upcoming time interval - tm.historyValue = tm.calcHistoryValue() - tm.good = 0 - tm.bad = 0 -} - -// SetTicker allows a TestTicker to be provided that will manually control -// the passing of time from the perspective of the TrustMetric. 
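//
// For example (editor's sketch, following the pattern used in
// metric_test.go):
//
//	tt := NewTestTicker()
//	tm := NewMetric()
//	tm.SetTicker(tt) // before Start
//	tm.Start()
//	tt.NextTick() // manually advance one time interval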
-// The ticker must be set before Start is called on the metric
-func (tm *TrustMetric) SetTicker(ticker MetricTicker) {
-	tm.mtx.Lock()
-	defer tm.mtx.Unlock()
-
-	tm.testTicker = ticker
-}
-
-// Copy returns a new trust metric with members containing the same values
-func (tm *TrustMetric) Copy() *TrustMetric {
-	if tm == nil {
-		return nil
-	}
-	tm.mtx.Lock()
-	defer tm.mtx.Unlock()
-
-	return &TrustMetric{
-		proportionalWeight: tm.proportionalWeight,
-		integralWeight:     tm.integralWeight,
-		numIntervals:       tm.numIntervals,
-		maxIntervals:       tm.maxIntervals,
-		intervalLen:        tm.intervalLen,
-		history:            tm.history,
-		historyWeights:     tm.historyWeights,
-		historyWeightSum:   tm.historyWeightSum,
-		historySize:        tm.historySize,
-		historyMaxSize:     tm.historyMaxSize,
-		historyValue:       tm.historyValue,
-		good:               tm.good,
-		bad:                tm.bad,
-		paused:             tm.paused,
-	}
-}
-
-/* Private methods */
-
-// This method is for a goroutine that handles all requests on the metric
-func (tm *TrustMetric) processRequests() {
-	t := tm.testTicker
-	if t == nil {
-		// No test ticker was provided, so we create a normal ticker
-		t = NewTicker(tm.intervalLen)
-	}
-	defer t.Stop()
-	// Obtain the raw channel
-	tick := t.GetChannel()
-loop:
-	for {
-		select {
-		case <-tick:
-			tm.NextTimeInterval()
-		case <-tm.Quit():
-			// Stop all further tracking for this metric
-			break loop
-		}
-	}
-}
-
-// Wakes the trust metric up if it is currently paused
-// This method needs to be called with the mutex locked
-func (tm *TrustMetric) unpause() {
-	// Check if this is the first experience with
-	// what we are tracking since being paused
-	if tm.paused {
-		tm.good = 0
-		tm.bad = 0
-		// New events cause us to unpause the metric
-		tm.paused = false
-	}
-}
-
-// Calculates the trust value for the request processing
-func (tm *TrustMetric) calcTrustValue() float64 {
-	weightedP := tm.proportionalWeight * tm.proportionalValue()
-	weightedI := tm.integralWeight * tm.historyValue
-	weightedD := tm.weightedDerivative()
-
-	tv := weightedP + weightedI + weightedD
-	// Do not return a negative value.
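	// Editor's worked example (hypothetical numbers, assuming the default
	// weights 0.4/0.6 and the derivative gammas 0/1.0): with 9 good and
	// 1 bad event, P = 0.9; with a perfect history, I = 1.0; then
	// D = P - I = -0.1 is negative, so it is weighted by gamma2 = 1.0,
	// giving tv = 0.4*0.9 + 0.6*1.0 + 1.0*(-0.1) = 0.86.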
- if tv < 0 { - tv = 0 - } - return tv -} - -// Calculates the current score for good/bad experiences -func (tm *TrustMetric) proportionalValue() float64 { - value := 1.0 - - total := tm.good + tm.bad - if total > 0 { - value = tm.good / total - } - return value -} - -// Strengthens the derivative component when the change is negative -func (tm *TrustMetric) weightedDerivative() float64 { - var weight float64 = defaultDerivativeGamma1 - - d := tm.derivativeValue() - if d < 0 { - weight = defaultDerivativeGamma2 - } - return weight * d -} - -// Calculates the derivative component -func (tm *TrustMetric) derivativeValue() float64 { - return tm.proportionalValue() - tm.historyValue -} - -// Calculates the integral (history) component of the trust value -func (tm *TrustMetric) calcHistoryValue() float64 { - var hv float64 - - for i := 0; i < tm.numIntervals; i++ { - hv += tm.fadedMemoryValue(i) * tm.historyWeights[i] - } - - return hv / tm.historyWeightSum -} - -// Retrieves the actual history data value that represents the requested time interval -func (tm *TrustMetric) fadedMemoryValue(interval int) float64 { - first := tm.historySize - 1 - - if interval == 0 { - // Base case - return tm.history[first] - } - - offset := intervalToHistoryOffset(interval) - return tm.history[first-offset] -} - -// Performs the update for our Faded Memories process, which allows the -// trust metric tracking window to be large while maintaining a small -// number of history data values -func (tm *TrustMetric) updateFadedMemory() { - if tm.historySize < 2 { - return - } - - end := tm.historySize - 1 - // Keep the most recent history element - for count := 1; count < tm.historySize; count++ { - i := end - count - // The older the data is, the more we spread it out - x := math.Pow(2, float64(count)) - // Two history data values are merged into a single value - tm.history[i] = ((tm.history[i] * (x - 1)) + tm.history[i+1]) / x - } -} - -// Map the interval value down to an offset from the beginning of history -func intervalToHistoryOffset(interval int) int { - // The system maintains 2^m interval values in the form of m history - // data values. 
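// (Editor's illustration of the mapping implemented below: interval 1
// maps to offset 0, intervals 2-3 to offset 1, intervals 4-7 to
// offset 2, intervals 8-15 to offset 3, and so on.)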
Therefore, we access the ith interval by obtaining - // the history data index = the floor of log2(i) - return int(math.Floor(math.Log2(float64(interval)))) -} diff --git a/p2p/trust/metric_test.go b/p2p/trust/metric_test.go deleted file mode 100644 index 98ea99ab..00000000 --- a/p2p/trust/metric_test.go +++ /dev/null @@ -1,100 +0,0 @@ -package trust - -import ( - "testing" - "time" - - "github.com/stretchr/testify/assert" -) - -func TestTrustMetricScores(t *testing.T) { - tm := NewMetric() - tm.Start() - - // Perfect score - tm.GoodEvents(1) - score := tm.TrustScore() - assert.Equal(t, 100, score) - - // Less than perfect score - tm.BadEvents(10) - score = tm.TrustScore() - assert.NotEqual(t, 100, score) - tm.Stop() -} - -func TestTrustMetricConfig(t *testing.T) { - // 7 days - window := time.Minute * 60 * 24 * 7 - config := TrustMetricConfig{ - TrackingWindow: window, - IntervalLength: 2 * time.Minute, - } - - tm := NewMetricWithConfig(config) - tm.Start() - - // The max time intervals should be the TrackingWindow / IntervalLen - assert.Equal(t, int(config.TrackingWindow/config.IntervalLength), tm.maxIntervals) - - dc := DefaultConfig() - // These weights should still be the default values - assert.Equal(t, dc.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, dc.IntegralWeight, tm.integralWeight) - tm.Stop() - tm.Wait() - - config.ProportionalWeight = 0.3 - config.IntegralWeight = 0.7 - tm = NewMetricWithConfig(config) - tm.Start() - - // These weights should be equal to our custom values - assert.Equal(t, config.ProportionalWeight, tm.proportionalWeight) - assert.Equal(t, config.IntegralWeight, tm.integralWeight) - tm.Stop() - tm.Wait() -} - -// XXX: This test fails non-deterministically -func _TestTrustMetricStopPause(t *testing.T) { - // The TestTicker will provide manual control over - // the passing of time within the metric - tt := NewTestTicker() - tm := NewMetric() - tm.SetTicker(tt) - tm.Start() - // Allow some time intervals to pass and pause - tt.NextTick() - tt.NextTick() - tm.Pause() - - // could be 1 or 2 because Pause and NextTick race - first := tm.Copy().numIntervals - - // Allow more time to pass and check the intervals are unchanged - tt.NextTick() - tt.NextTick() - assert.Equal(t, first, tm.Copy().numIntervals) - - // Get the trust metric activated again - tm.GoodEvents(5) - // Allow some time intervals to pass and stop - tt.NextTick() - tt.NextTick() - tm.Stop() - tm.Wait() - - second := tm.Copy().numIntervals - // Allow more intervals to pass while the metric is stopped - // and check that the number of intervals match - tm.NextTimeInterval() - tm.NextTimeInterval() - // XXX: fails non-deterministically: - // expected 5, got 6 - assert.Equal(t, second+2, tm.Copy().numIntervals) - - if first > second { - t.Fatalf("numIntervals should always increase or stay the same over time") - } -} diff --git a/p2p/trust/store.go b/p2p/trust/store.go deleted file mode 100644 index bbb4592a..00000000 --- a/p2p/trust/store.go +++ /dev/null @@ -1,207 +0,0 @@ -// Copyright 2017 Tendermint. All rights reserved. -// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file. 
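//
// Editor's note (not part of the original file): on disk the store keeps
// a single JSON object mapping peer keys to their metric history, for
// example (hypothetical key and values):
//
//	{"peer_1": {"intervals": 5, "history": [0.8, 0.9]}}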
-
-package trust
-
-import (
-	"encoding/json"
-	"sync"
-	"time"
-
-	cmn "github.com/tendermint/tmlibs/common"
-	dbm "github.com/tendermint/tmlibs/db"
-)
-
-const defaultStorePeriodicSaveInterval = 1 * time.Minute
-
-var trustMetricKey = []byte("trustMetricStore")
-
-// TrustMetricStore - Manages all trust metrics for peers
-type TrustMetricStore struct {
-	cmn.BaseService
-
-	// Maps a Peer.Key to that peer's TrustMetric
-	peerMetrics map[string]*TrustMetric
-
-	// Mutex that protects the map and history data file
-	mtx sync.Mutex
-
-	// The db where peer trust metric history data will be stored
-	db dbm.DB
-
-	// This configuration will be used when creating new TrustMetrics
-	config TrustMetricConfig
-}
-
-// NewTrustMetricStore returns a store that saves data to the DB
-// and uses the config when creating new trust metrics.
-// Use Start to initialize the trust metric store
-func NewTrustMetricStore(db dbm.DB, tmc TrustMetricConfig) *TrustMetricStore {
-	tms := &TrustMetricStore{
-		peerMetrics: make(map[string]*TrustMetric),
-		db:          db,
-		config:      tmc,
-	}
-
-	tms.BaseService = *cmn.NewBaseService(nil, "TrustMetricStore", tms)
-	return tms
-}
-
-// OnStart implements Service
-func (tms *TrustMetricStore) OnStart() error {
-	if err := tms.BaseService.OnStart(); err != nil {
-		return err
-	}
-
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	tms.loadFromDB()
-	go tms.saveRoutine()
-	return nil
-}
-
-// OnStop implements Service
-func (tms *TrustMetricStore) OnStop() {
-	tms.BaseService.OnStop()
-
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	// Stop all trust metric go-routines
-	for _, tm := range tms.peerMetrics {
-		tm.Stop()
-	}
-
-	// Make the final trust history data save
-	tms.saveToDB()
-}
-
-// Size returns the number of entries in the trust metric store
-func (tms *TrustMetricStore) Size() int {
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	return tms.size()
-}
-
-// AddPeerTrustMetric takes an existing trust metric and associates it with a peer key.
-// The caller is expected to call Start on the TrustMetric being added
-func (tms *TrustMetricStore) AddPeerTrustMetric(key string, tm *TrustMetric) {
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	if key == "" || tm == nil {
-		return
-	}
-	tms.peerMetrics[key] = tm
-}
-
-// GetPeerTrustMetric returns a trust metric by peer key
-func (tms *TrustMetricStore) GetPeerTrustMetric(key string) *TrustMetric {
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	tm, ok := tms.peerMetrics[key]
-	if !ok {
-		// If the metric is not available, we will create it
-		tm = NewMetricWithConfig(tms.config)
-		tm.Start()
-		// The metric needs to be in the map
-		tms.peerMetrics[key] = tm
-	}
-	return tm
-}
-
-// PeerDisconnected pauses the trust metric associated with the peer identified by the key
-func (tms *TrustMetricStore) PeerDisconnected(key string) {
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	// If the Peer that disconnected has a metric, pause it
-	if tm, ok := tms.peerMetrics[key]; ok {
-		tm.Pause()
-	}
-}
-
-// Saves the history data for all peers to the store DB.
-// This public method acquires the trust metric store lock
-func (tms *TrustMetricStore) SaveToDB() {
-	tms.mtx.Lock()
-	defer tms.mtx.Unlock()
-
-	tms.saveToDB()
-}
-
-/* Private methods */
-
-// size returns the number of entries in the store without acquiring the mutex
-func (tms *TrustMetricStore) size() int {
-	return len(tms.peerMetrics)
-}
-
-/* Loading & Saving */
-/* Both loadFromDB and saveToDB assume the mutex has been acquired */
-
-// Loads the history data for all peers from the store DB
-// cmn.Panics if file is corrupt
-func (tms *TrustMetricStore) loadFromDB() bool {
-	// Obtain the history data we have so far
-	bytes := tms.db.Get(trustMetricKey)
-	if bytes == nil {
-		return false
-	}
-
-	peers := make(map[string]MetricHistoryJSON)
-	err := json.Unmarshal(bytes, &peers)
-	if err != nil {
-		cmn.PanicCrisis(cmn.Fmt("Could not unmarshal Trust Metric Store DB data: %v", err))
-	}
-
-	// If history data exists in the file,
-	// load it into trust metric
-	for key, p := range peers {
-		tm := NewMetricWithConfig(tms.config)
-
-		tm.Start()
-		tm.Init(p)
-		// Load the peer trust metric into the store
-		tms.peerMetrics[key] = tm
-	}
-	return true
-}
-
-// Saves the history data for all peers to the store DB
-func (tms *TrustMetricStore) saveToDB() {
-	tms.Logger.Debug("Saving TrustHistory to DB", "size", tms.size())
-
-	peers := make(map[string]MetricHistoryJSON)
-
-	for key, tm := range tms.peerMetrics {
-		// Add an entry for the peer identified by key
-		peers[key] = tm.HistoryJSON()
-	}
-
-	// Write all the data back to the DB
-	bytes, err := json.Marshal(peers)
-	if err != nil {
-		tms.Logger.Error("Failed to encode the TrustHistory", "err", err)
-		return
-	}
-	tms.db.SetSync(trustMetricKey, bytes)
-}
-
-// Periodically saves the trust history data to the DB
-func (tms *TrustMetricStore) saveRoutine() {
-	t := time.NewTicker(defaultStorePeriodicSaveInterval)
-	defer t.Stop()
-loop:
-	for {
-		select {
-		case <-t.C:
-			tms.SaveToDB()
-		case <-tms.Quit():
-			break loop
-		}
-	}
-}
diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go
deleted file mode 100644
index 4e555396..00000000
--- a/p2p/trust/store_test.go
+++ /dev/null
@@ -1,152 +0,0 @@
-// Copyright 2017 Tendermint. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package trust
-
-import (
-	"fmt"
-	"io/ioutil"
-	"os"
-	"testing"
-
-	"github.com/stretchr/testify/assert"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
-)
-
-func TestTrustMetricStoreSaveLoad(t *testing.T) {
-	dir, err := ioutil.TempDir("", "trust_test")
-	if err != nil {
-		panic(err)
-	}
-	defer os.RemoveAll(dir)
-
-	historyDB := dbm.NewDB("trusthistory", "goleveldb", dir)
-
-	// 0 peers saved
-	store := NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
-	store.saveToDB()
-	// Load the data from the file
-	store = NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
-	store.Start()
-	// Make sure we still have 0 entries
-	assert.Zero(t, store.Size())
-
-	// 100 TestTickers
-	var tt []*TestTicker
-	for i := 0; i < 100; i++ {
-		// The TestTicker will provide manual control over
-		// the passing of time within the metric
-		tt = append(tt, NewTestTicker())
-	}
-	// 100 peers
-	for i := 0; i < 100; i++ {
-		key := fmt.Sprintf("peer_%d", i)
-		tm := NewMetric()
-
-		tm.SetTicker(tt[i])
-		tm.Start()
-		store.AddPeerTrustMetric(key, tm)
-
-		tm.BadEvents(10)
-		tm.GoodEvents(1)
-	}
-	// Check that we have 100 entries and save
-	assert.Equal(t, 100, store.Size())
-	// Give the 100 metrics time to process the history data
-	for i := 0; i < 100; i++ {
-		tt[i].NextTick()
-		tt[i].NextTick()
-	}
-	// Stop all the trust metrics and save
-	store.Stop()
-
-	// Load the data from the DB
-	store = NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
-	store.Start()
-
-	// Check that we still have 100 peers with imperfect trust values
-	assert.Equal(t, 100, store.Size())
-	for _, tm := range store.peerMetrics {
-		assert.NotEqual(t, 1.0, tm.TrustValue())
-	}
-
-	store.Stop()
-}
-
-func TestTrustMetricStoreConfig(t *testing.T) {
-	historyDB := dbm.NewDB("", "memdb", "")
-
-	config := TrustMetricConfig{
-		ProportionalWeight: 0.5,
-		IntegralWeight:     0.5,
-	}
-
-	// Create a store with custom config
-	store := NewTrustMetricStore(historyDB, config)
-	store.SetLogger(log.TestingLogger())
-	store.Start()
-
-	// Have the store make us a metric with the config
-	tm := store.GetPeerTrustMetric("TestKey")
-
-	// Check that the options made it to the metric
-	assert.Equal(t, 0.5, tm.proportionalWeight)
-	assert.Equal(t, 0.5, tm.integralWeight)
-	store.Stop()
-}
-
-func TestTrustMetricStoreLookup(t *testing.T) {
-	historyDB := dbm.NewDB("", "memdb", "")
-
-	store := NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
-	store.Start()
-
-	// Create 100 peers in the trust metric store
-	for i := 0; i < 100; i++ {
-		key := fmt.Sprintf("peer_%d", i)
-		store.GetPeerTrustMetric(key)
-
-		// Check that the trust metric was successfully entered
-		ktm := store.peerMetrics[key]
-		assert.NotNil(t, ktm, "Expected to find TrustMetric %s but wasn't there.", key)
-	}
-
-	store.Stop()
-}
-
-func TestTrustMetricStorePeerScore(t *testing.T) {
-	historyDB := dbm.NewDB("", "memdb", "")
-
-	store := NewTrustMetricStore(historyDB, DefaultConfig())
-	store.SetLogger(log.TestingLogger())
-	store.Start()
-
-	key := "TestKey"
-	tm := store.GetPeerTrustMetric(key)
-
-	// This peer is innocent so far
-	first := tm.TrustScore()
-	assert.Equal(t, 100, first)
-
-	// Add some undesirable events and disconnect
-	tm.BadEvents(1)
-	first = tm.TrustScore()
-	assert.NotEqual(t, 100, first)
-	tm.BadEvents(10)
-	second := tm.TrustScore()
-
-	if second > first {
-		t.Errorf("A greater number of bad events should lower the trust score")
-	}
-	store.PeerDisconnected(key)
-
-	// We will remember our experiences with this peer
-	tm = store.GetPeerTrustMetric(key)
-	assert.NotEqual(t, 100, tm.TrustScore())
-	store.Stop()
-}
diff --git a/p2p/trust/ticker.go b/p2p/trust/ticker.go
deleted file mode 100644
index 3f0f3091..00000000
--- a/p2p/trust/ticker.go
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright 2017 Tendermint. All rights reserved.
-// Use of this source code is governed by Apache 2 LICENSE that can be found in the LICENSE file.
-
-package trust
-
-import (
-	"time"
-)
-
-// MetricTicker provides a single ticker interface for the trust metric
-type MetricTicker interface {
-	// GetChannel returns the receive only channel that fires at each time interval
-	GetChannel() <-chan time.Time
-
-	// Stop will halt further activity on the ticker channel
-	Stop()
-}
-
-// The ticker used during testing that provides manual control over time intervals
-type TestTicker struct {
-	C       chan time.Time
-	stopped bool
-}
-
-// NewTestTicker returns our ticker used within test routines
-func NewTestTicker() *TestTicker {
-	c := make(chan time.Time)
-	return &TestTicker{
-		C: c,
-	}
-}
-
-func (t *TestTicker) GetChannel() <-chan time.Time {
-	return t.C
-}
-
-func (t *TestTicker) Stop() {
-	t.stopped = true
-}
-
-// NextTick manually sends Time on the ticker channel
-func (t *TestTicker) NextTick() {
-	if t.stopped {
-		return
-	}
-	t.C <- time.Now()
-}
-
-// Ticker is just a wrapper around time.Ticker that allows it
-// to meet the requirements of our interface
-type Ticker struct {
-	*time.Ticker
-}
-
-// NewTicker returns a normal time.Ticker wrapped to meet our interface
-func NewTicker(d time.Duration) *Ticker {
-	return &Ticker{time.NewTicker(d)}
-}
-
-func (t *Ticker) GetChannel() <-chan time.Time {
-	return t.C
-}
diff --git a/p2p/types.go b/p2p/types.go
deleted file mode 100644
index b11765bb..00000000
--- a/p2p/types.go
+++ /dev/null
@@ -1,8 +0,0 @@
-package p2p
-
-import (
-	"github.com/tendermint/tendermint/p2p/conn"
-)
-
-type ChannelDescriptor = conn.ChannelDescriptor
-type ConnectionStatus = conn.ConnectionStatus
diff --git a/p2p/upnp/probe.go b/p2p/upnp/probe.go
deleted file mode 100644
index 55479415..00000000
--- a/p2p/upnp/probe.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package upnp
-
-import (
-	"fmt"
-	"net"
-	"time"
-
-	cmn "github.com/tendermint/tmlibs/common"
-	"github.com/tendermint/tmlibs/log"
-)
-
-type UPNPCapabilities struct {
-	PortMapping bool
-	Hairpin     bool
-}
-
-func makeUPNPListener(intPort int, extPort int, logger log.Logger) (NAT, net.Listener, net.IP, error) {
-	nat, err := Discover()
-	if err != nil {
-		return nil, nil, nil, fmt.Errorf("NAT upnp could not be discovered: %v", err)
-	}
-	logger.Info(cmn.Fmt("ourIP: %v", nat.(*upnpNAT).ourIP))
-
-	ext, err := nat.GetExternalAddress()
-	if err != nil {
-		return nat, nil, nil, fmt.Errorf("External address error: %v", err)
-	}
-	logger.Info(cmn.Fmt("External address: %v", ext))
-
-	port, err := nat.AddPortMapping("tcp", extPort, intPort, "Tendermint UPnP Probe", 0)
-	if err != nil {
-		return nat, nil, ext, fmt.Errorf("Port mapping error: %v", err)
-	}
-	logger.Info(cmn.Fmt("Port mapping mapped: %v", port))
-
-	// also run the listener, open for all remote addresses.
-	listener, err := net.Listen("tcp", fmt.Sprintf(":%v", intPort))
-	if err != nil {
-		return nat, nil, ext, fmt.Errorf("Error establishing listener: %v", err)
-	}
-	return nat, listener, ext, nil
-}
-
-func testHairpin(listener net.Listener, extAddr string, logger log.Logger) (supportsHairpin bool) {
-	// Listener
-	go func() {
-		inConn, err := listener.Accept()
-		if err != nil {
-			logger.Info(cmn.Fmt("Listener.Accept() error: %v", err))
-			return
-		}
-		logger.Info(cmn.Fmt("Accepted incoming connection: %v -> %v", inConn.LocalAddr(), inConn.RemoteAddr()))
-		buf := make([]byte, 1024)
-		n, err := inConn.Read(buf)
-		if err != nil {
-			logger.Info(cmn.Fmt("Incoming connection read error: %v", err))
-			return
-		}
-		logger.Info(cmn.Fmt("Incoming connection read %v bytes: %X", n, buf))
-		if string(buf[:n]) == "test data" {
-			supportsHairpin = true
-			return
-		}
-	}()
-
-	// Establish outgoing
-	outConn, err := net.Dial("tcp", extAddr)
-	if err != nil {
-		logger.Info(cmn.Fmt("Outgoing connection dial error: %v", err))
-		return
-	}
-
-	n, err := outConn.Write([]byte("test data"))
-	if err != nil {
-		logger.Info(cmn.Fmt("Outgoing connection write error: %v", err))
-		return
-	}
-	logger.Info(cmn.Fmt("Outgoing connection wrote %v bytes", n))
-
-	// Wait for data receipt
-	time.Sleep(1 * time.Second)
-	return
-}
-
-func Probe(logger log.Logger) (caps UPNPCapabilities, err error) {
-	logger.Info("Probing for UPnP!")
-
-	intPort, extPort := 8001, 8001
-
-	nat, listener, ext, err := makeUPNPListener(intPort, extPort, logger)
-	if err != nil {
-		return
-	}
-	caps.PortMapping = true
-
-	// Deferred cleanup
-	defer func() {
-		if err := nat.DeletePortMapping("tcp", intPort, extPort); err != nil {
-			logger.Error(cmn.Fmt("Port mapping delete error: %v", err))
-		}
-		if err := listener.Close(); err != nil {
-			logger.Error(cmn.Fmt("Listener closing error: %v", err))
-		}
-	}()
-
-	supportsHairpin := testHairpin(listener, fmt.Sprintf("%v:%v", ext, extPort), logger)
-	if supportsHairpin {
-		caps.Hairpin = true
-	}
-
-	return
-}
diff --git a/p2p/upnp/upnp.go b/p2p/upnp/upnp.go
deleted file mode 100644
index d53974fc..00000000
--- a/p2p/upnp/upnp.go
+++ /dev/null
@@ -1,393 +0,0 @@
-// Taken from taipei-torrent.
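//
// A minimal usage sketch (editor's illustration; the ports are
// hypothetical and a UPnP-capable gateway is assumed to be reachable):
//
//	nat, err := Discover()
//	if err != nil {
//		return err
//	}
//	extIP, _ := nat.GetExternalAddress()
//	_, _ = nat.AddPortMapping("tcp", 26656, 26656, "Tendermint node", 0)
//	defer nat.DeletePortMapping("tcp", 26656, 26656) // nolint: errcheck
//	_ = extIP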
-// Just enough UPnP to be able to forward ports -// For more information, see: http://www.upnp-hacks.org/upnp.html -package upnp - -// TODO: use syscalls to get actual ourIP, see issue #712 - -import ( - "bytes" - "encoding/xml" - "errors" - "fmt" - "io/ioutil" - "net" - "net/http" - "strconv" - "strings" - "time" -) - -type upnpNAT struct { - serviceURL string - ourIP string - urnDomain string -} - -// protocol is either "udp" or "tcp" -type NAT interface { - GetExternalAddress() (addr net.IP, err error) - AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) - DeletePortMapping(protocol string, externalPort, internalPort int) (err error) -} - -func Discover() (nat NAT, err error) { - ssdp, err := net.ResolveUDPAddr("udp4", "239.255.255.250:1900") - if err != nil { - return - } - conn, err := net.ListenPacket("udp4", ":0") - if err != nil { - return - } - socket := conn.(*net.UDPConn) - defer socket.Close() // nolint: errcheck - - if err := socket.SetDeadline(time.Now().Add(3 * time.Second)); err != nil { - return nil, err - } - - st := "InternetGatewayDevice:1" - - buf := bytes.NewBufferString( - "M-SEARCH * HTTP/1.1\r\n" + - "HOST: 239.255.255.250:1900\r\n" + - "ST: ssdp:all\r\n" + - "MAN: \"ssdp:discover\"\r\n" + - "MX: 2\r\n\r\n") - message := buf.Bytes() - answerBytes := make([]byte, 1024) - for i := 0; i < 3; i++ { - _, err = socket.WriteToUDP(message, ssdp) - if err != nil { - return - } - var n int - _, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - return - } - for { - n, _, err = socket.ReadFromUDP(answerBytes) - if err != nil { - break - } - answer := string(answerBytes[0:n]) - if !strings.Contains(answer, st) { - continue - } - // HTTP header field names are case-insensitive. 
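			// For example (editor's illustration), a gateway reply contains a
			// line such as
			//
			//	LOCATION: http://192.168.1.1:5000/rootDesc.xml
			//
			// which is located case-insensitively below: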
-			// http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2
-			locString := "\r\nlocation:"
-			answer = strings.ToLower(answer)
-			locIndex := strings.Index(answer, locString)
-			if locIndex < 0 {
-				continue
-			}
-			loc := answer[locIndex+len(locString):]
-			endIndex := strings.Index(loc, "\r\n")
-			if endIndex < 0 {
-				continue
-			}
-			locURL := strings.TrimSpace(loc[0:endIndex])
-			var serviceURL, urnDomain string
-			serviceURL, urnDomain, err = getServiceURL(locURL)
-			if err != nil {
-				return
-			}
-			var ourIP net.IP
-			ourIP, err = localIPv4()
-			if err != nil {
-				return
-			}
-			nat = &upnpNAT{serviceURL: serviceURL, ourIP: ourIP.String(), urnDomain: urnDomain}
-			return
-		}
-	}
-	err = errors.New("UPnP port discovery failed")
-	return
-}
-
-type Envelope struct {
-	XMLName xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Envelope"`
-	Soap    *SoapBody
-}
-type SoapBody struct {
-	XMLName    xml.Name `xml:"http://schemas.xmlsoap.org/soap/envelope/ Body"`
-	ExternalIP *ExternalIPAddressResponse
-}
-
-type ExternalIPAddressResponse struct {
-	XMLName   xml.Name `xml:"GetExternalIPAddressResponse"`
-	IPAddress string   `xml:"NewExternalIPAddress"`
-}
-
-type ExternalIPAddress struct {
-	XMLName xml.Name `xml:"NewExternalIPAddress"`
-	IP      string
-}
-
-type UPNPService struct {
-	ServiceType string `xml:"serviceType"`
-	ControlURL  string `xml:"controlURL"`
-}
-
-type DeviceList struct {
-	Device []Device `xml:"device"`
-}
-
-type ServiceList struct {
-	Service []UPNPService `xml:"service"`
-}
-
-type Device struct {
-	XMLName     xml.Name    `xml:"device"`
-	DeviceType  string      `xml:"deviceType"`
-	DeviceList  DeviceList  `xml:"deviceList"`
-	ServiceList ServiceList `xml:"serviceList"`
-}
-
-type Root struct {
-	Device Device
-}
-
-func getChildDevice(d *Device, deviceType string) *Device {
-	dl := d.DeviceList.Device
-	for i := 0; i < len(dl); i++ {
-		if strings.Contains(dl[i].DeviceType, deviceType) {
-			return &dl[i]
-		}
-	}
-	return nil
-}
-
-func getChildService(d *Device, serviceType string) *UPNPService {
-	sl := d.ServiceList.Service
-	for i := 0; i < len(sl); i++ {
-		if strings.Contains(sl[i].ServiceType, serviceType) {
-			return &sl[i]
-		}
-	}
-	return nil
-}
-
-func localIPv4() (net.IP, error) {
-	tt, err := net.Interfaces()
-	if err != nil {
-		return nil, err
-	}
-	for _, t := range tt {
-		aa, err := t.Addrs()
-		if err != nil {
-			return nil, err
-		}
-		for _, a := range aa {
-			ipnet, ok := a.(*net.IPNet)
-			if !ok {
-				continue
-			}
-			v4 := ipnet.IP.To4()
-			if v4 == nil || v4[0] == 127 { // loopback address
-				continue
-			}
-			return v4, nil
-		}
-	}
-	return nil, errors.New("cannot find local IP address")
-}
-
-func getServiceURL(rootURL string) (url, urnDomain string, err error) {
-	r, err := http.Get(rootURL)
-	if err != nil {
-		return
-	}
-	defer r.Body.Close() // nolint: errcheck
-
-	if r.StatusCode >= 400 {
-		err = errors.New(strconv.Itoa(r.StatusCode))
-		return
-	}
-	var root Root
-	err = xml.NewDecoder(r.Body).Decode(&root)
-	if err != nil {
-		return
-	}
-	a := &root.Device
-	if !strings.Contains(a.DeviceType, "InternetGatewayDevice:1") {
-		err = errors.New("No InternetGatewayDevice")
-		return
-	}
-	b := getChildDevice(a, "WANDevice:1")
-	if b == nil {
-		err = errors.New("No WANDevice")
-		return
-	}
-	c := getChildDevice(b, "WANConnectionDevice:1")
-	if c == nil {
-		err = errors.New("No WANConnectionDevice")
-		return
-	}
-	d := getChildService(c, "WANIPConnection:1")
-	if d == nil {
-		// Some routers don't follow the UPnP spec, and put WanIPConnection under WanDevice,
-		// instead of under WanConnectionDevice
-		d = getChildService(b, "WANIPConnection:1")
-
-		if d == nil {
-			err = errors.New("No WANIPConnection")
-			return
-		}
-	}
-	// Extract the domain name, which isn't always 'schemas-upnp-org'
-	urnDomain = strings.Split(d.ServiceType, ":")[1]
-	url = combineURL(rootURL, d.ControlURL)
-	return
-}
-
-func combineURL(rootURL, subURL string) string {
-	protocolEnd := "://"
-	protoEndIndex := strings.Index(rootURL, protocolEnd)
-	a := rootURL[protoEndIndex+len(protocolEnd):]
-	rootIndex := strings.Index(a, "/")
-	return rootURL[0:protoEndIndex+len(protocolEnd)+rootIndex] + subURL
-}
-
-func soapRequest(url, function, message, domain string) (r *http.Response, err error) {
-	fullMessage := "<?xml version=\"1.0\" ?>" +
-		"<s:Envelope xmlns:s=\"http://schemas.xmlsoap.org/soap/envelope/\" s:encodingStyle=\"http://schemas.xmlsoap.org/soap/encoding/\">\r\n" +
-		"<s:Body>" + message + "</s:Body></s:Envelope>"
-
-	req, err := http.NewRequest("POST", url, strings.NewReader(fullMessage))
-	if err != nil {
-		return nil, err
-	}
-	req.Header.Set("Content-Type", "text/xml ; charset=\"utf-8\"")
-	req.Header.Set("User-Agent", "Darwin/10.0.0, UPnP/1.0, MiniUPnPc/1.3")
-	//req.Header.Set("Transfer-Encoding", "chunked")
-	req.Header.Set("SOAPAction", "\"urn:"+domain+":service:WANIPConnection:1#"+function+"\"")
-	req.Header.Set("Connection", "Close")
-	req.Header.Set("Cache-Control", "no-cache")
-	req.Header.Set("Pragma", "no-cache")
-
-	// log.Stderr("soapRequest ", req)
-
-	r, err = http.DefaultClient.Do(req)
-	if err != nil {
-		return nil, err
-	}
-	/*if r.Body != nil {
-		defer r.Body.Close()
-	}*/
-
-	if r.StatusCode >= 400 {
-		// log.Stderr(function, r.StatusCode)
-		err = errors.New("Error " + strconv.Itoa(r.StatusCode) + " for " + function)
-		r = nil
-		return
-	}
-	return
-}
-
-type statusInfo struct {
-	externalIpAddress string
-}
-
-func (n *upnpNAT) getExternalIPAddress() (info statusInfo, err error) {
-
-	message := "<u:GetExternalIPAddress xmlns:u=\"urn:" + n.urnDomain + ":service:WANIPConnection:1\">\r\n" +
-		"</u:GetExternalIPAddress>"
-
-	var response *http.Response
-	response, err = soapRequest(n.serviceURL, "GetExternalIPAddress", message, n.urnDomain)
-	if response != nil {
-		defer response.Body.Close() // nolint: errcheck
-	}
-	if err != nil {
-		return
-	}
-	var envelope Envelope
-	data, err := ioutil.ReadAll(response.Body)
-	if err != nil {
-		return
-	}
-	reader := bytes.NewReader(data)
-	err = xml.NewDecoder(reader).Decode(&envelope)
-	if err != nil {
-		return
-	}
-
-	info = statusInfo{envelope.Soap.ExternalIP.IPAddress}
-
-	if err != nil {
-		return
-	}
-
-	return
-}
-
-// GetExternalAddress returns an external IP. If GetExternalIPAddress action
-// fails or IP returned is invalid, GetExternalAddress returns an error.
-func (n *upnpNAT) GetExternalAddress() (addr net.IP, err error) {
-	info, err := n.getExternalIPAddress()
-	if err != nil {
-		return
-	}
-	addr = net.ParseIP(info.externalIpAddress)
-	if addr == nil {
-		err = fmt.Errorf("Failed to parse IP: %v", info.externalIpAddress)
-	}
-	return
-}
-
-func (n *upnpNAT) AddPortMapping(protocol string, externalPort, internalPort int, description string, timeout int) (mappedExternalPort int, err error) {
-	// A single concatenation would break ARM compilation.
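	// Editor's illustration of the assembled request body (hypothetical
	// values; it is built piecewise below):
	//
	//	<u:AddPortMapping xmlns:u="urn:schemas-upnp-org:service:WANIPConnection:1">
	//	<NewRemoteHost></NewRemoteHost><NewExternalPort>26656</NewExternalPort>
	//	<NewProtocol>tcp</NewProtocol>...</u:AddPortMapping>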
- message := "\r\n" + - "" + strconv.Itoa(externalPort) - message += "" + protocol + "" - message += "" + strconv.Itoa(internalPort) + "" + - "" + n.ourIP + "" + - "1" - message += description + - "" + strconv.Itoa(timeout) + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "AddPortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() // nolint: errcheck - } - if err != nil { - return - } - - // TODO: check response to see if the port was forwarded - // log.Println(message, response) - // JAE: - // body, err := ioutil.ReadAll(response.Body) - // fmt.Println(string(body), err) - mappedExternalPort = externalPort - _ = response - return -} - -func (n *upnpNAT) DeletePortMapping(protocol string, externalPort, internalPort int) (err error) { - - message := "\r\n" + - "" + strconv.Itoa(externalPort) + - "" + protocol + "" + - "" - - var response *http.Response - response, err = soapRequest(n.serviceURL, "DeletePortMapping", message, n.urnDomain) - if response != nil { - defer response.Body.Close() // nolint: errcheck - } - if err != nil { - return - } - - // TODO: check response to see if the port was deleted - // log.Println(message, response) - _ = response - return -} diff --git a/p2p/version.go b/p2p/version.go deleted file mode 100644 index 9a4c7bba..00000000 --- a/p2p/version.go +++ /dev/null @@ -1,3 +0,0 @@ -package p2p - -const Version = "0.5.0" diff --git a/p2p/wire.go b/p2p/wire.go deleted file mode 100644 index a90ac851..00000000 --- a/p2p/wire.go +++ /dev/null @@ -1,12 +0,0 @@ -package p2p - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) -} diff --git a/privval/priv_validator.go b/privval/priv_validator.go deleted file mode 100644 index 2bb5ef32..00000000 --- a/privval/priv_validator.go +++ /dev/null @@ -1,345 +0,0 @@ -package privval - -import ( - "bytes" - "errors" - "fmt" - "io/ioutil" - "sync" - "time" - - "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -// TODO: type ? -const ( - stepNone int8 = 0 // Used to distinguish the initial state - stepPropose int8 = 1 - stepPrevote int8 = 2 - stepPrecommit int8 = 3 -) - -func voteToStep(vote *types.Vote) int8 { - switch vote.Type { - case types.VoteTypePrevote: - return stepPrevote - case types.VoteTypePrecommit: - return stepPrecommit - default: - cmn.PanicSanity("Unknown vote type") - return 0 - } -} - -// FilePV implements PrivValidator using data persisted to disk -// to prevent double signing. -// NOTE: the directory containing the pv.filePath must already exist. -type FilePV struct { - Address types.Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - LastHeight int64 `json:"last_height"` - LastRound int `json:"last_round"` - LastStep int8 `json:"last_step"` - LastSignature crypto.Signature `json:"last_signature,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? - LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? - PrivKey crypto.PrivKey `json:"priv_key"` - - // For persistence. - // Overloaded for testing. - filePath string - mtx sync.Mutex -} - -// GetAddress returns the address of the validator. -// Implements PrivValidator. -func (pv *FilePV) GetAddress() types.Address { - return pv.Address -} - -// GetPubKey returns the public key of the validator. 
-// Implements PrivValidator. -func (pv *FilePV) GetPubKey() crypto.PubKey { - return pv.PubKey -} - -// GenFilePV generates a new validator with randomly generated private key -// and sets the filePath, but does not call Save(). -func GenFilePV(filePath string) *FilePV { - privKey := crypto.GenPrivKeyEd25519() - return &FilePV{ - Address: privKey.PubKey().Address(), - PubKey: privKey.PubKey(), - PrivKey: privKey, - LastStep: stepNone, - filePath: filePath, - } -} - -// LoadFilePV loads a FilePV from the filePath. The FilePV handles double -// signing prevention by persisting data to the filePath. If the filePath does -// not exist, the FilePV must be created manually and saved. -func LoadFilePV(filePath string) *FilePV { - pvJSONBytes, err := ioutil.ReadFile(filePath) - if err != nil { - cmn.Exit(err.Error()) - } - pv := &FilePV{} - err = cdc.UnmarshalJSON(pvJSONBytes, &pv) - if err != nil { - cmn.Exit(cmn.Fmt("Error reading PrivValidator from %v: %v\n", filePath, err)) - } - - pv.filePath = filePath - return pv -} - -// LoadOrGenFilePV loads a FilePV from the given filePath -// or else generates a new one and saves it to the filePath. -func LoadOrGenFilePV(filePath string) *FilePV { - var pv *FilePV - if cmn.FileExists(filePath) { - pv = LoadFilePV(filePath) - } else { - pv = GenFilePV(filePath) - pv.Save() - } - return pv -} - -// Save persists the FilePV to disk. -func (pv *FilePV) Save() { - pv.mtx.Lock() - defer pv.mtx.Unlock() - pv.save() -} - -func (pv *FilePV) save() { - outFile := pv.filePath - if outFile == "" { - panic("Cannot save PrivValidator: filePath not set") - } - jsonBytes, err := cdc.MarshalJSONIndent(pv, "", " ") - if err != nil { - panic(err) - } - err = cmn.WriteFileAtomic(outFile, jsonBytes, 0600) - if err != nil { - panic(err) - } -} - -// Reset resets all fields in the FilePV. -// NOTE: Unsafe! -func (pv *FilePV) Reset() { - var sig crypto.Signature - pv.LastHeight = 0 - pv.LastRound = 0 - pv.LastStep = 0 - pv.LastSignature = sig - pv.LastSignBytes = nil - pv.Save() -} - -// SignVote signs a canonical representation of the vote, along with the -// chainID. Implements PrivValidator. -func (pv *FilePV) SignVote(chainID string, vote *types.Vote) error { - pv.mtx.Lock() - defer pv.mtx.Unlock() - if err := pv.signVote(chainID, vote); err != nil { - return errors.New(cmn.Fmt("Error signing vote: %v", err)) - } - return nil -} - -// SignProposal signs a canonical representation of the proposal, along with -// the chainID. Implements PrivValidator. -func (pv *FilePV) SignProposal(chainID string, proposal *types.Proposal) error { - pv.mtx.Lock() - defer pv.mtx.Unlock() - if err := pv.signProposal(chainID, proposal); err != nil { - return fmt.Errorf("Error signing proposal: %v", err) - } - return nil -} - -// returns error if HRS regression or no LastSignBytes. 
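// (Editor's example: having last signed at height 10, round 1, step
// prevote, a request for height 9, or for height 10 round 0, is
// rejected as a regression.)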
returns true if HRS is unchanged
-func (pv *FilePV) checkHRS(height int64, round int, step int8) (bool, error) {
- if pv.LastHeight > height {
- return false, errors.New("Height regression")
- }
-
- if pv.LastHeight == height {
- if pv.LastRound > round {
- return false, errors.New("Round regression")
- }
-
- if pv.LastRound == round {
- if pv.LastStep > step {
- return false, errors.New("Step regression")
- } else if pv.LastStep == step {
- if pv.LastSignBytes != nil {
- if pv.LastSignature == nil {
- panic("pv: LastSignature is nil but LastSignBytes is not!")
- }
- return true, nil
- }
- return false, errors.New("No LastSignature found")
- }
- }
- }
- return false, nil
-}
-
-// signVote checks if the vote is good to sign and sets the vote signature.
-// It may need to set the timestamp as well if the vote is otherwise the same as
-// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
-func (pv *FilePV) signVote(chainID string, vote *types.Vote) error {
- height, round, step := vote.Height, vote.Round, voteToStep(vote)
- signBytes := vote.SignBytes(chainID)
-
- sameHRS, err := pv.checkHRS(height, round, step)
- if err != nil {
- return err
- }
-
- // We might crash before writing to the wal,
- // causing us to try to re-sign for the same HRS.
- // If signbytes are the same, use the last signature.
- // If they only differ by timestamp, use last timestamp and signature
- // Otherwise, return error
- if sameHRS {
- if bytes.Equal(signBytes, pv.LastSignBytes) {
- vote.Signature = pv.LastSignature
- } else if timestamp, ok := checkVotesOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
- vote.Timestamp = timestamp
- vote.Signature = pv.LastSignature
- } else {
- err = fmt.Errorf("Conflicting data")
- }
- return err
- }
-
- // It passed the checks. Sign the vote
- sig := pv.PrivKey.Sign(signBytes)
- pv.saveSigned(height, round, step, signBytes, sig)
- vote.Signature = sig
- return nil
-}
-
-// signProposal checks if the proposal is good to sign and sets the proposal signature.
-// It may need to set the timestamp as well if the proposal is otherwise the same as
-// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL).
-func (pv *FilePV) signProposal(chainID string, proposal *types.Proposal) error {
- height, round, step := proposal.Height, proposal.Round, stepPropose
- signBytes := proposal.SignBytes(chainID)
-
- sameHRS, err := pv.checkHRS(height, round, step)
- if err != nil {
- return err
- }
-
- // We might crash before writing to the wal,
- // causing us to try to re-sign for the same HRS.
- // If signbytes are the same, use the last signature.
- // If they only differ by timestamp, use last timestamp and signature
- // Otherwise, return error
- if sameHRS {
- if bytes.Equal(signBytes, pv.LastSignBytes) {
- proposal.Signature = pv.LastSignature
- } else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(pv.LastSignBytes, signBytes); ok {
- proposal.Timestamp = timestamp
- proposal.Signature = pv.LastSignature
- } else {
- err = fmt.Errorf("Conflicting data")
- }
- return err
- }
-
- // It passed the checks.
Sign the proposal - sig := pv.PrivKey.Sign(signBytes) - pv.saveSigned(height, round, step, signBytes, sig) - proposal.Signature = sig - return nil -} - -// Persist height/round/step and signature -func (pv *FilePV) saveSigned(height int64, round int, step int8, - signBytes []byte, sig crypto.Signature) { - - pv.LastHeight = height - pv.LastRound = round - pv.LastStep = step - pv.LastSignature = sig - pv.LastSignBytes = signBytes - pv.save() -} - -// SignHeartbeat signs a canonical representation of the heartbeat, along with the chainID. -// Implements PrivValidator. -func (pv *FilePV) SignHeartbeat(chainID string, heartbeat *types.Heartbeat) error { - pv.mtx.Lock() - defer pv.mtx.Unlock() - heartbeat.Signature = pv.PrivKey.Sign(heartbeat.SignBytes(chainID)) - return nil -} - -// String returns a string representation of the FilePV. -func (pv *FilePV) String() string { - return fmt.Sprintf("PrivValidator{%v LH:%v, LR:%v, LS:%v}", pv.GetAddress(), pv.LastHeight, pv.LastRound, pv.LastStep) -} - -//------------------------------------- - -// returns the timestamp from the lastSignBytes. -// returns true if the only difference in the votes is their timestamp. -func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastVote, newVote types.CanonicalJSONVote - if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil { - panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) - } - if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil { - panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) - } - - lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp) - if err != nil { - panic(err) - } - - // set the times to the same value and check equality - now := types.CanonicalTime(time.Now()) - lastVote.Timestamp = now - newVote.Timestamp = now - lastVoteBytes, _ := cdc.MarshalJSON(lastVote) - newVoteBytes, _ := cdc.MarshalJSON(newVote) - - return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes) -} - -// returns the timestamp from the lastSignBytes. 
-// returns true if the only difference in the proposals is their timestamp -func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal types.CanonicalJSONProposal - if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil { - panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) - } - if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil { - panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) - } - - lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp) - if err != nil { - panic(err) - } - - // set the times to the same value and check equality - now := types.CanonicalTime(time.Now()) - lastProposal.Timestamp = now - newProposal.Timestamp = now - lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) - newProposalBytes, _ := cdc.MarshalJSON(newProposal) - - return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) -} diff --git a/privval/priv_validator_test.go b/privval/priv_validator_test.go deleted file mode 100644 index c7212d59..00000000 --- a/privval/priv_validator_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package privval - -import ( - "encoding/base64" - "fmt" - "os" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestGenLoadValidator(t *testing.T) { - assert := assert.New(t) - - _, tempFilePath := cmn.Tempfile("priv_validator_") - privVal := GenFilePV(tempFilePath) - - height := int64(100) - privVal.LastHeight = height - privVal.Save() - addr := privVal.GetAddress() - - privVal = LoadFilePV(tempFilePath) - assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") - assert.Equal(height, privVal.LastHeight, "expected privval.LastHeight to have been saved") -} - -func TestLoadOrGenValidator(t *testing.T) { - assert := assert.New(t) - - _, tempFilePath := cmn.Tempfile("priv_validator_") - if err := os.Remove(tempFilePath); err != nil { - t.Error(err) - } - privVal := LoadOrGenFilePV(tempFilePath) - addr := privVal.GetAddress() - privVal = LoadOrGenFilePV(tempFilePath) - assert.Equal(addr, privVal.GetAddress(), "expected privval addr to be the same") -} - -func TestUnmarshalValidator(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // create some fixed values - privKey := crypto.GenPrivKeyEd25519() - pubKey := privKey.PubKey() - addr := pubKey.Address() - pubArray := [32]byte(pubKey.(crypto.PubKeyEd25519)) - pubBytes := pubArray[:] - privArray := [64]byte(privKey) - privBytes := privArray[:] - pubB64 := base64.StdEncoding.EncodeToString(pubBytes) - privB64 := base64.StdEncoding.EncodeToString(privBytes) - - serialized := fmt.Sprintf(`{ - "address": "%s", - "pub_key": { - "type": "AC26791624DE60", - "value": "%s" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "%s" - } -}`, addr, pubB64, privB64) - - val := FilePV{} - err := cdc.UnmarshalJSON([]byte(serialized), &val) - require.Nil(err, "%+v", err) - - // make sure the values match - assert.EqualValues(addr, val.GetAddress()) - assert.EqualValues(pubKey, val.GetPubKey()) - assert.EqualValues(privKey, val.PrivKey) - - // export it and make sure it is the same - out, err := cdc.MarshalJSON(val) - require.Nil(err, "%+v", err) - assert.JSONEq(serialized, string(out)) -} - 
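The signing tests below all exercise one invariant from the deleted FilePV code: the signer's (height, round, step) triple may only move forward, and an exactly repeated request is honoured only by reusing the stored signature. A minimal, self-contained sketch of that monotonicity rule (a hypothetical `lastSigned` type for illustration, not the FilePV API itself):

```go
package main

import (
	"errors"
	"fmt"
)

// lastSigned mirrors FilePV's persisted LastHeight/LastRound/LastStep.
type lastSigned struct {
	height int64
	round  int
	step   int8
}

// checkHRS rejects anything that would move the signer backwards; the real
// code additionally reuses the last signature when the sign bytes repeat.
func (l lastSigned) checkHRS(height int64, round int, step int8) error {
	if l.height > height {
		return errors.New("height regression")
	}
	if l.height == height {
		if l.round > round {
			return errors.New("round regression")
		}
		if l.round == round && l.step > step {
			return errors.New("step regression")
		}
	}
	return nil
}

func main() {
	last := lastSigned{height: 10, round: 1, step: 2} // last signed: prevote at 10/1
	fmt.Println(last.checkHRS(10, 1, 3)) // <nil>: advancing to precommit is fine
	fmt.Println(last.checkHRS(10, 0, 2)) // round regression
	fmt.Println(last.checkHRS(9, 1, 2))  // height regression
}
```

TestSignVote's bad-vote cases below are exactly these regressions, plus a conflicting block at an unchanged HRS, which the sign-bytes comparison in signVote rejects.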
-func TestSignVote(t *testing.T) { - assert := assert.New(t) - - _, tempFilePath := cmn.Tempfile("priv_validator_") - privVal := GenFilePV(tempFilePath) - - block1 := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}} - block2 := types.BlockID{[]byte{3, 2, 1}, types.PartSetHeader{}} - height, round := int64(10), 1 - voteType := types.VoteTypePrevote - - // sign a vote for first time - vote := newVote(privVal.Address, 0, height, round, voteType, block1) - err := privVal.SignVote("mychainid", vote) - assert.NoError(err, "expected no error signing vote") - - // try to sign the same vote again; should be fine - err = privVal.SignVote("mychainid", vote) - assert.NoError(err, "expected no error on signing same vote") - - // now try some bad votes - cases := []*types.Vote{ - newVote(privVal.Address, 0, height, round-1, voteType, block1), // round regression - newVote(privVal.Address, 0, height-1, round, voteType, block1), // height regression - newVote(privVal.Address, 0, height-2, round+4, voteType, block1), // height regression and different round - newVote(privVal.Address, 0, height, round, voteType, block2), // different block - } - - for _, c := range cases { - err = privVal.SignVote("mychainid", c) - assert.Error(err, "expected error on signing conflicting vote") - } - - // try signing a vote with a different time stamp - sig := vote.Signature - vote.Timestamp = vote.Timestamp.Add(time.Duration(1000)) - err = privVal.SignVote("mychainid", vote) - assert.NoError(err) - assert.Equal(sig, vote.Signature) -} - -func TestSignProposal(t *testing.T) { - assert := assert.New(t) - - _, tempFilePath := cmn.Tempfile("priv_validator_") - privVal := GenFilePV(tempFilePath) - - block1 := types.PartSetHeader{5, []byte{1, 2, 3}} - block2 := types.PartSetHeader{10, []byte{3, 2, 1}} - height, round := int64(10), 1 - - // sign a proposal for first time - proposal := newProposal(height, round, block1) - err := privVal.SignProposal("mychainid", proposal) - assert.NoError(err, "expected no error signing proposal") - - // try to sign the same proposal again; should be fine - err = privVal.SignProposal("mychainid", proposal) - assert.NoError(err, "expected no error on signing same proposal") - - // now try some bad Proposals - cases := []*types.Proposal{ - newProposal(height, round-1, block1), // round regression - newProposal(height-1, round, block1), // height regression - newProposal(height-2, round+4, block1), // height regression and different round - newProposal(height, round, block2), // different block - } - - for _, c := range cases { - err = privVal.SignProposal("mychainid", c) - assert.Error(err, "expected error on signing conflicting proposal") - } - - // try signing a proposal with a different time stamp - sig := proposal.Signature - proposal.Timestamp = proposal.Timestamp.Add(time.Duration(1000)) - err = privVal.SignProposal("mychainid", proposal) - assert.NoError(err) - assert.Equal(sig, proposal.Signature) -} - -func TestDifferByTimestamp(t *testing.T) { - _, tempFilePath := cmn.Tempfile("priv_validator_") - privVal := GenFilePV(tempFilePath) - - block1 := types.PartSetHeader{5, []byte{1, 2, 3}} - height, round := int64(10), 1 - chainID := "mychainid" - - // test proposal - { - proposal := newProposal(height, round, block1) - err := privVal.SignProposal(chainID, proposal) - assert.NoError(t, err, "expected no error signing proposal") - signBytes := proposal.SignBytes(chainID) - sig := proposal.Signature - timeStamp := clipToMS(proposal.Timestamp) - - // manipulate the timestamp. 
should get changed back
- proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond)
- var emptySig crypto.Signature
- proposal.Signature = emptySig
- err = privVal.SignProposal("mychainid", proposal)
- assert.NoError(t, err, "expected no error on signing same proposal")
-
- assert.Equal(t, timeStamp, proposal.Timestamp)
- assert.Equal(t, signBytes, proposal.SignBytes(chainID))
- assert.Equal(t, sig, proposal.Signature)
- }
-
- // test vote
- {
- voteType := types.VoteTypePrevote
- blockID := types.BlockID{[]byte{1, 2, 3}, types.PartSetHeader{}}
- vote := newVote(privVal.Address, 0, height, round, voteType, blockID)
- err := privVal.SignVote("mychainid", vote)
- assert.NoError(t, err, "expected no error signing vote")
-
- signBytes := vote.SignBytes(chainID)
- sig := vote.Signature
- timeStamp := clipToMS(vote.Timestamp)
-
- // manipulate the timestamp. should get changed back
- vote.Timestamp = vote.Timestamp.Add(time.Millisecond)
- var emptySig crypto.Signature
- vote.Signature = emptySig
- err = privVal.SignVote("mychainid", vote)
- assert.NoError(t, err, "expected no error on signing same vote")
-
- assert.Equal(t, timeStamp, vote.Timestamp)
- assert.Equal(t, signBytes, vote.SignBytes(chainID))
- assert.Equal(t, sig, vote.Signature)
- }
-}
-
-func newVote(addr types.Address, idx int, height int64, round int, typ byte, blockID types.BlockID) *types.Vote {
- return &types.Vote{
- ValidatorAddress: addr,
- ValidatorIndex: idx,
- Height: height,
- Round: round,
- Type: typ,
- Timestamp: time.Now().UTC(),
- BlockID: blockID,
- }
-}
-
-func newProposal(height int64, round int, partsHeader types.PartSetHeader) *types.Proposal {
- return &types.Proposal{
- Height: height,
- Round: round,
- BlockPartsHeader: partsHeader,
- Timestamp: time.Now().UTC(),
- }
-}
-
-func clipToMS(t time.Time) time.Time {
- nano := t.UnixNano()
- million := int64(1000000)
- nano = (nano / million) * million
- return time.Unix(0, nano).UTC()
-}
diff --git a/privval/socket.go b/privval/socket.go
deleted file mode 100644
index 9f59a815..00000000
--- a/privval/socket.go
+++ /dev/null
@@ -1,538 +0,0 @@
-package privval
-
-import (
- "errors"
- "fmt"
- "io"
- "net"
- "time"
-
- "github.com/tendermint/go-amino"
- "github.com/tendermint/go-crypto"
- cmn "github.com/tendermint/tmlibs/common"
- "github.com/tendermint/tmlibs/log"
-
- p2pconn "github.com/tendermint/tendermint/p2p/conn"
- "github.com/tendermint/tendermint/types"
-)
-
-const (
- defaultAcceptDeadlineSeconds = 3
- defaultConnDeadlineSeconds = 3
- defaultConnHeartBeatSeconds = 30
- defaultConnWaitSeconds = 60
- defaultDialRetries = 10
-)
-
-// Socket errors.
-var (
- ErrDialRetryMax = errors.New("dialed maximum retries")
- ErrConnWaitTimeout = errors.New("waited for remote signer for too long")
- ErrConnTimeout = errors.New("remote signer timed out")
-)
-
-var (
- acceptDeadline = time.Second * defaultAcceptDeadlineSeconds
- connDeadline = time.Second * defaultConnDeadlineSeconds
- connHeartbeat = time.Second * defaultConnHeartBeatSeconds
-)
-
-// SocketPVOption sets an optional parameter on the SocketPV.
-type SocketPVOption func(*SocketPV)
-
-// SocketPVAcceptDeadline sets the deadline for the SocketPV listener.
-// A zero time value disables the deadline.
-func SocketPVAcceptDeadline(deadline time.Duration) SocketPVOption {
- return func(sc *SocketPV) { sc.acceptDeadline = deadline }
-}
-
-// SocketPVConnDeadline sets the read and write deadline for connections
-// from external signing processes.
-func SocketPVConnDeadline(deadline time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connDeadline = deadline } -} - -// SocketPVHeartbeat sets the period on which to check the liveness of the -// connected Signer connections. -func SocketPVHeartbeat(period time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connHeartbeat = period } -} - -// SocketPVConnWait sets the timeout duration before connection of external -// signing processes are considered to be unsuccessful. -func SocketPVConnWait(timeout time.Duration) SocketPVOption { - return func(sc *SocketPV) { sc.connWaitTimeout = timeout } -} - -// SocketPV implements PrivValidator, it uses a socket to request signatures -// from an external process. -type SocketPV struct { - cmn.BaseService - - addr string - acceptDeadline time.Duration - connDeadline time.Duration - connHeartbeat time.Duration - connWaitTimeout time.Duration - privKey crypto.PrivKeyEd25519 - - conn net.Conn - listener net.Listener -} - -// Check that SocketPV implements PrivValidator. -var _ types.PrivValidator = (*SocketPV)(nil) - -// NewSocketPV returns an instance of SocketPV. -func NewSocketPV( - logger log.Logger, - socketAddr string, - privKey crypto.PrivKeyEd25519, -) *SocketPV { - sc := &SocketPV{ - addr: socketAddr, - acceptDeadline: acceptDeadline, - connDeadline: connDeadline, - connHeartbeat: connHeartbeat, - connWaitTimeout: time.Second * defaultConnWaitSeconds, - privKey: privKey, - } - - sc.BaseService = *cmn.NewBaseService(logger, "SocketPV", sc) - - return sc -} - -// GetAddress implements PrivValidator. -func (sc *SocketPV) GetAddress() types.Address { - addr, err := sc.getAddress() - if err != nil { - panic(err) - } - - return addr -} - -// Address is an alias for PubKey().Address(). -func (sc *SocketPV) getAddress() (cmn.HexBytes, error) { - p, err := sc.getPubKey() - if err != nil { - return nil, err - } - - return p.Address(), nil -} - -// GetPubKey implements PrivValidator. -func (sc *SocketPV) GetPubKey() crypto.PubKey { - pubKey, err := sc.getPubKey() - if err != nil { - panic(err) - } - - return pubKey -} - -func (sc *SocketPV) getPubKey() (crypto.PubKey, error) { - err := writeMsg(sc.conn, &PubKeyMsg{}) - if err != nil { - return nil, err - } - - res, err := readMsg(sc.conn) - if err != nil { - return nil, err - } - - return res.(*PubKeyMsg).PubKey, nil -} - -// SignVote implements PrivValidator. -func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - *vote = *res.(*SignVoteMsg).Vote - - return nil -} - -// SignProposal implements PrivValidator. -func (sc *SocketPV) SignProposal( - chainID string, - proposal *types.Proposal, -) error { - err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - *proposal = *res.(*SignProposalMsg).Proposal - - return nil -} - -// SignHeartbeat implements PrivValidator. -func (sc *SocketPV) SignHeartbeat( - chainID string, - heartbeat *types.Heartbeat, -) error { - err := writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat}) - if err != nil { - return err - } - - res, err := readMsg(sc.conn) - if err != nil { - return err - } - - *heartbeat = *res.(*SignHeartbeatMsg).Heartbeat - - return nil -} - -// OnStart implements cmn.Service. 
-func (sc *SocketPV) OnStart() error { - if err := sc.listen(); err != nil { - err = cmn.ErrorWrap(err, "failed to listen") - sc.Logger.Error( - "OnStart", - "err", err, - ) - return err - } - - conn, err := sc.waitConnection() - if err != nil { - err = cmn.ErrorWrap(err, "failed to accept connection") - sc.Logger.Error( - "OnStart", - "err", err, - ) - - return err - } - - sc.conn = conn - - return nil -} - -// OnStop implements cmn.Service. -func (sc *SocketPV) OnStop() { - if sc.conn != nil { - if err := sc.conn.Close(); err != nil { - err = cmn.ErrorWrap(err, "failed to close connection") - sc.Logger.Error( - "OnStop", - "err", err, - ) - } - } - - if sc.listener != nil { - if err := sc.listener.Close(); err != nil { - err = cmn.ErrorWrap(err, "failed to close listener") - sc.Logger.Error( - "OnStop", - "err", err, - ) - } - } -} - -func (sc *SocketPV) acceptConnection() (net.Conn, error) { - conn, err := sc.listener.Accept() - if err != nil { - if !sc.IsRunning() { - return nil, nil // Ignore error from listener closing. - } - return nil, err - - } - - conn, err = p2pconn.MakeSecretConnection(conn, sc.privKey) - if err != nil { - return nil, err - } - - return conn, nil -} - -func (sc *SocketPV) listen() error { - ln, err := net.Listen(cmn.ProtocolAndAddress(sc.addr)) - if err != nil { - return err - } - - sc.listener = newTCPTimeoutListener( - ln, - sc.acceptDeadline, - sc.connDeadline, - sc.connHeartbeat, - ) - - return nil -} - -// waitConnection uses the configured wait timeout to error if no external -// process connects in the time period. -func (sc *SocketPV) waitConnection() (net.Conn, error) { - var ( - connc = make(chan net.Conn, 1) - errc = make(chan error, 1) - ) - - go func(connc chan<- net.Conn, errc chan<- error) { - conn, err := sc.acceptConnection() - if err != nil { - errc <- err - return - } - - connc <- conn - }(connc, errc) - - select { - case conn := <-connc: - return conn, nil - case err := <-errc: - if _, ok := err.(timeoutError); ok { - return nil, cmn.ErrorWrap(ErrConnWaitTimeout, err.Error()) - } - return nil, err - case <-time.After(sc.connWaitTimeout): - return nil, ErrConnWaitTimeout - } -} - -//--------------------------------------------------------- - -// RemoteSignerOption sets an optional parameter on the RemoteSigner. -type RemoteSignerOption func(*RemoteSigner) - -// RemoteSignerConnDeadline sets the read and write deadline for connections -// from external signing processes. -func RemoteSignerConnDeadline(deadline time.Duration) RemoteSignerOption { - return func(ss *RemoteSigner) { ss.connDeadline = deadline } -} - -// RemoteSignerConnRetries sets the amount of attempted retries to connect. -func RemoteSignerConnRetries(retries int) RemoteSignerOption { - return func(ss *RemoteSigner) { ss.connRetries = retries } -} - -// RemoteSigner implements PrivValidator by dialing to a socket. -type RemoteSigner struct { - cmn.BaseService - - addr string - chainID string - connDeadline time.Duration - connRetries int - privKey crypto.PrivKeyEd25519 - privVal types.PrivValidator - - conn net.Conn -} - -// NewRemoteSigner returns an instance of RemoteSigner. 
-func NewRemoteSigner( - logger log.Logger, - chainID, socketAddr string, - privVal types.PrivValidator, - privKey crypto.PrivKeyEd25519, -) *RemoteSigner { - rs := &RemoteSigner{ - addr: socketAddr, - chainID: chainID, - connDeadline: time.Second * defaultConnDeadlineSeconds, - connRetries: defaultDialRetries, - privKey: privKey, - privVal: privVal, - } - - rs.BaseService = *cmn.NewBaseService(logger, "RemoteSigner", rs) - - return rs -} - -// OnStart implements cmn.Service. -func (rs *RemoteSigner) OnStart() error { - conn, err := rs.connect() - if err != nil { - err = cmn.ErrorWrap(err, "connect") - rs.Logger.Error("OnStart", "err", err) - return err - } - - go rs.handleConnection(conn) - - return nil -} - -// OnStop implements cmn.Service. -func (rs *RemoteSigner) OnStop() { - if rs.conn == nil { - return - } - - if err := rs.conn.Close(); err != nil { - rs.Logger.Error("OnStop", "err", cmn.ErrorWrap(err, "closing listener failed")) - } -} - -func (rs *RemoteSigner) connect() (net.Conn, error) { - for retries := rs.connRetries; retries > 0; retries-- { - // Don't sleep if it is the first retry. - if retries != rs.connRetries { - time.Sleep(rs.connDeadline) - } - - conn, err := cmn.Connect(rs.addr) - if err != nil { - err = cmn.ErrorWrap(err, "connection failed") - rs.Logger.Error( - "connect", - "addr", rs.addr, - "err", err, - ) - - continue - } - - if err := conn.SetDeadline(time.Now().Add(connDeadline)); err != nil { - err = cmn.ErrorWrap(err, "setting connection timeout failed") - rs.Logger.Error( - "connect", - "err", err, - ) - continue - } - - conn, err = p2pconn.MakeSecretConnection(conn, rs.privKey) - if err != nil { - err = cmn.ErrorWrap(err, "encrypting connection failed") - rs.Logger.Error( - "connect", - "err", err, - ) - - continue - } - - return conn, nil - } - - return nil, ErrDialRetryMax -} - -func (rs *RemoteSigner) handleConnection(conn net.Conn) { - for { - if !rs.IsRunning() { - return // Ignore error from listener closing. - } - - req, err := readMsg(conn) - if err != nil { - if err != io.EOF { - rs.Logger.Error("handleConnection", "err", err) - } - return - } - - var res SocketPVMsg - - switch r := req.(type) { - case *PubKeyMsg: - var p crypto.PubKey - p = rs.privVal.GetPubKey() - res = &PubKeyMsg{p} - case *SignVoteMsg: - err = rs.privVal.SignVote(rs.chainID, r.Vote) - res = &SignVoteMsg{r.Vote} - case *SignProposalMsg: - err = rs.privVal.SignProposal(rs.chainID, r.Proposal) - res = &SignProposalMsg{r.Proposal} - case *SignHeartbeatMsg: - err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) - res = &SignHeartbeatMsg{r.Heartbeat} - default: - err = fmt.Errorf("unknown msg: %v", r) - } - - if err != nil { - rs.Logger.Error("handleConnection", "err", err) - return - } - - err = writeMsg(conn, res) - if err != nil { - rs.Logger.Error("handleConnection", "err", err) - return - } - } -} - -//--------------------------------------------------------- - -// SocketPVMsg is sent between RemoteSigner and SocketPV. -type SocketPVMsg interface{} - -func RegisterSocketPVMsg(cdc *amino.Codec) { - cdc.RegisterInterface((*SocketPVMsg)(nil), nil) - cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil) - cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil) - cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil) - cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil) -} - -// PubKeyMsg is a PrivValidatorSocket message containing the public key. 
-type PubKeyMsg struct {
- PubKey crypto.PubKey
-}
-
-// SignVoteMsg is a PrivValidatorSocket message containing a vote.
-type SignVoteMsg struct {
- Vote *types.Vote
-}
-
-// SignProposalMsg is a PrivValidatorSocket message containing a Proposal.
-type SignProposalMsg struct {
- Proposal *types.Proposal
-}
-
-// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat.
-type SignHeartbeatMsg struct {
- Heartbeat *types.Heartbeat
-}
-
-func readMsg(r io.Reader) (msg SocketPVMsg, err error) {
- const maxSocketPVMsgSize = 1024 * 10
- _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize)
- if _, ok := err.(timeoutError); ok {
- err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
- }
- return
-}
-
-func writeMsg(w io.Writer, msg interface{}) (err error) {
- _, err = cdc.MarshalBinaryWriter(w, msg)
- if _, ok := err.(timeoutError); ok {
- err = cmn.ErrorWrap(ErrConnTimeout, err.Error())
- }
- return
-}
diff --git a/privval/socket_tcp.go b/privval/socket_tcp.go
deleted file mode 100644
index b26db00c..00000000
--- a/privval/socket_tcp.go
+++ /dev/null
@@ -1,66 +0,0 @@
-package privval
-
-import (
- "net"
- "time"
-)
-
-// timeoutError can be used to check if an error returned from the net package
-// was due to a timeout.
-type timeoutError interface {
- Timeout() bool
-}
-
-// tcpTimeoutListener implements net.Listener.
-var _ net.Listener = (*tcpTimeoutListener)(nil)
-
-// tcpTimeoutListener wraps a *net.TCPListener to standardise protocol timeouts
-// and potentially other tuning parameters.
-type tcpTimeoutListener struct {
- *net.TCPListener
-
- acceptDeadline time.Duration
- connDeadline time.Duration
- period time.Duration
-}
-
-// newTCPTimeoutListener returns an instance of tcpTimeoutListener.
-func newTCPTimeoutListener(
- ln net.Listener,
- acceptDeadline, connDeadline time.Duration,
- period time.Duration,
-) tcpTimeoutListener {
- return tcpTimeoutListener{
- TCPListener: ln.(*net.TCPListener),
- acceptDeadline: acceptDeadline,
- connDeadline: connDeadline,
- period: period,
- }
-}
-
-// Accept implements net.Listener.
-func (ln tcpTimeoutListener) Accept() (net.Conn, error) { - err := ln.SetDeadline(time.Now().Add(ln.acceptDeadline)) - if err != nil { - return nil, err - } - - tc, err := ln.AcceptTCP() - if err != nil { - return nil, err - } - - if err := tc.SetDeadline(time.Now().Add(ln.connDeadline)); err != nil { - return nil, err - } - - if err := tc.SetKeepAlive(true); err != nil { - return nil, err - } - - if err := tc.SetKeepAlivePeriod(ln.period); err != nil { - return nil, err - } - - return tc, nil -} diff --git a/privval/socket_tcp_test.go b/privval/socket_tcp_test.go deleted file mode 100644 index 44a673c0..00000000 --- a/privval/socket_tcp_test.go +++ /dev/null @@ -1,64 +0,0 @@ -package privval - -import ( - "net" - "testing" - "time" -) - -func TestTCPTimeoutListenerAcceptDeadline(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - ln = newTCPTimeoutListener(ln, time.Millisecond, time.Second, time.Second) - - _, err = ln.Accept() - opErr, ok := err.(*net.OpError) - if !ok { - t.Fatalf("have %v, want *net.OpError", err) - } - - if have, want := opErr.Op, "accept"; have != want { - t.Errorf("have %v, want %v", have, want) - } -} - -func TestTCPTimeoutListenerConnDeadline(t *testing.T) { - ln, err := net.Listen("tcp", "127.0.0.1:0") - if err != nil { - t.Fatal(err) - } - - ln = newTCPTimeoutListener(ln, time.Second, time.Millisecond, time.Second) - - donec := make(chan struct{}) - go func(ln net.Listener) { - defer close(donec) - - c, err := ln.Accept() - if err != nil { - t.Fatal(err) - } - - time.Sleep(2 * time.Millisecond) - - _, err = c.Write([]byte("foo")) - opErr, ok := err.(*net.OpError) - if !ok { - t.Fatalf("have %v, want *net.OpError", err) - } - - if have, want := opErr.Op, "write"; have != want { - t.Errorf("have %v, want %v", have, want) - } - }(ln) - - _, err = net.Dial("tcp", ln.Addr().String()) - if err != nil { - t.Fatal(err) - } - - <-donec -} diff --git a/privval/socket_test.go b/privval/socket_test.go deleted file mode 100644 index 197c2508..00000000 --- a/privval/socket_test.go +++ /dev/null @@ -1,282 +0,0 @@ -package privval - -import ( - "fmt" - "net" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - p2pconn "github.com/tendermint/tendermint/p2p/conn" - "github.com/tendermint/tendermint/types" -) - -func TestSocketPVAddress(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - ) - defer sc.Stop() - defer rs.Stop() - - serverAddr := rs.privVal.GetAddress() - - clientAddr, err := sc.getAddress() - require.NoError(t, err) - - assert.Equal(t, serverAddr, clientAddr) - - // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. - assert.Equal(t, serverAddr, sc.GetAddress()) - -} - -func TestSocketPVPubKey(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - ) - defer sc.Stop() - defer rs.Stop() - - clientKey, err := sc.getPubKey() - require.NoError(t, err) - - privKey := rs.privVal.GetPubKey() - - assert.Equal(t, privKey, clientKey) - - // TODO(xla): Remove when PrivValidator2 replaced PrivValidator. 
- assert.Equal(t, privKey, sc.GetPubKey()) -} - -func TestSocketPVProposal(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - - ts = time.Now() - privProposal = &types.Proposal{Timestamp: ts} - clientProposal = &types.Proposal{Timestamp: ts} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignProposal(chainID, privProposal)) - require.NoError(t, sc.SignProposal(chainID, clientProposal)) - assert.Equal(t, privProposal.Signature, clientProposal.Signature) -} - -func TestSocketPVVote(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - - ts = time.Now() - vType = types.VoteTypePrecommit - want = &types.Vote{Timestamp: ts, Type: vType} - have = &types.Vote{Timestamp: ts, Type: vType} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignVote(chainID, want)) - require.NoError(t, sc.SignVote(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) -} - -func TestSocketPVHeartbeat(t *testing.T) { - var ( - chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) - - want = &types.Heartbeat{} - have = &types.Heartbeat{} - ) - defer sc.Stop() - defer rs.Stop() - - require.NoError(t, rs.privVal.SignHeartbeat(chainID, want)) - require.NoError(t, sc.SignHeartbeat(chainID, have)) - assert.Equal(t, want.Signature, have.Signature) -} - -func TestSocketPVAcceptDeadline(t *testing.T) { - var ( - sc = NewSocketPV( - log.TestingLogger(), - "127.0.0.1:0", - crypto.GenPrivKeyEd25519(), - ) - ) - defer sc.Stop() - - SocketPVAcceptDeadline(time.Millisecond)(sc) - - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) -} - -func TestSocketPVDeadline(t *testing.T) { - var ( - addr = testFreeAddr(t) - listenc = make(chan struct{}) - sc = NewSocketPV( - log.TestingLogger(), - addr, - crypto.GenPrivKeyEd25519(), - ) - ) - - SocketPVConnDeadline(100 * time.Millisecond)(sc) - SocketPVConnWait(500 * time.Millisecond)(sc) - - go func(sc *SocketPV) { - defer close(listenc) - - require.NoError(t, sc.Start()) - - assert.True(t, sc.IsRunning()) - }(sc) - - for { - conn, err := cmn.Connect(addr) - if err != nil { - continue - } - - _, err = p2pconn.MakeSecretConnection( - conn, - crypto.GenPrivKeyEd25519(), - ) - if err == nil { - break - } - } - - <-listenc - - // Sleep to guarantee deadline has been hit. 
- time.Sleep(20 * time.Microsecond) - - _, err := sc.getPubKey() - assert.Equal(t, err.(cmn.Error).Cause(), ErrConnTimeout) -} - -func TestSocketPVWait(t *testing.T) { - sc := NewSocketPV( - log.TestingLogger(), - "127.0.0.1:0", - crypto.GenPrivKeyEd25519(), - ) - defer sc.Stop() - - SocketPVConnWait(time.Millisecond)(sc) - - assert.Equal(t, sc.Start().(cmn.Error).Cause(), ErrConnWaitTimeout) -} - -func TestRemoteSignerRetry(t *testing.T) { - var ( - attemptc = make(chan int) - retries = 2 - ) - - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - - go func(ln net.Listener, attemptc chan<- int) { - attempts := 0 - - for { - conn, err := ln.Accept() - require.NoError(t, err) - - err = conn.Close() - require.NoError(t, err) - - attempts++ - - if attempts == retries { - attemptc <- attempts - break - } - } - }(ln, attemptc) - - rs := NewRemoteSigner( - log.TestingLogger(), - cmn.RandStr(12), - ln.Addr().String(), - types.NewMockPV(), - crypto.GenPrivKeyEd25519(), - ) - defer rs.Stop() - - RemoteSignerConnDeadline(time.Millisecond)(rs) - RemoteSignerConnRetries(retries)(rs) - - assert.Equal(t, rs.Start().(cmn.Error).Cause(), ErrDialRetryMax) - - select { - case attempts := <-attemptc: - assert.Equal(t, retries, attempts) - case <-time.After(100 * time.Millisecond): - t.Error("expected remote to observe connection attempts") - } -} - -func testSetupSocketPair( - t *testing.T, - chainID string, -) (*SocketPV, *RemoteSigner) { - var ( - addr = testFreeAddr(t) - logger = log.TestingLogger() - privVal = types.NewMockPV() - readyc = make(chan struct{}) - rs = NewRemoteSigner( - logger, - chainID, - addr, - privVal, - crypto.GenPrivKeyEd25519(), - ) - sc = NewSocketPV( - logger, - addr, - crypto.GenPrivKeyEd25519(), - ) - ) - - go func(sc *SocketPV) { - require.NoError(t, sc.Start()) - assert.True(t, sc.IsRunning()) - - readyc <- struct{}{} - }(sc) - - RemoteSignerConnDeadline(time.Millisecond)(rs) - RemoteSignerConnRetries(1e6)(rs) - - require.NoError(t, rs.Start()) - assert.True(t, rs.IsRunning()) - - <-readyc - - return sc, rs -} - -// testFreeAddr claims a free port so we don't block on listener being ready. 
-func testFreeAddr(t *testing.T) string { - ln, err := net.Listen("tcp", "127.0.0.1:0") - require.NoError(t, err) - defer ln.Close() - - return fmt.Sprintf("127.0.0.1:%d", ln.Addr().(*net.TCPAddr).Port) -} diff --git a/privval/wire.go b/privval/wire.go deleted file mode 100644 index 68891083..00000000 --- a/privval/wire.go +++ /dev/null @@ -1,13 +0,0 @@ -package privval - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" -) - -var cdc = amino.NewCodec() - -func init() { - crypto.RegisterAmino(cdc) - RegisterSocketPVMsg(cdc) -} diff --git a/proxy/app_conn.go b/proxy/app_conn.go deleted file mode 100644 index 2319fed8..00000000 --- a/proxy/app_conn.go +++ /dev/null @@ -1,144 +0,0 @@ -package proxy - -import ( - abcicli "github.com/tendermint/abci/client" - "github.com/tendermint/abci/types" -) - -//---------------------------------------------------------------------------------------- -// Enforce which abci msgs can be sent on a connection at the type level - -type AppConnConsensus interface { - SetResponseCallback(abcicli.Callback) - Error() error - - InitChainSync(types.RequestInitChain) (*types.ResponseInitChain, error) - - BeginBlockSync(types.RequestBeginBlock) (*types.ResponseBeginBlock, error) - DeliverTxAsync(tx []byte) *abcicli.ReqRes - EndBlockSync(types.RequestEndBlock) (*types.ResponseEndBlock, error) - CommitSync() (*types.ResponseCommit, error) -} - -type AppConnMempool interface { - SetResponseCallback(abcicli.Callback) - Error() error - - CheckTxAsync(tx []byte) *abcicli.ReqRes - - FlushAsync() *abcicli.ReqRes - FlushSync() error -} - -type AppConnQuery interface { - Error() error - - EchoSync(string) (*types.ResponseEcho, error) - InfoSync(types.RequestInfo) (*types.ResponseInfo, error) - QuerySync(types.RequestQuery) (*types.ResponseQuery, error) - - // SetOptionSync(key string, value string) (res types.Result) -} - -//----------------------------------------------------------------------------------------- -// Implements AppConnConsensus (subset of abcicli.Client) - -type appConnConsensus struct { - appConn abcicli.Client -} - -func NewAppConnConsensus(appConn abcicli.Client) *appConnConsensus { - return &appConnConsensus{ - appConn: appConn, - } -} - -func (app *appConnConsensus) SetResponseCallback(cb abcicli.Callback) { - app.appConn.SetResponseCallback(cb) -} - -func (app *appConnConsensus) Error() error { - return app.appConn.Error() -} - -func (app *appConnConsensus) InitChainSync(req types.RequestInitChain) (*types.ResponseInitChain, error) { - return app.appConn.InitChainSync(req) -} - -func (app *appConnConsensus) BeginBlockSync(req types.RequestBeginBlock) (*types.ResponseBeginBlock, error) { - return app.appConn.BeginBlockSync(req) -} - -func (app *appConnConsensus) DeliverTxAsync(tx []byte) *abcicli.ReqRes { - return app.appConn.DeliverTxAsync(tx) -} - -func (app *appConnConsensus) EndBlockSync(req types.RequestEndBlock) (*types.ResponseEndBlock, error) { - return app.appConn.EndBlockSync(req) -} - -func (app *appConnConsensus) CommitSync() (*types.ResponseCommit, error) { - return app.appConn.CommitSync() -} - -//------------------------------------------------ -// Implements AppConnMempool (subset of abcicli.Client) - -type appConnMempool struct { - appConn abcicli.Client -} - -func NewAppConnMempool(appConn abcicli.Client) *appConnMempool { - return &appConnMempool{ - appConn: appConn, - } -} - -func (app *appConnMempool) SetResponseCallback(cb abcicli.Callback) { - app.appConn.SetResponseCallback(cb) -} - -func (app 
*appConnMempool) Error() error { - return app.appConn.Error() -} - -func (app *appConnMempool) FlushAsync() *abcicli.ReqRes { - return app.appConn.FlushAsync() -} - -func (app *appConnMempool) FlushSync() error { - return app.appConn.FlushSync() -} - -func (app *appConnMempool) CheckTxAsync(tx []byte) *abcicli.ReqRes { - return app.appConn.CheckTxAsync(tx) -} - -//------------------------------------------------ -// Implements AppConnQuery (subset of abcicli.Client) - -type appConnQuery struct { - appConn abcicli.Client -} - -func NewAppConnQuery(appConn abcicli.Client) *appConnQuery { - return &appConnQuery{ - appConn: appConn, - } -} - -func (app *appConnQuery) Error() error { - return app.appConn.Error() -} - -func (app *appConnQuery) EchoSync(msg string) (*types.ResponseEcho, error) { - return app.appConn.EchoSync(msg) -} - -func (app *appConnQuery) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - return app.appConn.InfoSync(req) -} - -func (app *appConnQuery) QuerySync(reqQuery types.RequestQuery) (*types.ResponseQuery, error) { - return app.appConn.QuerySync(reqQuery) -} diff --git a/proxy/app_conn_test.go b/proxy/app_conn_test.go deleted file mode 100644 index 7eb3831c..00000000 --- a/proxy/app_conn_test.go +++ /dev/null @@ -1,152 +0,0 @@ -package proxy - -import ( - "strings" - "testing" - - abcicli "github.com/tendermint/abci/client" - "github.com/tendermint/abci/example/kvstore" - "github.com/tendermint/abci/server" - "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -//---------------------------------------- - -type AppConnTest interface { - EchoAsync(string) *abcicli.ReqRes - FlushSync() error - InfoSync(types.RequestInfo) (*types.ResponseInfo, error) -} - -type appConnTest struct { - appConn abcicli.Client -} - -func NewAppConnTest(appConn abcicli.Client) AppConnTest { - return &appConnTest{appConn} -} - -func (app *appConnTest) EchoAsync(msg string) *abcicli.ReqRes { - return app.appConn.EchoAsync(msg) -} - -func (app *appConnTest) FlushSync() error { - return app.appConn.FlushSync() -} - -func (app *appConnTest) InfoSync(req types.RequestInfo) (*types.ResponseInfo, error) { - return app.appConn.InfoSync(req) -} - -//---------------------------------------- - -var SOCKET = "socket" - -func TestEcho(t *testing.T) { - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) - - // Start server - s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - defer s.Stop() - - // Start client - cli, err := clientCreator.NewABCIClient() - if err != nil { - t.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } - - proxy := NewAppConnTest(cli) - t.Log("Connected") - - for i := 0; i < 1000; i++ { - proxy.EchoAsync(cmn.Fmt("echo-%v", i)) - } - if err := proxy.FlushSync(); err != nil { - t.Error(err) - } -} - -func BenchmarkEcho(b *testing.B) { - b.StopTimer() // Initialize - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) - - // Start server - s := server.NewSocketServer(sockPath, 
kvstore.NewKVStoreApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - b.Fatalf("Error starting socket server: %v", err.Error()) - } - defer s.Stop() - - // Start client - cli, err := clientCreator.NewABCIClient() - if err != nil { - b.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - b.Fatalf("Error starting ABCI client: %v", err.Error()) - } - - proxy := NewAppConnTest(cli) - b.Log("Connected") - echoString := strings.Repeat(" ", 200) - b.StartTimer() // Start benchmarking tests - - for i := 0; i < b.N; i++ { - proxy.EchoAsync(echoString) - } - if err := proxy.FlushSync(); err != nil { - b.Error(err) - } - - b.StopTimer() - // info := proxy.InfoSync(types.RequestInfo{""}) - //b.Log("N: ", b.N, info) -} - -func TestInfo(t *testing.T) { - sockPath := cmn.Fmt("unix:///tmp/echo_%v.sock", cmn.RandStr(6)) - clientCreator := NewRemoteClientCreator(sockPath, SOCKET, true) - - // Start server - s := server.NewSocketServer(sockPath, kvstore.NewKVStoreApplication()) - s.SetLogger(log.TestingLogger().With("module", "abci-server")) - if err := s.Start(); err != nil { - t.Fatalf("Error starting socket server: %v", err.Error()) - } - defer s.Stop() - - // Start client - cli, err := clientCreator.NewABCIClient() - if err != nil { - t.Fatalf("Error creating ABCI client: %v", err.Error()) - } - cli.SetLogger(log.TestingLogger().With("module", "abci-client")) - if err := cli.Start(); err != nil { - t.Fatalf("Error starting ABCI client: %v", err.Error()) - } - - proxy := NewAppConnTest(cli) - t.Log("Connected") - - resInfo, err := proxy.InfoSync(types.RequestInfo{""}) - if err != nil { - t.Errorf("Unexpected error: %v", err) - } - if string(resInfo.Data) != "{\"size\":0}" { - t.Error("Expected ResponseInfo with one element '{\"size\":0}' but got something else") - } -} diff --git a/proxy/client.go b/proxy/client.go deleted file mode 100644 index 6c987368..00000000 --- a/proxy/client.go +++ /dev/null @@ -1,81 +0,0 @@ -package proxy - -import ( - "sync" - - "github.com/pkg/errors" - - abcicli "github.com/tendermint/abci/client" - "github.com/tendermint/abci/example/kvstore" - "github.com/tendermint/abci/types" -) - -// NewABCIClient returns newly connected client -type ClientCreator interface { - NewABCIClient() (abcicli.Client, error) -} - -//---------------------------------------------------- -// local proxy uses a mutex on an in-proc app - -type localClientCreator struct { - mtx *sync.Mutex - app types.Application -} - -func NewLocalClientCreator(app types.Application) ClientCreator { - return &localClientCreator{ - mtx: new(sync.Mutex), - app: app, - } -} - -func (l *localClientCreator) NewABCIClient() (abcicli.Client, error) { - return abcicli.NewLocalClient(l.mtx, l.app), nil -} - -//--------------------------------------------------------------- -// remote proxy opens new connections to an external app process - -type remoteClientCreator struct { - addr string - transport string - mustConnect bool -} - -func NewRemoteClientCreator(addr, transport string, mustConnect bool) ClientCreator { - return &remoteClientCreator{ - addr: addr, - transport: transport, - mustConnect: mustConnect, - } -} - -func (r *remoteClientCreator) NewABCIClient() (abcicli.Client, error) { - remoteApp, err := abcicli.NewClient(r.addr, r.transport, r.mustConnect) - if err != nil { - return nil, errors.Wrap(err, "Failed to connect to proxy") - } - 
return remoteApp, nil -} - -//----------------------------------------------------------------- -// default - -func DefaultClientCreator(addr, transport, dbDir string) ClientCreator { - switch addr { - case "kvstore": - fallthrough - case "dummy": - return NewLocalClientCreator(kvstore.NewKVStoreApplication()) - case "persistent_kvstore": - fallthrough - case "persistent_dummy": - return NewLocalClientCreator(kvstore.NewPersistentKVStoreApplication(dbDir)) - case "nilapp": - return NewLocalClientCreator(types.NewBaseApplication()) - default: - mustConnect := false // loop retrying - return NewRemoteClientCreator(addr, transport, mustConnect) - } -} diff --git a/proxy/multi_app_conn.go b/proxy/multi_app_conn.go deleted file mode 100644 index 5d89ef19..00000000 --- a/proxy/multi_app_conn.go +++ /dev/null @@ -1,112 +0,0 @@ -package proxy - -import ( - "github.com/pkg/errors" - - cmn "github.com/tendermint/tmlibs/common" -) - -//----------------------------- - -// Tendermint's interface to the application consists of multiple connections -type AppConns interface { - cmn.Service - - Mempool() AppConnMempool - Consensus() AppConnConsensus - Query() AppConnQuery -} - -func NewAppConns(clientCreator ClientCreator, handshaker Handshaker) AppConns { - return NewMultiAppConn(clientCreator, handshaker) -} - -//----------------------------- -// multiAppConn implements AppConns - -type Handshaker interface { - Handshake(AppConns) error -} - -// a multiAppConn is made of a few appConns (mempool, consensus, query) -// and manages their underlying abci clients, including the handshake -// which ensures the app and tendermint are synced. -// TODO: on app restart, clients must reboot together -type multiAppConn struct { - cmn.BaseService - - handshaker Handshaker - - mempoolConn *appConnMempool - consensusConn *appConnConsensus - queryConn *appConnQuery - - clientCreator ClientCreator -} - -// Make all necessary abci connections to the application -func NewMultiAppConn(clientCreator ClientCreator, handshaker Handshaker) *multiAppConn { - multiAppConn := &multiAppConn{ - handshaker: handshaker, - clientCreator: clientCreator, - } - multiAppConn.BaseService = *cmn.NewBaseService(nil, "multiAppConn", multiAppConn) - return multiAppConn -} - -// Returns the mempool connection -func (app *multiAppConn) Mempool() AppConnMempool { - return app.mempoolConn -} - -// Returns the consensus Connection -func (app *multiAppConn) Consensus() AppConnConsensus { - return app.consensusConn -} - -// Returns the query Connection -func (app *multiAppConn) Query() AppConnQuery { - return app.queryConn -} - -func (app *multiAppConn) OnStart() error { - // query connection - querycli, err := app.clientCreator.NewABCIClient() - if err != nil { - return errors.Wrap(err, "Error creating ABCI client (query connection)") - } - querycli.SetLogger(app.Logger.With("module", "abci-client", "connection", "query")) - if err := querycli.Start(); err != nil { - return errors.Wrap(err, "Error starting ABCI client (query connection)") - } - app.queryConn = NewAppConnQuery(querycli) - - // mempool connection - memcli, err := app.clientCreator.NewABCIClient() - if err != nil { - return errors.Wrap(err, "Error creating ABCI client (mempool connection)") - } - memcli.SetLogger(app.Logger.With("module", "abci-client", "connection", "mempool")) - if err := memcli.Start(); err != nil { - return errors.Wrap(err, "Error starting ABCI client (mempool connection)") - } - app.mempoolConn = NewAppConnMempool(memcli) - - // consensus connection - concli, err 
:= app.clientCreator.NewABCIClient()
- if err != nil {
- return errors.Wrap(err, "Error creating ABCI client (consensus connection)")
- }
- concli.SetLogger(app.Logger.With("module", "abci-client", "connection", "consensus"))
- if err := concli.Start(); err != nil {
- return errors.Wrap(err, "Error starting ABCI client (consensus connection)")
- }
- app.consensusConn = NewAppConnConsensus(concli)
-
- // ensure app is synced to the latest state
- if app.handshaker != nil {
- return app.handshaker.Handshake(app)
- }
-
- return nil
-}
diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go
deleted file mode 100644
index 2254c1d1..00000000
--- a/rpc/client/event_test.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package client_test
-
-import (
- "reflect"
- "testing"
- "time"
-
- "github.com/stretchr/testify/require"
-
- abci "github.com/tendermint/abci/types"
- "github.com/tendermint/tendermint/rpc/client"
- "github.com/tendermint/tendermint/types"
- cmn "github.com/tendermint/tmlibs/common"
-)
-
-var waitForEventTimeout = 5 * time.Second
-
-// MakeTxKV returns a text transaction, along with expected key, value pair
-func MakeTxKV() ([]byte, []byte, []byte) {
- k := []byte(cmn.RandStr(8))
- v := []byte(cmn.RandStr(8))
- return k, v, append(k, append([]byte("="), v...)...)
-}
-
-func TestHeaderEvents(t *testing.T) {
- for i, c := range GetClients() {
- i, c := i, c // capture params
- t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
- // start it for this test if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(t, err, "%d: %+v", i, err)
- defer c.Stop()
- }
-
- evtTyp := types.EventNewBlockHeader
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(t, err, "%d: %+v", i, err)
- _, ok := evt.(types.EventDataNewBlockHeader)
- require.True(t, ok, "%d: %#v", i, evt)
- // TODO: more checks...
- })
- }
-}
-
-func TestBlockEvents(t *testing.T) {
- for i, c := range GetClients() {
- i, c := i, c // capture params
- t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
-
- // start it for this test if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(t, err, "%d: %+v", i, err)
- defer c.Stop()
- }
-
- // listen for a new block; ensure height increases by 1
- var firstBlockHeight int64
- for j := 0; j < 3; j++ {
- evtTyp := types.EventNewBlock
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(t, err, "%d: %+v", j, err)
- blockEvent, ok := evt.(types.EventDataNewBlock)
- require.True(t, ok, "%d: %#v", j, evt)
-
- block := blockEvent.Block
- if j == 0 {
- firstBlockHeight = block.Header.Height
- continue
- }
-
- require.Equal(t, block.Header.Height, firstBlockHeight+int64(j))
- }
- })
- }
-}
-
-func TestTxEventsSentWithBroadcastTxAsync(t *testing.T) {
- for i, c := range GetClients() {
- i, c := i, c // capture params
- t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
-
- // start it for this test if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(t, err, "%d: %+v", i, err)
- defer c.Stop()
- }
-
- // make the tx
- _, _, tx := MakeTxKV()
- evtTyp := types.EventTx
-
- // send async
- txres, err := c.BroadcastTxAsync(tx)
- require.Nil(t, err, "%+v", err)
- require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
-
- // and wait for confirmation
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(t, err, "%d: %+v", i, err)
- // and make sure it has the proper info
- txe, ok := evt.(types.EventDataTx)
- require.True(t, ok, "%d: %#v", i, evt)
- // make sure this is the proper tx
- require.EqualValues(t, tx, txe.Tx)
- require.True(t, txe.Result.IsOK())
- })
- }
-}
-
-func TestTxEventsSentWithBroadcastTxSync(t *testing.T) {
- for i, c := range GetClients() {
- i, c := i, c // capture params
- t.Run(reflect.TypeOf(c).String(), func(t *testing.T) {
-
- // start it for this test if it wasn't already running
- if !c.IsRunning() {
- // if so, then we start it, listen, and stop it.
- err := c.Start()
- require.Nil(t, err, "%d: %+v", i, err)
- defer c.Stop()
- }
-
- // make the tx
- _, _, tx := MakeTxKV()
- evtTyp := types.EventTx
-
- // send sync
- txres, err := c.BroadcastTxSync(tx)
- require.Nil(t, err, "%+v", err)
- require.Equal(t, txres.Code, abci.CodeTypeOK) // FIXME
-
- // and wait for confirmation
- evt, err := client.WaitForOneEvent(c, evtTyp, waitForEventTimeout)
- require.Nil(t, err, "%d: %+v", i, err)
- // and make sure it has the proper info
- txe, ok := evt.(types.EventDataTx)
- require.True(t, ok, "%d: %#v", i, evt)
- // make sure this is the proper tx
- require.EqualValues(t, tx, txe.Tx)
- require.True(t, txe.Result.IsOK())
- })
- }
-}
diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go
deleted file mode 100644
index 7e64d116..00000000
--- a/rpc/client/helpers.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package client
-
-import (
- "context"
- "time"
-
- "github.com/pkg/errors"
- "github.com/tendermint/tendermint/types"
-)
-
-// Waiter is informed of the current height, and decides whether to quit early
-type Waiter func(delta int64) (abort error)
-
-// DefaultWaitStrategy is the standard backoff algorithm,
-// but you can plug in another one
-func DefaultWaitStrategy(delta int64) (abort error) {
- if delta > 10 {
- return errors.Errorf("Waiting for %d blocks... aborting", delta)
- } else if delta > 0 {
- // estimate of wait time....
- // wait half a second for the next block (in progress)
- // plus one second for every full block
- delay := time.Duration(delta-1)*time.Second + 500*time.Millisecond
- time.Sleep(delay)
- }
- return nil
-}
-
-// WaitForHeight will poll status at reasonable intervals until
-// the block at the given height is available.
-//
-// If waiter is nil, we use DefaultWaitStrategy, but you can also
-// provide your own implementation
-func WaitForHeight(c StatusClient, h int64, waiter Waiter) error {
- if waiter == nil {
- waiter = DefaultWaitStrategy
- }
- delta := int64(1)
- for delta > 0 {
- s, err := c.Status()
- if err != nil {
- return err
- }
- delta = h - s.SyncInfo.LatestBlockHeight
- // wait for the time, or abort early
- if err := waiter(delta); err != nil {
- return err
- }
- }
- return nil
-}
-
-// WaitForOneEvent subscribes to a websocket event for the given
-// event type and returns upon receiving it one time, or
-// when the timeout duration has expired.
-// -// This handles subscribing and unsubscribing under the hood -func WaitForOneEvent(c EventsClient, evtTyp string, timeout time.Duration) (types.TMEventData, error) { - const subscriber = "helpers" - ctx, cancel := context.WithTimeout(context.Background(), timeout) - defer cancel() - evts := make(chan interface{}, 1) - - // register for the next event of this type - query := types.QueryForEvent(evtTyp) - err := c.Subscribe(ctx, subscriber, query, evts) - if err != nil { - return nil, errors.Wrap(err, "failed to subscribe") - } - - // make sure to unregister after the test is over - defer c.UnsubscribeAll(ctx, subscriber) - - select { - case evt := <-evts: - return evt.(types.TMEventData), nil - case <-ctx.Done(): - return nil, errors.New("timed out waiting for event") - } -} diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go deleted file mode 100644 index 8b843fcd..00000000 --- a/rpc/client/helpers_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package client_test - -import ( - "errors" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" -) - -func TestWaitForHeight(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // test with error result - immediate failure - m := &mock.StatusMock{ - Call: mock.Call{ - Error: errors.New("bye"), - }, - } - r := mock.NewStatusRecorder(m) - - // connection failure always leads to error - err := client.WaitForHeight(r, 8, nil) - require.NotNil(err) - require.Equal("bye", err.Error()) - // we called status once to check - require.Equal(1, len(r.Calls)) - - // now set current block height to 10 - m.Call = mock.Call{ - Response: &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 10}}, - } - - // we will not wait for more than 10 blocks - err = client.WaitForHeight(r, 40, nil) - require.NotNil(err) - require.True(strings.Contains(err.Error(), "aborting")) - // we called status once more to check - require.Equal(2, len(r.Calls)) - - // waiting for the past returns immediately - err = client.WaitForHeight(r, 5, nil) - require.Nil(err) - // we called status once more to check - require.Equal(3, len(r.Calls)) - - // since we can't update in a background goroutine (test --race) - // we use the callback to update the status height - myWaiter := func(delta int64) error { - // update the height for the next call - m.Call.Response = &ctypes.ResultStatus{SyncInfo: ctypes.SyncInfo{LatestBlockHeight: 15}} - return client.DefaultWaitStrategy(delta) - } - - // we wait for a few blocks - err = client.WaitForHeight(r, 12, myWaiter) - require.Nil(err) - // we called status once to check - require.Equal(5, len(r.Calls)) - - pre := r.Calls[3] - require.Nil(pre.Error) - prer, ok := pre.Response.(*ctypes.ResultStatus) - require.True(ok) - assert.Equal(int64(10), prer.SyncInfo.LatestBlockHeight) - - post := r.Calls[4] - require.Nil(post.Error) - postr, ok := post.Response.(*ctypes.ResultStatus) - require.True(ok) - assert.Equal(int64(15), postr.SyncInfo.LatestBlockHeight) -} diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go deleted file mode 100644 index 1414edce..00000000 --- a/rpc/client/httpclient.go +++ /dev/null @@ -1,373 +0,0 @@ -package client - -import ( - "context" - "sync" - - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" 
- ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -/* -HTTP is a Client implementation that communicates -with a tendermint node over json rpc and websockets. - -This is the main implementation you probably want to use in -production code. There are other implementations when calling -the tendermint node in-process (local), or when you want to mock -out the server for test code (mock). -*/ -type HTTP struct { - remote string - rpc *rpcclient.JSONRPCClient - *WSEvents -} - -// New takes a remote endpoint in the form tcp://: -// and the websocket path (which always seems to be "/websocket") -func NewHTTP(remote, wsEndpoint string) *HTTP { - rc := rpcclient.NewJSONRPCClient(remote) - cdc := rc.Codec() - ctypes.RegisterAmino(cdc) - - return &HTTP{ - rpc: rc, - remote: remote, - WSEvents: newWSEvents(cdc, remote, wsEndpoint), - } -} - -var ( - _ Client = (*HTTP)(nil) - _ NetworkClient = (*HTTP)(nil) - _ EventsClient = (*HTTP)(nil) -) - -func (c *HTTP) Status() (*ctypes.ResultStatus, error) { - result := new(ctypes.ResultStatus) - _, err := c.rpc.Call("status", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "Status") - } - return result, nil -} - -func (c *HTTP) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - result := new(ctypes.ResultABCIInfo) - _, err := c.rpc.Call("abci_info", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "ABCIInfo") - } - return result, nil -} - -func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) -} - -func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - result := new(ctypes.ResultABCIQuery) - _, err := c.rpc.Call("abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, - result) - if err != nil { - return nil, errors.Wrap(err, "ABCIQuery") - } - return result, nil -} - -func (c *HTTP) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - result := new(ctypes.ResultBroadcastTxCommit) - _, err := c.rpc.Call("broadcast_tx_commit", map[string]interface{}{"tx": tx}, result) - if err != nil { - return nil, errors.Wrap(err, "broadcast_tx_commit") - } - return result, nil -} - -func (c *HTTP) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.broadcastTX("broadcast_tx_async", tx) -} - -func (c *HTTP) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return c.broadcastTX("broadcast_tx_sync", tx) -} - -func (c *HTTP) broadcastTX(route string, tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - result := new(ctypes.ResultBroadcastTx) - _, err := c.rpc.Call(route, map[string]interface{}{"tx": tx}, result) - if err != nil { - return nil, errors.Wrap(err, route) - } - return result, nil -} - -func (c *HTTP) NetInfo() (*ctypes.ResultNetInfo, error) { - result := new(ctypes.ResultNetInfo) - _, err := c.rpc.Call("net_info", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "NetInfo") - } - return result, nil -} - -func (c *HTTP) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - result := new(ctypes.ResultDumpConsensusState) - _, err := c.rpc.Call("dump_consensus_state", map[string]interface{}{}, result) - 
if err != nil { - return nil, errors.Wrap(err, "DumpConsensusState") - } - return result, nil -} - -func (c *HTTP) ConsensusState() (*ctypes.ResultConsensusState, error) { - result := new(ctypes.ResultConsensusState) - _, err := c.rpc.Call("consensus_state", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "ConsensusState") - } - return result, nil -} - -func (c *HTTP) Health() (*ctypes.ResultHealth, error) { - result := new(ctypes.ResultHealth) - _, err := c.rpc.Call("health", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "Health") - } - return result, nil -} - -func (c *HTTP) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - result := new(ctypes.ResultBlockchainInfo) - _, err := c.rpc.Call("blockchain", - map[string]interface{}{"minHeight": minHeight, "maxHeight": maxHeight}, - result) - if err != nil { - return nil, errors.Wrap(err, "BlockchainInfo") - } - return result, nil -} - -func (c *HTTP) Genesis() (*ctypes.ResultGenesis, error) { - result := new(ctypes.ResultGenesis) - _, err := c.rpc.Call("genesis", map[string]interface{}{}, result) - if err != nil { - return nil, errors.Wrap(err, "Genesis") - } - return result, nil -} - -func (c *HTTP) Block(height *int64) (*ctypes.ResultBlock, error) { - result := new(ctypes.ResultBlock) - _, err := c.rpc.Call("block", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Block") - } - return result, nil -} - -func (c *HTTP) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { - result := new(ctypes.ResultBlockResults) - _, err := c.rpc.Call("block_results", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Block Result") - } - return result, nil -} - -func (c *HTTP) Commit(height *int64) (*ctypes.ResultCommit, error) { - result := new(ctypes.ResultCommit) - _, err := c.rpc.Call("commit", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Commit") - } - return result, nil -} - -func (c *HTTP) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { - result := new(ctypes.ResultTx) - params := map[string]interface{}{ - "hash": hash, - "prove": prove, - } - _, err := c.rpc.Call("tx", params, result) - if err != nil { - return nil, errors.Wrap(err, "Tx") - } - return result, nil -} - -func (c *HTTP) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { - result := new(ctypes.ResultTxSearch) - params := map[string]interface{}{ - "query": query, - "prove": prove, - "page": page, - "per_page": perPage, - } - _, err := c.rpc.Call("tx_search", params, result) - if err != nil { - return nil, errors.Wrap(err, "TxSearch") - } - return result, nil -} - -func (c *HTTP) Validators(height *int64) (*ctypes.ResultValidators, error) { - result := new(ctypes.ResultValidators) - _, err := c.rpc.Call("validators", map[string]interface{}{"height": height}, result) - if err != nil { - return nil, errors.Wrap(err, "Validators") - } - return result, nil -} - -/** websocket event stuff here... 
**/ - -type WSEvents struct { - cmn.BaseService - cdc *amino.Codec - remote string - endpoint string - ws *rpcclient.WSClient - - mtx sync.RWMutex - subscriptions map[string]chan<- interface{} -} - -func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents { - wsEvents := &WSEvents{ - cdc: cdc, - endpoint: endpoint, - remote: remote, - subscriptions: make(map[string]chan<- interface{}), - } - - wsEvents.BaseService = *cmn.NewBaseService(nil, "WSEvents", wsEvents) - return wsEvents -} - -func (w *WSEvents) OnStart() error { - w.ws = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { - w.redoSubscriptions() - })) - w.ws.SetCodec(w.cdc) - - err := w.ws.Start() - if err != nil { - return err - } - - go w.eventListener() - return nil -} - -// Stop wraps the BaseService/eventSwitch actions as Start does -func (w *WSEvents) OnStop() { - err := w.ws.Stop() - if err != nil { - w.Logger.Error("failed to stop WSClient", "err", err) - } -} - -func (w *WSEvents) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { - q := query.String() - - err := w.ws.Subscribe(ctx, q) - if err != nil { - return err - } - - w.mtx.Lock() - // subscriber param is ignored because Tendermint will override it with - // remote IP anyway. - w.subscriptions[q] = out - w.mtx.Unlock() - - return nil -} - -func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - q := query.String() - - err := w.ws.Unsubscribe(ctx, q) - if err != nil { - return err - } - - w.mtx.Lock() - ch, ok := w.subscriptions[q] - if ok { - close(ch) - delete(w.subscriptions, q) - } - w.mtx.Unlock() - - return nil -} - -func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { - err := w.ws.UnsubscribeAll(ctx) - if err != nil { - return err - } - - w.mtx.Lock() - for _, ch := range w.subscriptions { - close(ch) - } - w.subscriptions = make(map[string]chan<- interface{}) - w.mtx.Unlock() - - return nil -} - -// After being reconnected, it is necessary to redo subscription to server -// otherwise no data will be automatically received. -func (w *WSEvents) redoSubscriptions() { - for q := range w.subscriptions { - // NOTE: no timeout for resubscribing - // FIXME: better logging/handling of errors?? - w.ws.Subscribe(context.Background(), q) - } -} - -// eventListener is an infinite loop pulling all websocket events -// and pushing them to the EventSwitch. -// -// the goroutine only stops by closing quit -func (w *WSEvents) eventListener() { - for { - select { - case resp, ok := <-w.ws.ResponsesCh: - if !ok { - return - } - if resp.Error != nil { - w.Logger.Error("WS error", "err", resp.Error.Error()) - continue - } - result := new(ctypes.ResultEvent) - err := w.cdc.UnmarshalJSON(resp.Result, result) - if err != nil { - w.Logger.Error("failed to unmarshal response", "err", err) - continue - } - // NOTE: writing also happens inside mutex so we can't close a channel in - // Unsubscribe/UnsubscribeAll. - w.mtx.RLock() - if ch, ok := w.subscriptions[result.Query]; ok { - ch <- result.Data - } - w.mtx.RUnlock() - case <-w.Quit(): - return - } - } -} diff --git a/rpc/client/interface.go b/rpc/client/interface.go deleted file mode 100644 index afe2d8fa..00000000 --- a/rpc/client/interface.go +++ /dev/null @@ -1,95 +0,0 @@ -package client - -/* -The client package provides a general purpose interface (Client) for connecting -to a tendermint node, as well as higher-level functionality. 
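The higher-level functionality mentioned above includes the `WaitForHeight` and `WaitForOneEvent` helpers that appear earlier in this diff. A minimal sketch of waiting for a single event over the websocket transport; the default RPC address and the `types.EventNewBlock` constant are assumptions, not taken from this diff:

```go
package main

import (
	"fmt"
	"time"

	"github.com/tendermint/tendermint/rpc/client"
	"github.com/tendermint/tendermint/types"
)

func main() {
	// Assumed default RPC address; adjust for your node.
	c := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
	if err := c.Start(); err != nil { // starts the embedded WSEvents service
		panic(err)
	}
	defer c.Stop() // nolint: errcheck

	// WaitForOneEvent subscribes, waits for one event (or the timeout),
	// and unsubscribes under the hood.
	evt, err := client.WaitForOneEvent(c, types.EventNewBlock, 10*time.Second)
	if err != nil {
		panic(err)
	}
	fmt.Printf("got event: %#v\n", evt)
}
```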
-
-The main implementation for production code is client.HTTP, which
-connects via http to the jsonrpc interface of the tendermint node.
-
-For connecting to a node running in the same process (eg. when
-compiling the abci app in the same process), you can use the client.Local
-implementation.
-
-For mocking out server responses during testing to see behavior for
-arbitrary return values, use the mock package.
-
-In addition to the Client interface (which should be used externally
-for maximum flexibility and testability) and its two implementations,
-this package also provides helper functions that work on any Client
-implementation.
-*/
-
-import (
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
-	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-// ABCIClient groups together the functionality that principally
-// affects the ABCI app. In many cases this will be all we want,
-// so we can accept an interface which is easier to mock.
-type ABCIClient interface {
-	// Reading from abci app
-	ABCIInfo() (*ctypes.ResultABCIInfo, error)
-	ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error)
-	ABCIQueryWithOptions(path string, data cmn.HexBytes,
-		opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
-
-	// Writing to abci app
-	BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)
-	BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
-	BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error)
-}
-
-// SignClient groups together the interfaces needed to get valid
-// signatures and prove anything about the chain.
-type SignClient interface {
-	Block(height *int64) (*ctypes.ResultBlock, error)
-	BlockResults(height *int64) (*ctypes.ResultBlockResults, error)
-	Commit(height *int64) (*ctypes.ResultCommit, error)
-	Validators(height *int64) (*ctypes.ResultValidators, error)
-	Tx(hash []byte, prove bool) (*ctypes.ResultTx, error)
-	TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error)
-}
-
-// HistoryClient shows us data from genesis to now in large chunks.
-type HistoryClient interface {
-	Genesis() (*ctypes.ResultGenesis, error)
-	BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error)
-}
-
-type StatusClient interface {
-	// General chain info
-	Status() (*ctypes.ResultStatus, error)
-}
-
-// Client wraps the most important rpc calls a client would make. If you
-// want to listen for events, check whether it also
-// implements events.EventSwitch.
-type Client interface {
-	cmn.Service
-	ABCIClient
-	SignClient
-	HistoryClient
-	StatusClient
-	EventsClient
-}
-
-// NetworkClient provides general info about the network state. It is
-// usually not needed.
-//
-// Not included in the Client interface, but generally implemented
-// by concrete implementations.
-type NetworkClient interface {
-	NetInfo() (*ctypes.ResultNetInfo, error)
-	DumpConsensusState() (*ctypes.ResultDumpConsensusState, error)
-	ConsensusState() (*ctypes.ResultConsensusState, error)
-	Health() (*ctypes.ResultHealth, error)
-}
-
-// EventsClient is reactive: you can subscribe to any message, given the proper
-// string.
see tendermint/types/events.go -type EventsClient interface { - types.EventBusSubscriber -} diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go deleted file mode 100644 index d89ec3b2..00000000 --- a/rpc/client/localclient.go +++ /dev/null @@ -1,145 +0,0 @@ -package client - -import ( - "context" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/rpc/core" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -/* -Local is a Client implementation that directly executes the rpc -functions on a given node, without going through HTTP or GRPC. - -This implementation is useful for: - -* Running tests against a node in-process without the overhead -of going through an http server -* Communication between an ABCI app and Tendermint core when they -are compiled in process. - -For real clients, you probably want to use client.HTTP. For more -powerful control during testing, you probably want the "client/mock" package. -*/ -type Local struct { - *types.EventBus -} - -// NewLocal configures a client that calls the Node directly. -// -// Note that given how rpc/core works with package singletons, that -// you can only have one node per process. So make sure test cases -// don't run in parallel, or try to simulate an entire network in -// one process... -func NewLocal(node *nm.Node) *Local { - node.ConfigureRPC() - return &Local{ - EventBus: node.EventBus(), - } -} - -var ( - _ Client = (*Local)(nil) - _ NetworkClient = Local{} - _ EventsClient = (*Local)(nil) -) - -func (Local) Status() (*ctypes.ResultStatus, error) { - return core.Status() -} - -func (Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo() -} - -func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) -} - -func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) -} - -func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(tx) -} - -func (Local) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(tx) -} - -func (Local) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(tx) -} - -func (Local) NetInfo() (*ctypes.ResultNetInfo, error) { - return core.NetInfo() -} - -func (Local) DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - return core.DumpConsensusState() -} - -func (Local) ConsensusState() (*ctypes.ResultConsensusState, error) { - return core.ConsensusState() -} - -func (Local) Health() (*ctypes.ResultHealth, error) { - return core.Health() -} - -func (Local) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(seeds) -} - -func (Local) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(peers, persistent) -} - -func (Local) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(minHeight, maxHeight) -} - -func (Local) Genesis() (*ctypes.ResultGenesis, error) { - return core.Genesis() -} - -func (Local) Block(height *int64) (*ctypes.ResultBlock, error) { - return 
core.Block(height) -} - -func (Local) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { - return core.BlockResults(height) -} - -func (Local) Commit(height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(height) -} - -func (Local) Validators(height *int64) (*ctypes.ResultValidators, error) { - return core.Validators(height) -} - -func (Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { - return core.Tx(hash, prove) -} - -func (Local) TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { - return core.TxSearch(query, prove, page, perPage) -} - -func (c *Local) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { - return c.EventBus.Subscribe(ctx, subscriber, query, out) -} - -func (c *Local) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - return c.EventBus.Unsubscribe(ctx, subscriber, query) -} - -func (c *Local) UnsubscribeAll(ctx context.Context, subscriber string) error { - return c.EventBus.UnsubscribeAll(ctx, subscriber) -} diff --git a/rpc/client/main_test.go b/rpc/client/main_test.go deleted file mode 100644 index 82b5a019..00000000 --- a/rpc/client/main_test.go +++ /dev/null @@ -1,24 +0,0 @@ -package client_test - -import ( - "os" - "testing" - - "github.com/tendermint/abci/example/kvstore" - nm "github.com/tendermint/tendermint/node" - rpctest "github.com/tendermint/tendermint/rpc/test" -) - -var node *nm.Node - -func TestMain(m *testing.M) { - // start a tendermint node (and kvstore) in the background to test against - app := kvstore.NewKVStoreApplication() - node = rpctest.StartTendermint(app) - code := m.Run() - - // and shut down proper at the end - node.Stop() - node.Wait() - os.Exit(code) -} diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go deleted file mode 100644 index 7f4c45df..00000000 --- a/rpc/client/mock/abci.go +++ /dev/null @@ -1,202 +0,0 @@ -package mock - -import ( - abci "github.com/tendermint/abci/types" - "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" - "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tmlibs/common" -) - -// ABCIApp will send all abci related request to the named app, -// so you can test app behavior from a client without needing -// an entire tendermint node -type ABCIApp struct { - App abci.Application -} - -var ( - _ client.ABCIClient = ABCIApp{} - _ client.ABCIClient = ABCIMock{} - _ client.ABCIClient = (*ABCIRecorder)(nil) -) - -func (a ABCIApp) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return &ctypes.ResultABCIInfo{a.App.Info(abci.RequestInfo{version.Version})}, nil -} - -func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { - return a.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{data, path, opts.Height, opts.Trusted}) - return &ctypes.ResultABCIQuery{q}, nil -} - -func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res := ctypes.ResultBroadcastTxCommit{} - res.CheckTx = a.App.CheckTx(tx) - if res.CheckTx.IsErr() { - return &res, nil - } - res.DeliverTx = a.App.DeliverTx(tx) - return &res, nil -} - -func (a ABCIApp) BroadcastTxAsync(tx types.Tx) 
(*ctypes.ResultBroadcastTx, error) {
-	c := a.App.CheckTx(tx)
-	// and this gets written in a background thread...
-	if !c.IsErr() {
-		go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
-	}
-	return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
-}
-
-func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	c := a.App.CheckTx(tx)
-	// and this gets written in a background thread...
-	if !c.IsErr() {
-		go func() { a.App.DeliverTx(tx) }() // nolint: errcheck
-	}
-	return &ctypes.ResultBroadcastTx{c.Code, c.Data, c.Log, tx.Hash()}, nil
-}
-
-// ABCIMock answers all abci related requests with preconfigured responses,
-// so you can test client behavior without needing
-// an entire tendermint node.
-type ABCIMock struct {
-	Info            Call
-	Query           Call
-	BroadcastCommit Call
-	Broadcast       Call
-}
-
-func (m ABCIMock) ABCIInfo() (*ctypes.ResultABCIInfo, error) {
-	res, err := m.Info.GetResponse(nil)
-	if err != nil {
-		return nil, err
-	}
-	return &ctypes.ResultABCIInfo{res.(abci.ResponseInfo)}, nil
-}
-
-func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) {
-	return m.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions)
-}
-
-func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
-	res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted})
-	if err != nil {
-		return nil, err
-	}
-	resQuery := res.(abci.ResponseQuery)
-	return &ctypes.ResultABCIQuery{resQuery}, nil
-}
-
-func (m ABCIMock) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
-	res, err := m.BroadcastCommit.GetResponse(tx)
-	if err != nil {
-		return nil, err
-	}
-	return res.(*ctypes.ResultBroadcastTxCommit), nil
-}
-
-func (m ABCIMock) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	res, err := m.Broadcast.GetResponse(tx)
-	if err != nil {
-		return nil, err
-	}
-	return res.(*ctypes.ResultBroadcastTx), nil
-}
-
-func (m ABCIMock) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) {
-	res, err := m.Broadcast.GetResponse(tx)
-	if err != nil {
-		return nil, err
-	}
-	return res.(*ctypes.ResultBroadcastTx), nil
-}
-
-// ABCIRecorder can wrap another type (ABCIApp, ABCIMock, or Client)
-// and record all ABCI related calls.
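Before the recorder type below, a hypothetical test sketch of how ABCIMock's Call matching behaves in practice. The names are illustrative; per Call.GetResponse (defined later in this diff), Response is returned only when Args match, otherwise Error:

```go
package mock_test

import (
	"testing"

	"github.com/pkg/errors"
	"github.com/stretchr/testify/require"

	"github.com/tendermint/tendermint/rpc/client/mock"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	"github.com/tendermint/tendermint/types"
)

func TestMockArgsMatching(t *testing.T) {
	goodTx, otherTx := types.Tx{0x01}, types.Tx{0x02}
	m := mock.ABCIMock{
		BroadcastCommit: mock.Call{
			Args:     goodTx, // only this tx gets the canned Response
			Response: &ctypes.ResultBroadcastTxCommit{},
			Error:    errors.New("unexpected tx"),
		},
	}
	_, err := m.BroadcastTxCommit(goodTx) // Args match -> canned Response
	require.NoError(t, err)
	_, err = m.BroadcastTxCommit(otherTx) // no match -> configured Error
	require.Error(t, err)
}
```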
-type ABCIRecorder struct { - Client client.ABCIClient - Calls []Call -} - -func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { - return &ABCIRecorder{ - Client: client, - Calls: []Call{}, - } -} - -type QueryArgs struct { - Path string - Data cmn.HexBytes - Height int64 - Trusted bool -} - -func (r *ABCIRecorder) addCall(call Call) { - r.Calls = append(r.Calls, call) -} - -func (r *ABCIRecorder) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - res, err := r.Client.ABCIInfo() - r.addCall(Call{ - Name: "abci_info", - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { - return r.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := r.Client.ABCIQueryWithOptions(path, data, opts) - r.addCall(Call{ - Name: "abci_query", - Args: QueryArgs{path, data, opts.Height, opts.Trusted}, - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - res, err := r.Client.BroadcastTxCommit(tx) - r.addCall(Call{ - Name: "broadcast_tx_commit", - Args: tx, - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := r.Client.BroadcastTxAsync(tx) - r.addCall(Call{ - Name: "broadcast_tx_async", - Args: tx, - Response: res, - Error: err, - }) - return res, err -} - -func (r *ABCIRecorder) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - res, err := r.Client.BroadcastTxSync(tx) - r.addCall(Call{ - Name: "broadcast_tx_sync", - Args: tx, - Response: res, - Error: err, - }) - return res, err -} diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go deleted file mode 100644 index 564f0129..00000000 --- a/rpc/client/mock/abci_test.go +++ /dev/null @@ -1,181 +0,0 @@ -package mock_test - -import ( - "fmt" - "testing" - - "github.com/pkg/errors" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/abci/example/kvstore" - abci "github.com/tendermint/abci/types" - "github.com/tendermint/tendermint/rpc/client" - "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestABCIMock(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - key, value := []byte("foo"), []byte("bar") - height := int64(10) - goodTx := types.Tx{0x01, 0xff} - badTx := types.Tx{0x12, 0x21} - - m := mock.ABCIMock{ - Info: mock.Call{Error: errors.New("foobar")}, - Query: mock.Call{Response: abci.ResponseQuery{ - Key: key, - Value: value, - Height: height, - }}, - // Broadcast commit depends on call - BroadcastCommit: mock.Call{ - Args: goodTx, - Response: &ctypes.ResultBroadcastTxCommit{ - CheckTx: abci.ResponseCheckTx{Data: cmn.HexBytes("stand")}, - DeliverTx: abci.ResponseDeliverTx{Data: cmn.HexBytes("deliver")}, - }, - Error: errors.New("bad tx"), - }, - Broadcast: mock.Call{Error: errors.New("must commit")}, - } - - // now, let's try to make some calls - _, err := m.ABCIInfo() - require.NotNil(err) - assert.Equal("foobar", err.Error()) - - // query always returns the response - _query, err := 
m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) - query := _query.Response - require.Nil(err) - require.NotNil(query) - assert.EqualValues(key, query.Key) - assert.EqualValues(value, query.Value) - assert.Equal(height, query.Height) - - // non-commit calls always return errors - _, err = m.BroadcastTxSync(goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) - _, err = m.BroadcastTxAsync(goodTx) - require.NotNil(err) - assert.Equal("must commit", err.Error()) - - // commit depends on the input - _, err = m.BroadcastTxCommit(badTx) - require.NotNil(err) - assert.Equal("bad tx", err.Error()) - bres, err := m.BroadcastTxCommit(goodTx) - require.Nil(err, "%+v", err) - assert.EqualValues(0, bres.CheckTx.Code) - assert.EqualValues("stand", bres.CheckTx.Data) - assert.EqualValues("deliver", bres.DeliverTx.Data) -} - -func TestABCIRecorder(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - // This mock returns errors on everything but Query - m := mock.ABCIMock{ - Info: mock.Call{Response: abci.ResponseInfo{ - Data: "data", - Version: "v0.9.9", - }}, - Query: mock.Call{Error: errors.New("query")}, - Broadcast: mock.Call{Error: errors.New("broadcast")}, - BroadcastCommit: mock.Call{Error: errors.New("broadcast_commit")}, - } - r := mock.NewABCIRecorder(m) - - require.Equal(0, len(r.Calls)) - - _, err := r.ABCIInfo() - assert.Nil(err, "expected no err on info") - - _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Trusted: false}) - assert.NotNil(err, "expected error on query") - require.Equal(2, len(r.Calls)) - - info := r.Calls[0] - assert.Equal("abci_info", info.Name) - assert.Nil(info.Error) - assert.Nil(info.Args) - require.NotNil(info.Response) - ir, ok := info.Response.(*ctypes.ResultABCIInfo) - require.True(ok) - assert.Equal("data", ir.Response.Data) - assert.Equal("v0.9.9", ir.Response.Version) - - query := r.Calls[1] - assert.Equal("abci_query", query.Name) - assert.Nil(query.Response) - require.NotNil(query.Error) - assert.Equal("query", query.Error.Error()) - require.NotNil(query.Args) - qa, ok := query.Args.(mock.QueryArgs) - require.True(ok) - assert.Equal("path", qa.Path) - assert.EqualValues("data", qa.Data) - assert.False(qa.Trusted) - - // now add some broadcasts (should all err) - txs := []types.Tx{{1}, {2}, {3}} - _, err = r.BroadcastTxCommit(txs[0]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxSync(txs[1]) - assert.NotNil(err, "expected err on broadcast") - _, err = r.BroadcastTxAsync(txs[2]) - assert.NotNil(err, "expected err on broadcast") - - require.Equal(5, len(r.Calls)) - - bc := r.Calls[2] - assert.Equal("broadcast_tx_commit", bc.Name) - assert.Nil(bc.Response) - require.NotNil(bc.Error) - assert.EqualValues(bc.Args, txs[0]) - - bs := r.Calls[3] - assert.Equal("broadcast_tx_sync", bs.Name) - assert.Nil(bs.Response) - require.NotNil(bs.Error) - assert.EqualValues(bs.Args, txs[1]) - - ba := r.Calls[4] - assert.Equal("broadcast_tx_async", ba.Name) - assert.Nil(ba.Response) - require.NotNil(ba.Error) - assert.EqualValues(ba.Args, txs[2]) -} - -func TestABCIApp(t *testing.T) { - assert, require := assert.New(t), require.New(t) - app := kvstore.NewKVStoreApplication() - m := mock.ABCIApp{app} - - // get some info - info, err := m.ABCIInfo() - require.Nil(err) - assert.Equal(`{"size":0}`, info.Response.GetData()) - - // add a key - key, value := "foo", "bar" - tx := fmt.Sprintf("%s=%s", key, value) - res, err := m.BroadcastTxCommit(types.Tx(tx)) - 
require.Nil(err)
-	assert.True(res.CheckTx.IsOK())
-	require.NotNil(res.DeliverTx)
-	assert.True(res.DeliverTx.IsOK())
-
-	// check the key
-	_qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Trusted: true})
-	qres := _qres.Response
-	require.Nil(err)
-	assert.EqualValues(value, qres.Value)
-}
diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go
deleted file mode 100644
index 6af9abb2..00000000
--- a/rpc/client/mock/client.go
+++ /dev/null
@@ -1,135 +0,0 @@
-/*
-Package mock returns a Client implementation that
-accepts various (mock) implementations of the various methods.
-
-This implementation is useful in tests, when you don't
-need a real server, but want a high level of control over
-the server response you want to mock (eg. error handling),
-or if you just want to record the calls to verify in your tests.
-
-For real clients, you probably want the "http" package. If you
-want to directly call a tendermint node in process, you can use the
-"local" package.
-*/
-package mock
-
-import (
-	"reflect"
-
-	"github.com/tendermint/tendermint/rpc/client"
-	"github.com/tendermint/tendermint/rpc/core"
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
-	"github.com/tendermint/tendermint/types"
-	cmn "github.com/tendermint/tmlibs/common"
-)
-
-// Client wraps arbitrary implementations of the various interfaces.
-//
-// We provide a few choices to mock out each one in this package.
-// Nothing hidden here, so no New function, just construct it from
-// some parts, and swap them out during the tests.
-type Client struct {
-	client.ABCIClient
-	client.SignClient
-	client.HistoryClient
-	client.StatusClient
-	client.EventsClient
-	cmn.Service
-}
-
-var _ client.Client = Client{}
-
-// Call is used by recorders to save a call and response.
-// It can also be used to configure mock responses.
-//
-type Call struct {
-	Name     string
-	Args     interface{}
-	Response interface{}
-	Error    error
-}
-
-// GetResponse will generate the appropriate response for us, when
-// using the Call struct to configure a Mock handler.
-//
-// When configuring a response, if only one of Response or Error is
-// set then that will always be returned. If both are set, then
-// we return Response if the Args match the set args, Error otherwise.
-func (c Call) GetResponse(args interface{}) (interface{}, error) {
-	// handle the case with no response
-	if c.Response == nil {
-		if c.Error == nil {
-			panic("Misconfigured call, you must set either Response or Error")
-		}
-		return nil, c.Error
-	}
-	// response without error
-	if c.Error == nil {
-		return c.Response, nil
-	}
-	// have both, we must check args....
- if reflect.DeepEqual(args, c.Args) { - return c.Response, nil - } - return nil, c.Error -} - -func (c Client) Status() (*ctypes.ResultStatus, error) { - return core.Status() -} - -func (c Client) ABCIInfo() (*ctypes.ResultABCIInfo, error) { - return core.ABCIInfo() -} - -func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, client.DefaultABCIQueryOptions) -} - -func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) -} - -func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - return core.BroadcastTxCommit(tx) -} - -func (c Client) BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxAsync(tx) -} - -func (c Client) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - return core.BroadcastTxSync(tx) -} - -func (c Client) NetInfo() (*ctypes.ResultNetInfo, error) { - return core.NetInfo() -} - -func (c Client) DialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) { - return core.UnsafeDialSeeds(seeds) -} - -func (c Client) DialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) { - return core.UnsafeDialPeers(peers, persistent) -} - -func (c Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - return core.BlockchainInfo(minHeight, maxHeight) -} - -func (c Client) Genesis() (*ctypes.ResultGenesis, error) { - return core.Genesis() -} - -func (c Client) Block(height *int64) (*ctypes.ResultBlock, error) { - return core.Block(height) -} - -func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { - return core.Commit(height) -} - -func (c Client) Validators(height *int64) (*ctypes.ResultValidators, error) { - return core.Validators(height) -} diff --git a/rpc/client/mock/status.go b/rpc/client/mock/status.go deleted file mode 100644 index 58b29d57..00000000 --- a/rpc/client/mock/status.go +++ /dev/null @@ -1,52 +0,0 @@ -package mock - -import ( - "github.com/tendermint/tendermint/rpc/client" - ctypes "github.com/tendermint/tendermint/rpc/core/types" -) - -// StatusMock returns the result specified by the Call -type StatusMock struct { - Call -} - -var ( - _ client.StatusClient = (*StatusMock)(nil) - _ client.StatusClient = (*StatusRecorder)(nil) -) - -func (m *StatusMock) Status() (*ctypes.ResultStatus, error) { - res, err := m.GetResponse(nil) - if err != nil { - return nil, err - } - return res.(*ctypes.ResultStatus), nil -} - -// StatusRecorder can wrap another type (StatusMock, full client) -// and record the status calls -type StatusRecorder struct { - Client client.StatusClient - Calls []Call -} - -func NewStatusRecorder(client client.StatusClient) *StatusRecorder { - return &StatusRecorder{ - Client: client, - Calls: []Call{}, - } -} - -func (r *StatusRecorder) addCall(call Call) { - r.Calls = append(r.Calls, call) -} - -func (r *StatusRecorder) Status() (*ctypes.ResultStatus, error) { - res, err := r.Client.Status() - r.addCall(Call{ - Name: "status", - Response: res, - Error: err, - }) - return res, err -} diff --git a/rpc/client/mock/status_test.go b/rpc/client/mock/status_test.go deleted file mode 100644 index dafd3508..00000000 --- a/rpc/client/mock/status_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package mock_test - -import ( - "testing" - - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/rpc/client/mock" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestStatus(t *testing.T) { - assert, require := assert.New(t), require.New(t) - - m := &mock.StatusMock{ - Call: mock.Call{ - Response: &ctypes.ResultStatus{ - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: cmn.HexBytes("block"), - LatestAppHash: cmn.HexBytes("app"), - LatestBlockHeight: 10, - }, - }}, - } - - r := mock.NewStatusRecorder(m) - require.Equal(0, len(r.Calls)) - - // make sure response works proper - status, err := r.Status() - require.Nil(err, "%+v", err) - assert.EqualValues("block", status.SyncInfo.LatestBlockHash) - assert.EqualValues(10, status.SyncInfo.LatestBlockHeight) - - // make sure recorder works properly - require.Equal(1, len(r.Calls)) - rs := r.Calls[0] - assert.Equal("status", rs.Name) - assert.Nil(rs.Args) - assert.Nil(rs.Error) - require.NotNil(rs.Response) - st, ok := rs.Response.(*ctypes.ResultStatus) - require.True(ok) - assert.EqualValues("block", st.SyncInfo.LatestBlockHash) - assert.EqualValues(10, st.SyncInfo.LatestBlockHeight) -} diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go deleted file mode 100644 index 13109f78..00000000 --- a/rpc/client/rpc_test.go +++ /dev/null @@ -1,366 +0,0 @@ -package client_test - -import ( - "fmt" - "strings" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/abci/types" - - "github.com/tendermint/tendermint/rpc/client" - rpctest "github.com/tendermint/tendermint/rpc/test" - "github.com/tendermint/tendermint/types" -) - -func getHTTPClient() *client.HTTP { - rpcAddr := rpctest.GetConfig().RPC.ListenAddress - return client.NewHTTP(rpcAddr, "/websocket") -} - -func getLocalClient() *client.Local { - return client.NewLocal(node) -} - -// GetClients returns a slice of clients for table-driven tests -func GetClients() []client.Client { - return []client.Client{ - getHTTPClient(), - getLocalClient(), - } -} - -// Make sure status is correct (we connect properly) -func TestStatus(t *testing.T) { - for i, c := range GetClients() { - moniker := rpctest.GetConfig().Moniker - status, err := c.Status() - require.Nil(t, err, "%d: %+v", i, err) - assert.Equal(t, moniker, status.NodeInfo.Moniker) - } -} - -// Make sure info is correct (we connect properly) -func TestInfo(t *testing.T) { - for i, c := range GetClients() { - // status, err := c.Status() - // require.Nil(t, err, "%+v", err) - info, err := c.ABCIInfo() - require.Nil(t, err, "%d: %+v", i, err) - // TODO: this is not correct - fix merkleeyes! 
- // assert.EqualValues(t, status.SyncInfo.LatestBlockHeight, info.Response.LastBlockHeight) - assert.True(t, strings.Contains(info.Response.Data, "size")) - } -} - -func TestNetInfo(t *testing.T) { - for i, c := range GetClients() { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - netinfo, err := nc.NetInfo() - require.Nil(t, err, "%d: %+v", i, err) - assert.True(t, netinfo.Listening) - assert.Equal(t, 0, len(netinfo.Peers)) - } -} - -func TestDumpConsensusState(t *testing.T) { - for i, c := range GetClients() { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.DumpConsensusState() - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - assert.Empty(t, cons.Peers) - } -} - -func TestConsensusState(t *testing.T) { - for i, c := range GetClients() { - // FIXME: fix server so it doesn't panic on invalid input - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - cons, err := nc.ConsensusState() - require.Nil(t, err, "%d: %+v", i, err) - assert.NotEmpty(t, cons.RoundState) - } -} - -func TestHealth(t *testing.T) { - for i, c := range GetClients() { - nc, ok := c.(client.NetworkClient) - require.True(t, ok, "%d", i) - _, err := nc.Health() - require.Nil(t, err, "%d: %+v", i, err) - } -} - -func TestGenesisAndValidators(t *testing.T) { - for i, c := range GetClients() { - - // make sure this is the right genesis file - gen, err := c.Genesis() - require.Nil(t, err, "%d: %+v", i, err) - // get the genesis validator - require.Equal(t, 1, len(gen.Genesis.Validators)) - gval := gen.Genesis.Validators[0] - - // get the current validators - vals, err := c.Validators(nil) - require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - val := vals.Validators[0] - - // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) - assert.Equal(t, gval.PubKey, val.PubKey) - } -} - -func TestABCIQuery(t *testing.T) { - for i, c := range GetClients() { - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(t, err, "%d: %+v", i, err) - apph := bres.Height + 1 // this is where the tx will be applied to the state - - // wait before querying - client.WaitForHeight(c, apph, nil) - res, err := c.ABCIQuery("/key", k) - qres := res.Response - if assert.Nil(t, err) && assert.True(t, qres.IsOK()) { - assert.EqualValues(t, v, qres.Value) - } - } -} - -// Make some app checks -func TestAppCalls(t *testing.T) { - assert, require := assert.New(t), require.New(t) - for i, c := range GetClients() { - - // get an offset of height to avoid racing and guessing - s, err := c.Status() - require.Nil(err, "%d: %+v", i, err) - // sh is start height or status height - sh := s.SyncInfo.LatestBlockHeight - - // look for the future - h := sh + 2 - _, err = c.Block(&h) - assert.NotNil(err) // no block yet - - // write something - k, v, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(err, "%d: %+v", i, err) - require.True(bres.DeliverTx.IsOK()) - txh := bres.Height - apph := txh + 1 // this is where the tx will be applied to the state - - // wait before querying - if err := client.WaitForHeight(c, apph, nil); err != nil { - t.Error(err) - } - _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) - qres := _qres.Response - if assert.Nil(err) && assert.True(qres.IsOK()) { - // assert.Equal(k, data.GetKey()) // only returned for 
proofs - assert.EqualValues(v, qres.Value) - } - - // make sure we can lookup the tx with proof - ptx, err := c.Tx(bres.Hash, true) - require.Nil(err, "%d: %+v", i, err) - assert.EqualValues(txh, ptx.Height) - assert.EqualValues(tx, ptx.Tx) - - // and we can even check the block is added - block, err := c.Block(&apph) - require.Nil(err, "%d: %+v", i, err) - appHash := block.BlockMeta.Header.AppHash - assert.True(len(appHash) > 0) - assert.EqualValues(apph, block.BlockMeta.Header.Height) - - // now check the results - blockResults, err := c.BlockResults(&txh) - require.Nil(err, "%d: %+v", i, err) - assert.Equal(txh, blockResults.Height) - if assert.Equal(1, len(blockResults.Results.DeliverTx)) { - // check success code - assert.EqualValues(0, blockResults.Results.DeliverTx[0].Code) - } - - // check blockchain info, now that we know there is info - info, err := c.BlockchainInfo(apph, apph) - require.Nil(err, "%d: %+v", i, err) - assert.True(info.LastHeight >= apph) - if assert.Equal(1, len(info.BlockMetas)) { - lastMeta := info.BlockMetas[0] - assert.EqualValues(apph, lastMeta.Header.Height) - bMeta := block.BlockMeta - assert.Equal(bMeta.Header.AppHash, lastMeta.Header.AppHash) - assert.Equal(bMeta.BlockID, lastMeta.BlockID) - } - - // and get the corresponding commit with the same apphash - commit, err := c.Commit(&apph) - require.Nil(err, "%d: %+v", i, err) - cappHash := commit.Header.AppHash - assert.Equal(appHash, cappHash) - assert.NotNil(commit.Commit) - - // compare the commits (note Commit(2) has commit from Block(3)) - h = apph - 1 - commit2, err := c.Commit(&h) - require.Nil(err, "%d: %+v", i, err) - assert.Equal(block.Block.LastCommit, commit2.Commit) - - // and we got a proof that works! - _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) - pres := _pres.Response - assert.Nil(err) - assert.True(pres.IsOK()) - } -} - -func TestBroadcastTxSync(t *testing.T) { - require := require.New(t) - - mempool := node.MempoolReactor().Mempool - initMempoolSize := mempool.Size() - - for i, c := range GetClients() { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxSync(tx) - require.Nil(err, "%d: %+v", i, err) - require.Equal(bres.Code, abci.CodeTypeOK) // FIXME - - require.Equal(initMempoolSize+1, mempool.Size()) - - txs := mempool.Reap(1) - require.EqualValues(tx, txs[0]) - mempool.Flush() - } -} - -func TestBroadcastTxCommit(t *testing.T) { - require := require.New(t) - - mempool := node.MempoolReactor().Mempool - for i, c := range GetClients() { - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(err, "%d: %+v", i, err) - require.True(bres.CheckTx.IsOK()) - require.True(bres.DeliverTx.IsOK()) - - require.Equal(0, mempool.Size()) - } -} - -func TestTx(t *testing.T) { - // first we broadcast a tx - c := getHTTPClient() - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - cases := []struct { - valid bool - hash []byte - prove bool - }{ - // only valid if correct hash provided - {true, txHash, false}, - {true, txHash, true}, - {false, anotherTxHash, false}, - {false, anotherTxHash, true}, - {false, nil, false}, - {false, nil, true}, - } - - for i, c := range GetClients() { - for j, tc := range cases { - t.Logf("client %d, case %d", i, j) - - // now we query for the tx. - // since there's only one tx, we know index=0. 
- ptx, err := c.Tx(tc.hash, tc.prove) - - if !tc.valid { - require.NotNil(t, err) - } else { - require.Nil(t, err, "%+v", err) - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - - // time to verify the proof - proof := ptx.Proof - if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) - } - } - } - } -} - -func TestTxSearch(t *testing.T) { - // first we broadcast a tx - c := getHTTPClient() - _, _, tx := MakeTxKV() - bres, err := c.BroadcastTxCommit(tx) - require.Nil(t, err, "%+v", err) - - txHeight := bres.Height - txHash := bres.Hash - - anotherTxHash := types.Tx("a different tx").Hash() - - for i, c := range GetClients() { - t.Logf("client %d", i) - - // now we query for the tx. - // since there's only one tx, we know index=0. - result, err := c.TxSearch(fmt.Sprintf("tx.hash='%v'", txHash), true, 1, 30) - require.Nil(t, err, "%+v", err) - require.Len(t, result.Txs, 1) - - ptx := result.Txs[0] - assert.EqualValues(t, txHeight, ptx.Height) - assert.EqualValues(t, tx, ptx.Tx) - assert.Zero(t, ptx.Index) - assert.True(t, ptx.TxResult.IsOK()) - assert.EqualValues(t, txHash, ptx.Hash) - - // time to verify the proof - proof := ptx.Proof - if assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) - } - - // we query for non existing tx - result, err = c.TxSearch(fmt.Sprintf("tx.hash='%X'", anotherTxHash), false, 1, 30) - require.Nil(t, err, "%+v", err) - require.Len(t, result.Txs, 0) - - // we query using a tag (see kvstore application) - result, err = c.TxSearch("app.creator='jae'", false, 1, 30) - require.Nil(t, err, "%+v", err) - if len(result.Txs) == 0 { - t.Fatal("expected a lot of transactions") - } - } -} diff --git a/rpc/client/types.go b/rpc/client/types.go deleted file mode 100644 index 89bd2f98..00000000 --- a/rpc/client/types.go +++ /dev/null @@ -1,12 +0,0 @@ -package client - -// ABCIQueryOptions can be used to provide options for ABCIQuery call other -// than the DefaultABCIQueryOptions. -type ABCIQueryOptions struct { - Height int64 - Trusted bool -} - -// DefaultABCIQueryOptions are latest height (0) and trusted equal to false -// (which will result in a proof being returned). -var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} diff --git a/rpc/core/README.md b/rpc/core/README.md deleted file mode 100644 index 9547079b..00000000 --- a/rpc/core/README.md +++ /dev/null @@ -1,20 +0,0 @@ -# Tendermint RPC - -## Generate markdown for [Slate](https://github.com/tendermint/slate) - -We are using [Slate](https://github.com/tendermint/slate) to power our RPC -documentation. For generating markdown use: - -```shell -go get github.com/davecheney/godoc2md - -godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$' -``` - -For more information see the [CI script for building the Slate docs](/scripts/slate.sh) - -## Pagination - -Requests that return multiple items will be paginated to 30 items by default. -You can specify further pages with the ?page parameter. You can also set a -custom page size up to 100 with the ?per_page parameter. 
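The same pagination applies from the Go client, where `TxSearch` takes `page` and `per_page` arguments directly. A hypothetical sketch that walks all pages of a tag query; the tag string mirrors the kvstore tests earlier in this diff, and the node address plus the `TotalCount` field on `ResultTxSearch` are assumptions from this revision's client:

```go
package main

import (
	"fmt"

	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	// Assumed default RPC address; adjust for your node.
	c := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")

	page, perPage := 1, 100 // pages start at 1; per_page is capped at 100
	for {
		res, err := c.TxSearch("app.creator='jae'", false, page, perPage)
		if err != nil {
			panic(err)
		}
		fmt.Printf("page %d: %d txs (of %d total)\n", page, len(res.Txs), res.TotalCount)
		if page*perPage >= res.TotalCount {
			break
		}
		page++
	}
}
```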
diff --git a/rpc/core/abci.go b/rpc/core/abci.go deleted file mode 100644 index 067108c4..00000000 --- a/rpc/core/abci.go +++ /dev/null @@ -1,95 +0,0 @@ -package core - -import ( - abci "github.com/tendermint/abci/types" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/version" - cmn "github.com/tendermint/tmlibs/common" -) - -// Query the application for some information. -// -// ```shell -// curl 'localhost:26657/abci_query?path=""&data="abcd"&trusted=false' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// result, err := client.ABCIQuery("", "abcd", true) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "response": { -// "log": "exists", -// "height": 0, -// "proof": "010114FED0DAD959F36091AD761C922ABA3CBF1D8349990101020103011406AA2262E2F448242DF2C2607C3CDC705313EE3B0001149D16177BC71E445476174622EA559715C293740C", -// "value": "61626364", -// "key": "61626364", -// "index": -1, -// "code": 0 -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+------------------------------------------------| -// | path | string | false | false | Path to the data ("/a/b/c") | -// | data | []byte | false | true | Data | -// | height | int64 | 0 | false | Height (0 means latest) | -// | trusted | bool | false | false | Does not include a proof of the data inclusion | -func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { - resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ - Path: path, - Data: data, - Height: height, - Prove: !trusted, - }) - if err != nil { - return nil, err - } - logger.Info("ABCIQuery", "path", path, "data", data, "result", resQuery) - return &ctypes.ResultABCIQuery{*resQuery}, nil -} - -// Get some info about the application. -// -// ```shell -// curl 'localhost:26657/abci_info' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// info, err := client.ABCIInfo() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "response": { -// "data": "{\"size\":3}" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -func ABCIInfo() (*ctypes.ResultABCIInfo, error) { - resInfo, err := proxyAppQuery.InfoSync(abci.RequestInfo{version.Version}) - if err != nil { - return nil, err - } - return &ctypes.ResultABCIInfo{*resInfo}, nil -} diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go deleted file mode 100644 index a5ad5b4c..00000000 --- a/rpc/core/blocks.go +++ /dev/null @@ -1,364 +0,0 @@ -package core - -import ( - "fmt" - - ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -// Get block headers for minHeight <= height <= maxHeight. -// Block headers are returned in descending order (highest first). 
-// -// ```shell -// curl 'localhost:26657/blockchain?minHeight=10&maxHeight=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// info, err := client.BlockchainInfo(10, 10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "block_metas": [ -// { -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": 10, -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// }, -// "block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// } -// } -// ], -// "last_height": 5493 -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// -func BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlockchainInfo, error) { - if minHeight == 0 { - minHeight = 1 - } - - if maxHeight == 0 { - maxHeight = blockStore.Height() - } else { - maxHeight = cmn.MinInt64(blockStore.Height(), maxHeight) - } - - // maximum 20 block metas - const limit int64 = 20 - minHeight = cmn.MaxInt64(minHeight, maxHeight-limit) - - logger.Debug("BlockchainInfoHandler", "maxHeight", maxHeight, "minHeight", minHeight) - - if minHeight > maxHeight { - return nil, fmt.Errorf("min height %d can't be greater than max height %d", minHeight, maxHeight) - } - - blockMetas := []*types.BlockMeta{} - for height := maxHeight; height >= minHeight; height-- { - blockMeta := blockStore.LoadBlockMeta(height) - blockMetas = append(blockMetas, blockMeta) - } - - return &ctypes.ResultBlockchainInfo{blockStore.Height(), blockMetas}, nil -} - -// Get block at a given height. -// If no height is provided, it will fetch the latest block. 
-// -// ```shell -// curl 'localhost:26657/block?height=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// info, err := client.Block(10) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "block": { -// "last_commit": { -// "precommits": [ -// { -// "signature": { -// "data": "12C0D8893B8A38224488DC1DE6270DF76BB1A5E9DB1C68577706A6A97C6EC34FFD12339183D5CA8BC2F46148773823DE905B7F6F5862FD564038BB7AE03BF50D", -// "type": "ed25519" -// }, -// "block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "type": 2, -// "round": 0, -// "height": 9, -// "validator_index": 0, -// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "blockID": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// } -// }, -// "data": { -// "txs": [] -// }, -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": 10, -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// } -// }, -// "block_meta": { -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": 10, -// "time": "2017-05-29T15:05:53.877Z", -// "num_txs": 0, -// "last_block_id": { -// "parts": { -// "hash": "3C78F00658E06744A88F24FF97A0A5011139F34A", -// "total": 1 -// }, -// "hash": "F70588DAB36BDA5A953D548A16F7D48C6C2DFD78" -// }, -// "last_commit_hash": "F31CC4282E50B3F2A58D763D233D76F26D26CABE", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// }, -// "block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// } -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -func Block(heightPtr *int64) (*ctypes.ResultBlock, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - blockMeta := blockStore.LoadBlockMeta(height) - block := blockStore.LoadBlock(height) - return &ctypes.ResultBlock{blockMeta, block}, nil -} - -// Get block commit at a given height. -// If no height is provided, it will fetch the commit for the latest block. 
-// -// ```shell -// curl 'localhost:26657/commit?height=11' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// info, err := client.Commit(11) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "canonical": true, -// "commit": { -// "precommits": [ -// { -// "signature": { -// "data": "00970429FEC652E9E21D106A90AE8C5413759A7488775CEF4A3F44DC46C7F9D941070E4FBE9ED54DF247FA3983359A0C3A238D61DE55C75C9116D72ABC9CF50F", -// "type": "ed25519" -// }, -// "block_id": { -// "parts": { -// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": 1 -// }, -// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" -// }, -// "type": 2, -// "round": 0, -// "height": 11, -// "validator_index": 0, -// "validator_address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "blockID": { -// "parts": { -// "hash": "9E37CBF266BC044A779E09D81C456E653B89E006", -// "total": 1 -// }, -// "hash": "CC6E861E31CA4334E9888381B4A9137D1458AB6A" -// } -// }, -// "header": { -// "app_hash": "", -// "chain_id": "test-chain-6UTNIN", -// "height": 11, -// "time": "2017-05-29T15:05:54.893Z", -// "num_txs": 0, -// "last_block_id": { -// "parts": { -// "hash": "277A4DBEF91483A18B85F2F5677ABF9694DFA40F", -// "total": 1 -// }, -// "hash": "96B1D2F2D201BA4BC383EB8224139DB1294944E5" -// }, -// "last_commit_hash": "3CE0C9727CE524BA9CB7C91E28F08E2B94001087", -// "data_hash": "", -// "validators_hash": "9365FC80F234C967BD233F5A3E2AB2F1E4B0E5AA" -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -func Commit(heightPtr *int64) (*ctypes.ResultCommit, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - header := blockStore.LoadBlockMeta(height).Header - - // If the next block has not been committed yet, - // use a non-canonical commit - if height == storeHeight { - commit := blockStore.LoadSeenCommit(height) - return ctypes.NewResultCommit(header, commit, false), nil - } - - // Return the canonical commit (comes from the block at height+1) - commit := blockStore.LoadBlockCommit(height) - return ctypes.NewResultCommit(header, commit, true), nil -} - -// BlockResults gets ABCIResults at a given height. -// If no height is provided, it will fetch results for the latest block. -// -// Results are for the height of the block containing the txs. 
-// Thus response.results[5] is the results of executing getBlock(h).Txs[5] -// -// ```shell -// curl 'localhost:26657/block_results?height=10' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// info, err := client.BlockResults(10) -// ``` -// -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "height": 10, -// "results": [ -// { -// "code": 0, -// "data": "CAFE00F00D" -// }, -// { -// "code": 102, -// "data": "" -// } -// ] -// } -// ``` -func BlockResults(heightPtr *int64) (*ctypes.ResultBlockResults, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - // load the results - results, err := sm.LoadABCIResponses(stateDB, height) - if err != nil { - return nil, err - } - - res := &ctypes.ResultBlockResults{ - Height: height, - Results: results, - } - return res, nil -} - -func getHeight(storeHeight int64, heightPtr *int64) (int64, error) { - if heightPtr != nil { - height := *heightPtr - if height <= 0 { - return 0, fmt.Errorf("Height must be greater than 0") - } - if height > storeHeight { - return 0, fmt.Errorf("Height must be less than or equal to the current blockchain height") - } - return height, nil - } - return storeHeight, nil -} diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go deleted file mode 100644 index dad64b6b..00000000 --- a/rpc/core/consensus.go +++ /dev/null @@ -1,261 +0,0 @@ -package core - -import ( - cm "github.com/tendermint/tendermint/consensus" - "github.com/tendermint/tendermint/p2p" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -// Get the validator set at the given block height. -// If no height is provided, it will fetch the current validator set. -// -// ```shell -// curl 'localhost:26657/validators' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// state, err := client.Validators() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "validators": [ -// { -// "accum": 0, -// "voting_power": 10, -// "pub_key": { -// "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D", -// "type": "ed25519" -// }, -// "address": "E89A51D60F68385E09E716D353373B11F8FACD62" -// } -// ], -// "block_height": 5241 -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -func Validators(heightPtr *int64) (*ctypes.ResultValidators, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) - if err != nil { - return nil, err - } - - validators, err := sm.LoadValidators(stateDB, height) - if err != nil { - return nil, err - } - return &ctypes.ResultValidators{height, validators.Validators}, nil -} - -// DumpConsensusState dumps consensus state. 
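-// It returns the node's own RoundState as JSON, together with the PeerState
-// of every connected peer.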
-// UNSTABLE -// -// ```shell -// curl 'localhost:26657/dump_consensus_state' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// state, err := client.DumpConsensusState() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "round_state": { -// "height": 7185, -// "round": 0, -// "step": 1, -// "start_time": "2018-05-12T13:57:28.440293621-07:00", -// "commit_time": "2018-05-12T13:57:27.440293621-07:00", -// "validators": { -// "validators": [ -// { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "AC26791624DE60", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": 10, -// "accum": 0 -// } -// ], -// "proposer": { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "AC26791624DE60", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": 10, -// "accum": 0 -// } -// }, -// "proposal": null, -// "proposal_block": null, -// "proposal_block_parts": null, -// "locked_round": 0, -// "locked_block": null, -// "locked_block_parts": null, -// "valid_round": 0, -// "valid_block": null, -// "valid_block_parts": null, -// "votes": [ -// { -// "round": 0, -// "prevotes": "_", -// "precommits": "_" -// } -// ], -// "commit_round": -1, -// "last_commit": { -// "votes": [ -// "Vote{0:B5B3D40BE539 7184/00/2(Precommit) 14F946FA7EF0 /702B1B1A602A.../ @ 2018-05-12T20:57:27.342Z}" -// ], -// "votes_bit_array": "x", -// "peer_maj_23s": {} -// }, -// "last_validators": { -// "validators": [ -// { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "AC26791624DE60", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": 10, -// "accum": 0 -// } -// ], -// "proposer": { -// "address": "B5B3D40BE53982AD294EF99FF5A34C0C3E5A3244", -// "pub_key": { -// "type": "AC26791624DE60", -// "value": "SBctdhRBcXtBgdI/8a/alTsUhGXqGs9k5ylV1u5iKHg=" -// }, -// "voting_power": 10, -// "accum": 0 -// } -// } -// }, -// "peers": [ -// { -// "node_address": "30ad1854af22506383c3f0e57fb3c7f90984c5e8@172.16.63.221:26656", -// "peer_state": { -// "round_state": { -// "height": 7185, -// "round": 0, -// "step": 1, -// "start_time": "2018-05-12T13:57:27.438039872-07:00", -// "proposal": false, -// "proposal_block_parts_header": { -// "total": 0, -// "hash": "" -// }, -// "proposal_block_parts": null, -// "proposal_pol_round": -1, -// "proposal_pol": "_", -// "prevotes": "_", -// "precommits": "_", -// "last_commit_round": 0, -// "last_commit": "x", -// "catchup_commit_round": -1, -// "catchup_commit": "_" -// }, -// "stats": { -// "last_vote_height": 7184, -// "votes": 255, -// "last_block_part_height": 7184, -// "block_parts": 255 -// } -// } -// } -// ] -// } -// } -// ``` -func DumpConsensusState() (*ctypes.ResultDumpConsensusState, error) { - // Get Peer consensus states. - peers := p2pSwitch.Peers().List() - peerStates := make([]ctypes.PeerStateInfo, len(peers)) - for i, peer := range peers { - peerState := peer.Get(types.PeerStateKey).(*cm.PeerState) - peerStateJSON, err := peerState.ToJSON() - if err != nil { - return nil, err - } - peerStates[i] = ctypes.PeerStateInfo{ - // Peer basic info. - NodeAddress: p2p.IDAddressString(peer.ID(), peer.NodeInfo().ListenAddr), - // Peer consensus state. - PeerState: peerStateJSON, - } - } - // Get self round state. 
- roundState, err := consensusState.GetRoundStateJSON() - if err != nil { - return nil, err - } - return &ctypes.ResultDumpConsensusState{roundState, peerStates}, nil -} - -// ConsensusState returns a concise summary of the consensus state. -// UNSTABLE -// -// ```shell -// curl 'localhost:26657/consensus_state' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// state, err := client.ConsensusState() -// ``` -// -// The above command returns JSON structured like this: -// -// ```json -//{ -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "round_state": { -// "height/round/step": "9336/0/1", -// "start_time": "2018-05-14T10:25:45.72595357-04:00", -// "proposal_block_hash": "", -// "locked_block_hash": "", -// "valid_block_hash": "", -// "height_vote_set": [ -// { -// "round": 0, -// "prevotes": [ -// "nil-Vote" -// ], -// "prevotes_bit_array": "BA{1:_} 0/10 = 0.00", -// "precommits": [ -// "nil-Vote" -// ], -// "precommits_bit_array": "BA{1:_} 0/10 = 0.00" -// } -// ] -// } -// } -//} -//``` -func ConsensusState() (*ctypes.ResultConsensusState, error) { - // Get self round state. - bz, err := consensusState.GetRoundStateSimpleJSON() - return &ctypes.ResultConsensusState{bz}, err -} diff --git a/rpc/core/dev.go b/rpc/core/dev.go deleted file mode 100644 index 0b515476..00000000 --- a/rpc/core/dev.go +++ /dev/null @@ -1,51 +0,0 @@ -package core - -import ( - "os" - "runtime/pprof" - - ctypes "github.com/tendermint/tendermint/rpc/core/types" -) - -func UnsafeFlushMempool() (*ctypes.ResultUnsafeFlushMempool, error) { - mempool.Flush() - return &ctypes.ResultUnsafeFlushMempool{}, nil -} - -var profFile *os.File - -func UnsafeStartCPUProfiler(filename string) (*ctypes.ResultUnsafeProfile, error) { - var err error - profFile, err = os.Create(filename) - if err != nil { - return nil, err - } - err = pprof.StartCPUProfile(profFile) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -func UnsafeStopCPUProfiler() (*ctypes.ResultUnsafeProfile, error) { - pprof.StopCPUProfile() - if err := profFile.Close(); err != nil { - return nil, err - } - return &ctypes.ResultUnsafeProfile{}, nil -} - -func UnsafeWriteHeapProfile(filename string) (*ctypes.ResultUnsafeProfile, error) { - memProfFile, err := os.Create(filename) - if err != nil { - return nil, err - } - if err := pprof.WriteHeapProfile(memProfFile); err != nil { - return nil, err - } - if err := memProfFile.Close(); err != nil { - return nil, err - } - - return &ctypes.ResultUnsafeProfile{}, nil -} diff --git a/rpc/core/doc.go b/rpc/core/doc.go deleted file mode 100644 index d076b3ec..00000000 --- a/rpc/core/doc.go +++ /dev/null @@ -1,109 +0,0 @@ -/* -# Introduction - -Tendermint supports the following RPC protocols: - -* URI over HTTP -* JSONRPC over HTTP -* JSONRPC over websockets - -Tendermint RPC is built using [our own RPC library](https://github.com/tendermint/tendermint/tree/master/rpc/lib) which contains its own set of documentation and tests. - -## Configuration - -Set the `laddr` config parameter under `[rpc]` table in the `$TMHOME/config/config.toml` file or the `--rpc.laddr` command-line flag to the desired protocol://host:port setting. Default: `tcp://0.0.0.0:26657`. - -## Arguments - -Arguments which expect strings or byte arrays may be passed as quoted strings, like `"abc"` or as `0x`-prefixed strings, like `0x616263`. 
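-
-For example, the following two calls should be equivalent, since `0x616263` is
-the hex encoding of the ASCII bytes `abc`:
-
-```bash
-curl 'localhost:26657/broadcast_tx_sync?tx="abc"'
-curl 'localhost:26657/broadcast_tx_sync?tx=0x616263'
-```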
-
-## URI/HTTP
-
-```bash
-curl 'localhost:26657/broadcast_tx_sync?tx="abc"'
-```
-
-> Response:
-
-```json
-{
-  "error": "",
-  "result": {
-    "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF",
-    "log": "",
-    "data": "",
-    "code": 0
-  },
-  "id": "",
-  "jsonrpc": "2.0"
-}
-```
-
-The `result` object contains the actual response payload; see [responses.go](https://github.com/tendermint/tendermint/blob/master/rpc/core/types/responses.go) for a complete overview of the available result types.
-
-## JSONRPC/HTTP
-
-JSONRPC requests can be POSTed to the root RPC endpoint via HTTP (e.g. `http://localhost:26657/`).
-
-```json
-{
-  "method": "broadcast_tx_sync",
-  "jsonrpc": "2.0",
-  "params": [ "abc" ],
-  "id": "dontcare"
-}
-```
-
-## JSONRPC/websockets
-
-JSONRPC requests can be made via websocket. The websocket endpoint is at `/websocket`, e.g. `localhost:26657/websocket`. Asynchronous RPC functions like event `subscribe` and `unsubscribe` are only available via websockets.
-
-
-## More Examples
-
-See the various bash tests using curl in `test/`, and examples using the `Go` API in `rpc/client/`.
-
-## Get the list
-
-An HTTP GET request to the root RPC endpoint shows a list of available endpoints.
-
-```bash
-curl 'localhost:26657'
-```
-
-> Response:
-
-```plain
-Available endpoints:
-/abci_info
-/dump_consensus_state
-/genesis
-/net_info
-/num_unconfirmed_txs
-/status
-/health
-/unconfirmed_txs
-/unsafe_flush_mempool
-/unsafe_stop_cpu_profiler
-/validators
-
-Endpoints that require arguments:
-/abci_query?path=_&data=_&prove=_
-/block?height=_
-/blockchain?minHeight=_&maxHeight=_
-/broadcast_tx_async?tx=_
-/broadcast_tx_commit?tx=_
-/broadcast_tx_sync?tx=_
-/commit?height=_
-/dial_seeds?seeds=_
-/dial_persistent_peers?persistent_peers=_
-/subscribe?event=_
-/tx?hash=_&prove=_
-/unsafe_start_cpu_profiler?filename=_
-/unsafe_write_heap_profile?filename=_
-/unsubscribe?event=_
-```
-
-# Endpoints
-*/
-package core
diff --git a/rpc/core/doc_template.txt b/rpc/core/doc_template.txt
deleted file mode 100644
index 896d0c27..00000000
--- a/rpc/core/doc_template.txt
+++ /dev/null
@@ -1,8 +0,0 @@
-{{with .PDoc}}
-{{comment_md .Doc}}
-{{example_html $ ""}}
-
-{{range .Funcs}}{{$name_html := html .Name}}## [{{$name_html}}]({{posLink_url $ .Decl}})
-{{comment_md .Doc}}{{end}}
-{{end}}
----
diff --git a/rpc/core/events.go b/rpc/core/events.go
deleted file mode 100644
index 6f679e33..00000000
--- a/rpc/core/events.go
+++ /dev/null
@@ -1,188 +0,0 @@
-package core
-
-import (
-	"context"
-
-	"github.com/pkg/errors"
-
-	tmquery "github.com/tendermint/tendermint/libs/pubsub/query"
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
-	rpctypes "github.com/tendermint/tendermint/rpc/lib/types"
-	tmtypes "github.com/tendermint/tendermint/types"
-)
-
-// Subscribe for events via WebSocket.
-//
-// To tell which events you want, you need to provide a query. query is a
-// string, which has a form: "condition AND condition ..." (no OR at the
-// moment). condition has a form: "key operation operand". key is a string with
-// a restricted set of possible symbols ( \t\n\r\\()"'=>< are not allowed).
-// operation can be "=", "<", "<=", ">", ">=", "CONTAINS". operand can be a
-// string (escaped with single quotes), number, date or time.
-// -// Examples: -// tm.event = 'NewBlock' # new blocks -// tm.event = 'CompleteProposal' # node got a complete proposal -// tm.event = 'Tx' AND tx.hash = 'XYZ' # single transaction -// tm.event = 'Tx' AND tx.height = 5 # all txs of the fifth block -// tx.height = 5 # all txs of the fifth block -// -// Tendermint provides a few predefined keys: tm.event, tx.hash and tx.height. -// Note for transactions, you can define additional keys by providing tags with -// DeliverTx response. -// -// DeliverTx{ -// Tags: []*KVPair{ -// "agent.name": "K", -// } -// } -// -// tm.event = 'Tx' AND agent.name = 'K' -// tm.event = 'Tx' AND account.created_at >= TIME 2013-05-03T14:45:00Z -// tm.event = 'Tx' AND contract.sign_date = DATE 2017-01-01 -// tm.event = 'Tx' AND account.owner CONTAINS 'Igor' -// -// See list of all possible events here -// https://godoc.org/github.com/tendermint/tendermint/types#pkg-constants -// -// For complete query syntax, check out -// https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query. -// -// ```go -// import "github.com/tendermint/tendermint/libs/pubsub/query" -// import "github.com/tendermint/tendermint/types" -// -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// ctx, cancel := context.WithTimeout(context.Background(), timeout) -// defer cancel() -// query := query.MustParse("tm.event = 'Tx' AND tx.height = 3") -// txs := make(chan interface{}) -// err := client.Subscribe(ctx, "test-client", query, txs) -// -// go func() { -// for e := range txs { -// fmt.Println("got ", e.(types.EventDataTx)) -// } -// }() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": {}, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+-------------| -// | query | string | "" | true | Query | -// -// -func Subscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultSubscribe, error) { - addr := wsCtx.GetRemoteAddr() - logger.Info("Subscribe to query", "remote", addr, "query", query) - - q, err := tmquery.New(query) - if err != nil { - return nil, errors.Wrap(err, "failed to parse query") - } - - ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) - defer cancel() - ch := make(chan interface{}) - err = eventBusFor(wsCtx).Subscribe(ctx, addr, q, ch) - if err != nil { - return nil, err - } - - go func() { - for event := range ch { - tmResult := &ctypes.ResultEvent{query, event.(tmtypes.TMEventData)} - wsCtx.TryWriteRPCResponse(rpctypes.NewRPCSuccessResponse(wsCtx.Codec(), wsCtx.Request.ID+"#event", tmResult)) - } - }() - - return &ctypes.ResultSubscribe{}, nil -} - -// Unsubscribe from events via WebSocket. 
-// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.Unsubscribe("test-client", query) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": {}, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+-------------| -// | query | string | "" | true | Query | -// -// -func Unsubscribe(wsCtx rpctypes.WSRPCContext, query string) (*ctypes.ResultUnsubscribe, error) { - addr := wsCtx.GetRemoteAddr() - logger.Info("Unsubscribe from query", "remote", addr, "query", query) - q, err := tmquery.New(query) - if err != nil { - return nil, errors.Wrap(err, "failed to parse query") - } - err = eventBusFor(wsCtx).Unsubscribe(context.Background(), addr, q) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsubscribe{}, nil -} - -// Unsubscribe from all events via WebSocket. -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// err := client.UnsubscribeAll("test-client") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": {}, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// -func UnsubscribeAll(wsCtx rpctypes.WSRPCContext) (*ctypes.ResultUnsubscribe, error) { - addr := wsCtx.GetRemoteAddr() - logger.Info("Unsubscribe from all", "remote", addr) - err := eventBusFor(wsCtx).UnsubscribeAll(context.Background(), addr) - if err != nil { - return nil, err - } - return &ctypes.ResultUnsubscribe{}, nil -} - -func eventBusFor(wsCtx rpctypes.WSRPCContext) tmtypes.EventBusSubscriber { - es := wsCtx.GetEventSubscriber() - if es == nil { - es = eventBus - } - return es -} diff --git a/rpc/core/health.go b/rpc/core/health.go deleted file mode 100644 index 0ec4b5b4..00000000 --- a/rpc/core/health.go +++ /dev/null @@ -1,31 +0,0 @@ -package core - -import ( - ctypes "github.com/tendermint/tendermint/rpc/core/types" -) - -// Get node health. Returns empty result (200 OK) on success, no response - in -// case of an error. -// -// ```shell -// curl 'localhost:26657/health' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// result, err := client.Health() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": {}, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -func Health() (*ctypes.ResultHealth, error) { - return &ctypes.ResultHealth{}, nil -} diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go deleted file mode 100644 index 515ada87..00000000 --- a/rpc/core/mempool.go +++ /dev/null @@ -1,276 +0,0 @@ -package core - -import ( - "context" - "fmt" - "time" - - "github.com/pkg/errors" - - abci "github.com/tendermint/abci/types" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -//----------------------------------------------------------------------------- -// NOTE: tx should be signed, but this is only checked at the app level (not by Tendermint!) 
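-//
-// Three broadcast variants follow: BroadcastTxAsync returns immediately,
-// BroadcastTxSync waits for the CheckTx response, and BroadcastTxCommit
-// waits until the tx is included in a block or a timeout is reached.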
- -// Returns right away, with no response -// -// ```shell -// curl 'localhost:26657/broadcast_tx_async?tx="123"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// result, err := client.BroadcastTxAsync("123") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "hash": "E39AAB7A537ABAA237831742DCE1117F187C3C52", -// "log": "", -// "data": "", -// "code": 0 -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxAsync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - err := mempool.CheckTx(tx, nil) - if err != nil { - return nil, fmt.Errorf("Error broadcasting transaction: %v", err) - } - return &ctypes.ResultBroadcastTx{Hash: tx.Hash()}, nil -} - -// Returns with the response from CheckTx. -// -// ```shell -// curl 'localhost:26657/broadcast_tx_sync?tx="456"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// result, err := client.BroadcastTxSync("456") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "code": 0, -// "data": "", -// "log": "", -// "hash": "0D33F2F03A5234F38706E43004489E061AC40A2E" -// }, -// "error": "" -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { - resCh := make(chan *abci.Response, 1) - err := mempool.CheckTx(tx, func(res *abci.Response) { - resCh <- res - }) - if err != nil { - return nil, fmt.Errorf("Error broadcasting transaction: %v", err) - } - res := <-resCh - r := res.GetCheckTx() - return &ctypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Hash: tx.Hash(), - }, nil -} - -// CONTRACT: only returns error if mempool.BroadcastTx errs (ie. problem with the app) -// or if we timeout waiting for tx to commit. -// If CheckTx or DeliverTx fail, no error will be returned, but the returned result -// will contain a non-OK ABCI code. 
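-// Callers should therefore inspect both ABCI codes in the result; a sketch
-// using the Go client:
-//
-// ```go
-// res, err := client.BroadcastTxCommit(tx)
-// if err == nil && (res.CheckTx.Code != 0 || res.DeliverTx.Code != 0) {
-// 	// the tx was rejected by the application
-// }
-// ```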
-// -// ```shell -// curl 'localhost:26657/broadcast_tx_commit?tx="789"' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// result, err := client.BroadcastTxCommit("789") -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "height": 26682, -// "hash": "75CA0F856A4DA078FC4911580360E70CEFB2EBEE", -// "deliver_tx": { -// "log": "", -// "data": "", -// "code": 0 -// }, -// "check_tx": { -// "log": "", -// "data": "", -// "code": 0 -// } -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+------+---------+----------+-----------------| -// | tx | Tx | nil | true | The transaction | -func BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { - // subscribe to tx being committed in block - ctx, cancel := context.WithTimeout(context.Background(), subscribeTimeout) - defer cancel() - deliverTxResCh := make(chan interface{}) - q := types.EventQueryTxFor(tx) - err := eventBus.Subscribe(ctx, "mempool", q, deliverTxResCh) - if err != nil { - err = errors.Wrap(err, "failed to subscribe to tx") - logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("Error on broadcastTxCommit: %v", err) - } - defer eventBus.Unsubscribe(context.Background(), "mempool", q) - - // broadcast the tx and register checktx callback - checkTxResCh := make(chan *abci.Response, 1) - err = mempool.CheckTx(tx, func(res *abci.Response) { - checkTxResCh <- res - }) - if err != nil { - logger.Error("Error on broadcastTxCommit", "err", err) - return nil, fmt.Errorf("Error on broadcastTxCommit: %v", err) - } - checkTxRes := <-checkTxResCh - checkTxR := checkTxRes.GetCheckTx() - if checkTxR.Code != abci.CodeTypeOK { - // CheckTx failed! - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxR, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, nil - } - - // Wait for the tx to be included in a block, - // timeout after something reasonable. - // TODO: configurable? - timer := time.NewTimer(60 * 2 * time.Second) - select { - case deliverTxResMsg := <-deliverTxResCh: - deliverTxRes := deliverTxResMsg.(types.EventDataTx) - // The tx was included in a block. - deliverTxR := deliverTxRes.Result - logger.Info("DeliverTx passed ", "tx", cmn.HexBytes(tx), "response", deliverTxR) - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxR, - DeliverTx: deliverTxR, - Hash: tx.Hash(), - Height: deliverTxRes.Height, - }, nil - case <-timer.C: - logger.Error("failed to include tx") - return &ctypes.ResultBroadcastTxCommit{ - CheckTx: *checkTxR, - DeliverTx: abci.ResponseDeliverTx{}, - Hash: tx.Hash(), - }, fmt.Errorf("Timed out waiting for transaction to be included in a block") - } -} - -// Get unconfirmed transactions (maximum ?limit entries) including their number. 
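-// The limit is validated like per_page: values below 1 or above 100 fall
-// back to the default of 30 (see validatePerPage in pipe.go).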
-
-// ```shell
-// curl 'localhost:26657/unconfirmed_txs'
-// ```
-//
-// ```go
-// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
-// result, err := client.UnconfirmedTxs()
-// ```
-//
-// > The above command returns JSON structured like this:
-//
-// ```json
-// {
-//   "error": "",
-//   "result": {
-//     "txs": [],
-//     "n_txs": 0
-//   },
-//   "id": "",
-//   "jsonrpc": "2.0"
-// }
-// ```
-//
-// ### Query Parameters
-//
-// | Parameter | Type | Default | Required | Description                          |
-// |-----------+------+---------+----------+--------------------------------------|
-// | limit     | int  | 30      | false    | Maximum number of entries (max: 100) |
-func UnconfirmedTxs(limit int) (*ctypes.ResultUnconfirmedTxs, error) {
-	// reuse per_page validator
-	limit = validatePerPage(limit)
-
-	txs := mempool.Reap(limit)
-	return &ctypes.ResultUnconfirmedTxs{len(txs), txs}, nil
-}
-
-// Get number of unconfirmed transactions.
-//
-// ```shell
-// curl 'localhost:26657/num_unconfirmed_txs'
-// ```
-//
-// ```go
-// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
-// result, err := client.NumUnconfirmedTxs()
-// ```
-//
-// > The above command returns JSON structured like this:
-//
-// ```json
-// {
-//   "error": "",
-//   "result": {
-//     "txs": null,
-//     "n_txs": 0
-//   },
-//   "id": "",
-//   "jsonrpc": "2.0"
-// }
-// ```
-func NumUnconfirmedTxs() (*ctypes.ResultUnconfirmedTxs, error) {
-	return &ctypes.ResultUnconfirmedTxs{N: mempool.Size()}, nil
-}
diff --git a/rpc/core/net.go b/rpc/core/net.go
deleted file mode 100644
index ba9753d8..00000000
--- a/rpc/core/net.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package core
-
-import (
-	"github.com/pkg/errors"
-
-	ctypes "github.com/tendermint/tendermint/rpc/core/types"
-)
-
-// Get network info.
-//
-// ```shell
-// curl 'localhost:26657/net_info'
-// ```
-//
-// ```go
-// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
-// info, err := client.NetInfo()
-// ```
-//
-// > The above command returns JSON structured like this:
-//
-// ```json
-// {
-//   "error": "",
-//   "result": {
-//     "n_peers": 0,
-//     "peers": [],
-//     "listeners": [
-//       "Listener(@10.0.2.15:26656)"
-//     ],
-//     "listening": true
-//   },
-//   "id": "",
-//   "jsonrpc": "2.0"
-// }
-// ```
-func NetInfo() (*ctypes.ResultNetInfo, error) {
-	listening := p2pSwitch.IsListening()
-	listeners := []string{}
-	for _, listener := range p2pSwitch.Listeners() {
-		listeners = append(listeners, listener.String())
-	}
-	peers := []ctypes.Peer{}
-	for _, peer := range p2pSwitch.Peers().List() {
-		peers = append(peers, ctypes.Peer{
-			NodeInfo:         peer.NodeInfo(),
-			IsOutbound:       peer.IsOutbound(),
-			ConnectionStatus: peer.Status(),
-		})
-	}
-	// TODO: Should we include PersistentPeers and Seeds in here?
-	// PRO: useful info
-	// CON: privacy
-	return &ctypes.ResultNetInfo{
-		Listening: listening,
-		Listeners: listeners,
-		NPeers:    len(peers),
-		Peers:     peers,
-	}, nil
-}
-
-func UnsafeDialSeeds(seeds []string) (*ctypes.ResultDialSeeds, error) {
-	if len(seeds) == 0 {
-		return &ctypes.ResultDialSeeds{}, errors.New("No seeds provided")
-	}
-	// starts goroutines to dial each peer after random delays
-	logger.Info("DialSeeds", "addrBook", addrBook, "seeds", seeds)
-	err := p2pSwitch.DialPeersAsync(addrBook, seeds, false)
-	if err != nil {
-		return &ctypes.ResultDialSeeds{}, err
-	}
-	return &ctypes.ResultDialSeeds{"Dialing seeds in progress. See /net_info for details"}, nil
-}
-
-func UnsafeDialPeers(peers []string, persistent bool) (*ctypes.ResultDialPeers, error) {
-	if len(peers) == 0 {
-		return &ctypes.ResultDialPeers{}, errors.New("No peers provided")
-	}
-	// starts goroutines to dial each peer after random delays
-	logger.Info("DialPeers", "addrBook", addrBook, "peers", peers, "persistent", persistent)
-	err := p2pSwitch.DialPeersAsync(addrBook, peers, persistent)
-	if err != nil {
-		return &ctypes.ResultDialPeers{}, err
-	}
-	return &ctypes.ResultDialPeers{"Dialing peers in progress. See /net_info for details"}, nil
-}
-
-// Get genesis file.
-//
-// ```shell
-// curl 'localhost:26657/genesis'
-// ```
-//
-// ```go
-// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket")
-// genesis, err := client.Genesis()
-// ```
-//
-// > The above command returns JSON structured like this:
-//
-// ```json
-// {
-//   "error": "",
-//   "result": {
-//     "genesis": {
-//       "app_hash": "",
-//       "validators": [
-//         {
-//           "name": "",
-//           "power": 10,
-//           "pub_key": {
-//             "data": "68DFDA7E50F82946E7E8546BED37944A422CD1B831E70DF66BA3B8430593944D",
-//             "type": "ed25519"
-//           }
-//         }
-//       ],
-//       "chain_id": "test-chain-6UTNIN",
-//       "genesis_time": "2017-05-29T15:05:41.671Z"
-//     }
-//   },
-//   "id": "",
-//   "jsonrpc": "2.0"
-// }
-// ```
-func Genesis() (*ctypes.ResultGenesis, error) {
-	return &ctypes.ResultGenesis{genDoc}, nil
-}
diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go
deleted file mode 100644
index 7a042362..00000000
--- a/rpc/core/pipe.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package core
-
-import (
-	"time"
-
-	crypto "github.com/tendermint/go-crypto"
-	"github.com/tendermint/tendermint/consensus"
-	"github.com/tendermint/tendermint/p2p"
-	"github.com/tendermint/tendermint/proxy"
-	sm "github.com/tendermint/tendermint/state"
-	"github.com/tendermint/tendermint/state/txindex"
-	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
-	"github.com/tendermint/tmlibs/log"
-)
-
-const (
-	// see README
-	defaultPerPage = 30
-	maxPerPage     = 100
-)
-
-var subscribeTimeout = 5 * time.Second
-
-//----------------------------------------------
-// These interfaces are used by RPC and must be thread safe
-
-type Consensus interface {
-	GetState() sm.State
-	GetValidators() (int64, []*types.Validator)
-	GetRoundStateJSON() ([]byte, error)
-	GetRoundStateSimpleJSON() ([]byte, error)
-}
-
-type P2P interface {
-	Listeners() []p2p.Listener
-	Peers() p2p.IPeerSet
-	NumPeers() (outbound, inbound, dialing int)
-	NodeInfo() p2p.NodeInfo
-	IsListening() bool
-	DialPeersAsync(p2p.AddrBook, []string, bool) error
-}
-
-//----------------------------------------------
-// These package level globals come with setters
-// that are expected to be called only once, on startup
-
-var (
-	// external, thread safe interfaces
-	proxyAppQuery proxy.AppConnQuery
-
-	// interfaces defined in types and above
-	stateDB        dbm.DB
-	blockStore     sm.BlockStore
-	mempool        sm.Mempool
-	evidencePool   sm.EvidencePool
-	consensusState Consensus
-	p2pSwitch      P2P
-
-	// objects
-	pubKey           crypto.PubKey
-	genDoc           *types.GenesisDoc // cache the genesis structure
-	addrBook         p2p.AddrBook
-	txIndexer        txindex.TxIndexer
-	consensusReactor *consensus.ConsensusReactor
-	eventBus         *types.EventBus // thread safe
-
-	logger log.Logger
-)
-
-func SetStateDB(db dbm.DB) {
-	stateDB = db
-}
-
-func SetBlockStore(bs sm.BlockStore) {
-	blockStore = bs
-}
-
-func SetMempool(mem sm.Mempool) {
-	mempool = mem
-}
-
-func SetEvidencePool(evpool sm.EvidencePool) {
-	evidencePool = evpool
-}
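-
-// A node is expected to call each of these setters exactly once during
-// startup, before serving RPC. A hypothetical wiring sketch (the variable
-// names are illustrative only):
-//
-//	core.SetStateDB(db)
-//	core.SetBlockStore(blockStore)
-//	core.SetMempool(mempool)
-//	core.SetConsensusState(cs)
-//	core.SetLogger(logger.With("module", "rpc"))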
- -func SetConsensusState(cs Consensus) { - consensusState = cs -} - -func SetSwitch(sw P2P) { - p2pSwitch = sw -} - -func SetPubKey(pk crypto.PubKey) { - pubKey = pk -} - -func SetGenesisDoc(doc *types.GenesisDoc) { - genDoc = doc -} - -func SetAddrBook(book p2p.AddrBook) { - addrBook = book -} - -func SetProxyAppQuery(appConn proxy.AppConnQuery) { - proxyAppQuery = appConn -} - -func SetTxIndexer(indexer txindex.TxIndexer) { - txIndexer = indexer -} - -func SetConsensusReactor(conR *consensus.ConsensusReactor) { - consensusReactor = conR -} - -func SetLogger(l log.Logger) { - logger = l -} - -func SetEventBus(b *types.EventBus) { - eventBus = b -} - -func validatePage(page, perPage, totalCount int) int { - if perPage < 1 { - return 1 - } - - pages := ((totalCount - 1) / perPage) + 1 - if page < 1 { - page = 1 - } else if page > pages { - page = pages - } - - return page -} - -func validatePerPage(perPage int) int { - if perPage < 1 || perPage > maxPerPage { - return defaultPerPage - } - return perPage -} diff --git a/rpc/core/pipe_test.go b/rpc/core/pipe_test.go deleted file mode 100644 index 225e3649..00000000 --- a/rpc/core/pipe_test.go +++ /dev/null @@ -1,69 +0,0 @@ -package core - -import ( - "fmt" - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestPaginationPage(t *testing.T) { - - cases := []struct { - totalCount int - perPage int - page int - newPage int - }{ - {0, 0, 1, 1}, - - {0, 10, 0, 1}, - {0, 10, 1, 1}, - {0, 10, 2, 1}, - - {5, 10, -1, 1}, - {5, 10, 0, 1}, - {5, 10, 1, 1}, - {5, 10, 2, 1}, - {5, 10, 2, 1}, - - {5, 5, 1, 1}, - {5, 5, 2, 1}, - {5, 5, 3, 1}, - - {5, 3, 2, 2}, - {5, 3, 3, 2}, - - {5, 2, 2, 2}, - {5, 2, 3, 3}, - {5, 2, 4, 3}, - } - - for _, c := range cases { - p := validatePage(c.page, c.perPage, c.totalCount) - assert.Equal(t, c.newPage, p, fmt.Sprintf("%v", c)) - } - -} - -func TestPaginationPerPage(t *testing.T) { - - cases := []struct { - totalCount int - perPage int - newPerPage int - }{ - {5, 0, defaultPerPage}, - {5, 1, 1}, - {5, 2, 2}, - {5, defaultPerPage, defaultPerPage}, - {5, maxPerPage - 1, maxPerPage - 1}, - {5, maxPerPage, maxPerPage}, - {5, maxPerPage + 1, defaultPerPage}, - } - - for _, c := range cases { - p := validatePerPage(c.perPage) - assert.Equal(t, c.newPerPage, p, fmt.Sprintf("%v", c)) - } -} diff --git a/rpc/core/routes.go b/rpc/core/routes.go deleted file mode 100644 index f26fadb6..00000000 --- a/rpc/core/routes.go +++ /dev/null @@ -1,52 +0,0 @@ -package core - -import ( - rpc "github.com/tendermint/tendermint/rpc/lib/server" -) - -// TODO: better system than "unsafe" prefix -// NOTE: Amino is registered in rpc/core/types/wire.go. -var Routes = map[string]*rpc.RPCFunc{ - // subscribe/unsubscribe are reserved for websocket events. 
- "subscribe": rpc.NewWSRPCFunc(Subscribe, "query"), - "unsubscribe": rpc.NewWSRPCFunc(Unsubscribe, "query"), - "unsubscribe_all": rpc.NewWSRPCFunc(UnsubscribeAll, ""), - - // info API - "health": rpc.NewRPCFunc(Health, ""), - "status": rpc.NewRPCFunc(Status, ""), - "net_info": rpc.NewRPCFunc(NetInfo, ""), - "blockchain": rpc.NewRPCFunc(BlockchainInfo, "minHeight,maxHeight"), - "genesis": rpc.NewRPCFunc(Genesis, ""), - "block": rpc.NewRPCFunc(Block, "height"), - "block_results": rpc.NewRPCFunc(BlockResults, "height"), - "commit": rpc.NewRPCFunc(Commit, "height"), - "tx": rpc.NewRPCFunc(Tx, "hash,prove"), - "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page"), - "validators": rpc.NewRPCFunc(Validators, "height"), - "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), - "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), - "unconfirmed_txs": rpc.NewRPCFunc(UnconfirmedTxs, "limit"), - "num_unconfirmed_txs": rpc.NewRPCFunc(NumUnconfirmedTxs, ""), - - // broadcast API - "broadcast_tx_commit": rpc.NewRPCFunc(BroadcastTxCommit, "tx"), - "broadcast_tx_sync": rpc.NewRPCFunc(BroadcastTxSync, "tx"), - "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), - - // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,trusted"), - "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), -} - -func AddUnsafeRoutes() { - // control API - Routes["dial_seeds"] = rpc.NewRPCFunc(UnsafeDialSeeds, "seeds") - Routes["dial_peers"] = rpc.NewRPCFunc(UnsafeDialPeers, "peers,persistent") - Routes["unsafe_flush_mempool"] = rpc.NewRPCFunc(UnsafeFlushMempool, "") - - // profiler API - Routes["unsafe_start_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStartCPUProfiler, "filename") - Routes["unsafe_stop_cpu_profiler"] = rpc.NewRPCFunc(UnsafeStopCPUProfiler, "") - Routes["unsafe_write_heap_profile"] = rpc.NewRPCFunc(UnsafeWriteHeapProfile, "filename") -} diff --git a/rpc/core/status.go b/rpc/core/status.go deleted file mode 100644 index 044c1289..00000000 --- a/rpc/core/status.go +++ /dev/null @@ -1,133 +0,0 @@ -package core - -import ( - "bytes" - "time" - - ctypes "github.com/tendermint/tendermint/rpc/core/types" - sm "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" -) - -// Get Tendermint status including node info, pubkey, latest block -// hash, app hash, block height and time. 
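-// The "syncing" flag in sync_info is true while the node is fast-syncing;
-// validator_info reports the node's own validator key and, when that key is
-// in the current validator set, its voting power.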
-// -// ```shell -// curl 'localhost:26657/status' -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// result, err := client.Status() -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -//{ -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "node_info": { -// "id": "562dd7f579f0ecee8c94a11a3c1e378c1876f433", -// "listen_addr": "192.168.1.2:26656", -// "network": "test-chain-I6zScH", -// "version": "0.19.0", -// "channels": "4020212223303800", -// "moniker": "Ethans-MacBook-Pro.local", -// "other": [ -// "amino_version=0.9.8", -// "p2p_version=0.5.0", -// "consensus_version=v1/0.2.2", -// "rpc_version=0.7.0/3", -// "tx_index=on", -// "rpc_addr=tcp://0.0.0.0:26657" -// ] -// }, -// "sync_info": { -// "latest_block_hash": "2D4D7055BE685E3CB2410603C92AD37AE557AC59", -// "latest_app_hash": "0000000000000000", -// "latest_block_height": 231, -// "latest_block_time": "2018-04-27T23:18:08.459766485-04:00", -// "syncing": false -// }, -// "validator_info": { -// "address": "5875562FF0FFDECC895C20E32FC14988952E99E7", -// "pub_key": { -// "type": "AC26791624DE60", -// "value": "PpDJRUrLG2RgFqYYjawfn/AcAgacSXpLFrmfYYQnuzE=" -// }, -// "voting_power": 10 -// } -// } -//} -// ``` -func Status() (*ctypes.ResultStatus, error) { - latestHeight := blockStore.Height() - var ( - latestBlockMeta *types.BlockMeta - latestBlockHash cmn.HexBytes - latestAppHash cmn.HexBytes - latestBlockTimeNano int64 - ) - if latestHeight != 0 { - latestBlockMeta = blockStore.LoadBlockMeta(latestHeight) - latestBlockHash = latestBlockMeta.BlockID.Hash - latestAppHash = latestBlockMeta.Header.AppHash - latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() - } - - latestBlockTime := time.Unix(0, latestBlockTimeNano) - - var votingPower int64 - if val := validatorAtHeight(latestHeight); val != nil { - votingPower = val.VotingPower - } - - result := &ctypes.ResultStatus{ - NodeInfo: p2pSwitch.NodeInfo(), - SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: latestBlockTime, - Syncing: consensusReactor.FastSync(), - }, - ValidatorInfo: ctypes.ValidatorInfo{ - Address: pubKey.Address(), - PubKey: pubKey, - VotingPower: votingPower, - }, - } - - return result, nil -} - -func validatorAtHeight(h int64) *types.Validator { - lastBlockHeight, vals := consensusState.GetValidators() - - privValAddress := pubKey.Address() - - // if we're still at height h, search in the current validator set - if lastBlockHeight == h { - for _, val := range vals { - if bytes.Equal(val.Address, privValAddress) { - return val - } - } - } - - // if we've moved to the next height, retrieve the validator set from DB - if lastBlockHeight > h { - vals, err := sm.LoadValidators(stateDB, h) - if err != nil { - // should not happen - return nil - } - _, val := vals.GetByAddress(privValAddress) - return val - } - - return nil -} diff --git a/rpc/core/tx.go b/rpc/core/tx.go deleted file mode 100644 index 2fa7825f..00000000 --- a/rpc/core/tx.go +++ /dev/null @@ -1,219 +0,0 @@ -package core - -import ( - "fmt" - - cmn "github.com/tendermint/tmlibs/common" - - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - "github.com/tendermint/tendermint/state/txindex/null" - "github.com/tendermint/tendermint/types" -) - -// Tx allows you to query the transaction results. 
`nil` could mean the -// transaction is in the mempool, invalidated, or was not sent in the first -// place. -// -// ```shell -// curl "localhost:26657/tx?hash=0x2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// tx, err := client.Tx([]byte("2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF"), true) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "error": "", -// "result": { -// "proof": { -// "Proof": { -// "aunts": [] -// }, -// "Data": "YWJjZA==", -// "RootHash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF", -// "Total": 1, -// "Index": 0 -// }, -// "tx": "YWJjZA==", -// "tx_result": { -// "log": "", -// "data": "", -// "code": 0 -// }, -// "index": 0, -// "height": 52, -// "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" -// }, -// "id": "", -// "jsonrpc": "2.0" -// } -// ``` -// -// Returns a transaction matching the given transaction hash. -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+-----------------------------------------------------------| -// | hash | []byte | nil | true | The transaction hash | -// | prove | bool | false | false | Include a proof of the transaction inclusion in the block | -// -// ### Returns -// -// - `proof`: the `types.TxProof` object -// - `tx`: `[]byte` - the transaction -// - `tx_result`: the `abci.Result` object -// - `index`: `int` - index of the transaction -// - `height`: `int` - height of the block where this transaction was in -// - `hash`: `[]byte` - hash of the transaction -func Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { - - // if index is disabled, return error - if _, ok := txIndexer.(*null.TxIndex); ok { - return nil, fmt.Errorf("Transaction indexing is disabled") - } - - r, err := txIndexer.Get(hash) - if err != nil { - return nil, err - } - - if r == nil { - return nil, fmt.Errorf("Tx (%X) not found", hash) - } - - height := r.Height - index := r.Index - - var proof types.TxProof - if prove { - block := blockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines - } - - return &ctypes.ResultTx{ - Hash: hash, - Height: height, - Index: uint32(index), - TxResult: r.Result, - Tx: r.Tx, - Proof: proof, - }, nil -} - -// TxSearch allows you to query for multiple transactions results. It returns a -// list of transactions (maximum ?per_page entries) and the total count. 
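-// Results are paginated: page is clamped to the valid range, and per_page
-// falls back to 30 when it is below 1 or above 100 (see validatePage and
-// validatePerPage in pipe.go).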
-// -// ```shell -// curl "localhost:26657/tx_search?query=\"account.owner='Ivan'\"&prove=true" -// ``` -// -// ```go -// client := client.NewHTTP("tcp://0.0.0.0:26657", "/websocket") -// q, err := tmquery.New("account.owner='Ivan'") -// tx, err := client.TxSearch(q, true) -// ``` -// -// > The above command returns JSON structured like this: -// -// ```json -// { -// "jsonrpc": "2.0", -// "id": "", -// "result": { -// "txs": [ -// { -// "proof": { -// "Proof": { -// "aunts": [ -// "J3LHbizt806uKnABNLwG4l7gXCA=", -// "iblMO/M1TnNtlAefJyNCeVhjAb0=", -// "iVk3ryurVaEEhdeS0ohAJZ3wtB8=", -// "5hqMkTeGqpct51ohX0lZLIdsn7Q=", -// "afhsNxFnLlZgFDoyPpdQSe0bR8g=" -// ] -// }, -// "Data": "mvZHHa7HhZ4aRT0xMDA=", -// "RootHash": "F6541223AA46E428CB1070E9840D2C3DF3B6D776", -// "Total": 32, -// "Index": 31 -// }, -// "tx": "mvZHHa7HhZ4aRT0xMDA=", -// "tx_result": {}, -// "index": 31, -// "height": 12, -// "hash": "2B8EC32BA2579B3B8606E42C06DE2F7AFA2556EF" -// } -// ], -// "total_count": 1 -// } -// } -// ``` -// -// ### Query Parameters -// -// | Parameter | Type | Default | Required | Description | -// |-----------+--------+---------+----------+-----------------------------------------------------------| -// | query | string | "" | true | Query | -// | prove | bool | false | false | Include proofs of the transactions inclusion in the block | -// | page | int | 1 | false | Page number (1-based) | -// | per_page | int | 30 | false | Number of entries per page (max: 100) | -// -// ### Returns -// -// - `proof`: the `types.TxProof` object -// - `tx`: `[]byte` - the transaction -// - `tx_result`: the `abci.Result` object -// - `index`: `int` - index of the transaction -// - `height`: `int` - height of the block where this transaction was in -// - `hash`: `[]byte` - hash of the transaction -func TxSearch(query string, prove bool, page, perPage int) (*ctypes.ResultTxSearch, error) { - // if index is disabled, return error - if _, ok := txIndexer.(*null.TxIndex); ok { - return nil, fmt.Errorf("Transaction indexing is disabled") - } - - q, err := tmquery.New(query) - if err != nil { - return nil, err - } - - results, err := txIndexer.Search(q) - if err != nil { - return nil, err - } - - totalCount := len(results) - perPage = validatePerPage(perPage) - page = validatePage(page, perPage, totalCount) - skipCount := (page - 1) * perPage - - apiResults := make([]*ctypes.ResultTx, cmn.MinInt(perPage, totalCount-skipCount)) - var proof types.TxProof - for i := 0; i < len(apiResults); i++ { - r := results[skipCount+i] - height := r.Height - index := r.Index - - if prove { - block := blockStore.LoadBlock(height) - proof = block.Data.Txs.Proof(int(index)) // XXX: overflow on 32-bit machines - } - - apiResults[i] = &ctypes.ResultTx{ - Hash: r.Tx.Hash(), - Height: height, - Index: index, - TxResult: r.Result, - Tx: r.Tx, - Proof: proof, - } - } - - return &ctypes.ResultTxSearch{Txs: apiResults, TotalCount: totalCount}, nil -} diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go deleted file mode 100644 index 5b001d7d..00000000 --- a/rpc/core/types/responses.go +++ /dev/null @@ -1,210 +0,0 @@ -package core_types - -import ( - "encoding/json" - "strings" - "time" - - abci "github.com/tendermint/abci/types" - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/state" - "github.com/tendermint/tendermint/types" -) - -// List of blocks -type ResultBlockchainInfo struct { - LastHeight int64 
`json:"last_height"` - BlockMetas []*types.BlockMeta `json:"block_metas"` -} - -// Genesis file -type ResultGenesis struct { - Genesis *types.GenesisDoc `json:"genesis"` -} - -// Single block (with meta) -type ResultBlock struct { - BlockMeta *types.BlockMeta `json:"block_meta"` - Block *types.Block `json:"block"` -} - -// Commit and Header -type ResultCommit struct { - // SignedHeader is header and commit, embedded so we only have - // one level in the json output - types.SignedHeader - CanonicalCommit bool `json:"canonical"` -} - -// ABCI results from a block -type ResultBlockResults struct { - Height int64 `json:"height"` - Results *state.ABCIResponses `json:"results"` -} - -// NewResultCommit is a helper to initialize the ResultCommit with -// the embedded struct -func NewResultCommit(header *types.Header, commit *types.Commit, - canonical bool) *ResultCommit { - - return &ResultCommit{ - SignedHeader: types.SignedHeader{ - Header: header, - Commit: commit, - }, - CanonicalCommit: canonical, - } -} - -// Info about the node's syncing state -type SyncInfo struct { - LatestBlockHash cmn.HexBytes `json:"latest_block_hash"` - LatestAppHash cmn.HexBytes `json:"latest_app_hash"` - LatestBlockHeight int64 `json:"latest_block_height"` - LatestBlockTime time.Time `json:"latest_block_time"` - Syncing bool `json:"syncing"` -} - -// Info about the node's validator -type ValidatorInfo struct { - Address cmn.HexBytes `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - VotingPower int64 `json:"voting_power"` -} - -// Node Status -type ResultStatus struct { - NodeInfo p2p.NodeInfo `json:"node_info"` - SyncInfo SyncInfo `json:"sync_info"` - ValidatorInfo ValidatorInfo `json:"validator_info"` -} - -// Is TxIndexing enabled -func (s *ResultStatus) TxIndexEnabled() bool { - if s == nil { - return false - } - for _, s := range s.NodeInfo.Other { - info := strings.Split(s, "=") - if len(info) == 2 && info[0] == "tx_index" { - return info[1] == "on" - } - } - return false -} - -// Info about peer connections -type ResultNetInfo struct { - Listening bool `json:"listening"` - Listeners []string `json:"listeners"` - NPeers int `json:"n_peers"` - Peers []Peer `json:"peers"` -} - -// Log from dialing seeds -type ResultDialSeeds struct { - Log string `json:"log"` -} - -// Log from dialing peers -type ResultDialPeers struct { - Log string `json:"log"` -} - -// A peer -type Peer struct { - p2p.NodeInfo `json:"node_info"` - IsOutbound bool `json:"is_outbound"` - ConnectionStatus p2p.ConnectionStatus `json:"connection_status"` -} - -// Validators for a height -type ResultValidators struct { - BlockHeight int64 `json:"block_height"` - Validators []*types.Validator `json:"validators"` -} - -// Info about the consensus state. 
-// UNSTABLE -type ResultDumpConsensusState struct { - RoundState json.RawMessage `json:"round_state"` - Peers []PeerStateInfo `json:"peers"` -} - -// UNSTABLE -type PeerStateInfo struct { - NodeAddress string `json:"node_address"` - PeerState json.RawMessage `json:"peer_state"` -} - -// UNSTABLE -type ResultConsensusState struct { - RoundState json.RawMessage `json:"round_state"` -} - -// CheckTx result -type ResultBroadcastTx struct { - Code uint32 `json:"code"` - Data cmn.HexBytes `json:"data"` - Log string `json:"log"` - - Hash cmn.HexBytes `json:"hash"` -} - -// CheckTx and DeliverTx results -type ResultBroadcastTxCommit struct { - CheckTx abci.ResponseCheckTx `json:"check_tx"` - DeliverTx abci.ResponseDeliverTx `json:"deliver_tx"` - Hash cmn.HexBytes `json:"hash"` - Height int64 `json:"height"` -} - -// Result of querying for a tx -type ResultTx struct { - Hash cmn.HexBytes `json:"hash"` - Height int64 `json:"height"` - Index uint32 `json:"index"` - TxResult abci.ResponseDeliverTx `json:"tx_result"` - Tx types.Tx `json:"tx"` - Proof types.TxProof `json:"proof,omitempty"` -} - -// Result of searching for txs -type ResultTxSearch struct { - Txs []*ResultTx `json:"txs"` - TotalCount int `json:"total_count"` -} - -// List of mempool txs -type ResultUnconfirmedTxs struct { - N int `json:"n_txs"` - Txs []types.Tx `json:"txs"` -} - -// Info abci msg -type ResultABCIInfo struct { - Response abci.ResponseInfo `json:"response"` -} - -// Query abci msg -type ResultABCIQuery struct { - Response abci.ResponseQuery `json:"response"` -} - -// empty results -type ( - ResultUnsafeFlushMempool struct{} - ResultUnsafeProfile struct{} - ResultSubscribe struct{} - ResultUnsubscribe struct{} - ResultHealth struct{} -) - -// Event data from a subscription -type ResultEvent struct { - Query string `json:"query"` - Data types.TMEventData `json:"data"` -} diff --git a/rpc/core/types/responses_test.go b/rpc/core/types/responses_test.go deleted file mode 100644 index e410d47a..00000000 --- a/rpc/core/types/responses_test.go +++ /dev/null @@ -1,39 +0,0 @@ -package core_types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - - "github.com/tendermint/tendermint/p2p" -) - -func TestStatusIndexer(t *testing.T) { - assert := assert.New(t) - - var status *ResultStatus - assert.False(status.TxIndexEnabled()) - - status = &ResultStatus{} - assert.False(status.TxIndexEnabled()) - - status.NodeInfo = p2p.NodeInfo{} - assert.False(status.TxIndexEnabled()) - - cases := []struct { - expected bool - other []string - }{ - {false, nil}, - {false, []string{}}, - {false, []string{"a=b"}}, - {false, []string{"tx_indexiskv", "some=dood"}}, - {true, []string{"tx_index=on", "tx_index=other"}}, - {true, []string{"^(*^(", "tx_index=on", "a=n=b=d="}}, - } - - for _, tc := range cases { - status.NodeInfo.Other = tc.other - assert.Equal(tc.expected, status.TxIndexEnabled()) - } -} diff --git a/rpc/core/types/wire.go b/rpc/core/types/wire.go deleted file mode 100644 index 6648364b..00000000 --- a/rpc/core/types/wire.go +++ /dev/null @@ -1,13 +0,0 @@ -package core_types - -import ( - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" - "github.com/tendermint/tendermint/types" -) - -func RegisterAmino(cdc *amino.Codec) { - types.RegisterEventDatas(cdc) - types.RegisterEvidences(cdc) - crypto.RegisterAmino(cdc) -} diff --git a/rpc/core/version.go b/rpc/core/version.go deleted file mode 100644 index e283de47..00000000 --- a/rpc/core/version.go +++ /dev/null @@ -1,5 +0,0 @@ -package core - -// a single integer 
is sufficient here - -const Version = "3" // rpc routes for profiling, setting config diff --git a/rpc/grpc/api.go b/rpc/grpc/api.go deleted file mode 100644 index c0a92004..00000000 --- a/rpc/grpc/api.go +++ /dev/null @@ -1,36 +0,0 @@ -package core_grpc - -import ( - "context" - - abci "github.com/tendermint/abci/types" - core "github.com/tendermint/tendermint/rpc/core" -) - -type broadcastAPI struct { -} - -func (bapi *broadcastAPI) Ping(ctx context.Context, req *RequestPing) (*ResponsePing, error) { - // kvstore so we can check if the server is up - return &ResponsePing{}, nil -} - -func (bapi *broadcastAPI) BroadcastTx(ctx context.Context, req *RequestBroadcastTx) (*ResponseBroadcastTx, error) { - res, err := core.BroadcastTxCommit(req.Tx) - if err != nil { - return nil, err - } - return &ResponseBroadcastTx{ - - CheckTx: &abci.ResponseCheckTx{ - Code: res.CheckTx.Code, - Data: res.CheckTx.Data, - Log: res.CheckTx.Log, - }, - DeliverTx: &abci.ResponseDeliverTx{ - Code: res.DeliverTx.Code, - Data: res.DeliverTx.Data, - Log: res.DeliverTx.Log, - }, - }, nil -} diff --git a/rpc/grpc/client_server.go b/rpc/grpc/client_server.go deleted file mode 100644 index 80d736f5..00000000 --- a/rpc/grpc/client_server.go +++ /dev/null @@ -1,44 +0,0 @@ -package core_grpc - -import ( - "fmt" - "net" - "strings" - "time" - - "google.golang.org/grpc" - - cmn "github.com/tendermint/tmlibs/common" -) - -// Start the grpcServer in a go routine -func StartGRPCServer(protoAddr string) (net.Listener, error) { - parts := strings.SplitN(protoAddr, "://", 2) - if len(parts) != 2 { - return nil, fmt.Errorf("Invalid listen address for grpc server (did you forget a tcp:// prefix?) : %s", protoAddr) - } - proto, addr := parts[0], parts[1] - ln, err := net.Listen(proto, addr) - if err != nil { - return nil, err - } - - grpcServer := grpc.NewServer() - RegisterBroadcastAPIServer(grpcServer, &broadcastAPI{}) - go grpcServer.Serve(ln) // nolint: errcheck - - return ln, nil -} - -// Start the client by dialing the server -func StartGRPCClient(protoAddr string) BroadcastAPIClient { - conn, err := grpc.Dial(protoAddr, grpc.WithInsecure(), grpc.WithDialer(dialerFunc)) - if err != nil { - panic(err) - } - return NewBroadcastAPIClient(conn) -} - -func dialerFunc(addr string, timeout time.Duration) (net.Conn, error) { - return cmn.Connect(addr) -} diff --git a/rpc/grpc/compile.sh b/rpc/grpc/compile.sh deleted file mode 100644 index 2c4629c8..00000000 --- a/rpc/grpc/compile.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash - -protoc --go_out=plugins=grpc:. -I $GOPATH/src/ -I . 
types.proto diff --git a/rpc/grpc/grpc_test.go b/rpc/grpc/grpc_test.go deleted file mode 100644 index 20b3ab7b..00000000 --- a/rpc/grpc/grpc_test.go +++ /dev/null @@ -1,33 +0,0 @@ -package core_grpc_test - -import ( - "context" - "os" - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/rpc/grpc" - "github.com/tendermint/tendermint/rpc/test" -) - -func TestMain(m *testing.M) { - // start a tendermint node in the background to test against - app := kvstore.NewKVStoreApplication() - node := rpctest.StartTendermint(app) - code := m.Run() - - // and shut down properly at the end - node.Stop() - node.Wait() - os.Exit(code) -} - -func TestBroadcastTx(t *testing.T) { - require := require.New(t) - res, err := rpctest.GetGRPCClient().BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{Tx: []byte("this is a tx")}) - require.Nil(err, "%+v", err) - require.EqualValues(0, res.CheckTx.Code) - require.EqualValues(0, res.DeliverTx.Code) -} diff --git a/rpc/grpc/types.pb.go b/rpc/grpc/types.pb.go deleted file mode 100644 index cf7a5ec7..00000000 --- a/rpc/grpc/types.pb.go +++ /dev/null @@ -1,230 +0,0 @@ -// Code generated by protoc-gen-go. -// source: types.proto -// DO NOT EDIT! - -/* -Package core_grpc is a generated protocol buffer package. - -It is generated from these files: - types.proto - -It has these top-level messages: - RequestPing - RequestBroadcastTx - ResponsePing - ResponseBroadcastTx -*/ -package core_grpc - -import proto "github.com/golang/protobuf/proto" -import fmt "fmt" -import math "math" -import types "github.com/tendermint/abci/types" - -import ( - "context" - - grpc "google.golang.org/grpc" -) - -// Reference imports to suppress errors if they are not otherwise used. -var _ = proto.Marshal -var _ = fmt.Errorf -var _ = math.Inf - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the proto package it is being compiled against. -// A compilation error at this line likely means your copy of the -// proto package needs to be updated. 
-const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package - -type RequestPing struct { -} - -func (m *RequestPing) Reset() { *m = RequestPing{} } -func (m *RequestPing) String() string { return proto.CompactTextString(m) } -func (*RequestPing) ProtoMessage() {} -func (*RequestPing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } - -type RequestBroadcastTx struct { - Tx []byte `protobuf:"bytes,1,opt,name=tx,proto3" json:"tx,omitempty"` -} - -func (m *RequestBroadcastTx) Reset() { *m = RequestBroadcastTx{} } -func (m *RequestBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*RequestBroadcastTx) ProtoMessage() {} -func (*RequestBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } - -func (m *RequestBroadcastTx) GetTx() []byte { - if m != nil { - return m.Tx - } - return nil -} - -type ResponsePing struct { -} - -func (m *ResponsePing) Reset() { *m = ResponsePing{} } -func (m *ResponsePing) String() string { return proto.CompactTextString(m) } -func (*ResponsePing) ProtoMessage() {} -func (*ResponsePing) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } - -type ResponseBroadcastTx struct { - CheckTx *types.ResponseCheckTx `protobuf:"bytes,1,opt,name=check_tx,json=checkTx" json:"check_tx,omitempty"` - DeliverTx *types.ResponseDeliverTx `protobuf:"bytes,2,opt,name=deliver_tx,json=deliverTx" json:"deliver_tx,omitempty"` -} - -func (m *ResponseBroadcastTx) Reset() { *m = ResponseBroadcastTx{} } -func (m *ResponseBroadcastTx) String() string { return proto.CompactTextString(m) } -func (*ResponseBroadcastTx) ProtoMessage() {} -func (*ResponseBroadcastTx) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } - -func (m *ResponseBroadcastTx) GetCheckTx() *types.ResponseCheckTx { - if m != nil { - return m.CheckTx - } - return nil -} - -func (m *ResponseBroadcastTx) GetDeliverTx() *types.ResponseDeliverTx { - if m != nil { - return m.DeliverTx - } - return nil -} - -func init() { - proto.RegisterType((*RequestPing)(nil), "core_grpc.RequestPing") - proto.RegisterType((*RequestBroadcastTx)(nil), "core_grpc.RequestBroadcastTx") - proto.RegisterType((*ResponsePing)(nil), "core_grpc.ResponsePing") - proto.RegisterType((*ResponseBroadcastTx)(nil), "core_grpc.ResponseBroadcastTx") -} - -// Reference imports to suppress errors if they are not otherwise used. -var _ context.Context -var _ grpc.ClientConn - -// This is a compile-time assertion to ensure that this generated file -// is compatible with the grpc package it is being compiled against. -const _ = grpc.SupportPackageIsVersion4 - -// Client API for BroadcastAPI service - -type BroadcastAPIClient interface { - Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) - BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) -} - -type broadcastAPIClient struct { - cc *grpc.ClientConn -} - -func NewBroadcastAPIClient(cc *grpc.ClientConn) BroadcastAPIClient { - return &broadcastAPIClient{cc} -} - -func (c *broadcastAPIClient) Ping(ctx context.Context, in *RequestPing, opts ...grpc.CallOption) (*ResponsePing, error) { - out := new(ResponsePing) - err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/Ping", in, out, c.cc, opts...) 
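-	// (grpc.Invoke is synchronous: any transport or server error surfaces here)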
- if err != nil { - return nil, err - } - return out, nil -} - -func (c *broadcastAPIClient) BroadcastTx(ctx context.Context, in *RequestBroadcastTx, opts ...grpc.CallOption) (*ResponseBroadcastTx, error) { - out := new(ResponseBroadcastTx) - err := grpc.Invoke(ctx, "/core_grpc.BroadcastAPI/BroadcastTx", in, out, c.cc, opts...) - if err != nil { - return nil, err - } - return out, nil -} - -// Server API for BroadcastAPI service - -type BroadcastAPIServer interface { - Ping(context.Context, *RequestPing) (*ResponsePing, error) - BroadcastTx(context.Context, *RequestBroadcastTx) (*ResponseBroadcastTx, error) -} - -func RegisterBroadcastAPIServer(s *grpc.Server, srv BroadcastAPIServer) { - s.RegisterService(&_BroadcastAPI_serviceDesc, srv) -} - -func _BroadcastAPI_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestPing) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).Ping(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core_grpc.BroadcastAPI/Ping", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).Ping(ctx, req.(*RequestPing)) - } - return interceptor(ctx, in, info, handler) -} - -func _BroadcastAPI_BroadcastTx_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(RequestBroadcastTx) - if err := dec(in); err != nil { - return nil, err - } - if interceptor == nil { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, in) - } - info := &grpc.UnaryServerInfo{ - Server: srv, - FullMethod: "/core_grpc.BroadcastAPI/BroadcastTx", - } - handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(BroadcastAPIServer).BroadcastTx(ctx, req.(*RequestBroadcastTx)) - } - return interceptor(ctx, in, info, handler) -} - -var _BroadcastAPI_serviceDesc = grpc.ServiceDesc{ - ServiceName: "core_grpc.BroadcastAPI", - HandlerType: (*BroadcastAPIServer)(nil), - Methods: []grpc.MethodDesc{ - { - MethodName: "Ping", - Handler: _BroadcastAPI_Ping_Handler, - }, - { - MethodName: "BroadcastTx", - Handler: _BroadcastAPI_BroadcastTx_Handler, - }, - }, - Streams: []grpc.StreamDesc{}, - Metadata: "types.proto", -} - -func init() { proto.RegisterFile("types.proto", fileDescriptor0) } - -var fileDescriptor0 = []byte{ - // 264 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2e, 0xa9, 0x2c, 0x48, - 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x4c, 0xce, 0x2f, 0x4a, 0x8d, 0x4f, 0x2f, - 0x2a, 0x48, 0x96, 0xd2, 0x49, 0xcf, 0x2c, 0xc9, 0x28, 0x4d, 0xd2, 0x4b, 0xce, 0xcf, 0xd5, 0x2f, - 0x49, 0xcd, 0x4b, 0x49, 0x2d, 0xca, 0xcd, 0xcc, 0x2b, 0xd1, 0x4f, 0x4c, 0x4a, 0xce, 0xd4, 0x07, - 0x6b, 0xd1, 0x47, 0xd2, 0xa8, 0xc4, 0xcb, 0xc5, 0x1d, 0x94, 0x5a, 0x58, 0x9a, 0x5a, 0x5c, 0x12, - 0x90, 0x99, 0x97, 0xae, 0xa4, 0xc2, 0x25, 0x04, 0xe5, 0x3a, 0x15, 0xe5, 0x27, 0xa6, 0x24, 0x27, - 0x16, 0x97, 0x84, 0x54, 0x08, 0xf1, 0x71, 0x31, 0x95, 0x54, 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xf0, - 0x04, 0x31, 0x95, 0x54, 0x28, 0xf1, 0x71, 0xf1, 0x04, 0xa5, 0x16, 0x17, 0xe4, 0xe7, 0x15, 0xa7, - 0x82, 0x75, 0x35, 0x32, 0x72, 0x09, 0xc3, 0x04, 0x90, 0xf5, 0x19, 0x72, 0x71, 0x24, 0x67, 0xa4, - 0x26, 0x67, 0xc7, 0x43, 0x75, 0x73, 0x1b, 0x89, 0xe9, 0x41, 0x2c, 0x87, 0xa9, 0x76, 0x06, 0x49, - 
0x87, 0x54, 0x04, 0xb1, 0x27, 0x43, 0x18, 0x42, 0xe6, 0x5c, 0x5c, 0x29, 0xa9, 0x39, 0x99, 0x65, - 0xa9, 0x45, 0x20, 0x4d, 0x4c, 0x60, 0x4d, 0x12, 0x68, 0x9a, 0x5c, 0x20, 0x0a, 0x42, 0x2a, 0x82, - 0x38, 0x53, 0x60, 0x4c, 0xa3, 0xa9, 0x8c, 0x5c, 0x3c, 0x70, 0xbb, 0x1d, 0x03, 0x3c, 0x85, 0xcc, - 0xb9, 0x58, 0x40, 0x8e, 0x13, 0x12, 0xd3, 0x83, 0x87, 0x8d, 0x1e, 0x92, 0x57, 0xa5, 0xc4, 0x51, - 0xc4, 0x11, 0xbe, 0x11, 0xf2, 0xe1, 0xe2, 0x46, 0xf6, 0x84, 0x2c, 0xa6, 0x7e, 0x24, 0x69, 0x29, - 0x39, 0x2c, 0xc6, 0x20, 0xc9, 0x27, 0xb1, 0x81, 0xc3, 0xd9, 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, - 0x92, 0x29, 0xd9, 0x42, 0xaf, 0x01, 0x00, 0x00, -} diff --git a/rpc/grpc/types.proto b/rpc/grpc/types.proto deleted file mode 100644 index 35462594..00000000 --- a/rpc/grpc/types.proto +++ /dev/null @@ -1,36 +0,0 @@ -syntax = "proto3"; -package core_grpc; - -import "github.com/tendermint/abci/types/types.proto"; - -//---------------------------------------- -// Message types - -//---------------------------------------- -// Request types - -message RequestPing { -} - -message RequestBroadcastTx { - bytes tx = 1; -} - -//---------------------------------------- -// Response types - -message ResponsePing{ -} - -message ResponseBroadcastTx{ - types.ResponseCheckTx check_tx = 1; - types.ResponseDeliverTx deliver_tx = 2; -} - -//---------------------------------------- -// Service Definition - -service BroadcastAPI { - rpc Ping(RequestPing) returns (ResponsePing) ; - rpc BroadcastTx(RequestBroadcastTx) returns (ResponseBroadcastTx) ; -} diff --git a/rpc/lib/client/args_test.go b/rpc/lib/client/args_test.go deleted file mode 100644 index 4442ac2b..00000000 --- a/rpc/lib/client/args_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package rpcclient - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/go-amino" -) - -type Tx []byte - -type Foo struct { - Bar int - Baz string -} - -func TestArgToJSON(t *testing.T) { - assert := assert.New(t) - require := require.New(t) - - cases := []struct { - input interface{} - expected string - }{ - {[]byte("1234"), "0x31323334"}, - {Tx("654"), "0x363534"}, - {Foo{7, "hello"}, `{"Bar":7,"Baz":"hello"}`}, - } - - cdc := amino.NewCodec() - - for i, tc := range cases { - args := map[string]interface{}{"data": tc.input} - err := argsToJSON(cdc, args) - require.Nil(err, "%d: %+v", i, err) - require.Equal(1, len(args), "%d", i) - data, ok := args["data"].(string) - require.True(ok, "%d: %#v", i, args["data"]) - assert.Equal(tc.expected, data, "%d", i) - } -} diff --git a/rpc/lib/client/http_client.go b/rpc/lib/client/http_client.go deleted file mode 100644 index e26d8f27..00000000 --- a/rpc/lib/client/http_client.go +++ /dev/null @@ -1,219 +0,0 @@ -package rpcclient - -import ( - "bytes" - "encoding/json" - "fmt" - "io/ioutil" - "net" - "net/http" - "net/url" - "reflect" - "strings" - - "github.com/pkg/errors" - "github.com/tendermint/go-amino" - - types "github.com/tendermint/tendermint/rpc/lib/types" -) - -// HTTPClient is a common interface for JSONRPCClient and URIClient. 
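-// JSONRPCClient POSTs a JSON-RPC request body to the server root, while
-// URIClient posts URL-encoded params to "/<method>"; both satisfy this
-// interface (see below).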
-type HTTPClient interface { - Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) - Codec() *amino.Codec - SetCodec(*amino.Codec) -} - -// TODO: Deprecate support for IP:PORT or /path/to/socket -func makeHTTPDialer(remoteAddr string) (string, func(string, string) (net.Conn, error)) { - parts := strings.SplitN(remoteAddr, "://", 2) - var protocol, address string - if len(parts) == 1 { - // default to tcp if nothing specified - protocol, address = "tcp", remoteAddr - } else if len(parts) == 2 { - protocol, address = parts[0], parts[1] - } else { - // return an invalid message - msg := fmt.Sprintf("Invalid addr: %s", remoteAddr) - return msg, func(_ string, _ string) (net.Conn, error) { - return nil, errors.New(msg) - } - } - // accept http as an alias for tcp - if protocol == "http" { - protocol = "tcp" - } - - // replace / with . for http requests (kvstore domain) - trimmedAddress := strings.Replace(address, "/", ".", -1) - return trimmedAddress, func(proto, addr string) (net.Conn, error) { - return net.Dial(protocol, address) - } -} - -// We overwrite the http.Client.Dial so we can do http over tcp or unix. -// remoteAddr should be fully featured (eg. with tcp:// or unix://) -func makeHTTPClient(remoteAddr string) (string, *http.Client) { - address, dialer := makeHTTPDialer(remoteAddr) - return "http://" + address, &http.Client{ - Transport: &http.Transport{ - Dial: dialer, - }, - } -} - -//------------------------------------------------------------------------------------ - -// JSONRPCClient takes params as a map -type JSONRPCClient struct { - address string - client *http.Client - cdc *amino.Codec -} - -// NewJSONRPCClient returns a JSONRPCClient pointed at the given address. -func NewJSONRPCClient(remote string) *JSONRPCClient { - address, client := makeHTTPClient(remote) - return &JSONRPCClient{ - address: address, - client: client, - cdc: amino.NewCodec(), - } -} - -func (c *JSONRPCClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - request, err := types.MapToRequest(c.cdc, "jsonrpc-client", method, params) - if err != nil { - return nil, err - } - requestBytes, err := json.Marshal(request) - if err != nil { - return nil, err - } - // log.Info(string(requestBytes)) - requestBuf := bytes.NewBuffer(requestBytes) - // log.Info(Fmt("RPC request to %v (%v): %v", c.remote, method, string(requestBytes))) - httpResponse, err := c.client.Post(c.address, "text/json", requestBuf) - if err != nil { - return nil, err - } - defer httpResponse.Body.Close() // nolint: errcheck - - responseBytes, err := ioutil.ReadAll(httpResponse.Body) - if err != nil { - return nil, err - } - // log.Info(Fmt("RPC response: %v", string(responseBytes))) - return unmarshalResponseBytes(c.cdc, responseBytes, result) -} - -func (c *JSONRPCClient) Codec() *amino.Codec { - return c.cdc -} - -func (c *JSONRPCClient) SetCodec(cdc *amino.Codec) { - c.cdc = cdc -} - -//------------------------------------------------------------- - -// URIClient takes params as a map -type URIClient struct { - address string - client *http.Client - cdc *amino.Codec -} - -func NewURIClient(remote string) *URIClient { - address, client := makeHTTPClient(remote) - return &URIClient{ - address: address, - client: client, - cdc: amino.NewCodec(), - } -} - -func (c *URIClient) Call(method string, params map[string]interface{}, result interface{}) (interface{}, error) { - values, err := argsToURLValues(c.cdc, params) - if err != nil { - return nil, err - } - // 
log.Info(Fmt("URI request to %v (%v): %v", c.address, method, values)) - resp, err := c.client.PostForm(c.address+"/"+method, values) - if err != nil { - return nil, err - } - defer resp.Body.Close() // nolint: errcheck - - responseBytes, err := ioutil.ReadAll(resp.Body) - if err != nil { - return nil, err - } - return unmarshalResponseBytes(c.cdc, responseBytes, result) -} - -func (c *URIClient) Codec() *amino.Codec { - return c.cdc -} - -func (c *URIClient) SetCodec(cdc *amino.Codec) { - c.cdc = cdc -} - -//------------------------------------------------ - -func unmarshalResponseBytes(cdc *amino.Codec, responseBytes []byte, result interface{}) (interface{}, error) { - // Read response. If rpc/core/types is imported, the result will unmarshal - // into the correct type. - // log.Notice("response", "response", string(responseBytes)) - var err error - response := &types.RPCResponse{} - err = json.Unmarshal(responseBytes, response) - if err != nil { - return nil, errors.Errorf("Error unmarshalling rpc response: %v", err) - } - if response.Error != nil { - return nil, errors.Errorf("Response error: %v", response.Error) - } - // Unmarshal the RawMessage into the result. - err = cdc.UnmarshalJSON(response.Result, result) - if err != nil { - return nil, errors.Errorf("Error unmarshalling rpc response result: %v", err) - } - return result, nil -} - -func argsToURLValues(cdc *amino.Codec, args map[string]interface{}) (url.Values, error) { - values := make(url.Values) - if len(args) == 0 { - return values, nil - } - err := argsToJSON(cdc, args) - if err != nil { - return nil, err - } - for key, val := range args { - values.Set(key, val.(string)) - } - return values, nil -} - -func argsToJSON(cdc *amino.Codec, args map[string]interface{}) error { - for k, v := range args { - rt := reflect.TypeOf(v) - isByteSlice := rt.Kind() == reflect.Slice && rt.Elem().Kind() == reflect.Uint8 - if isByteSlice { - bytes := reflect.ValueOf(v).Bytes() - args[k] = fmt.Sprintf("0x%X", bytes) - continue - } - - data, err := cdc.MarshalJSON(v) - if err != nil { - return err - } - args[k] = string(data) - } - return nil -} diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go deleted file mode 100644 index d3d99337..00000000 --- a/rpc/lib/client/integration_test.go +++ /dev/null @@ -1,66 +0,0 @@ -// +build release - -// The code in here is comprehensive as an integration -// test and is long, hence is only run before releases. - -package rpcclient - -import ( - "bytes" - "errors" - "net" - "regexp" - "testing" - "time" - - "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" -) - -func TestWSClientReconnectWithJitter(t *testing.T) { - n := 8 - maxReconnectAttempts := 3 - // Max wait time is ceil(1+0.999) + ceil(2+0.999) + ceil(4+0.999) + ceil(...) = 2 + 3 + 5 = 10s + ... 
- maxSleepTime := time.Second * time.Duration(((1< c.maxReconnectAttempts { - return errors.Wrap(err, "reached maximum reconnect attempts") - } - } -} - -func (c *WSClient) startReadWriteRoutines() { - c.wg.Add(2) - c.readRoutineQuit = make(chan struct{}) - go c.readRoutine() - go c.writeRoutine() -} - -func (c *WSClient) processBacklog() error { - select { - case request := <-c.backlog: - if c.writeWait > 0 { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { - c.Logger.Error("failed to set write deadline", "err", err) - } - } - if err := c.conn.WriteJSON(request); err != nil { - c.Logger.Error("failed to resend request", "err", err) - c.reconnectAfter <- err - // requeue request - c.backlog <- request - return err - } - c.Logger.Info("resend a request", "req", request) - default: - } - return nil -} - -func (c *WSClient) reconnectRoutine() { - for { - select { - case originalError := <-c.reconnectAfter: - // wait until writeRoutine and readRoutine finish - c.wg.Wait() - if err := c.reconnect(); err != nil { - c.Logger.Error("failed to reconnect", "err", err, "original_err", originalError) - c.Stop() - return - } - // drain reconnectAfter - LOOP: - for { - select { - case <-c.reconnectAfter: - default: - break LOOP - } - } - err := c.processBacklog() - if err == nil { - c.startReadWriteRoutines() - } - - case <-c.Quit(): - return - } - } -} - -// The client ensures that there is at most one writer to a connection by -// executing all writes from this goroutine. -func (c *WSClient) writeRoutine() { - var ticker *time.Ticker - if c.pingPeriod > 0 { - // ticker with a predefined period - ticker = time.NewTicker(c.pingPeriod) - } else { - // ticker that never fires - ticker = &time.Ticker{C: make(<-chan time.Time)} - } - - defer func() { - ticker.Stop() - if err := c.conn.Close(); err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - } - c.wg.Done() - }() - - for { - select { - case request := <-c.send: - if c.writeWait > 0 { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { - c.Logger.Error("failed to set write deadline", "err", err) - } - } - if err := c.conn.WriteJSON(request); err != nil { - c.Logger.Error("failed to send request", "err", err) - c.reconnectAfter <- err - // add request to the backlog, so we don't lose it - c.backlog <- request - return - } - case <-ticker.C: - if c.writeWait > 0 { - if err := c.conn.SetWriteDeadline(time.Now().Add(c.writeWait)); err != nil { - c.Logger.Error("failed to set write deadline", "err", err) - } - } - if err := c.conn.WriteMessage(websocket.PingMessage, []byte{}); err != nil { - c.Logger.Error("failed to write ping", "err", err) - c.reconnectAfter <- err - return - } - c.mtx.Lock() - c.sentLastPingAt = time.Now() - c.mtx.Unlock() - c.Logger.Debug("sent ping") - case <-c.readRoutineQuit: - return - case <-c.Quit(): - if err := c.conn.WriteMessage(websocket.CloseMessage, websocket.FormatCloseMessage(websocket.CloseNormalClosure, "")); err != nil { - c.Logger.Error("failed to write message", "err", err) - } - return - } - } -} - -// The client ensures that there is at most one reader to a connection by -// executing all reads from this goroutine. 
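-// (This mirrors writeRoutine above: a gorilla/websocket connection supports at
-// most one concurrent reader and one concurrent writer, so all reads happen in
-// this single goroutine.)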
-func (c *WSClient) readRoutine() { - defer func() { - if err := c.conn.Close(); err != nil { - // ignore error; it will trigger in tests - // likely because it's closing an already closed connection - } - c.wg.Done() - }() - - c.conn.SetPongHandler(func(string) error { - // gather latency stats - c.mtx.RLock() - t := c.sentLastPingAt - c.mtx.RUnlock() - c.PingPongLatencyTimer.UpdateSince(t) - - c.Logger.Debug("got pong") - return nil - }) - - for { - // reset deadline for every message type (control or data) - if c.readWait > 0 { - if err := c.conn.SetReadDeadline(time.Now().Add(c.readWait)); err != nil { - c.Logger.Error("failed to set read deadline", "err", err) - } - } - _, data, err := c.conn.ReadMessage() - if err != nil { - if !websocket.IsUnexpectedCloseError(err, websocket.CloseNormalClosure) { - return - } - - c.Logger.Error("failed to read response", "err", err) - close(c.readRoutineQuit) - c.reconnectAfter <- err - return - } - - var response types.RPCResponse - err = json.Unmarshal(data, &response) - if err != nil { - c.Logger.Error("failed to parse response", "err", err, "data", string(data)) - continue - } - c.Logger.Info("got response", "resp", response.Result) - // Combine a non-blocking read on BaseService.Quit with a non-blocking write on ResponsesCh to avoid blocking - // c.wg.Wait() in c.Stop(). Note we rely on Quit being closed so that it sends unlimited Quit signals to stop - // both readRoutine and writeRoutine - select { - case <-c.Quit(): - case c.ResponsesCh <- response: - } - } -} - -/////////////////////////////////////////////////////////////////////////////// -// Predefined methods - -// Subscribe to a query. Note the server must have a "subscribe" route -// defined. -func (c *WSClient) Subscribe(ctx context.Context, query string) error { - params := map[string]interface{}{"query": query} - return c.Call(ctx, "subscribe", params) -} - -// Unsubscribe from a query. Note the server must have an "unsubscribe" route -// defined. -func (c *WSClient) Unsubscribe(ctx context.Context, query string) error { - params := map[string]interface{}{"query": query} - return c.Call(ctx, "unsubscribe", params) -} - -// UnsubscribeAll unsubscribes from all queries. Note the server must have an "unsubscribe_all" route -// defined. 
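-//
-// A minimal usage sketch, assuming a server exposing the pubsub routes and the
-// Tendermint query syntax (the query string here is illustrative):
-//
-//	if err := c.Subscribe(ctx, "tm.event = 'NewBlock'"); err != nil { ... }
-//	// ... consume events from c.ResponsesCh ...
-//	if err := c.UnsubscribeAll(ctx); err != nil { ... }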
-func (c *WSClient) UnsubscribeAll(ctx context.Context) error { - params := map[string]interface{}{} - return c.Call(ctx, "unsubscribe_all", params) -} diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go deleted file mode 100644 index 73f67160..00000000 --- a/rpc/lib/client/ws_client_test.go +++ /dev/null @@ -1,224 +0,0 @@ -package rpcclient - -import ( - "context" - "encoding/json" - "net" - "net/http" - "net/http/httptest" - "sync" - "testing" - "time" - - "github.com/gorilla/websocket" - "github.com/stretchr/testify/require" - "github.com/tendermint/tmlibs/log" - - types "github.com/tendermint/tendermint/rpc/lib/types" -) - -var wsCallTimeout = 5 * time.Second - -type myHandler struct { - closeConnAfterRead bool - mtx sync.RWMutex -} - -var upgrader = websocket.Upgrader{ - ReadBufferSize: 1024, - WriteBufferSize: 1024, -} - -func (h *myHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - conn, err := upgrader.Upgrade(w, r, nil) - if err != nil { - panic(err) - } - defer conn.Close() // nolint: errcheck - for { - messageType, _, err := conn.ReadMessage() - if err != nil { - return - } - - h.mtx.RLock() - if h.closeConnAfterRead { - if err := conn.Close(); err != nil { - panic(err) - } - } - h.mtx.RUnlock() - - res := json.RawMessage(`{}`) - emptyRespBytes, _ := json.Marshal(types.RPCResponse{Result: res}) - if err := conn.WriteMessage(messageType, emptyRespBytes); err != nil { - return - } - } -} - -func TestWSClientReconnectsAfterReadFailure(t *testing.T) { - var wg sync.WaitGroup - - // start server - h := &myHandler{} - s := httptest.NewServer(h) - defer s.Close() - - c := startClient(t, s.Listener.Addr()) - defer c.Stop() - - wg.Add(1) - go callWgDoneOnResult(t, c, &wg) - - h.mtx.Lock() - h.closeConnAfterRead = true - h.mtx.Unlock() - - // results in WS read error, no send retry because write succeeded - call(t, "a", c) - - // expect to reconnect almost immediately - time.Sleep(10 * time.Millisecond) - h.mtx.Lock() - h.closeConnAfterRead = false - h.mtx.Unlock() - - // should succeed - call(t, "b", c) - - wg.Wait() -} - -func TestWSClientReconnectsAfterWriteFailure(t *testing.T) { - var wg sync.WaitGroup - - // start server - h := &myHandler{} - s := httptest.NewServer(h) - - c := startClient(t, s.Listener.Addr()) - defer c.Stop() - - wg.Add(2) - go callWgDoneOnResult(t, c, &wg) - - // hacky way to abort the connection before write - if err := c.conn.Close(); err != nil { - t.Error(err) - } - - // results in WS write error, the client should resend on reconnect - call(t, "a", c) - - // expect to reconnect almost immediately - time.Sleep(10 * time.Millisecond) - - // should succeed - call(t, "b", c) - - wg.Wait() -} - -func TestWSClientReconnectFailure(t *testing.T) { - // start server - h := &myHandler{} - s := httptest.NewServer(h) - - c := startClient(t, s.Listener.Addr()) - defer c.Stop() - - go func() { - for { - select { - case <-c.ResponsesCh: - case <-c.Quit(): - return - } - } - }() - - // hacky way to abort the connection before write - if err := c.conn.Close(); err != nil { - t.Error(err) - } - s.Close() - - // results in WS write error - // provide timeout to avoid blocking - ctx, cancel := context.WithTimeout(context.Background(), wsCallTimeout) - defer cancel() - if err := c.Call(ctx, "a", make(map[string]interface{})); err != nil { - t.Error(err) - } - - // expect to reconnect almost immediately - time.Sleep(10 * time.Millisecond) - - done := make(chan struct{}) - go func() { - // client should block on this - call(t, "b", c) - 
close(done) - }() - - // test that client blocks on the second send - select { - case <-done: - t.Fatal("client should block on calling 'b' during reconnect") - case <-time.After(5 * time.Second): - t.Log("All good") - } -} - -func TestNotBlockingOnStop(t *testing.T) { - timeout := 2 * time.Second - s := httptest.NewServer(&myHandler{}) - c := startClient(t, s.Listener.Addr()) - c.Call(context.Background(), "a", make(map[string]interface{})) - // Let the readRoutine get around to blocking - time.Sleep(time.Second) - passCh := make(chan struct{}) - go func() { - // Unless we have a non-blocking write to ResponsesCh from readRoutine - // this blocks forever on the waitgroup - c.Stop() - passCh <- struct{}{} - }() - select { - case <-passCh: - // Pass - case <-time.After(timeout): - t.Fatalf("WSClient failed to stop within %v seconds - is one of the read/write routines blocking?", - timeout.Seconds()) - } -} - -func startClient(t *testing.T, addr net.Addr) *WSClient { - c := NewWSClient(addr.String(), "/websocket") - err := c.Start() - require.Nil(t, err) - c.SetLogger(log.TestingLogger()) - return c -} - -func call(t *testing.T, method string, c *WSClient) { - err := c.Call(context.Background(), method, make(map[string]interface{})) - require.NoError(t, err) -} - -func callWgDoneOnResult(t *testing.T, c *WSClient, wg *sync.WaitGroup) { - for { - select { - case resp := <-c.ResponsesCh: - if resp.Error != nil { - t.Fatalf("unexpected error: %v", resp.Error) - } - if resp.Result != nil { - wg.Done() - } - case <-c.Quit(): - return - } - } -} diff --git a/rpc/lib/doc.go b/rpc/lib/doc.go deleted file mode 100644 index 2bc43859..00000000 --- a/rpc/lib/doc.go +++ /dev/null @@ -1,103 +0,0 @@ -/* -HTTP RPC server supporting calls via uri params, jsonrpc, and jsonrpc over websockets - -# Client Requests - -Suppose we want to expose the rpc function `HelloWorld(name string, num int)`. - -## GET (URI) - -As a GET request, it would have URI encoded parameters, and look like: - -``` -curl 'http://localhost:8008/hello_world?name="my_world"&num=5' -``` - -Note the `'` around the url, which is just so bash doesn't strip the quotes from `"my_world"`. -This should also work (the `&` may need escaping in your shell): - -``` -curl http://localhost:8008/hello_world?name=\"my_world\"&num=5 -``` - -A GET request to `/` returns a list of available endpoints. -For those which take arguments, the arguments will be listed in order, with `_` where the actual value should be. - -## POST (JSONRPC) - -As a POST request, we use JSONRPC. For instance, the same request would have this as the body: - -``` -{ - "jsonrpc": "2.0", - "id": "anything", - "method": "hello_world", - "params": { - "name": "my_world", - "num": 5 - } -} -``` - -With the above saved in file `data.json`, we can make the request with - -``` -curl --data @data.json http://localhost:8008 -``` - -## WebSocket (JSONRPC) - -All requests are exposed over websocket in the same form as the POST JSONRPC. -Websocket connections are available at their own endpoint, typically `/websocket`, -though this is configurable when starting the server. 
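-
-For example, a minimal Go client sketch using the gorilla/websocket package
-(a sketch, assuming a server on localhost:8008 with the default `/websocket`
-endpoint):
-
-```
-conn, _, err := websocket.DefaultDialer.Dial("ws://localhost:8008/websocket", nil)
-if err != nil {
-	panic(err)
-}
-defer conn.Close()
-
-req := []byte(`{"jsonrpc": "2.0", "id": "anything", "method": "hello_world", "params": {"name": "my_world", "num": 5}}`)
-if err := conn.WriteMessage(websocket.TextMessage, req); err != nil {
-	panic(err)
-}
-_, resp, err := conn.ReadMessage()
-if err != nil {
-	panic(err)
-}
-fmt.Println(string(resp))
-```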
- -# Server Definition - -Define some types and routes: - -``` -type ResultStatus struct { - Value string -} - -// Define some routes -var Routes = map[string]*rpcserver.RPCFunc{ - "status": rpcserver.NewRPCFunc(Status, "arg"), -} - -// an rpc function -func Status(v string) (*ResultStatus, error) { - return &ResultStatus{v}, nil -} - -``` - -Now start the server: - -``` -mux := http.NewServeMux() -rpcserver.RegisterRPCFuncs(mux, Routes) -wm := rpcserver.NewWebsocketManager(Routes) -mux.HandleFunc("/websocket", wm.WebsocketHandler) -logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) -go func() { - _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger) - if err != nil { - panic(err) - } -}() - -``` - -Note that unix sockets are supported as well (eg. `/path/to/socket` instead of `0.0.0.0:8008`) - -Now see all available endpoints by sending a GET request to `0.0.0.0:8008`. -Each route is available as a GET request, as a JSONRPCv2 POST request, and via JSONRPCv2 over websockets. - - -# Examples - -* [Tendermint](https://github.com/tendermint/tendermint/blob/master/rpc/core/routes.go) -* [tm-monitor](https://github.com/tendermint/tools/blob/master/tm-monitor/rpc.go) -*/ -package rpc diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go deleted file mode 100644 index f34b09f6..00000000 --- a/rpc/lib/rpc_test.go +++ /dev/null @@ -1,378 +0,0 @@ -package rpc - -import ( - "bytes" - "context" - crand "crypto/rand" - "encoding/json" - "fmt" - "math/rand" - "net/http" - "os" - "os/exec" - "testing" - "time" - - "github.com/go-kit/kit/log/term" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" - - client "github.com/tendermint/tendermint/rpc/lib/client" - server "github.com/tendermint/tendermint/rpc/lib/server" - types "github.com/tendermint/tendermint/rpc/lib/types" -) - -// Client and Server should work over tcp or unix sockets -const ( - tcpAddr = "tcp://0.0.0.0:47768" - - unixSocket = "/tmp/rpc_test.sock" - unixAddr = "unix://" + unixSocket - - websocketEndpoint = "/websocket/endpoint" -) - -type ResultEcho struct { - Value string `json:"value"` -} - -type ResultEchoInt struct { - Value int `json:"value"` -} - -type ResultEchoBytes struct { - Value []byte `json:"value"` -} - -type ResultEchoDataBytes struct { - Value cmn.HexBytes `json:"value"` -} - -// Define some routes -var Routes = map[string]*server.RPCFunc{ - "echo": server.NewRPCFunc(EchoResult, "arg"), - "echo_ws": server.NewWSRPCFunc(EchoWSResult, "arg"), - "echo_bytes": server.NewRPCFunc(EchoBytesResult, "arg"), - "echo_data_bytes": server.NewRPCFunc(EchoDataBytesResult, "arg"), - "echo_int": server.NewRPCFunc(EchoIntResult, "arg"), -} - -// Amino codec required to encode/decode everything above. 
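-// (The echo result types above are plain structs, so nothing extra needs to be
-// registered on it here; routes returning interface-typed values would register
-// their concrete types on this codec.)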
-var RoutesCdc = amino.NewCodec() - -func EchoResult(v string) (*ResultEcho, error) { - return &ResultEcho{v}, nil -} - -func EchoWSResult(wsCtx types.WSRPCContext, v string) (*ResultEcho, error) { - return &ResultEcho{v}, nil -} - -func EchoIntResult(v int) (*ResultEchoInt, error) { - return &ResultEchoInt{v}, nil -} - -func EchoBytesResult(v []byte) (*ResultEchoBytes, error) { - return &ResultEchoBytes{v}, nil -} - -func EchoDataBytesResult(v cmn.HexBytes) (*ResultEchoDataBytes, error) { - return &ResultEchoDataBytes{v}, nil -} - -func TestMain(m *testing.M) { - setup() - code := m.Run() - os.Exit(code) -} - -var colorFn = func(keyvals ...interface{}) term.FgBgColor { - for i := 0; i < len(keyvals)-1; i += 2 { - if keyvals[i] == "socket" { - if keyvals[i+1] == "tcp" { - return term.FgBgColor{Fg: term.DarkBlue} - } else if keyvals[i+1] == "unix" { - return term.FgBgColor{Fg: term.DarkCyan} - } - } - } - return term.FgBgColor{} -} - -// launch unix and tcp servers -func setup() { - logger := log.NewTMLoggerWithColorFn(log.NewSyncWriter(os.Stdout), colorFn) - - cmd := exec.Command("rm", "-f", unixSocket) - err := cmd.Start() - if err != nil { - panic(err) - } - if err = cmd.Wait(); err != nil { - panic(err) - } - - tcpLogger := logger.With("socket", "tcp") - mux := http.NewServeMux() - server.RegisterRPCFuncs(mux, Routes, RoutesCdc, tcpLogger) - wm := server.NewWebsocketManager(Routes, RoutesCdc, server.ReadWait(5*time.Second), server.PingPeriod(1*time.Second)) - wm.SetLogger(tcpLogger) - mux.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - go func() { - _, err := server.StartHTTPServer(tcpAddr, mux, tcpLogger) - if err != nil { - panic(err) - } - }() - - unixLogger := logger.With("socket", "unix") - mux2 := http.NewServeMux() - server.RegisterRPCFuncs(mux2, Routes, RoutesCdc, unixLogger) - wm = server.NewWebsocketManager(Routes, RoutesCdc) - wm.SetLogger(unixLogger) - mux2.HandleFunc(websocketEndpoint, wm.WebsocketHandler) - go func() { - _, err := server.StartHTTPServer(unixAddr, mux2, unixLogger) - if err != nil { - panic(err) - } - }() - - // wait for servers to start - time.Sleep(time.Second * 2) -} - -func echoViaHTTP(cl client.HTTPClient, val string) (string, error) { - params := map[string]interface{}{ - "arg": val, - } - result := new(ResultEcho) - if _, err := cl.Call("echo", params, result); err != nil { - return "", err - } - return result.Value, nil -} - -func echoIntViaHTTP(cl client.HTTPClient, val int) (int, error) { - params := map[string]interface{}{ - "arg": val, - } - result := new(ResultEchoInt) - if _, err := cl.Call("echo_int", params, result); err != nil { - return 0, err - } - return result.Value, nil -} - -func echoBytesViaHTTP(cl client.HTTPClient, bytes []byte) ([]byte, error) { - params := map[string]interface{}{ - "arg": bytes, - } - result := new(ResultEchoBytes) - if _, err := cl.Call("echo_bytes", params, result); err != nil { - return []byte{}, err - } - return result.Value, nil -} - -func echoDataBytesViaHTTP(cl client.HTTPClient, bytes cmn.HexBytes) (cmn.HexBytes, error) { - params := map[string]interface{}{ - "arg": bytes, - } - result := new(ResultEchoDataBytes) - if _, err := cl.Call("echo_data_bytes", params, result); err != nil { - return []byte{}, err - } - return result.Value, nil -} - -func testWithHTTPClient(t *testing.T, cl client.HTTPClient) { - val := "acbd" - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) - - val2 := randBytes(t) - got2, err := echoBytesViaHTTP(cl, val2) - require.Nil(t, err) - 
assert.Equal(t, got2, val2) - - val3 := cmn.HexBytes(randBytes(t)) - got3, err := echoDataBytesViaHTTP(cl, val3) - require.Nil(t, err) - assert.Equal(t, got3, val3) - - val4 := rand.Intn(10000) - got4, err := echoIntViaHTTP(cl, val4) - require.Nil(t, err) - assert.Equal(t, got4, val4) -} - -func echoViaWS(cl *client.WSClient, val string) (string, error) { - params := map[string]interface{}{ - "arg": val, - } - err := cl.Call(context.Background(), "echo", params) - if err != nil { - return "", err - } - - msg := <-cl.ResponsesCh - if msg.Error != nil { - return "", msg.Error - - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - if err != nil { - return "", err - } - return result.Value, nil -} - -func echoBytesViaWS(cl *client.WSClient, bytes []byte) ([]byte, error) { - params := map[string]interface{}{ - "arg": bytes, - } - err := cl.Call(context.Background(), "echo_bytes", params) - if err != nil { - return []byte{}, err - } - - msg := <-cl.ResponsesCh - if msg.Error != nil { - return []byte{}, msg.Error - - } - result := new(ResultEchoBytes) - err = json.Unmarshal(msg.Result, result) - if err != nil { - return []byte{}, err - } - return result.Value, nil -} - -func testWithWSClient(t *testing.T, cl *client.WSClient) { - val := "acbd" - got, err := echoViaWS(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) - - val2 := randBytes(t) - got2, err := echoBytesViaWS(cl, val2) - require.Nil(t, err) - assert.Equal(t, got2, val2) -} - -//------------- - -func TestServersAndClientsBasic(t *testing.T) { - serverAddrs := [...]string{tcpAddr, unixAddr} - for _, addr := range serverAddrs { - cl1 := client.NewURIClient(addr) - fmt.Printf("=== testing server on %s using %v client\n", addr, cl1) - testWithHTTPClient(t, cl1) - - cl2 := client.NewJSONRPCClient(addr) - fmt.Printf("=== testing server on %s using %v client\n", addr, cl2) - testWithHTTPClient(t, cl2) - - cl3 := client.NewWSClient(addr, websocketEndpoint) - cl3.SetLogger(log.TestingLogger()) - err := cl3.Start() - require.Nil(t, err) - fmt.Printf("=== testing server on %s using %v client\n", addr, cl3) - testWithWSClient(t, cl3) - cl3.Stop() - } -} - -func TestHexStringArg(t *testing.T) { - cl := client.NewURIClient(tcpAddr) - // should NOT be handled as hex - val := "0xabc" - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) -} - -func TestQuotedStringArg(t *testing.T) { - cl := client.NewURIClient(tcpAddr) - // should NOT be unquoted - val := "\"abc\"" - got, err := echoViaHTTP(cl, val) - require.Nil(t, err) - assert.Equal(t, got, val) -} - -func TestWSNewWSRPCFunc(t *testing.T) { - cl := client.NewWSClient(tcpAddr, websocketEndpoint) - cl.SetLogger(log.TestingLogger()) - err := cl.Start() - require.Nil(t, err) - defer cl.Stop() - - val := "acbd" - params := map[string]interface{}{ - "arg": val, - } - err = cl.Call(context.Background(), "echo_ws", params) - require.Nil(t, err) - - msg := <-cl.ResponsesCh - if msg.Error != nil { - t.Fatal(msg.Error) - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) -} - -func TestWSHandlesArrayParams(t *testing.T) { - cl := client.NewWSClient(tcpAddr, websocketEndpoint) - cl.SetLogger(log.TestingLogger()) - err := cl.Start() - require.Nil(t, err) - defer cl.Stop() - - val := "acbd" - params := []interface{}{val} - err = cl.CallWithArrayParams(context.Background(), "echo_ws", params) - require.Nil(t, err) - - msg := <-cl.ResponsesCh - if msg.Error != nil { - 
t.Fatalf("%+v", msg.Error) - } - result := new(ResultEcho) - err = json.Unmarshal(msg.Result, result) - require.Nil(t, err) - got := result.Value - assert.Equal(t, got, val) -} - -// TestWSClientPingPong checks that a client & server exchange pings -// & pongs so the connection stays alive. -func TestWSClientPingPong(t *testing.T) { - cl := client.NewWSClient(tcpAddr, websocketEndpoint) - cl.SetLogger(log.TestingLogger()) - err := cl.Start() - require.Nil(t, err) - defer cl.Stop() - - time.Sleep(6 * time.Second) -} - -func randBytes(t *testing.T) []byte { - n := rand.Intn(10) + 2 - buf := make([]byte, n) - _, err := crand.Read(buf) - require.Nil(t, err) - return bytes.Replace(buf, []byte("="), []byte{100}, -1) -} diff --git a/rpc/lib/server/handlers.go b/rpc/lib/server/handlers.go deleted file mode 100644 index 6cc03012..00000000 --- a/rpc/lib/server/handlers.go +++ /dev/null @@ -1,794 +0,0 @@ -package rpcserver - -import ( - "bytes" - "context" - "encoding/hex" - "encoding/json" - "fmt" - "io/ioutil" - "net/http" - "reflect" - "runtime/debug" - "sort" - "strings" - "time" - - "github.com/gorilla/websocket" - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - types "github.com/tendermint/tendermint/rpc/lib/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -// RegisterRPCFuncs adds a route for each function in the funcMap, as well as general jsonrpc and websocket handlers for all functions. -// "result" is the interface on which the result objects are registered, and is populated with every RPCResponse -func RegisterRPCFuncs(mux *http.ServeMux, funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) { - // HTTP endpoints - for funcName, rpcFunc := range funcMap { - mux.HandleFunc("/"+funcName, makeHTTPHandler(rpcFunc, cdc, logger)) - } - - // JSONRPC endpoints - mux.HandleFunc("/", handleInvalidJSONRPCPaths(makeJSONRPCHandler(funcMap, cdc, logger))) -} - -//------------------------------------- -// function introspection - -// RPCFunc contains the introspected type information for a function -type RPCFunc struct { - f reflect.Value // underlying rpc function - args []reflect.Type // type of each function arg - returns []reflect.Type // type of each return arg - argNames []string // name of each argument - ws bool // websocket only -} - -// NewRPCFunc wraps a function for introspection. -// f is the function, args are comma-separated argument names -func NewRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, false) -} - -// NewWSRPCFunc wraps a function for introspection and use in the websockets. 
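-// The wrapped function must take the connection context as its first argument,
-// e.g. EchoWSResult as defined in rpc/lib/rpc_test.go:
-//
-//	func EchoWSResult(wsCtx types.WSRPCContext, v string) (*ResultEcho, error)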
-func NewWSRPCFunc(f interface{}, args string) *RPCFunc { - return newRPCFunc(f, args, true) -} - -func newRPCFunc(f interface{}, args string, ws bool) *RPCFunc { - var argNames []string - if args != "" { - argNames = strings.Split(args, ",") - } - return &RPCFunc{ - f: reflect.ValueOf(f), - args: funcArgTypes(f), - returns: funcReturnTypes(f), - argNames: argNames, - ws: ws, - } -} - -// return a function's argument types -func funcArgTypes(f interface{}) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumIn() - typez := make([]reflect.Type, n) - for i := 0; i < n; i++ { - typez[i] = t.In(i) - } - return typez -} - -// return a function's return types -func funcReturnTypes(f interface{}) []reflect.Type { - t := reflect.TypeOf(f) - n := t.NumOut() - typez := make([]reflect.Type, n) - for i := 0; i < n; i++ { - typez[i] = t.Out(i) - } - return typez -} - -// function introspection -//----------------------------------------------------------------------------- -// rpc.json - -// jsonrpc calls grab the given method's function info and run reflect.Call -func makeJSONRPCHandler(funcMap map[string]*RPCFunc, cdc *amino.Codec, logger log.Logger) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - b, err := ioutil.ReadAll(r.Body) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError("", errors.Wrap(err, "Error reading request body"))) - return - } - // if it's an empty request (like from a browser), - // just display a list of functions - if len(b) == 0 { - writeListOfEndpoints(w, r, funcMap) - return - } - - var request types.RPCRequest - err = json.Unmarshal(b, &request) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCParseError("", errors.Wrap(err, "Error unmarshalling request"))) - return - } - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == "" { - logger.Debug("HTTPJSONRPC received a notification, skipping... 
(please send a non-empty ID if you want to call a method)") - return - } - if len(r.URL.Path) > 1 { - WriteRPCResponseHTTP(w, types.RPCInvalidRequestError(request.ID, errors.Errorf("Path %s is invalid", r.URL.Path))) - return - } - rpcFunc := funcMap[request.Method] - if rpcFunc == nil || rpcFunc.ws { - WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError(request.ID)) - return - } - var args []reflect.Value - if len(request.Params) > 0 { - args, err = jsonParamsToArgsRPC(rpcFunc, cdc, request.Params) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) - return - } - } - returns := rpcFunc.f.Call(args) - logger.Info("HTTPJSONRPC", "method", request.Method, "args", args, "returns", returns) - result, err := unreflectResult(returns) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInternalError(request.ID, err)) - return - } - WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, request.ID, result)) - } -} - -func handleInvalidJSONRPCPaths(next http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - // Since the pattern "/" matches all paths not matched by other registered patterns we check whether the path is indeed - // "/", otherwise return a 404 error - if r.URL.Path != "/" { - http.NotFound(w, r) - return - } - - next(w, r) - } -} - -func mapParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params map[string]json.RawMessage, argsOffset int) ([]reflect.Value, error) { - values := make([]reflect.Value, len(rpcFunc.argNames)) - for i, argName := range rpcFunc.argNames { - argType := rpcFunc.args[i+argsOffset] - - if p, ok := params[argName]; ok && p != nil && len(p) > 0 { - val := reflect.New(argType) - err := cdc.UnmarshalJSON(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() - } else { // use default for that type - values[i] = reflect.Zero(argType) - } - } - - return values, nil -} - -func arrayParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, params []json.RawMessage, argsOffset int) ([]reflect.Value, error) { - if len(rpcFunc.argNames) != len(params) { - return nil, errors.Errorf("Expected %v parameters (%v), got %v (%v)", - len(rpcFunc.argNames), rpcFunc.argNames, len(params), params) - } - - values := make([]reflect.Value, len(params)) - for i, p := range params { - argType := rpcFunc.args[i+argsOffset] - val := reflect.New(argType) - err := cdc.UnmarshalJSON(p, val.Interface()) - if err != nil { - return nil, err - } - values[i] = val.Elem() - } - return values, nil -} - -// `raw` is unparsed json (from json.RawMessage) encoding either a map or an array. -// `argsOffset` should be 0 for RPC calls, and 1 for WS requests, where len(rpcFunc.args) != len(rpcFunc.argNames). -// -// Example: -// rpcFunc.args = [rpctypes.WSRPCContext string] -// rpcFunc.argNames = ["arg"] -func jsonParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, raw []byte, argsOffset int) ([]reflect.Value, error) { - - // TODO: Make more efficient, perhaps by checking the first character for '{' or '['? - // First, try to get the map. - var m map[string]json.RawMessage - err := json.Unmarshal(raw, &m) - if err == nil { - return mapParamsToArgs(rpcFunc, cdc, m, argsOffset) - } - - // Otherwise, try an array. - var a []json.RawMessage - err = json.Unmarshal(raw, &a) - if err == nil { - return arrayParamsToArgs(rpcFunc, cdc, a, argsOffset) - } - - // Otherwise, bad format, we cannot parse - return nil, errors.Errorf("Unknown type for JSON params: %v. 
Expected map or array", err) -} - -// Convert a []interface{} OR a map[string]interface{} to properly typed values -func jsonParamsToArgsRPC(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage) ([]reflect.Value, error) { - return jsonParamsToArgs(rpcFunc, cdc, params, 0) -} - -// Same as above, but with the first param the websocket connection -func jsonParamsToArgsWS(rpcFunc *RPCFunc, cdc *amino.Codec, params json.RawMessage, wsCtx types.WSRPCContext) ([]reflect.Value, error) { - values, err := jsonParamsToArgs(rpcFunc, cdc, params, 1) - if err != nil { - return nil, err - } - return append([]reflect.Value{reflect.ValueOf(wsCtx)}, values...), nil -} - -// rpc.json -//----------------------------------------------------------------------------- -// rpc.http - -// convert from a function name to the http handler -func makeHTTPHandler(rpcFunc *RPCFunc, cdc *amino.Codec, logger log.Logger) func(http.ResponseWriter, *http.Request) { - // Exception for websocket endpoints - if rpcFunc.ws { - return func(w http.ResponseWriter, r *http.Request) { - WriteRPCResponseHTTP(w, types.RPCMethodNotFoundError("")) - } - } - // All other endpoints - return func(w http.ResponseWriter, r *http.Request) { - logger.Debug("HTTP HANDLER", "req", r) - args, err := httpParamsToArgs(rpcFunc, cdc, r) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInvalidParamsError("", errors.Wrap(err, "Error converting http params to arguments"))) - return - } - returns := rpcFunc.f.Call(args) - logger.Info("HTTPRestRPC", "method", r.URL.Path, "args", args, "returns", returns) - result, err := unreflectResult(returns) - if err != nil { - WriteRPCResponseHTTP(w, types.RPCInternalError("", err)) - return - } - WriteRPCResponseHTTP(w, types.NewRPCSuccessResponse(cdc, "", result)) - } -} - -// Convert an http query to a list of properly typed values. -// To be properly decoded the arg must be a concrete type from tendermint (if it's an interface). 
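-// For example (routes as defined in rpc/lib/rpc_test.go), /echo?arg="hi"
-// decodes the quoted string through the codec, while /echo_bytes?arg=0xDEADBEEF
-// takes the hex fast path in nonJSONToArg below.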
-func httpParamsToArgs(rpcFunc *RPCFunc, cdc *amino.Codec, r *http.Request) ([]reflect.Value, error) { - values := make([]reflect.Value, len(rpcFunc.args)) - - for i, name := range rpcFunc.argNames { - argType := rpcFunc.args[i] - - values[i] = reflect.Zero(argType) // set default for that type - - arg := GetParam(r, name) - // log.Notice("param to arg", "argType", argType, "name", name, "arg", arg) - - if "" == arg { - continue - } - - v, err, ok := nonJSONToArg(cdc, argType, arg) - if err != nil { - return nil, err - } - if ok { - values[i] = v - continue - } - - values[i], err = _jsonStringToArg(cdc, argType, arg) - if err != nil { - return nil, err - } - } - - return values, nil -} - -func _jsonStringToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error) { - v := reflect.New(ty) - err := cdc.UnmarshalJSON([]byte(arg), v.Interface()) - if err != nil { - return v, err - } - v = v.Elem() - return v, nil -} - -func nonJSONToArg(cdc *amino.Codec, ty reflect.Type, arg string) (reflect.Value, error, bool) { - isQuotedString := strings.HasPrefix(arg, `"`) && strings.HasSuffix(arg, `"`) - isHexString := strings.HasPrefix(strings.ToLower(arg), "0x") - expectingString := ty.Kind() == reflect.String - expectingByteSlice := ty.Kind() == reflect.Slice && ty.Elem().Kind() == reflect.Uint8 - - if isHexString { - if !expectingString && !expectingByteSlice { - err := errors.Errorf("Got a hex string arg, but expected '%s'", - ty.Kind().String()) - return reflect.ValueOf(nil), err, false - } - - var value []byte - value, err := hex.DecodeString(arg[2:]) - if err != nil { - return reflect.ValueOf(nil), err, false - } - if ty.Kind() == reflect.String { - return reflect.ValueOf(string(value)), nil, true - } - return reflect.ValueOf([]byte(value)), nil, true - } - - if isQuotedString && expectingByteSlice { - v := reflect.New(reflect.TypeOf("")) - err := cdc.UnmarshalJSON([]byte(arg), v.Interface()) - if err != nil { - return reflect.ValueOf(nil), err, false - } - v = v.Elem() - return reflect.ValueOf([]byte(v.String())), nil, true - } - - return reflect.ValueOf(nil), nil, false -} - -// rpc.http -//----------------------------------------------------------------------------- -// rpc.websocket - -const ( - defaultWSWriteChanCapacity = 1000 - defaultWSWriteWait = 10 * time.Second - defaultWSReadWait = 30 * time.Second - defaultWSPingPeriod = (defaultWSReadWait * 9) / 10 -) - -// A single websocket connection contains listener id, underlying ws -// connection, and the event switch for subscribing to events. -// -// In case of an error, the connection is stopped. -type wsConnection struct { - cmn.BaseService - - remoteAddr string - baseConn *websocket.Conn - writeChan chan types.RPCResponse - - funcMap map[string]*RPCFunc - cdc *amino.Codec - - // write channel capacity - writeChanCapacity int - - // each write times out after this. - writeWait time.Duration - - // Connection times out if we haven't received *anything* in this long, not even pings. - readWait time.Duration - - // Send pings to server with this period. Must be less than readWait, but greater than zero. - pingPeriod time.Duration - - // object that is used to subscribe / unsubscribe from events - eventSub types.EventSubscriber -} - -// NewWSConnection wraps websocket.Conn. -// -// See the commentary on the func(*wsConnection) functions for a detailed -// description of how to configure ping period and pong wait time. NOTE: if the -// write buffer is full, pongs may be dropped, which may cause clients to -// disconnect. 
see https://github.com/gorilla/websocket/issues/97 -func NewWSConnection(baseConn *websocket.Conn, funcMap map[string]*RPCFunc, cdc *amino.Codec, options ...func(*wsConnection)) *wsConnection { - wsc := &wsConnection{ - remoteAddr: baseConn.RemoteAddr().String(), - baseConn: baseConn, - funcMap: funcMap, - cdc: cdc, - writeWait: defaultWSWriteWait, - writeChanCapacity: defaultWSWriteChanCapacity, - readWait: defaultWSReadWait, - pingPeriod: defaultWSPingPeriod, - } - for _, option := range options { - option(wsc) - } - wsc.BaseService = *cmn.NewBaseService(nil, "wsConnection", wsc) - return wsc -} - -// EventSubscriber sets object that is used to subscribe / unsubscribe from -// events - not Goroutine-safe. If none given, default node's eventBus will be -// used. -func EventSubscriber(eventSub types.EventSubscriber) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.eventSub = eventSub - } -} - -// WriteWait sets the amount of time to wait before a websocket write times out. -// It should only be used in the constructor - not Goroutine-safe. -func WriteWait(writeWait time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeWait = writeWait - } -} - -// WriteChanCapacity sets the capacity of the websocket write channel. -// It should only be used in the constructor - not Goroutine-safe. -func WriteChanCapacity(cap int) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.writeChanCapacity = cap - } -} - -// ReadWait sets the amount of time to wait before a websocket read times out. -// It should only be used in the constructor - not Goroutine-safe. -func ReadWait(readWait time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.readWait = readWait - } -} - -// PingPeriod sets the duration for sending websocket pings. -// It should only be used in the constructor - not Goroutine-safe. -func PingPeriod(pingPeriod time.Duration) func(*wsConnection) { - return func(wsc *wsConnection) { - wsc.pingPeriod = pingPeriod - } -} - -// OnStart implements cmn.Service by starting the read and write routines. It -// blocks until the connection closes. -func (wsc *wsConnection) OnStart() error { - wsc.writeChan = make(chan types.RPCResponse, wsc.writeChanCapacity) - - // Read subscriptions/unsubscriptions to events - go wsc.readRoutine() - // Write responses, BLOCKING. - wsc.writeRoutine() - - return nil -} - -// OnStop implements cmn.Service by unsubscribing remoteAddr from all subscriptions. -func (wsc *wsConnection) OnStop() { - // Both read and write loops close the websocket connection when they exit their loops. - // The writeChan is never closed, to allow WriteRPCResponse() to fail. - if wsc.eventSub != nil { - wsc.eventSub.UnsubscribeAll(context.TODO(), wsc.remoteAddr) - } -} - -// GetRemoteAddr returns the remote address of the underlying connection. -// It implements WSRPCConnection -func (wsc *wsConnection) GetRemoteAddr() string { - return wsc.remoteAddr -} - -// GetEventSubscriber implements WSRPCConnection by returning event subscriber. -func (wsc *wsConnection) GetEventSubscriber() types.EventSubscriber { - return wsc.eventSub -} - -// WriteRPCResponse pushes a response to the writeChan, and blocks until it is accepted. -// It implements WSRPCConnection. It is Goroutine-safe. -func (wsc *wsConnection) WriteRPCResponse(resp types.RPCResponse) { - select { - case <-wsc.Quit(): - return - case wsc.writeChan <- resp: - } -} - -// TryWriteRPCResponse attempts to push a response to the writeChan, but does not block. 
-// It implements WSRPCConnection. It is Goroutine-safe -func (wsc *wsConnection) TryWriteRPCResponse(resp types.RPCResponse) bool { - select { - case <-wsc.Quit(): - return false - case wsc.writeChan <- resp: - return true - default: - return false - } -} - -// Codec returns an amino codec used to decode parameters and encode results. -// It implements WSRPCConnection. -func (wsc *wsConnection) Codec() *amino.Codec { - return wsc.cdc -} - -// Read from the socket and subscribe to or unsubscribe from events -func (wsc *wsConnection) readRoutine() { - defer func() { - if r := recover(); r != nil { - err, ok := r.(error) - if !ok { - err = fmt.Errorf("WSJSONRPC: %v", r) - } - wsc.Logger.Error("Panic in WSJSONRPC handler", "err", err, "stack", string(debug.Stack())) - wsc.WriteRPCResponse(types.RPCInternalError("unknown", err)) - go wsc.readRoutine() - } else { - wsc.baseConn.Close() // nolint: errcheck - } - }() - - wsc.baseConn.SetPongHandler(func(m string) error { - return wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)) - }) - - for { - select { - case <-wsc.Quit(): - return - default: - // reset deadline for every type of message (control or data) - if err := wsc.baseConn.SetReadDeadline(time.Now().Add(wsc.readWait)); err != nil { - wsc.Logger.Error("failed to set read deadline", "err", err) - } - var in []byte - _, in, err := wsc.baseConn.ReadMessage() - if err != nil { - if websocket.IsCloseError(err, websocket.CloseNormalClosure) { - wsc.Logger.Info("Client closed the connection") - } else { - wsc.Logger.Error("Failed to read request", "err", err) - } - wsc.Stop() - return - } - - var request types.RPCRequest - err = json.Unmarshal(in, &request) - if err != nil { - wsc.WriteRPCResponse(types.RPCParseError("", errors.Wrap(err, "Error unmarshaling request"))) - continue - } - - // A Notification is a Request object without an "id" member. - // The Server MUST NOT reply to a Notification, including those that are within a batch request. - if request.ID == "" { - wsc.Logger.Debug("WSJSONRPC received a notification, skipping... (please send a non-empty ID if you want to call a method)") - continue - } - - // Now, fetch the RPCFunc and execute it. 
- - rpcFunc := wsc.funcMap[request.Method] - if rpcFunc == nil { - wsc.WriteRPCResponse(types.RPCMethodNotFoundError(request.ID)) - continue - } - var args []reflect.Value - if rpcFunc.ws { - wsCtx := types.WSRPCContext{Request: request, WSRPCConnection: wsc} - if len(request.Params) > 0 { - args, err = jsonParamsToArgsWS(rpcFunc, wsc.cdc, request.Params, wsCtx) - } - } else { - if len(request.Params) > 0 { - args, err = jsonParamsToArgsRPC(rpcFunc, wsc.cdc, request.Params) - } - } - if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, errors.Wrap(err, "Error converting json params to arguments"))) - continue - } - returns := rpcFunc.f.Call(args) - - // TODO: Need to encode args/returns to string if we want to log them - wsc.Logger.Info("WSJSONRPC", "method", request.Method) - - result, err := unreflectResult(returns) - if err != nil { - wsc.WriteRPCResponse(types.RPCInternalError(request.ID, err)) - continue - } - - wsc.WriteRPCResponse(types.NewRPCSuccessResponse(wsc.cdc, request.ID, result)) - } - } -} - -// receives on a write channel and writes out on the socket -func (wsc *wsConnection) writeRoutine() { - pingTicker := time.NewTicker(wsc.pingPeriod) - defer func() { - pingTicker.Stop() - if err := wsc.baseConn.Close(); err != nil { - wsc.Logger.Error("Error closing connection", "err", err) - } - }() - - // https://github.com/gorilla/websocket/issues/97 - pongs := make(chan string, 1) - wsc.baseConn.SetPingHandler(func(m string) error { - select { - case pongs <- m: - default: - } - return nil - }) - - for { - select { - case m := <-pongs: - err := wsc.writeMessageWithDeadline(websocket.PongMessage, []byte(m)) - if err != nil { - wsc.Logger.Info("Failed to write pong (client may disconnect)", "err", err) - } - case <-pingTicker.C: - err := wsc.writeMessageWithDeadline(websocket.PingMessage, []byte{}) - if err != nil { - wsc.Logger.Error("Failed to write ping", "err", err) - wsc.Stop() - return - } - case msg := <-wsc.writeChan: - jsonBytes, err := json.MarshalIndent(msg, "", " ") - if err != nil { - wsc.Logger.Error("Failed to marshal RPCResponse to JSON", "err", err) - } else { - if err = wsc.writeMessageWithDeadline(websocket.TextMessage, jsonBytes); err != nil { - wsc.Logger.Error("Failed to write response", "err", err) - wsc.Stop() - return - } - } - case <-wsc.Quit(): - return - } - } -} - -// All writes to the websocket must (re)set the write deadline. -// If some writes don't set it while others do, they may timeout incorrectly (https://github.com/tendermint/tendermint/issues/553) -func (wsc *wsConnection) writeMessageWithDeadline(msgType int, msg []byte) error { - if err := wsc.baseConn.SetWriteDeadline(time.Now().Add(wsc.writeWait)); err != nil { - return err - } - return wsc.baseConn.WriteMessage(msgType, msg) -} - -//---------------------------------------- - -// WebsocketManager provides a WS handler for incoming connections and passes a -// map of functions along with any additional params to new connections. -// NOTE: The websocket path is defined externally, e.g. in node/node.go -type WebsocketManager struct { - websocket.Upgrader - - funcMap map[string]*RPCFunc - cdc *amino.Codec - logger log.Logger - wsConnOptions []func(*wsConnection) -} - -// NewWebsocketManager returns a new WebsocketManager that passes a map of -// functions, connection options and logger to new WS connections. 
-func NewWebsocketManager(funcMap map[string]*RPCFunc, cdc *amino.Codec, wsConnOptions ...func(*wsConnection)) *WebsocketManager { - return &WebsocketManager{ - funcMap: funcMap, - cdc: cdc, - Upgrader: websocket.Upgrader{ - CheckOrigin: func(r *http.Request) bool { - // TODO ??? - return true - }, - }, - logger: log.NewNopLogger(), - wsConnOptions: wsConnOptions, - } -} - -// SetLogger sets the logger. -func (wm *WebsocketManager) SetLogger(l log.Logger) { - wm.logger = l -} - -// WebsocketHandler upgrades the request/response (via http.Hijack) and starts -// the wsConnection. -func (wm *WebsocketManager) WebsocketHandler(w http.ResponseWriter, r *http.Request) { - wsConn, err := wm.Upgrade(w, r, nil) - if err != nil { - // TODO - return http error - wm.logger.Error("Failed to upgrade to websocket connection", "err", err) - return - } - - // register connection - con := NewWSConnection(wsConn, wm.funcMap, wm.cdc, wm.wsConnOptions...) - con.SetLogger(wm.logger.With("remote", wsConn.RemoteAddr())) - wm.logger.Info("New websocket connection", "remote", con.remoteAddr) - err = con.Start() // Blocking - if err != nil { - wm.logger.Error("Error starting connection", "err", err) - } -} - -// rpc.websocket -//----------------------------------------------------------------------------- - -// NOTE: assume returns is result struct and error. If error is not nil, return it -func unreflectResult(returns []reflect.Value) (interface{}, error) { - errV := returns[1] - if errV.Interface() != nil { - return nil, errors.Errorf("%v", errV.Interface()) - } - rv := returns[0] - // the result is a registered interface, - // we need a pointer to it so we can marshal with type byte - rvp := reflect.New(rv.Type()) - rvp.Elem().Set(rv) - return rvp.Interface(), nil -} - -// writes a list of available rpc endpoints as an html page -func writeListOfEndpoints(w http.ResponseWriter, r *http.Request, funcMap map[string]*RPCFunc) { - noArgNames := []string{} - argNames := []string{} - for name, funcData := range funcMap { - if len(funcData.args) == 0 { - noArgNames = append(noArgNames, name) - } else { - argNames = append(argNames, name) - } - } - sort.Strings(noArgNames) - sort.Strings(argNames) - buf := new(bytes.Buffer) - buf.WriteString("") - buf.WriteString("
<br>Available endpoints:<br>") - - for _, name := range noArgNames { - link := fmt.Sprintf("//%s/%s", r.Host, name) - buf.WriteString(fmt.Sprintf("<a href=\"%s\">%s</a></br>", link, link)) - } - - buf.WriteString("<br>Endpoints that require arguments:<br>") - for _, name := range argNames { - link := fmt.Sprintf("//%s/%s?", r.Host, name) - funcData := funcMap[name] - for i, argName := range funcData.argNames { - link += argName + "=_" - if i < len(funcData.argNames)-1 { - link += "&" - } - } - buf.WriteString(fmt.Sprintf("<a href=\"%s\">%s</a></br>
", link, link)) - } - buf.WriteString("") - w.Header().Set("Content-Type", "text/html") - w.WriteHeader(200) - w.Write(buf.Bytes()) // nolint: errcheck -} diff --git a/rpc/lib/server/handlers_test.go b/rpc/lib/server/handlers_test.go deleted file mode 100644 index b1ea4675..00000000 --- a/rpc/lib/server/handlers_test.go +++ /dev/null @@ -1,159 +0,0 @@ -package rpcserver_test - -import ( - "bytes" - "encoding/json" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - - "github.com/gorilla/websocket" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - amino "github.com/tendermint/go-amino" - rs "github.com/tendermint/tendermint/rpc/lib/server" - types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tmlibs/log" -) - -////////////////////////////////////////////////////////////////////////////// -// HTTP REST API -// TODO - -////////////////////////////////////////////////////////////////////////////// -// JSON-RPC over HTTP - -func testMux() *http.ServeMux { - funcMap := map[string]*rs.RPCFunc{ - "c": rs.NewRPCFunc(func(s string, i int) (string, error) { return "foo", nil }, "s,i"), - } - cdc := amino.NewCodec() - mux := http.NewServeMux() - buf := new(bytes.Buffer) - logger := log.NewTMLogger(buf) - rs.RegisterRPCFuncs(mux, funcMap, cdc, logger) - - return mux -} - -func statusOK(code int) bool { return code >= 200 && code <= 299 } - -// Ensure that nefarious/unintended inputs to `params` -// do not crash our RPC handlers. -// See Issue https://github.com/tendermint/tendermint/issues/708. -func TestRPCParams(t *testing.T) { - mux := testMux() - tests := []struct { - payload string - wantErr string - }{ - // bad - {`{"jsonrpc": "2.0", "id": "0"}`, "Method not found"}, - {`{"jsonrpc": "2.0", "method": "y", "id": "0"}`, "Method not found"}, - {`{"method": "c", "id": "0", "params": a}`, "invalid character"}, - {`{"method": "c", "id": "0", "params": ["a"]}`, "got 1"}, - {`{"method": "c", "id": "0", "params": ["a", "b"]}`, "of type int"}, - {`{"method": "c", "id": "0", "params": [1, 1]}`, "of type string"}, - - // good - {`{"jsonrpc": "2.0", "method": "c", "id": "0", "params": null}`, ""}, - {`{"method": "c", "id": "0", "params": {}}`, ""}, - {`{"method": "c", "id": "0", "params": ["a", 10]}`, ""}, - } - - for i, tt := range tests { - req, _ := http.NewRequest("POST", "http://localhost/", strings.NewReader(tt.payload)) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - // Always expecting back a JSONRPCResponse - assert.True(t, statusOK(res.StatusCode), "#%d: should always return 2XX", i) - blob, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Errorf("#%d: err reading body: %v", i, err) - continue - } - - recv := new(types.RPCResponse) - assert.Nil(t, json.Unmarshal(blob, recv), "#%d: expecting successful parsing of an RPCResponse:\nblob: %s", i, blob) - assert.NotEqual(t, recv, new(types.RPCResponse), "#%d: not expecting a blank RPCResponse", i) - - if tt.wantErr == "" { - assert.Nil(t, recv.Error, "#%d: not expecting an error", i) - } else { - assert.True(t, recv.Error.Code < 0, "#%d: not expecting a positive JSONRPC code", i) - // The wanted error is either in the message or the data - assert.Contains(t, recv.Error.Message+recv.Error.Data, tt.wantErr, "#%d: expected substring", i) - } - } -} - -func TestRPCNotification(t *testing.T) { - mux := testMux() - body := strings.NewReader(`{"jsonrpc": "2.0"}`) - req, _ := http.NewRequest("POST", "http://localhost/", body) - rec := 
httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - - // Always expecting back a JSONRPCResponse - require.True(t, statusOK(res.StatusCode), "should always return 2XX") - blob, err := ioutil.ReadAll(res.Body) - require.Nil(t, err, "reading from the body should not give back an error") - require.Equal(t, len(blob), 0, "a notification SHOULD NOT be responded to by the server") -} - -func TestUnknownRPCPath(t *testing.T) { - mux := testMux() - req, _ := http.NewRequest("GET", "http://localhost/unknownrpcpath", nil) - rec := httptest.NewRecorder() - mux.ServeHTTP(rec, req) - res := rec.Result() - - // Always expecting back a 404 error - require.Equal(t, http.StatusNotFound, res.StatusCode, "should always return 404") -} - -////////////////////////////////////////////////////////////////////////////// -// JSON-RPC over WEBSOCKETS - -func TestWebsocketManagerHandler(t *testing.T) { - s := newWSServer() - defer s.Close() - - // check upgrader works - d := websocket.Dialer{} - c, dialResp, err := d.Dial("ws://"+s.Listener.Addr().String()+"/websocket", nil) - require.NoError(t, err) - - if got, want := dialResp.StatusCode, http.StatusSwitchingProtocols; got != want { - t.Errorf("dialResp.StatusCode = %q, want %q", got, want) - } - - // check basic functionality works - req, err := types.MapToRequest(amino.NewCodec(), "TestWebsocketManager", "c", map[string]interface{}{"s": "a", "i": 10}) - require.NoError(t, err) - err = c.WriteJSON(req) - require.NoError(t, err) - - var resp types.RPCResponse - err = c.ReadJSON(&resp) - require.NoError(t, err) - require.Nil(t, resp.Error) -} - -func newWSServer() *httptest.Server { - funcMap := map[string]*rs.RPCFunc{ - "c": rs.NewWSRPCFunc(func(wsCtx types.WSRPCContext, s string, i int) (string, error) { return "foo", nil }, "s,i"), - } - wm := rs.NewWebsocketManager(funcMap, amino.NewCodec()) - wm.SetLogger(log.TestingLogger()) - - mux := http.NewServeMux() - mux.HandleFunc("/websocket", wm.WebsocketHandler) - - return httptest.NewServer(mux) -} diff --git a/rpc/lib/server/http_params.go b/rpc/lib/server/http_params.go deleted file mode 100644 index 56506067..00000000 --- a/rpc/lib/server/http_params.go +++ /dev/null @@ -1,90 +0,0 @@ -package rpcserver - -import ( - "encoding/hex" - "net/http" - "regexp" - "strconv" - - "github.com/pkg/errors" -) - -var ( - // Parts of regular expressions - atom = "[A-Z0-9!#$%&'*+\\-/=?^_`{|}~]+" - dotAtom = atom + `(?:\.` + atom + `)*` - domain = `[A-Z0-9.-]+\.[A-Z]{2,4}` - - RE_HEX = regexp.MustCompile(`^(?i)[a-f0-9]+$`) - RE_EMAIL = regexp.MustCompile(`^(?i)(` + dotAtom + `)@(` + dotAtom + `)$`) - RE_ADDRESS = regexp.MustCompile(`^(?i)[a-z0-9]{25,34}$`) - RE_HOST = regexp.MustCompile(`^(?i)(` + domain + `)$`) - - //RE_ID12 = regexp.MustCompile(`^[a-zA-Z0-9]{12}$`) -) - -func GetParam(r *http.Request, param string) string { - s := r.URL.Query().Get(param) - if s == "" { - s = r.FormValue(param) - } - return s -} - -func GetParamByteSlice(r *http.Request, param string) ([]byte, error) { - s := GetParam(r, param) - return hex.DecodeString(s) -} - -func GetParamInt64(r *http.Request, param string) (int64, error) { - s := GetParam(r, param) - i, err := strconv.ParseInt(s, 10, 64) - if err != nil { - return 0, errors.Errorf("%s: %s", param, err.Error()) - } - return i, nil -} - -func GetParamInt32(r *http.Request, param string) (int32, error) { - s := GetParam(r, param) - i, err := strconv.ParseInt(s, 10, 32) - if err != nil { - return 0, errors.Errorf("%s: %s", param, err.Error()) - } - return int32(i), nil -} - -func 
GetParamUint64(r *http.Request, param string) (uint64, error) { - s := GetParam(r, param) - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, errors.Errorf("%s: %s", param, err.Error()) - } - return i, nil -} - -func GetParamUint(r *http.Request, param string) (uint, error) { - s := GetParam(r, param) - i, err := strconv.ParseUint(s, 10, 64) - if err != nil { - return 0, errors.Errorf("%s: %s", param, err.Error()) - } - return uint(i), nil -} - -func GetParamRegexp(r *http.Request, param string, re *regexp.Regexp) (string, error) { - s := GetParam(r, param) - if !re.MatchString(s) { - return "", errors.Errorf("%s: Did not match regular expression %v", param, re.String()) - } - return s, nil -} - -func GetParamFloat64(r *http.Request, param string) (float64, error) { - s := GetParam(r, param) - f, err := strconv.ParseFloat(s, 64) - if err != nil { - return 0, errors.Errorf("%s: %s", param, err.Error()) - } - return f, nil -} diff --git a/rpc/lib/server/http_server.go b/rpc/lib/server/http_server.go deleted file mode 100644 index 3f54c61e..00000000 --- a/rpc/lib/server/http_server.go +++ /dev/null @@ -1,156 +0,0 @@ -// Commons for HTTP handling -package rpcserver - -import ( - "bufio" - "encoding/json" - "fmt" - "net" - "net/http" - "runtime/debug" - "strings" - "time" - - "github.com/pkg/errors" - - types "github.com/tendermint/tendermint/rpc/lib/types" - "github.com/tendermint/tmlibs/log" -) - -func StartHTTPServer(listenAddr string, handler http.Handler, logger log.Logger) (listener net.Listener, err error) { - var proto, addr string - parts := strings.SplitN(listenAddr, "://", 2) - if len(parts) != 2 { - return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr) - } - proto, addr = parts[0], parts[1] - - logger.Info(fmt.Sprintf("Starting RPC HTTP server on %s", listenAddr)) - listener, err = net.Listen(proto, addr) - if err != nil { - return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err) - } - - go func() { - err := http.Serve( - listener, - RecoverAndLogHandler(handler, logger), - ) - logger.Error("RPC HTTP server stopped", "err", err) - }() - return listener, nil -} - -func StartHTTPAndTLSServer(listenAddr string, handler http.Handler, certFile, keyFile string, logger log.Logger) (listener net.Listener, err error) { - var proto, addr string - parts := strings.SplitN(listenAddr, "://", 2) - if len(parts) != 2 { - return nil, errors.Errorf("Invalid listening address %s (use fully formed addresses, including the tcp:// or unix:// prefix)", listenAddr) - } - proto, addr = parts[0], parts[1] - - logger.Info(fmt.Sprintf("Starting RPC HTTPS server on %s (cert: %q, key: %q)", listenAddr, certFile, keyFile)) - listener, err = net.Listen(proto, addr) - if err != nil { - return nil, errors.Errorf("Failed to listen on %v: %v", listenAddr, err) - } - - go func() { - err := http.ServeTLS( - listener, - RecoverAndLogHandler(handler, logger), - certFile, - keyFile, - ) - logger.Error("RPC HTTPS server stopped", "err", err) - }() - return listener, nil -} - -func WriteRPCResponseHTTPError(w http.ResponseWriter, httpCode int, res types.RPCResponse) { - jsonBytes, err := json.MarshalIndent(res, "", " ") - if err != nil { - panic(err) - } - - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(httpCode) - w.Write(jsonBytes) // nolint: errcheck, gas -} - -func WriteRPCResponseHTTP(w http.ResponseWriter, res types.RPCResponse) { - jsonBytes, err := json.MarshalIndent(res, "", " ") - if err != nil { - 
panic(err) - } - w.Header().Set("Content-Type", "application/json") - w.WriteHeader(200) - w.Write(jsonBytes) // nolint: errcheck, gas -} - -//----------------------------------------------------------------------------- - -// Wraps an HTTP handler, adding error logging. -// If the inner function panics, the outer function recovers, logs, sends an -// HTTP 500 error response. -func RecoverAndLogHandler(handler http.Handler, logger log.Logger) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // Wrap the ResponseWriter to remember the status - rww := &ResponseWriterWrapper{-1, w} - begin := time.Now() - - // Common headers - origin := r.Header.Get("Origin") - rww.Header().Set("Access-Control-Allow-Origin", origin) - rww.Header().Set("Access-Control-Allow-Credentials", "true") - rww.Header().Set("Access-Control-Expose-Headers", "X-Server-Time") - rww.Header().Set("X-Server-Time", fmt.Sprintf("%v", begin.Unix())) - - defer func() { - // Send a 500 error if a panic happens during a handler. - // Without this, Chrome & Firefox were retrying aborted ajax requests, - // at least to my localhost. - if e := recover(); e != nil { - - // If RPCResponse - if res, ok := e.(types.RPCResponse); ok { - WriteRPCResponseHTTP(rww, res) - } else { - // For the rest, - logger.Error("Panic in RPC HTTP handler", "err", e, "stack", string(debug.Stack())) - rww.WriteHeader(http.StatusInternalServerError) - WriteRPCResponseHTTP(rww, types.RPCInternalError("", e.(error))) - } - } - - // Finally, log. - durationMS := time.Since(begin).Nanoseconds() / 1000000 - if rww.Status == -1 { - rww.Status = 200 - } - logger.Info("Served RPC HTTP response", - "method", r.Method, "url", r.URL, - "status", rww.Status, "duration", durationMS, - "remoteAddr", r.RemoteAddr, - ) - }() - - handler.ServeHTTP(rww, r) - }) -} - -// Remember the status for logging -type ResponseWriterWrapper struct { - Status int - http.ResponseWriter -} - -func (w *ResponseWriterWrapper) WriteHeader(status int) { - w.Status = status - w.ResponseWriter.WriteHeader(status) -} - -// implements http.Hijacker -func (w *ResponseWriterWrapper) Hijack() (net.Conn, *bufio.ReadWriter, error) { - return w.ResponseWriter.(http.Hijacker).Hijack() -} diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go deleted file mode 100644 index f4323ef5..00000000 --- a/rpc/lib/server/parse_test.go +++ /dev/null @@ -1,176 +0,0 @@ -package rpcserver - -import ( - "encoding/json" - "strconv" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestParseJSONMap(t *testing.T) { - assert := assert.New(t) - - input := []byte(`{"value":"1234","height":22}`) - - // naive is float,string - var p1 map[string]interface{} - err := json.Unmarshal(input, &p1) - if assert.Nil(err) { - h, ok := p1["height"].(float64) - if assert.True(ok, "%#v", p1["height"]) { - assert.EqualValues(22, h) - } - v, ok := p1["value"].(string) - if assert.True(ok, "%#v", p1["value"]) { - assert.EqualValues("1234", v) - } - } - - // preloading map with values doesn't help - tmp := 0 - p2 := map[string]interface{}{ - "value": &cmn.HexBytes{}, - "height": &tmp, - } - err = json.Unmarshal(input, &p2) - if assert.Nil(err) { - h, ok := p2["height"].(float64) - if assert.True(ok, "%#v", p2["height"]) { - assert.EqualValues(22, h) - } - v, ok := p2["value"].(string) - if assert.True(ok, "%#v", p2["value"]) { - assert.EqualValues("1234", v) - } - } - - // preload here with 
*pointers* to the desired types - // struct has unknown types, but hard-coded keys - tmp = 0 - p3 := struct { - Value interface{} `json:"value"` - Height interface{} `json:"height"` - }{ - Height: &tmp, - Value: &cmn.HexBytes{}, - } - err = json.Unmarshal(input, &p3) - if assert.Nil(err) { - h, ok := p3.Height.(*int) - if assert.True(ok, "%#v", p3.Height) { - assert.Equal(22, *h) - } - v, ok := p3.Value.(*cmn.HexBytes) - if assert.True(ok, "%#v", p3.Value) { - assert.EqualValues([]byte{0x12, 0x34}, *v) - } - } - - // simplest solution, but hard-coded - p4 := struct { - Value cmn.HexBytes `json:"value"` - Height int `json:"height"` - }{} - err = json.Unmarshal(input, &p4) - if assert.Nil(err) { - assert.EqualValues(22, p4.Height) - assert.EqualValues([]byte{0x12, 0x34}, p4.Value) - } - - // so, let's use this trick... - // dynamic keys on map, and we can deserialize to the desired types - var p5 map[string]*json.RawMessage - err = json.Unmarshal(input, &p5) - if assert.Nil(err) { - var h int - err = json.Unmarshal(*p5["height"], &h) - if assert.Nil(err) { - assert.Equal(22, h) - } - - var v cmn.HexBytes - err = json.Unmarshal(*p5["value"], &v) - if assert.Nil(err) { - assert.Equal(cmn.HexBytes{0x12, 0x34}, v) - } - } -} - -func TestParseJSONArray(t *testing.T) { - assert := assert.New(t) - - input := []byte(`["1234",22]`) - - // naive is float,string - var p1 []interface{} - err := json.Unmarshal(input, &p1) - if assert.Nil(err) { - v, ok := p1[0].(string) - if assert.True(ok, "%#v", p1[0]) { - assert.EqualValues("1234", v) - } - h, ok := p1[1].(float64) - if assert.True(ok, "%#v", p1[1]) { - assert.EqualValues(22, h) - } - } - - // preloading map with values helps here (unlike map - p2 above) - tmp := 0 - p2 := []interface{}{&cmn.HexBytes{}, &tmp} - err = json.Unmarshal(input, &p2) - if assert.Nil(err) { - v, ok := p2[0].(*cmn.HexBytes) - if assert.True(ok, "%#v", p2[0]) { - assert.EqualValues([]byte{0x12, 0x34}, *v) - } - h, ok := p2[1].(*int) - if assert.True(ok, "%#v", p2[1]) { - assert.EqualValues(22, *h) - } - } -} - -func TestParseRPC(t *testing.T) { - assert := assert.New(t) - - demo := func(height int, name string) {} - call := NewRPCFunc(demo, "height,name") - cdc := amino.NewCodec() - - cases := []struct { - raw string - height int64 - name string - fail bool - }{ - // should parse - {`[7, "flew"]`, 7, "flew", false}, - {`{"name": "john", "height": 22}`, 22, "john", false}, - // defaults - {`{"name": "solo", "unused": "stuff"}`, 0, "solo", false}, - // should fail - wrong types/length - {`["flew", 7]`, 0, "", true}, - {`[7,"flew",100]`, 0, "", true}, - {`{"name": -12, "height": "fred"}`, 0, "", true}, - } - for idx, tc := range cases { - i := strconv.Itoa(idx) - data := []byte(tc.raw) - vals, err := jsonParamsToArgs(call, cdc, data, 0) - if tc.fail { - assert.NotNil(err, i) - } else { - assert.Nil(err, "%s: %+v", i, err) - if assert.Equal(2, len(vals), i) { - assert.Equal(tc.height, vals[0].Int(), i) - assert.Equal(tc.name, vals[1].String(), i) - } - } - - } - -} diff --git a/rpc/lib/test/data.json b/rpc/lib/test/data.json deleted file mode 100644 index 83283ec3..00000000 --- a/rpc/lib/test/data.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "jsonrpc": "2.0", - "id": "", - "method": "hello_world", - "params": { - "name": "my_world", - "num": 5 - } -} diff --git a/rpc/lib/test/integration_test.sh b/rpc/lib/test/integration_test.sh deleted file mode 100755 index 7c23be7d..00000000 --- a/rpc/lib/test/integration_test.sh +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get 
the directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -# Change into that dir because we expect that. -pushd "$DIR" - -echo "==> Building the server" -go build -o rpcserver main.go - -echo "==> (Re)starting the server" -PID=$(pgrep rpcserver || echo "") -if [[ $PID != "" ]]; then - kill -9 "$PID" -fi -./rpcserver & -PID=$! -sleep 2 - -echo "==> simple request" -R1=$(curl -s 'http://localhost:8008/hello_world?name="my_world"&num=5') -R2=$(curl -s --data @data.json http://localhost:8008) -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with 0x-prefixed hex string arg" -R1=$(curl -s 'http://localhost:8008/hello_world?name=0x41424344&num=123') -R2='{"jsonrpc":"2.0","id":"","result":{"Result":"hi ABCD 123"},"error":""}' -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with missing params" -R1=$(curl -s 'http://localhost:8008/hello_world') -R2='{"jsonrpc":"2.0","id":"","result":{"Result":"hi 0"},"error":""}' -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with unquoted string arg" -R1=$(curl -s 'http://localhost:8008/hello_world?name=abcd&num=123') -R2="{\"jsonrpc\":\"2.0\",\"id\":\"\",\"result\":null,\"error\":\"Error converting http params to args: invalid character 'a' looking for beginning of value\"}" -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> request with string type when expecting number arg" -R1=$(curl -s 'http://localhost:8008/hello_world?name="abcd"&num=0xabcd') -R2="{\"jsonrpc\":\"2.0\",\"id\":\"\",\"result\":null,\"error\":\"Error converting http params to args: Got a hex string arg, but expected 'int'\"}" -if [[ "$R1" != "$R2" ]]; then - echo "responses are not identical:" - echo "R1: $R1" - echo "R2: $R2" - echo "FAIL" - exit 1 -else - echo "OK" -fi - -echo "==> Stopping the server" -kill -9 $PID - -rm -f rpcserver - -popd -exit 0 diff --git a/rpc/lib/test/main.go b/rpc/lib/test/main.go deleted file mode 100644 index 604cbd3d..00000000 --- a/rpc/lib/test/main.go +++ /dev/null @@ -1,40 +0,0 @@ -package main - -import ( - "fmt" - "net/http" - "os" - - "github.com/tendermint/go-amino" - rpcserver "github.com/tendermint/tendermint/rpc/lib/server" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -var routes = map[string]*rpcserver.RPCFunc{ - "hello_world": rpcserver.NewRPCFunc(HelloWorld, "name,num"), -} - -func HelloWorld(name string, num int) (Result, error) { - return Result{fmt.Sprintf("hi %s %d", name, num)}, nil -} - -type Result struct { - Result string -} - -func main() { - mux := http.NewServeMux() - cdc := amino.NewCodec() - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - rpcserver.RegisterRPCFuncs(mux, routes, cdc, logger) - _, err := rpcserver.StartHTTPServer("0.0.0.0:8008", mux, logger) - if err != nil { - cmn.Exit(err.Error()) - } - - // Wait forever - cmn.TrapSignal(func() { - }) - -} diff --git a/rpc/lib/types/types.go b/rpc/lib/types/types.go deleted file mode 100644 index fe9a9253..00000000 --- a/rpc/lib/types/types.go +++ 
/dev/null @@ -1,187 +0,0 @@ -package rpctypes - -import ( - "context" - "encoding/json" - "fmt" - "strings" - - "github.com/pkg/errors" - - amino "github.com/tendermint/go-amino" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" -) - -//---------------------------------------- -// REQUEST - -type RPCRequest struct { - JSONRPC string `json:"jsonrpc"` - ID string `json:"id"` - Method string `json:"method"` - Params json.RawMessage `json:"params"` // must be map[string]interface{} or []interface{} -} - -func NewRPCRequest(id string, method string, params json.RawMessage) RPCRequest { - return RPCRequest{ - JSONRPC: "2.0", - ID: id, - Method: method, - Params: params, - } -} - -func (req RPCRequest) String() string { - return fmt.Sprintf("[%s %s]", req.ID, req.Method) -} - -func MapToRequest(cdc *amino.Codec, id string, method string, params map[string]interface{}) (RPCRequest, error) { - var params_ = make(map[string]json.RawMessage, len(params)) - for name, value := range params { - valueJSON, err := cdc.MarshalJSON(value) - if err != nil { - return RPCRequest{}, err - } - params_[name] = valueJSON - } - payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. - if err != nil { - return RPCRequest{}, err - } - request := NewRPCRequest(id, method, payload) - return request, nil -} - -func ArrayToRequest(cdc *amino.Codec, id string, method string, params []interface{}) (RPCRequest, error) { - var params_ = make([]json.RawMessage, len(params)) - for i, value := range params { - valueJSON, err := cdc.MarshalJSON(value) - if err != nil { - return RPCRequest{}, err - } - params_[i] = valueJSON - } - payload, err := json.Marshal(params_) // NOTE: Amino doesn't handle maps yet. - if err != nil { - return RPCRequest{}, err - } - request := NewRPCRequest(id, method, payload) - return request, nil -} - -//---------------------------------------- -// RESPONSE - -type RPCError struct { - Code int `json:"code"` - Message string `json:"message"` - Data string `json:"data,omitempty"` -} - -func (err RPCError) Error() string { - const baseFormat = "RPC error %v - %s" - if err.Data != "" { - return fmt.Sprintf(baseFormat+": %s", err.Code, err.Message, err.Data) - } - return fmt.Sprintf(baseFormat, err.Code, err.Message) -} - -type RPCResponse struct { - JSONRPC string `json:"jsonrpc"` - ID string `json:"id"` - Result json.RawMessage `json:"result,omitempty"` - Error *RPCError `json:"error,omitempty"` -} - -func NewRPCSuccessResponse(cdc *amino.Codec, id string, res interface{}) RPCResponse { - var rawMsg json.RawMessage - - if res != nil { - var js []byte - js, err := cdc.MarshalJSON(res) - if err != nil { - return RPCInternalError(id, errors.Wrap(err, "Error marshalling response")) - } - rawMsg = json.RawMessage(js) - } - - return RPCResponse{JSONRPC: "2.0", ID: id, Result: rawMsg} -} - -func NewRPCErrorResponse(id string, code int, msg string, data string) RPCResponse { - return RPCResponse{ - JSONRPC: "2.0", - ID: id, - Error: &RPCError{Code: code, Message: msg, Data: data}, - } -} - -func (resp RPCResponse) String() string { - if resp.Error == nil { - return fmt.Sprintf("[%s %v]", resp.ID, resp.Result) - } - return fmt.Sprintf("[%s %s]", resp.ID, resp.Error) -} - -func RPCParseError(id string, err error) RPCResponse { - return NewRPCErrorResponse(id, -32700, "Parse error. 
Invalid JSON", err.Error()) -} - -func RPCInvalidRequestError(id string, err error) RPCResponse { - return NewRPCErrorResponse(id, -32600, "Invalid Request", err.Error()) -} - -func RPCMethodNotFoundError(id string) RPCResponse { - return NewRPCErrorResponse(id, -32601, "Method not found", "") -} - -func RPCInvalidParamsError(id string, err error) RPCResponse { - return NewRPCErrorResponse(id, -32602, "Invalid params", err.Error()) -} - -func RPCInternalError(id string, err error) RPCResponse { - return NewRPCErrorResponse(id, -32603, "Internal error", err.Error()) -} - -func RPCServerError(id string, err error) RPCResponse { - return NewRPCErrorResponse(id, -32000, "Server error", err.Error()) -} - -//---------------------------------------- - -// *wsConnection implements this interface. -type WSRPCConnection interface { - GetRemoteAddr() string - WriteRPCResponse(resp RPCResponse) - TryWriteRPCResponse(resp RPCResponse) bool - GetEventSubscriber() EventSubscriber - Codec() *amino.Codec -} - -// EventSubscriber mirros tendermint/tendermint/types.EventBusSubscriber -type EventSubscriber interface { - Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error - Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error - UnsubscribeAll(ctx context.Context, subscriber string) error -} - -// websocket-only RPCFuncs take this as the first parameter. -type WSRPCContext struct { - Request RPCRequest - WSRPCConnection -} - -//---------------------------------------- -// SOCKETS -// -// Determine if its a unix or tcp socket. -// If tcp, must specify the port; `0.0.0.0` will return incorrectly as "unix" since there's no port -// TODO: deprecate -func SocketType(listenAddr string) string { - socketType := "unix" - if len(strings.Split(listenAddr, ":")) >= 2 { - socketType = "tcp" - } - return socketType -} diff --git a/rpc/lib/types/types_test.go b/rpc/lib/types/types_test.go deleted file mode 100644 index 9dd1b7a1..00000000 --- a/rpc/lib/types/types_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package rpctypes - -import ( - "encoding/json" - "testing" - - "fmt" - - "github.com/pkg/errors" - "github.com/stretchr/testify/assert" - "github.com/tendermint/go-amino" -) - -type SampleResult struct { - Value string -} - -func TestResponses(t *testing.T) { - assert := assert.New(t) - cdc := amino.NewCodec() - - a := NewRPCSuccessResponse(cdc, "1", &SampleResult{"hello"}) - b, _ := json.Marshal(a) - s := `{"jsonrpc":"2.0","id":"1","result":{"Value":"hello"}}` - assert.Equal(string(s), string(b)) - - d := RPCParseError("1", errors.New("Hello world")) - e, _ := json.Marshal(d) - f := `{"jsonrpc":"2.0","id":"1","error":{"code":-32700,"message":"Parse error. 
Invalid JSON","data":"Hello world"}}` - assert.Equal(string(f), string(e)) - - g := RPCMethodNotFoundError("2") - h, _ := json.Marshal(g) - i := `{"jsonrpc":"2.0","id":"2","error":{"code":-32601,"message":"Method not found"}}` - assert.Equal(string(h), string(i)) -} - -func TestRPCError(t *testing.T) { - assert.Equal(t, "RPC error 12 - Badness: One worse than a code 11", - fmt.Sprintf("%v", &RPCError{ - Code: 12, - Message: "Badness", - Data: "One worse than a code 11", - })) - - assert.Equal(t, "RPC error 12 - Badness", - fmt.Sprintf("%v", &RPCError{ - Code: 12, - Message: "Badness", - })) -} diff --git a/rpc/lib/version.go b/rpc/lib/version.go deleted file mode 100644 index 8828f260..00000000 --- a/rpc/lib/version.go +++ /dev/null @@ -1,7 +0,0 @@ -package rpc - -const Maj = "0" -const Min = "7" -const Fix = "0" - -const Version = Maj + "." + Min + "." + Fix diff --git a/rpc/test/helpers.go b/rpc/test/helpers.go deleted file mode 100644 index b434c7d9..00000000 --- a/rpc/test/helpers.go +++ /dev/null @@ -1,132 +0,0 @@ -package rpctest - -import ( - "context" - "fmt" - "os" - "path/filepath" - "strings" - "time" - - "github.com/tendermint/tmlibs/log" - - abci "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - - cfg "github.com/tendermint/tendermint/config" - nm "github.com/tendermint/tendermint/node" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/proxy" - ctypes "github.com/tendermint/tendermint/rpc/core/types" - core_grpc "github.com/tendermint/tendermint/rpc/grpc" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" -) - -var globalConfig *cfg.Config - -func waitForRPC() { - laddr := GetConfig().RPC.ListenAddress - client := rpcclient.NewJSONRPCClient(laddr) - ctypes.RegisterAmino(client.Codec()) - result := new(ctypes.ResultStatus) - for { - _, err := client.Call("status", map[string]interface{}{}, result) - if err == nil { - return - } else { - fmt.Println("error", err) - time.Sleep(time.Millisecond) - } - } -} - -func waitForGRPC() { - client := GetGRPCClient() - for { - _, err := client.Ping(context.Background(), &core_grpc.RequestPing{}) - if err == nil { - return - } - } -} - -// f**ing long, but unique for each test -func makePathname() string { - // get path - p, err := os.Getwd() - if err != nil { - panic(err) - } - // fmt.Println(p) - sep := string(filepath.Separator) - return strings.Replace(p, sep, "_", -1) -} - -func randPort() int { - return int(cmn.RandUint16()/2 + 10000) -} - -func makeAddrs() (string, string, string) { - start := randPort() - return fmt.Sprintf("tcp://0.0.0.0:%d", start), - fmt.Sprintf("tcp://0.0.0.0:%d", start+1), - fmt.Sprintf("tcp://0.0.0.0:%d", start+2) -} - -// GetConfig returns a config for the test cases as a singleton -func GetConfig() *cfg.Config { - if globalConfig == nil { - pathname := makePathname() - globalConfig = cfg.ResetTestRoot(pathname) - - // and we use random ports to run in parallel - tm, rpc, grpc := makeAddrs() - globalConfig.P2P.ListenAddress = tm - globalConfig.RPC.ListenAddress = rpc - globalConfig.RPC.GRPCListenAddress = grpc - globalConfig.TxIndex.IndexTags = "app.creator" // see kvstore application - } - return globalConfig -} - -func GetGRPCClient() core_grpc.BroadcastAPIClient { - grpcAddr := globalConfig.RPC.GRPCListenAddress - return core_grpc.StartGRPCClient(grpcAddr) -} - -// StartTendermint starts a test tendermint server in a go routine and returns when it is initialized -func StartTendermint(app abci.Application) *nm.Node { - node := 
NewTendermint(app) - err := node.Start() - if err != nil { - panic(err) - } - - // wait for rpc - waitForRPC() - waitForGRPC() - - fmt.Println("Tendermint running!") - - return node -} - -// NewTendermint creates a new tendermint server and sleeps forever -func NewTendermint(app abci.Application) *nm.Node { - // Create & start node - config := GetConfig() - logger := log.NewTMLogger(log.NewSyncWriter(os.Stdout)) - logger = log.NewFilter(logger, log.AllowError()) - pvFile := config.PrivValidatorFile() - pv := privval.LoadOrGenFilePV(pvFile) - papp := proxy.NewLocalClientCreator(app) - node, err := nm.NewNode(config, pv, papp, - nm.DefaultGenesisDocProviderFunc(config), - nm.DefaultDBProvider, - nm.DefaultMetricsProvider, - logger) - if err != nil { - panic(err) - } - return node -} diff --git a/scripts/README.md b/scripts/README.md deleted file mode 100644 index 18624117..00000000 --- a/scripts/README.md +++ /dev/null @@ -1 +0,0 @@ -* http://redsymbol.net/articles/unofficial-bash-strict-mode/ diff --git a/scripts/dep_utils/parse.sh b/scripts/dep_utils/parse.sh deleted file mode 100644 index e6519efa..00000000 --- a/scripts/dep_utils/parse.sh +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash - -set +u -if [[ "$DEP" == "" ]]; then - DEP=$GOPATH/src/github.com/tendermint/tendermint/Gopkg.lock -fi -set -u - - -set -euo pipefail - -LIB=$1 - -grep -A100 "$LIB" "$DEP" | grep revision | head -n1 | grep -o '"[^"]\+"' | cut -d '"' -f 2 diff --git a/scripts/dist.sh b/scripts/dist.sh deleted file mode 100755 index 40aa71e9..00000000 --- a/scripts/dist.sh +++ /dev/null @@ -1,81 +0,0 @@ -#!/usr/bin/env bash -set -e - -# WARN: non hermetic build (people must run this script inside docker to -# produce deterministic binaries). - -# Get the version from the environment, or try to figure it out. -if [ -z $VERSION ]; then - VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) -fi -if [ -z "$VERSION" ]; then - echo "Please specify a version." - exit 1 -fi -echo "==> Building version $VERSION..." - -# Delete the old dir -echo "==> Removing old directory..." -rm -rf build/pkg -mkdir -p build/pkg - -# Get the git commit -GIT_COMMIT="$(git rev-parse --short=8 HEAD)" -GIT_IMPORT="github.com/tendermint/tendermint/version" - -# Determine the arch/os combos we're building for -XC_ARCH=${XC_ARCH:-"386 amd64 arm"} -XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"} -XC_EXCLUDE=${XC_EXCLUDE:-" darwin/arm solaris/amd64 solaris/386 solaris/arm freebsd/amd64 windows/arm "} - -# Make sure build tools are available. -make get_tools - -# Get VENDORED dependencies -make get_vendor_deps - -# Build! -# ldflags: -s Omit the symbol table and debug information. -# -w Omit the DWARF symbol table. -echo "==> Building..." -IFS=' ' read -ra arch_list <<< "$XC_ARCH" -IFS=' ' read -ra os_list <<< "$XC_OS" -for arch in "${arch_list[@]}"; do - for os in "${os_list[@]}"; do - if [[ "$XC_EXCLUDE" != *" $os/$arch "* ]]; then - echo "--> $os/$arch" - GOOS=${os} GOARCH=${arch} go build -ldflags "-s -w -X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}" -tags="${BUILD_TAGS}" -o "build/pkg/${os}_${arch}/tendermint" ./cmd/tendermint - fi - done -done - -# Zip all the files. -echo "==> Packaging..." -for PLATFORM in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type d); do - OSARCH=$(basename "${PLATFORM}") - echo "--> ${OSARCH}" - - pushd "$PLATFORM" >/dev/null 2>&1 - zip "../${OSARCH}.zip" ./* - popd >/dev/null 2>&1 -done - -# Add "tendermint" and $VERSION prefix to package name. 
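# (e.g., assuming VERSION=0.20.0, build/pkg/linux_amd64.zip is copied to
# build/dist/tendermint_0.20.0_linux_amd64.zip)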
-rm -rf ./build/dist -mkdir -p ./build/dist -for FILENAME in $(find ./build/pkg -mindepth 1 -maxdepth 1 -type f); do - FILENAME=$(basename "$FILENAME") - cp "./build/pkg/${FILENAME}" "./build/dist/tendermint_${VERSION}_${FILENAME}" -done - -# Make the checksums. -pushd ./build/dist -shasum -a256 ./* > "./tendermint_${VERSION}_SHA256SUMS" -popd - -# Done -echo -echo "==> Results:" -ls -hl ./build/dist - -exit 0 diff --git a/scripts/install/install_tendermint_bsd.sh b/scripts/install/install_tendermint_bsd.sh deleted file mode 100644 index aba584f2..00000000 --- a/scripts/install/install_tendermint_bsd.sh +++ /dev/null @@ -1,54 +0,0 @@ -#!/usr/bin/tcsh - -# XXX: this script is intended to be run from -# a fresh Digital Ocean droplet with FreeBSD -# Just run tcsh install_tendermint_bsd.sh - -# upon its completion, you must either reset -# your terminal or run `source ~/.tcshrc` - -# This assumes you're installing it through tcsh as root. -# Change the relevant lines from tcsh to csh if you're -# installing as a different user, along with changing the -# gopath. - -# change this to a specific release or branch -set BRANCH=master - -sudo pkg update - -sudo pkg upgrade -y -sudo pkg install -y gmake -sudo pkg install -y git - -# get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.freebsd-amd64.tar.gz -tar -xvf go1.10.freebsd-amd64.tar.gz - -# move go binary and add to path -mv go /usr/local -set path=($path /usr/local/go/bin) - - -# create the go directory, set GOPATH, and put it on PATH -mkdir go -echo "setenv GOPATH /root/go" >> ~/.tcshrc -setenv GOPATH /root/go -echo "set path=($path $GOPATH/bin)" >> ~/.tcshrc - -source ~/.tcshrc - -# get the code and move into repo -set REPO=github.com/tendermint/tendermint -go get $REPO -cd $GOPATH/src/$REPO - -# build & install master -git checkout $BRANCH -gmake get_tools -gmake get_vendor_deps -gmake install - -# the binary is located in $GOPATH/bin -# run `source ~/.tcshrc` or reset your terminal -# to persist the changes diff --git a/scripts/install/install_tendermint_ubuntu.sh b/scripts/install/install_tendermint_ubuntu.sh deleted file mode 100644 index 0e1de117..00000000 --- a/scripts/install/install_tendermint_ubuntu.sh +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env bash - -# XXX: this script is intended to be run from -# a fresh Digital Ocean droplet with Ubuntu - -# upon its completion, you must either reset -# your terminal or run `source ~/.profile` - -# as written, this script will install -# tendermint core from master branch -REPO=github.com/tendermint/tendermint - -# change this to a specific release or branch -BRANCH=master - -sudo apt-get update -y -sudo apt-get upgrade -y -sudo apt-get install -y make - -# get and unpack golang -curl -O https://storage.googleapis.com/golang/go1.10.linux-amd64.tar.gz -tar -xvf go1.10.linux-amd64.tar.gz - -# move go binary and add to path -mv go /usr/local -echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile - -# create the goApps directory, set GOPATH, and put it on PATH -mkdir goApps -echo "export GOPATH=/root/goApps" >> ~/.profile -echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile - -source ~/.profile - -# get the code and move into repo -go get $REPO -cd $GOPATH/src/$REPO - -# build & install -git checkout $BRANCH -# XXX: uncomment if branch isn't master -# git fetch origin $BRANCH -make get_tools -make get_vendor_deps -make install - -# the binary is located in $GOPATH/bin -# run `source ~/.profile` or reset your terminal -# to persist the changes diff --git 
a/scripts/install_abci_apps.sh b/scripts/install_abci_apps.sh deleted file mode 100644 index eb70070d..00000000 --- a/scripts/install_abci_apps.sh +++ /dev/null @@ -1,12 +0,0 @@ -#! /bin/bash - -# get the abci commit used by tendermint -COMMIT=$(bash scripts/dep_utils/parse.sh abci) -echo "Checking out vendored commit for abci: $COMMIT" - -go get -d github.com/tendermint/abci -cd "$GOPATH/src/github.com/tendermint/abci" || exit -git checkout "$COMMIT" -make get_tools -make get_vendor_deps -make install diff --git a/scripts/publish.sh b/scripts/publish.sh deleted file mode 100755 index ba944087..00000000 --- a/scripts/publish.sh +++ /dev/null @@ -1,20 +0,0 @@ -#!/usr/bin/env bash -set -e - -VERSION=$1 -DIST_DIR=./build/dist - -# Get the version from the environment, or try to figure it out. -if [ -z $VERSION ]; then - VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) -fi -if [ -z "$VERSION" ]; then - echo "Please specify a version." - exit 1 -fi -echo "==> Copying ${DIST_DIR} to S3..." - -# copy to s3 -aws s3 cp --recursive ${DIST_DIR} s3://tendermint/binaries/tendermint/v${VERSION} --acl public-read - -exit 0 diff --git a/scripts/release.sh b/scripts/release.sh deleted file mode 100755 index 9a4e508e..00000000 --- a/scripts/release.sh +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env bash -set -e - -# Get the version from the environment, or try to figure it out. -if [ -z $VERSION ]; then - VERSION=$(awk -F\" '/Version =/ { print $2; exit }' < version/version.go) -fi -if [ -z "$VERSION" ]; then - echo "Please specify a version." - exit 1 -fi -echo "==> Releasing version $VERSION..." - -# Get the parent directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )" - -# Change into that dir because we expect that. 
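# (dist.sh and publish.sh below resolve paths like ./build/dist relative
# to the repo root)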
-cd "$DIR" - -# Building binaries -sh -c "'$DIR/scripts/dist.sh'" - -# Pushing binaries to S3 -sh -c "'$DIR/scripts/publish.sh'" - -# echo "==> Crafting a Github release" -# today=$(date +"%B-%d-%Y") -# ghr -b "https://github.com/tendermint/tendermint/blob/master/CHANGELOG.md#${VERSION//.}-${today,}" "v$VERSION" "$DIR/build/dist" - -# Build and push Docker image - -## Get SHA256SUM of the linux archive -SHA256SUM=$(shasum -a256 "${DIR}/build/dist/tendermint_${VERSION}_linux_amd64.zip" | awk '{print $1;}') - -## Replace TM_VERSION and TM_SHA256SUM with the new values -sed -i -e "s/TM_VERSION .*/TM_VERSION $VERSION/g" "$DIR/DOCKER/Dockerfile" -sed -i -e "s/TM_SHA256SUM .*/TM_SHA256SUM $SHA256SUM/g" "$DIR/DOCKER/Dockerfile" -git commit -m "update Dockerfile" -a "$DIR/DOCKER/Dockerfile" -echo "==> TODO: update DOCKER/README.md (latest Dockerfile's hash is $(git rev-parse HEAD)) and copy it's content to https://store.docker.com/community/images/tendermint/tendermint" - -pushd "$DIR/DOCKER" - -## Build Docker image -TAG=$VERSION sh -c "'./build.sh'" - -## Push Docker image -TAG=$VERSION sh -c "'./push.sh'" - -popd - -exit 0 diff --git a/scripts/slate.sh b/scripts/slate.sh deleted file mode 100644 index e18babea..00000000 --- a/scripts/slate.sh +++ /dev/null @@ -1,77 +0,0 @@ -#!/usr/bin/env bash -set -euo pipefail - -if [ "$CIRCLE_BRANCH" == "" ]; then - echo "this script is meant to be run on CircleCI, exiting" - echo 1 -fi - -# check for changes in the `rpc/core` directory -did_rpc_change=$(git diff --name-status $CIRCLE_BRANCH origin/master | grep rpc/core) - -if [ "$did_rpc_change" == "" ]; then - echo "no changes detected in rpc/core, exiting" - exit 0 -else - echo "changes detected in rpc/core, continuing" -fi - -# only run this script on changes to rpc/core committed to develop -if [ "$CIRCLE_BRANCH" != "master" ]; then - echo "the branch being built isn't master, exiting" - exit 0 -else - echo "on master, building the RPC docs" -fi - -# godoc2md used to convert the go documentation from -# `rpc/core` into a markdown file consumed by Slate -go get github.com/davecheney/godoc2md - -# slate works via forks, and we'll be committing to -# master branch, which will trigger our fork to run -# the `./deploy.sh` and publish via the `gh-pages` branch -slate_repo=github.com/tendermint/slate -slate_path="$GOPATH"/src/"$slate_repo" - -if [ ! 
-d "$slate_path" ]; then - git clone https://"$slate_repo".git $slate_path -fi - -# the main file we need to update if rpc/core changed -destination="$slate_path"/source/index.html.md - -# we remove it then re-create it with the latest changes -rm $destination - -header="--- -title: RPC Reference - -language_tabs: - - shell - - go - -toc_footers: - - Tendermint - - Documentation Powered by Slate - -search: true ----" - -# write header to the main slate file -echo "$header" > "$destination" - -# generate a markdown from the godoc comments, using a template -rpc_docs=$(godoc2md -template rpc/core/doc_template.txt github.com/tendermint/tendermint/rpc/core | grep -v -e "pipe.go" -e "routes.go" -e "dev.go" | sed 's$/src/target$https://github.com/tendermint/tendermint/tree/master/rpc/core$') - -# append core RPC docs -echo "$rpc_docs" >> "$destination" - -# commit the changes -cd $slate_path - -git config --global user.email "github@tendermint.com" -git config --global user.name "tenderbot" - -git commit -a -m "Update tendermint RPC docs via CircleCI" -git push -q https://${GITHUB_ACCESS_TOKEN}@github.com/tendermint/slate.git master diff --git a/scripts/txs/random.sh b/scripts/txs/random.sh deleted file mode 100644 index 231fabcf..00000000 --- a/scripts/txs/random.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! /bin/bash -set -u - -function toHex() { - echo -n $1 | hexdump -ve '1/1 "%.2X"' -} - -N=$1 -PORT=$2 - -for i in `seq 1 $N`; do - # store key value pair - KEY=$(head -c 10 /dev/urandom) - VALUE="$i" - echo $(toHex $KEY=$VALUE) - curl 127.0.0.1:$PORT/broadcast_tx_sync?tx=0x$(toHex $KEY=$VALUE) -done - - diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go deleted file mode 100644 index f6ffea43..00000000 --- a/scripts/wal2json/main.go +++ /dev/null @@ -1,59 +0,0 @@ -/* - wal2json converts binary WAL file to JSON. 
- - Usage: - wal2json <path-to-wal> -*/ - -package main - -import ( - "encoding/json" - "fmt" - "io" - "os" - - cs "github.com/tendermint/tendermint/consensus" -) - -func main() { - if len(os.Args) < 2 { - fmt.Println("missing one argument: <path-to-wal>") - os.Exit(1) - } - - f, err := os.Open(os.Args[1]) - if err != nil { - panic(fmt.Errorf("failed to open WAL file: %v", err)) - } - defer f.Close() - - dec := cs.NewWALDecoder(f) - for { - msg, err := dec.Decode() - if err == io.EOF { - break - } else if err != nil { - panic(fmt.Errorf("failed to decode msg: %v", err)) - } - - json, err := json.Marshal(msg) - if err != nil { - panic(fmt.Errorf("failed to marshal msg: %v", err)) - } - - _, err = os.Stdout.Write(json) - if err == nil { - _, err = os.Stdout.Write([]byte("\n")) - } - if err == nil { - if end, ok := msg.Msg.(cs.EndHeightMessage); ok { - _, err = os.Stdout.Write([]byte(fmt.Sprintf("ENDHEIGHT %d\n", end.Height))) // nolint: errcheck, gas - } - } - if err != nil { - fmt.Println("Failed to write message", err) - os.Exit(1) - } - } -} diff --git a/scripts/wire2amino.go b/scripts/wire2amino.go deleted file mode 100644 index a942ceaa..00000000 --- a/scripts/wire2amino.go +++ /dev/null @@ -1,181 +0,0 @@ -package main - -import ( - "encoding/json" - "fmt" - "io/ioutil" - "os" - "path/filepath" - "time" - - "github.com/tendermint/go-amino" - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/p2p" - "github.com/tendermint/tendermint/privval" - "github.com/tendermint/tendermint/types" -) - -type GenesisValidator struct { - PubKey Data `json:"pub_key"` - Power int64 `json:"power"` - Name string `json:"name"` -} - -type Genesis struct { - GenesisTime time.Time `json:"genesis_time"` - ChainID string `json:"chain_id"` - ConsensusParams *types.ConsensusParams `json:"consensus_params,omitempty"` - Validators []GenesisValidator `json:"validators"` - AppHash cmn.HexBytes `json:"app_hash"` - AppStateJSON json.RawMessage `json:"app_state,omitempty"` - AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED - -} - -type NodeKey struct { - PrivKey Data `json:"priv_key"` -} - -type PrivVal struct { - Address cmn.HexBytes `json:"address"` - LastHeight int64 `json:"last_height"` - LastRound int `json:"last_round"` - LastStep int8 `json:"last_step"` - PubKey Data `json:"pub_key"` - PrivKey Data `json:"priv_key"` -} - -type Data struct { - Type string `json:"type"` - Data cmn.HexBytes `json:"data"` -} - -func convertNodeKey(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { - var nodeKey NodeKey - err := json.Unmarshal(jsonBytes, &nodeKey) - if err != nil { - return nil, err - } - - var privKey crypto.PrivKeyEd25519 - copy(privKey[:], nodeKey.PrivKey.Data) - - nodeKeyNew := p2p.NodeKey{privKey} - - bz, err := cdc.MarshalJSON(nodeKeyNew) - if err != nil { - return nil, err - } - return bz, nil -} - -func convertPrivVal(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { - var privVal PrivVal - err := json.Unmarshal(jsonBytes, &privVal) - if err != nil { - return nil, err - } - - var privKey crypto.PrivKeyEd25519 - copy(privKey[:], privVal.PrivKey.Data) - - var pubKey crypto.PubKeyEd25519 - copy(pubKey[:], privVal.PubKey.Data) - - privValNew := privval.FilePV{ - Address: pubKey.Address(), - PubKey: pubKey, - LastHeight: privVal.LastHeight, - LastRound: privVal.LastRound, - LastStep: privVal.LastStep, - PrivKey: privKey, - } - - bz, err := cdc.MarshalJSON(privValNew) - if err != nil { - return nil, err - } - return bz, nil -} - -func 
convertGenesis(cdc *amino.Codec, jsonBytes []byte) ([]byte, error) { - var genesis Genesis - err := json.Unmarshal(jsonBytes, &genesis) - if err != nil { - return nil, err - } - - genesisNew := types.GenesisDoc{ - GenesisTime: genesis.GenesisTime, - ChainID: genesis.ChainID, - ConsensusParams: genesis.ConsensusParams, - // Validators - AppHash: genesis.AppHash, - AppStateJSON: genesis.AppStateJSON, - } - - if genesis.AppOptions != nil { - genesisNew.AppStateJSON = genesis.AppOptions - } - - for _, v := range genesis.Validators { - var pubKey crypto.PubKeyEd25519 - copy(pubKey[:], v.PubKey.Data) - genesisNew.Validators = append( - genesisNew.Validators, - types.GenesisValidator{ - PubKey: pubKey, - Power: v.Power, - Name: v.Name, - }, - ) - - } - - bz, err := cdc.MarshalJSON(genesisNew) - if err != nil { - return nil, err - } - return bz, nil -} - -func main() { - cdc := amino.NewCodec() - crypto.RegisterAmino(cdc) - - args := os.Args[1:] - if len(args) != 1 { - fmt.Println("Please specify a file to convert") - os.Exit(1) - } - - filePath := args[0] - fileName := filepath.Base(filePath) - - fileBytes, err := ioutil.ReadFile(filePath) - if err != nil { - panic(err) - } - - var bz []byte - - switch fileName { - case "node_key.json": - bz, err = convertNodeKey(cdc, fileBytes) - case "priv_validator.json": - bz, err = convertPrivVal(cdc, fileBytes) - case "genesis.json": - bz, err = convertGenesis(cdc, fileBytes) - default: - fmt.Println("Expected file name to be in (node_key.json, priv_validator.json, genesis.json)") - os.Exit(1) - } - - if err != nil { - panic(err) - } - fmt.Println(string(bz)) - -} diff --git a/state/errors.go b/state/errors.go deleted file mode 100644 index afb5737d..00000000 --- a/state/errors.go +++ /dev/null @@ -1,79 +0,0 @@ -package state - -import ( - cmn "github.com/tendermint/tmlibs/common" -) - -type ( - ErrInvalidBlock error - ErrProxyAppConn error - - ErrUnknownBlock struct { - Height int64 - } - - ErrBlockHashMismatch struct { - CoreHash []byte - AppHash []byte - Height int64 - } - - ErrAppBlockHeightTooHigh struct { - CoreHeight int64 - AppHeight int64 - } - - ErrLastStateMismatch struct { - Height int64 - Core []byte - App []byte - } - - ErrStateMismatch struct { - Got *State - Expected *State - } - - ErrNoValSetForHeight struct { - Height int64 - } - - ErrNoConsensusParamsForHeight struct { - Height int64 - } - - ErrNoABCIResponsesForHeight struct { - Height int64 - } -) - -func (e ErrUnknownBlock) Error() string { - return cmn.Fmt("Could not find block #%d", e.Height) -} - -func (e ErrBlockHashMismatch) Error() string { - return cmn.Fmt("App block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height) -} - -func (e ErrAppBlockHeightTooHigh) Error() string { - return cmn.Fmt("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) -} -func (e ErrLastStateMismatch) Error() string { - return cmn.Fmt("Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App) -} - -func (e ErrStateMismatch) Error() string { - return cmn.Fmt("State after replay does not match saved state. 
Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected) -} - -func (e ErrNoValSetForHeight) Error() string { - return cmn.Fmt("Could not find validator set for height #%d", e.Height) -} - -func (e ErrNoConsensusParamsForHeight) Error() string { - return cmn.Fmt("Could not find consensus params for height #%d", e.Height) -} - -func (e ErrNoABCIResponsesForHeight) Error() string { - return cmn.Fmt("Could not find results for height #%d", e.Height) -} diff --git a/state/execution.go b/state/execution.go deleted file mode 100644 index e6b94429..00000000 --- a/state/execution.go +++ /dev/null @@ -1,398 +0,0 @@ -package state - -import ( - "fmt" - - fail "github.com/ebuchman/fail-test" - abci "github.com/tendermint/abci/types" - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" -) - -//----------------------------------------------------------------------------- -// BlockExecutor handles block execution and state updates. -// It exposes ApplyBlock(), which validates & executes the block, updates state w/ ABCI responses, -// then commits and updates the mempool atomically, then saves state. - -// BlockExecutor provides the context and accessories for properly executing a block. -type BlockExecutor struct { - // save state, validators, consensus params, abci responses here - db dbm.DB - - // execute the app against this - proxyApp proxy.AppConnConsensus - - // events - eventBus types.BlockEventPublisher - - // update these with block results after commit - mempool Mempool - evpool EvidencePool - - logger log.Logger -} - -// NewBlockExecutor returns a new BlockExecutor with a NopEventBus. -// Call SetEventBus to provide one. -func NewBlockExecutor(db dbm.DB, logger log.Logger, proxyApp proxy.AppConnConsensus, - mempool Mempool, evpool EvidencePool) *BlockExecutor { - return &BlockExecutor{ - db: db, - proxyApp: proxyApp, - eventBus: types.NopEventBus{}, - mempool: mempool, - evpool: evpool, - logger: logger, - } -} - -// SetEventBus - sets the event bus for publishing block related events. -// If not called, it defaults to types.NopEventBus. -func (blockExec *BlockExecutor) SetEventBus(eventBus types.BlockEventPublisher) { - blockExec.eventBus = eventBus -} - -// ValidateBlock validates the given block against the given state. -// If the block is invalid, it returns an error. -// Validation does not mutate state, but does require historical information from the stateDB, -// ie. to verify evidence from a validator at an old height. -func (blockExec *BlockExecutor) ValidateBlock(state State, block *types.Block) error { - return validateBlock(blockExec.db, state, block) -} - -// ApplyBlock validates the block against the state, executes it against the app, -// fires the relevant events, commits the app, and saves the new state and responses. -// It's the only function that needs to be called -// from outside this package to process and commit an entire block. -// It takes a blockID to avoid recomputing the parts hash. 
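For orientation, a minimal sketch (not from the original sources) of how a caller drives the executor defined above, modeled on TestApplyBlock later in this diff; MockMempool and MockEvidencePool come from state/services.go, and the stateDB, logger, and app connection are assumed to be wired during node setup.

// applyOne is a hypothetical in-package helper: it wires a BlockExecutor with
// mock mempool/evidence pool and runs the full validate-execute-commit-save
// path for a single block.
func applyOne(stateDB dbm.DB, logger log.Logger, app proxy.AppConnConsensus,
	st State, block *types.Block) (State, error) {
	blockExec := NewBlockExecutor(stateDB, logger, app, MockMempool{}, MockEvidencePool{})
	// reuse the parts hash instead of recomputing it inside ApplyBlock
	blockID := types.BlockID{block.Hash(), block.MakePartSet(65536).Header()}
	return blockExec.ApplyBlock(st, blockID, block)
}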
-func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) {
-
-	if err := blockExec.ValidateBlock(state, block); err != nil {
-		return state, ErrInvalidBlock(err)
-	}
-
-	abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, state.LastValidators, blockExec.db)
-	if err != nil {
-		return state, ErrProxyAppConn(err)
-	}
-
-	fail.Fail() // XXX
-
-	// save the results before we commit
-	saveABCIResponses(blockExec.db, block.Height, abciResponses)
-
-	fail.Fail() // XXX
-
-	// update the state with the block and responses
-	state, err = updateState(state, blockID, block.Header, abciResponses)
-	if err != nil {
-		return state, fmt.Errorf("Commit failed for application: %v", err)
-	}
-
-	// lock mempool, commit app state, update mempool
-	appHash, err := blockExec.Commit(block)
-	if err != nil {
-		return state, fmt.Errorf("Commit failed for application: %v", err)
-	}
-
-	// Update evpool with the block and state.
-	blockExec.evpool.Update(block, state)
-
-	fail.Fail() // XXX
-
-	// update the app hash and save the state
-	state.AppHash = appHash
-	SaveState(blockExec.db, state)
-
-	fail.Fail() // XXX
-
-	// events are fired after everything else
-	// NOTE: if we crash between Commit and Save, events won't be fired during replay
-	fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses)
-
-	return state, nil
-}
-
-// Commit locks the mempool, runs the ABCI Commit message, and updates the mempool.
-// It returns the result of calling abci.Commit (the AppHash), and an error.
-// The Mempool must be locked during commit and update because state is typically reset on Commit and old txs must be replayed
-// against committed state before new txs are run in the mempool, lest they be invalid.
-func (blockExec *BlockExecutor) Commit(block *types.Block) ([]byte, error) {
-	blockExec.mempool.Lock()
-	defer blockExec.mempool.Unlock()
-
-	// while mempool is Locked, flush to ensure all async requests have completed
-	// in the ABCI app before Commit.
-	err := blockExec.mempool.FlushAppConn()
-	if err != nil {
-		blockExec.logger.Error("Client error during mempool.FlushAppConn", "err", err)
-		return nil, err
-	}
-
-	// Commit block, get hash back
-	res, err := blockExec.proxyApp.CommitSync()
-	if err != nil {
-		blockExec.logger.Error("Client error during proxyAppConn.CommitSync", "err", err)
-		return nil, err
-	}
-	// ResponseCommit has no error code - just data
-
-	blockExec.logger.Info("Committed state",
-		"height", block.Height,
-		"txs", block.NumTxs,
-		"appHash", fmt.Sprintf("%X", res.Data))
-
-	// Update mempool.
-	if err := blockExec.mempool.Update(block.Height, block.Txs); err != nil {
-		return nil, err
-	}
-
-	return res.Data, nil
-}
-
-//---------------------------------------------------------
-// Helper functions for executing blocks and updating state
-
-// Executes block's transactions on proxyAppConn.
-// Returns a list of transaction results and updates to the validator set -func execBlockOnProxyApp(logger log.Logger, proxyAppConn proxy.AppConnConsensus, - block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) (*ABCIResponses, error) { - var validTxs, invalidTxs = 0, 0 - - txIndex := 0 - abciResponses := NewABCIResponses(block) - - // Execute transactions and get hash - proxyCb := func(req *abci.Request, res *abci.Response) { - switch r := res.Value.(type) { - case *abci.Response_DeliverTx: - // TODO: make use of res.Log - // TODO: make use of this info - // Blocks may include invalid txs. - txRes := r.DeliverTx - if txRes.Code == abci.CodeTypeOK { - validTxs++ - } else { - logger.Debug("Invalid tx", "code", txRes.Code, "log", txRes.Log) - invalidTxs++ - } - abciResponses.DeliverTx[txIndex] = txRes - txIndex++ - } - } - proxyAppConn.SetResponseCallback(proxyCb) - - signVals, byzVals := getBeginBlockValidatorInfo(block, lastValSet, stateDB) - - // Begin block - _, err := proxyAppConn.BeginBlockSync(abci.RequestBeginBlock{ - Hash: block.Hash(), - Header: types.TM2PB.Header(block.Header), - Validators: signVals, - ByzantineValidators: byzVals, - }) - if err != nil { - logger.Error("Error in proxyAppConn.BeginBlock", "err", err) - return nil, err - } - - // Run txs of block - for _, tx := range block.Txs { - proxyAppConn.DeliverTxAsync(tx) - if err := proxyAppConn.Error(); err != nil { - return nil, err - } - } - - // End block - abciResponses.EndBlock, err = proxyAppConn.EndBlockSync(abci.RequestEndBlock{block.Height}) - if err != nil { - logger.Error("Error in proxyAppConn.EndBlock", "err", err) - return nil, err - } - - logger.Info("Executed block", "height", block.Height, "validTxs", validTxs, "invalidTxs", invalidTxs) - - valUpdates := abciResponses.EndBlock.ValidatorUpdates - if len(valUpdates) > 0 { - logger.Info("Updates to validators", "updates", abci.ValidatorsString(valUpdates)) - } - - return abciResponses, nil -} - -func getBeginBlockValidatorInfo(block *types.Block, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]abci.SigningValidator, []abci.Evidence) { - - // Sanity check that commit length matches validator set size - - // only applies after first block - if block.Height > 1 { - precommitLen := len(block.LastCommit.Precommits) - valSetLen := len(lastValSet.Validators) - if precommitLen != valSetLen { - // sanity check - panic(fmt.Sprintf("precommit length (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", - precommitLen, valSetLen, block.Height, block.LastCommit.Precommits, lastValSet.Validators)) - } - } - - // determine which validators did not sign last block. - signVals := make([]abci.SigningValidator, len(lastValSet.Validators)) - for i, val := range lastValSet.Validators { - var vote *types.Vote - if i < len(block.LastCommit.Precommits) { - vote = block.LastCommit.Precommits[i] - } - val := abci.SigningValidator{ - Validator: types.TM2PB.Validator(val), - SignedLastBlock: vote != nil, - } - signVals[i] = val - } - - byzVals := make([]abci.Evidence, len(block.Evidence.Evidence)) - for i, ev := range block.Evidence.Evidence { - // We need the validator set. We already did this in validateBlock. - // TODO: Should we instead cache the valset in the evidence itself and add - // `SetValidatorSet()` and `ToABCI` methods ? 
- valset, err := LoadValidators(stateDB, ev.Height()) - if err != nil { - panic(err) // shouldn't happen - } - byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) - } - - return signVals, byzVals - -} - -// If more or equal than 1/3 of total voting power changed in one block, then -// a light client could never prove the transition externally. See -// ./lite/doc.go for details on how a light client tracks validators. -func updateValidators(currentSet *types.ValidatorSet, abciUpdates []abci.Validator) error { - updates, err := types.PB2TM.Validators(abciUpdates) - if err != nil { - return err - } - - // these are tendermint types now - for _, valUpdate := range updates { - address := valUpdate.Address - _, val := currentSet.GetByAddress(address) - if val == nil { - // add val - added := currentSet.Add(valUpdate) - if !added { - return fmt.Errorf("Failed to add new validator %v", valUpdate) - } - } else if valUpdate.VotingPower == 0 { - // remove val - _, removed := currentSet.Remove(address) - if !removed { - return fmt.Errorf("Failed to remove validator %X", address) - } - } else { - // update val - updated := currentSet.Update(valUpdate) - if !updated { - return fmt.Errorf("Failed to update validator %X to %v", address, valUpdate) - } - } - } - return nil -} - -// updateState returns a new State updated according to the header and responses. -func updateState(state State, blockID types.BlockID, header *types.Header, - abciResponses *ABCIResponses) (State, error) { - - // copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators - prevValSet := state.Validators.Copy() - nextValSet := prevValSet.Copy() - - // update the validator set with the latest abciResponses - lastHeightValsChanged := state.LastHeightValidatorsChanged - if len(abciResponses.EndBlock.ValidatorUpdates) > 0 { - err := updateValidators(nextValSet, abciResponses.EndBlock.ValidatorUpdates) - if err != nil { - return state, fmt.Errorf("Error changing validator set: %v", err) - } - // change results from this height but only applies to the next height - lastHeightValsChanged = header.Height + 1 - } - - // Update validator accums and set state variables - nextValSet.IncrementAccum(1) - - // update the params with the latest abciResponses - nextParams := state.ConsensusParams - lastHeightParamsChanged := state.LastHeightConsensusParamsChanged - if abciResponses.EndBlock.ConsensusParamUpdates != nil { - // NOTE: must not mutate s.ConsensusParams - nextParams = state.ConsensusParams.Update(abciResponses.EndBlock.ConsensusParamUpdates) - err := nextParams.Validate() - if err != nil { - return state, fmt.Errorf("Error updating consensus params: %v", err) - } - // change results from this height but only applies to the next height - lastHeightParamsChanged = header.Height + 1 - } - - // NOTE: the AppHash has not been populated. - // It will be filled on state.Save. - return State{ - ChainID: state.ChainID, - LastBlockHeight: header.Height, - LastBlockTotalTx: state.LastBlockTotalTx + header.NumTxs, - LastBlockID: blockID, - LastBlockTime: header.Time, - Validators: nextValSet, - LastValidators: state.Validators.Copy(), - LastHeightValidatorsChanged: lastHeightValsChanged, - ConsensusParams: nextParams, - LastHeightConsensusParamsChanged: lastHeightParamsChanged, - LastResultsHash: abciResponses.ResultsHash(), - AppHash: nil, - }, nil -} - -// Fire NewBlock, NewBlockHeader. -// Fire TxEvent for every tx. 
-// NOTE: if Tendermint crashes before commit, some or all of these events may be published again. -func fireEvents(logger log.Logger, eventBus types.BlockEventPublisher, block *types.Block, abciResponses *ABCIResponses) { - eventBus.PublishEventNewBlock(types.EventDataNewBlock{block}) - eventBus.PublishEventNewBlockHeader(types.EventDataNewBlockHeader{block.Header}) - - for i, tx := range block.Data.Txs { - eventBus.PublishEventTx(types.EventDataTx{types.TxResult{ - Height: block.Height, - Index: uint32(i), - Tx: tx, - Result: *(abciResponses.DeliverTx[i]), - }}) - } -} - -//---------------------------------------------------------------------------------------------------- -// Execute block without state. TODO: eliminate - -// ExecCommitBlock executes and commits a block on the proxyApp without validating or mutating the state. -// It returns the application root hash (result of abci.Commit). -func ExecCommitBlock(appConnConsensus proxy.AppConnConsensus, block *types.Block, - logger log.Logger, lastValSet *types.ValidatorSet, stateDB dbm.DB) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, lastValSet, stateDB) - if err != nil { - logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) - return nil, err - } - // Commit block, get hash back - res, err := appConnConsensus.CommitSync() - if err != nil { - logger.Error("Client error during proxyAppConn.CommitSync", "err", res) - return nil, err - } - // ResponseCommit has no error or log, just data - return res.Data, nil -} diff --git a/state/execution_test.go b/state/execution_test.go deleted file mode 100644 index b520b0c1..00000000 --- a/state/execution_test.go +++ /dev/null @@ -1,232 +0,0 @@ -package state - -import ( - "fmt" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/tendermint/abci/example/kvstore" - abci "github.com/tendermint/abci/types" - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" - - "github.com/tendermint/tendermint/proxy" - "github.com/tendermint/tendermint/types" -) - -var ( - chainID = "execution_chain" - testPartSize = 65536 - nTxsPerBlock = 10 -) - -func TestApplyBlock(t *testing.T) { - cc := proxy.NewLocalClientCreator(kvstore.NewKVStoreApplication()) - proxyApp := proxy.NewAppConns(cc, nil) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() - - state, stateDB := state(1, 1) - - blockExec := NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), - MockMempool{}, MockEvidencePool{}) - - block := makeBlock(state, 1) - blockID := types.BlockID{block.Hash(), block.MakePartSet(testPartSize).Header()} - - state, err = blockExec.ApplyBlock(state, blockID, block) - require.Nil(t, err) - - // TODO check state and mempool -} - -// TestBeginBlockValidators ensures we send absent validators list. 
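As a usage note for ExecCommitBlock above, a short sketch (hypothetical helper and parameter names): the block is re-executed and committed against the app, without validating or mutating tendermint state, which is roughly what a handshake/replay path needs.

// replayOne re-executes a stored block against the app via ExecCommitBlock
// and checks the returned AppHash against the one recorded in state;
// lastValSet must be the validator set that signed block.LastCommit.
func replayOne(appConn proxy.AppConnConsensus, block *types.Block, logger log.Logger,
	lastValSet *types.ValidatorSet, stateDB dbm.DB, expectedAppHash []byte) error {
	appHash, err := ExecCommitBlock(appConn, block, logger, lastValSet, stateDB)
	if err != nil {
		return err
	}
	if !bytes.Equal(appHash, expectedAppHash) {
		return fmt.Errorf("app hash mismatch: got %X, expected %X", appHash, expectedAppHash)
	}
	return nil
}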
-func TestBeginBlockValidators(t *testing.T) { - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc, nil) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() - - state, stateDB := state(2, 2) - - prevHash := state.LastBlockID.Hash - prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{prevHash, prevParts} - - now := time.Now().UTC() - vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} - vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} - - testCases := []struct { - desc string - lastCommitPrecommits []*types.Vote - expectedAbsentValidators []int - }{ - {"none absent", []*types.Vote{vote0, vote1}, []int{}}, - {"one absent", []*types.Vote{vote0, nil}, []int{1}}, - {"multiple absent", []*types.Vote{nil, nil}, []int{0, 1}}, - } - - for _, tc := range testCases { - lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: tc.lastCommitPrecommits} - - // block for height 2 - block, _ := state.MakeBlock(2, makeTxs(2), lastCommit) - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) - require.Nil(t, err, tc.desc) - - // -> app receives a list of validators with a bool indicating if they signed - ctr := 0 - for i, v := range app.Validators { - if ctr < len(tc.expectedAbsentValidators) && - tc.expectedAbsentValidators[ctr] == i { - - assert.False(t, v.SignedLastBlock) - ctr++ - } else { - assert.True(t, v.SignedLastBlock) - } - } - } -} - -// TestBeginBlockByzantineValidators ensures we send byzantine validators list. -func TestBeginBlockByzantineValidators(t *testing.T) { - app := &testApp{} - cc := proxy.NewLocalClientCreator(app) - proxyApp := proxy.NewAppConns(cc, nil) - err := proxyApp.Start() - require.Nil(t, err) - defer proxyApp.Stop() - - state, stateDB := state(2, 12) - - prevHash := state.LastBlockID.Hash - prevParts := types.PartSetHeader{} - prevBlockID := types.BlockID{prevHash, prevParts} - - height1, idx1, val1 := int64(8), 0, state.Validators.Validators[0].Address - height2, idx2, val2 := int64(3), 1, state.Validators.Validators[1].Address - ev1 := types.NewMockGoodEvidence(height1, idx1, val1) - ev2 := types.NewMockGoodEvidence(height2, idx2, val2) - - now := time.Now() - valSet := state.Validators - testCases := []struct { - desc string - evidence []types.Evidence - expectedByzantineValidators []abci.Evidence - }{ - {"none byzantine", []types.Evidence{}, []abci.Evidence{}}, - {"one byzantine", []types.Evidence{ev1}, []abci.Evidence{types.TM2PB.Evidence(ev1, valSet, now)}}, - {"multiple byzantine", []types.Evidence{ev1, ev2}, []abci.Evidence{ - types.TM2PB.Evidence(ev1, valSet, now), - types.TM2PB.Evidence(ev2, valSet, now)}}, - } - - vote0 := &types.Vote{ValidatorIndex: 0, Timestamp: now, Type: types.VoteTypePrecommit} - vote1 := &types.Vote{ValidatorIndex: 1, Timestamp: now} - votes := []*types.Vote{vote0, vote1} - lastCommit := &types.Commit{BlockID: prevBlockID, Precommits: votes} - for _, tc := range testCases { - - block, _ := state.MakeBlock(10, makeTxs(2), lastCommit) - block.Time = now - block.Evidence.Evidence = tc.evidence - _, err = ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), state.Validators, stateDB) - require.Nil(t, err, tc.desc) - - // -> app must receive an index of the byzantine validator - assert.Equal(t, tc.expectedByzantineValidators, app.ByzantineValidators, tc.desc) - } -} - -//---------------------------------------------------------------------------- - -// make 
some bogus txs -func makeTxs(height int64) (txs []types.Tx) { - for i := 0; i < nTxsPerBlock; i++ { - txs = append(txs, types.Tx([]byte{byte(height), byte(i)})) - } - return txs -} - -func state(nVals, height int) (State, dbm.DB) { - vals := make([]types.GenesisValidator, nVals) - for i := 0; i < nVals; i++ { - secret := []byte(fmt.Sprintf("test%d", i)) - pk := crypto.GenPrivKeyEd25519FromSecret(secret) - vals[i] = types.GenesisValidator{ - pk.PubKey(), 1000, fmt.Sprintf("test%d", i), - } - } - s, _ := MakeGenesisState(&types.GenesisDoc{ - ChainID: chainID, - Validators: vals, - AppHash: nil, - }) - - // save validators to db for 2 heights - stateDB := dbm.NewMemDB() - SaveState(stateDB, s) - - for i := 1; i < height; i++ { - s.LastBlockHeight += 1 - SaveState(stateDB, s) - } - return s, stateDB -} - -func makeBlock(state State, height int64) *types.Block { - block, _ := state.MakeBlock(height, makeTxs(state.LastBlockHeight), new(types.Commit)) - return block -} - -//---------------------------------------------------------------------------- - -var _ abci.Application = (*testApp)(nil) - -type testApp struct { - abci.BaseApplication - - Validators []abci.SigningValidator - ByzantineValidators []abci.Evidence -} - -func NewKVStoreApplication() *testApp { - return &testApp{} -} - -func (app *testApp) Info(req abci.RequestInfo) (resInfo abci.ResponseInfo) { - return abci.ResponseInfo{} -} - -func (app *testApp) BeginBlock(req abci.RequestBeginBlock) abci.ResponseBeginBlock { - app.Validators = req.Validators - app.ByzantineValidators = req.ByzantineValidators - return abci.ResponseBeginBlock{} -} - -func (app *testApp) DeliverTx(tx []byte) abci.ResponseDeliverTx { - return abci.ResponseDeliverTx{Tags: []cmn.KVPair{}} -} - -func (app *testApp) CheckTx(tx []byte) abci.ResponseCheckTx { - return abci.ResponseCheckTx{} -} - -func (app *testApp) Commit() abci.ResponseCommit { - return abci.ResponseCommit{} -} - -func (app *testApp) Query(reqQuery abci.RequestQuery) (resQuery abci.ResponseQuery) { - return -} diff --git a/state/services.go b/state/services.go deleted file mode 100644 index bef286b2..00000000 --- a/state/services.go +++ /dev/null @@ -1,86 +0,0 @@ -package state - -import ( - abci "github.com/tendermint/abci/types" - "github.com/tendermint/tendermint/types" -) - -//------------------------------------------------------ -// blockchain services types -// NOTE: Interfaces used by RPC must be thread safe! -//------------------------------------------------------ - -//------------------------------------------------------ -// mempool - -// Mempool defines the mempool interface as used by the ConsensusState. -// Updates to the mempool need to be synchronized with committing a block -// so apps can reset their transient state on Commit -type Mempool interface { - Lock() - Unlock() - - Size() int - CheckTx(types.Tx, func(*abci.Response)) error - Reap(int) types.Txs - Update(height int64, txs types.Txs) error - Flush() - FlushAppConn() error - - TxsAvailable() <-chan int64 - EnableTxsAvailable() -} - -// MockMempool is an empty implementation of a Mempool, useful for testing. 
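The Mempool interface above is what the consensus machinery consumes; a minimal in-package sketch (hypothetical helper) of the txs-available signal implied by its method names:

// waitForTxs enables the txs-available signal and blocks until the mempool
// reports that there are txs for some height, as a proposer might when
// create_empty_blocks=false. Note the MockMempool below returns a channel
// that never fires.
func waitForTxs(mem Mempool) int64 {
	mem.EnableTxsAvailable()
	return <-mem.TxsAvailable()
}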
-type MockMempool struct {
-}
-
-func (m MockMempool) Lock() {}
-func (m MockMempool) Unlock() {}
-func (m MockMempool) Size() int { return 0 }
-func (m MockMempool) CheckTx(tx types.Tx, cb func(*abci.Response)) error { return nil }
-func (m MockMempool) Reap(n int) types.Txs { return types.Txs{} }
-func (m MockMempool) Update(height int64, txs types.Txs) error { return nil }
-func (m MockMempool) Flush() {}
-func (m MockMempool) FlushAppConn() error { return nil }
-func (m MockMempool) TxsAvailable() <-chan int64 { return make(chan int64) }
-func (m MockMempool) EnableTxsAvailable() {}
-
-//------------------------------------------------------
-// blockstore
-
-// BlockStoreRPC is the block store interface used by the RPC.
-type BlockStoreRPC interface {
-	Height() int64
-
-	LoadBlockMeta(height int64) *types.BlockMeta
-	LoadBlock(height int64) *types.Block
-	LoadBlockPart(height int64, index int) *types.Part
-
-	LoadBlockCommit(height int64) *types.Commit
-	LoadSeenCommit(height int64) *types.Commit
-}
-
-// BlockStore defines the BlockStore interface used by the ConsensusState.
-type BlockStore interface {
-	BlockStoreRPC
-	SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit)
-}
-
-//-----------------------------------------------------------------------------------------------------
-// evidence pool
-
-// EvidencePool defines the EvidencePool interface used by the ConsensusState.
-type EvidencePool interface {
-	PendingEvidence() []types.Evidence
-	AddEvidence(types.Evidence) error
-	Update(*types.Block, State)
-}
-
-// MockEvidencePool is an empty implementation of an EvidencePool, useful for testing.
-type MockEvidencePool struct {
-}
-
-func (m MockEvidencePool) PendingEvidence() []types.Evidence { return nil }
-func (m MockEvidencePool) AddEvidence(types.Evidence) error { return nil }
-func (m MockEvidencePool) Update(*types.Block, State) {}
diff --git a/state/state.go b/state/state.go
deleted file mode 100644
index 3bc08dae..00000000
--- a/state/state.go
+++ /dev/null
@@ -1,187 +0,0 @@
-package state
-
-import (
-	"bytes"
-	"fmt"
-	"io/ioutil"
-	"time"
-
-	"github.com/tendermint/tendermint/types"
-)
-
-// database keys
-var (
-	stateKey = []byte("stateKey")
-)
-
-//-----------------------------------------------------------------------------
-
-// State is a short description of the latest committed block of the Tendermint consensus.
-// It keeps all information necessary to validate new blocks,
-// including the last validator set and the consensus params.
-// All fields are exposed so the struct can be easily serialized,
-// but none of them should be mutated directly.
-// Instead, use state.Copy() or state.NextState(...).
-// NOTE: not goroutine-safe.
-type State struct {
-	// Immutable
-	ChainID string
-
-	// LastBlockHeight=0 at genesis (ie. block(H=0) does not exist)
-	LastBlockHeight  int64
-	LastBlockTotalTx int64
-	LastBlockID      types.BlockID
-	LastBlockTime    time.Time
-
-	// LastValidators is used to validate block.LastCommit.
-	// Validators are persisted to the database separately every time they change,
-	// so we can query for historical validator sets.
-	// Note that if s.LastBlockHeight causes a valset change,
-	// we set s.LastHeightValidatorsChanged = s.LastBlockHeight + 1
-	Validators                  *types.ValidatorSet
-	LastValidators              *types.ValidatorSet
-	LastHeightValidatorsChanged int64
-
-	// Consensus parameters used for validating blocks.
-	// Changes returned by EndBlock and updated after Commit.
- ConsensusParams types.ConsensusParams - LastHeightConsensusParamsChanged int64 - - // Merkle root of the results from executing prev block - LastResultsHash []byte - - // The latest AppHash we've received from calling abci.Commit() - AppHash []byte -} - -// Copy makes a copy of the State for mutating. -func (state State) Copy() State { - return State{ - ChainID: state.ChainID, - - LastBlockHeight: state.LastBlockHeight, - LastBlockTotalTx: state.LastBlockTotalTx, - LastBlockID: state.LastBlockID, - LastBlockTime: state.LastBlockTime, - - Validators: state.Validators.Copy(), - LastValidators: state.LastValidators.Copy(), - LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, - - ConsensusParams: state.ConsensusParams, - LastHeightConsensusParamsChanged: state.LastHeightConsensusParamsChanged, - - AppHash: state.AppHash, - - LastResultsHash: state.LastResultsHash, - } -} - -// Equals returns true if the States are identical. -func (state State) Equals(state2 State) bool { - sbz, s2bz := state.Bytes(), state2.Bytes() - return bytes.Equal(sbz, s2bz) -} - -// Bytes serializes the State using go-amino. -func (state State) Bytes() []byte { - return cdc.MustMarshalBinaryBare(state) -} - -// IsEmpty returns true if the State is equal to the empty State. -func (state State) IsEmpty() bool { - return state.Validators == nil // XXX can't compare to Empty -} - -// GetValidators returns the last and current validator sets. -func (state State) GetValidators() (last *types.ValidatorSet, current *types.ValidatorSet) { - return state.LastValidators, state.Validators -} - -//------------------------------------------------------------------------ -// Create a block from the latest state - -// MakeBlock builds a block with the given txs and commit from the current state. -func (state State) MakeBlock(height int64, txs []types.Tx, commit *types.Commit) (*types.Block, *types.PartSet) { - // build base block - block := types.MakeBlock(height, txs, commit) - - // fill header with state data - block.ChainID = state.ChainID - block.TotalTxs = state.LastBlockTotalTx + block.NumTxs - block.LastBlockID = state.LastBlockID - block.ValidatorsHash = state.Validators.Hash() - block.AppHash = state.AppHash - block.ConsensusHash = state.ConsensusParams.Hash() - block.LastResultsHash = state.LastResultsHash - - return block, block.MakePartSet(state.ConsensusParams.BlockGossip.BlockPartSizeBytes) -} - -//------------------------------------------------------------------------ -// Genesis - -// MakeGenesisStateFromFile reads and unmarshals state from the given -// file. -// -// Used during replay and in tests. -func MakeGenesisStateFromFile(genDocFile string) (State, error) { - genDoc, err := MakeGenesisDocFromFile(genDocFile) - if err != nil { - return State{}, err - } - return MakeGenesisState(genDoc) -} - -// MakeGenesisDocFromFile reads and unmarshals genesis doc from the given file. -func MakeGenesisDocFromFile(genDocFile string) (*types.GenesisDoc, error) { - genDocJSON, err := ioutil.ReadFile(genDocFile) - if err != nil { - return nil, fmt.Errorf("Couldn't read GenesisDoc file: %v", err) - } - genDoc, err := types.GenesisDocFromJSON(genDocJSON) - if err != nil { - return nil, fmt.Errorf("Error reading GenesisDoc: %v", err) - } - return genDoc, nil -} - -// MakeGenesisState creates state from types.GenesisDoc. 
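A minimal sketch (hypothetical helper and illustrative file path) of the genesis entry points above: read genesis.json, derive the initial State, and cut block 1 from it with an empty LastCommit, mirroring MakeBlock usage in the tests.

// loadGenesisAndMakeFirstBlock shows the typical call order.
func loadGenesisAndMakeFirstBlock() (State, *types.Block, error) {
	st, err := MakeGenesisStateFromFile("./config/genesis.json")
	if err != nil {
		return State{}, nil, err
	}
	// block 1 has no txs and no previous commit
	block, _ := st.MakeBlock(1, nil, new(types.Commit))
	return st, block, nil
}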
-func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { - err := genDoc.ValidateAndComplete() - if err != nil { - return State{}, fmt.Errorf("Error in genesis file: %v", err) - } - - // Make validators slice - validators := make([]*types.Validator, len(genDoc.Validators)) - for i, val := range genDoc.Validators { - pubKey := val.PubKey - address := pubKey.Address() - - // Make validator - validators[i] = &types.Validator{ - Address: address, - PubKey: pubKey, - VotingPower: val.Power, - } - } - - return State{ - - ChainID: genDoc.ChainID, - - LastBlockHeight: 0, - LastBlockID: types.BlockID{}, - LastBlockTime: genDoc.GenesisTime, - - Validators: types.NewValidatorSet(validators), - LastValidators: types.NewValidatorSet(nil), - LastHeightValidatorsChanged: 1, - - ConsensusParams: *genDoc.ConsensusParams, - LastHeightConsensusParamsChanged: 1, - - AppHash: genDoc.AppHash, - }, nil -} diff --git a/state/state_test.go b/state/state_test.go deleted file mode 100644 index 464456ca..00000000 --- a/state/state_test.go +++ /dev/null @@ -1,489 +0,0 @@ -package state - -import ( - "bytes" - "fmt" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - abci "github.com/tendermint/abci/types" - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - - cfg "github.com/tendermint/tendermint/config" - "github.com/tendermint/tendermint/types" -) - -// setupTestCase does setup common to all test cases -func setupTestCase(t *testing.T) (func(t *testing.T), dbm.DB, State) { - config := cfg.ResetTestRoot("state_") - dbType := dbm.DBBackendType(config.DBBackend) - stateDB := dbm.NewDB("state", dbType, config.DBDir()) - state, err := LoadStateFromDBOrGenesisFile(stateDB, config.GenesisFile()) - assert.NoError(t, err, "expected no error on LoadStateFromDBOrGenesisFile") - - tearDown := func(t *testing.T) {} - - return tearDown, stateDB, state -} - -// TestStateCopy tests the correct copying behaviour of State. -func TestStateCopy(t *testing.T) { - tearDown, _, state := setupTestCase(t) - defer tearDown(t) - // nolint: vetshadow - assert := assert.New(t) - - stateCopy := state.Copy() - - assert.True(state.Equals(stateCopy), - cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", - stateCopy, state)) - - stateCopy.LastBlockHeight++ - assert.False(state.Equals(stateCopy), cmn.Fmt(`expected states to be different. got same - %v`, state)) -} - -// TestStateSaveLoad tests saving and loading State from a db. -func TestStateSaveLoad(t *testing.T) { - tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) - // nolint: vetshadow - assert := assert.New(t) - - state.LastBlockHeight++ - SaveState(stateDB, state) - - loadedState := LoadState(stateDB) - assert.True(state.Equals(loadedState), - cmn.Fmt("expected state and its copy to be identical.\ngot: %v\nexpected: %v\n", - loadedState, state)) -} - -// TestABCIResponsesSaveLoad tests saving and loading ABCIResponses. 
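The Copy/Equals contract exercised by TestStateCopy above can be summarized in a short sketch (hypothetical helper): a copy starts identical and diverges once mutated, and equality is defined over the amino serialization.

// copySemantics returns (true, true) for any valid State.
func copySemantics(st State) (sameBefore, differAfter bool) {
	cp := st.Copy()
	sameBefore = st.Equals(cp) // byte-for-byte equal serialization
	cp.LastBlockHeight++
	differAfter = !st.Equals(cp) // mutating the copy leaves the original intact
	return
}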
-func TestABCIResponsesSaveLoad1(t *testing.T) { - tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) - // nolint: vetshadow - assert := assert.New(t) - - state.LastBlockHeight++ - - // build mock responses - block := makeBlock(state, 2) - abciResponses := NewABCIResponses(block) - abciResponses.DeliverTx[0] = &abci.ResponseDeliverTx{Data: []byte("foo"), Tags: nil} - abciResponses.DeliverTx[1] = &abci.ResponseDeliverTx{Data: []byte("bar"), Log: "ok", Tags: nil} - abciResponses.EndBlock = &abci.ResponseEndBlock{ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(crypto.GenPrivKeyEd25519().PubKey(), 10), - }} - - saveABCIResponses(stateDB, block.Height, abciResponses) - loadedABCIResponses, err := LoadABCIResponses(stateDB, block.Height) - assert.Nil(err) - assert.Equal(abciResponses, loadedABCIResponses, - cmn.Fmt("ABCIResponses don't match:\ngot: %v\nexpected: %v\n", - loadedABCIResponses, abciResponses)) -} - -// TestResultsSaveLoad tests saving and loading abci results. -func TestABCIResponsesSaveLoad2(t *testing.T) { - tearDown, stateDB, _ := setupTestCase(t) - defer tearDown(t) - // nolint: vetshadow - assert := assert.New(t) - - cases := [...]struct { - // height is implied index+2 - // as block 1 is created from genesis - added []*abci.ResponseDeliverTx - expected types.ABCIResults - }{ - 0: { - nil, - nil, - }, - 1: { - []*abci.ResponseDeliverTx{ - {Code: 32, Data: []byte("Hello"), Log: "Huh?"}, - }, - types.ABCIResults{ - {32, []byte("Hello")}, - }}, - 2: { - []*abci.ResponseDeliverTx{ - {Code: 383}, - {Data: []byte("Gotcha!"), - Tags: []cmn.KVPair{ - cmn.KVPair{[]byte("a"), []byte("1")}, - cmn.KVPair{[]byte("build"), []byte("stuff")}, - }}, - }, - types.ABCIResults{ - {383, nil}, - {0, []byte("Gotcha!")}, - }}, - 3: { - nil, - nil, - }, - } - - // query all before, should return error - for i := range cases { - h := int64(i + 1) - res, err := LoadABCIResponses(stateDB, h) - assert.Error(err, "%d: %#v", i, res) - } - - // add all cases - for i, tc := range cases { - h := int64(i + 1) // last block height, one below what we save - responses := &ABCIResponses{ - DeliverTx: tc.added, - EndBlock: &abci.ResponseEndBlock{}, - } - saveABCIResponses(stateDB, h, responses) - } - - // query all before, should return expected value - for i, tc := range cases { - h := int64(i + 1) - res, err := LoadABCIResponses(stateDB, h) - assert.NoError(err, "%d", i) - assert.Equal(tc.expected.Hash(), res.ResultsHash(), "%d", i) - } -} - -// TestValidatorSimpleSaveLoad tests saving and loading validators. 
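A compact sketch (hypothetical helper) of the round trip exercised by the tests above; saveABCIResponses and LoadABCIResponses are defined in state/store.go later in this diff and are keyed by height.

// roundTripResponses persists the responses for a block and reloads them.
func roundTripResponses(stateDB dbm.DB, block *types.Block) (*ABCIResponses, error) {
	responses := NewABCIResponses(block)
	responses.EndBlock = &abci.ResponseEndBlock{}
	saveABCIResponses(stateDB, block.Height, responses)
	return LoadABCIResponses(stateDB, block.Height)
}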
-func TestValidatorSimpleSaveLoad(t *testing.T) { - tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) - // nolint: vetshadow - assert := assert.New(t) - - // can't load anything for height 0 - v, err := LoadValidators(stateDB, 0) - assert.IsType(ErrNoValSetForHeight{}, err, "expected err at height 0") - - // should be able to load for height 1 - v, err = LoadValidators(stateDB, 1) - assert.Nil(err, "expected no err at height 1") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // increment height, save; should be able to load for next height - state.LastBlockHeight++ - nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) - assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // increment height, save; should be able to load for next height - state.LastBlockHeight += 10 - nextHeight = state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - v, err = LoadValidators(stateDB, nextHeight) - assert.Nil(err, "expected no err") - assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") - - // should be able to load for next next height - _, err = LoadValidators(stateDB, state.LastBlockHeight+2) - assert.IsType(ErrNoValSetForHeight{}, err, "expected err at unknown height") -} - -// TestValidatorChangesSaveLoad tests saving and loading a validator set with changes. -func TestOneValidatorChangesSaveLoad(t *testing.T) { - tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) - - // change vals at these heights - changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} - N := len(changeHeights) - - // build the validator history by running updateState - // with the right validator set for each height - highestHeight := changeHeights[N-1] + 5 - changeIndex := 0 - _, val := state.Validators.GetByIndex(0) - power := val.VotingPower - var err error - for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next pubkey - if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { - changeIndex++ - power++ - } - header, blockID, responses := makeHeaderPartsResponsesValPowerChange(state, i, power) - state, err = updateState(state, blockID, header, responses) - assert.Nil(t, err) - nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - } - - // on each change height, increment the power by one. 
-	testCases := make([]int64, highestHeight)
-	changeIndex = 0
-	power = val.VotingPower
-	for i := int64(1); i < highestHeight+1; i++ {
-		// when we get to the height after a change height,
-		// use the next power (note our counter starts at 0 this time)
-		if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 {
-			changeIndex++
-			power++
-		}
-		testCases[i-1] = power
-	}
-
-	for i, power := range testCases {
-		v, err := LoadValidators(stateDB, int64(i+1))
-		assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i))
-		assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size())
-		_, val := v.GetByIndex(0)
-
-		assert.Equal(t, val.VotingPower, power, fmt.Sprintf("unexpected power at height %d", i))
-	}
-}
-
-// TestManyValidatorChangesSaveLoad tests saving and loading a validator set with
-// changes.
-func TestManyValidatorChangesSaveLoad(t *testing.T) {
-	const valSetSize = 7
-	tearDown, stateDB, state := setupTestCase(t)
-	state.Validators = genValSet(valSetSize)
-	SaveState(stateDB, state)
-	defer tearDown(t)
-
-	const height = 1
-	pubkey := crypto.GenPrivKeyEd25519().PubKey()
-	// swap the first validator with a new one (validator set size stays the same)
-	header, blockID, responses := makeHeaderPartsResponsesValPubKeyChange(state, height, pubkey)
-	var err error
-	state, err = updateState(state, blockID, header, responses)
-	require.Nil(t, err)
-	nextHeight := state.LastBlockHeight + 1
-	saveValidatorsInfo(stateDB, nextHeight, state.LastHeightValidatorsChanged, state.Validators)
-
-	v, err := LoadValidators(stateDB, height+1)
-	assert.Nil(t, err)
-	assert.Equal(t, valSetSize, v.Size())
-
-	index, val := v.GetByAddress(pubkey.Address())
-	assert.NotNil(t, val)
-	if index < 0 {
-		t.Fatal("expected to find newly added validator")
-	}
-}
-
-func genValSet(size int) *types.ValidatorSet {
-	vals := make([]*types.Validator, size)
-	for i := 0; i < size; i++ {
-		vals[i] = types.NewValidator(crypto.GenPrivKeyEd25519().PubKey(), 10)
-	}
-	return types.NewValidatorSet(vals)
-}
-
-// TestConsensusParamsChangesSaveLoad tests saving and loading consensus params
-// with changes.
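Height-addressed validator lookups, as used throughout the tests above, follow one pattern; a sketch (hypothetical helper) that also shows the typed error from state/errors.go:

// validatorsAt loads the validator set recorded for a height, distinguishing
// "no set stored" from other failures.
func validatorsAt(stateDB dbm.DB, height int64) (*types.ValidatorSet, error) {
	v, err := LoadValidators(stateDB, height)
	if _, ok := err.(ErrNoValSetForHeight); ok {
		return nil, err // nothing recorded at or before this height
	}
	return v, err
}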
-func TestConsensusParamsChangesSaveLoad(t *testing.T) { - tearDown, stateDB, state := setupTestCase(t) - defer tearDown(t) - - // change vals at these heights - changeHeights := []int64{1, 2, 4, 5, 10, 15, 16, 17, 20} - N := len(changeHeights) - - // each valset is just one validator - // create list of them - params := make([]types.ConsensusParams, N+1) - params[0] = state.ConsensusParams - for i := 1; i < N+1; i++ { - params[i] = *types.DefaultConsensusParams() - params[i].BlockSize.MaxBytes += i - } - - // build the params history by running updateState - // with the right params set for each height - highestHeight := changeHeights[N-1] + 5 - changeIndex := 0 - cp := params[changeIndex] - var err error - for i := int64(1); i < highestHeight; i++ { - // when we get to a change height, - // use the next params - if changeIndex < len(changeHeights) && i == changeHeights[changeIndex] { - changeIndex++ - cp = params[changeIndex] - } - header, blockID, responses := makeHeaderPartsResponsesParams(state, i, cp) - state, err = updateState(state, blockID, header, responses) - - require.Nil(t, err) - nextHeight := state.LastBlockHeight + 1 - saveConsensusParamsInfo(stateDB, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) - } - - // make all the test cases by using the same params until after the change - testCases := make([]paramsChangeTestCase, highestHeight) - changeIndex = 0 - cp = params[changeIndex] - for i := int64(1); i < highestHeight+1; i++ { - // we we get to the height after a change height - // use the next pubkey (note our counter starts at 0 this time) - if changeIndex < len(changeHeights) && i == changeHeights[changeIndex]+1 { - changeIndex++ - cp = params[changeIndex] - } - testCases[i-1] = paramsChangeTestCase{i, cp} - } - - for _, testCase := range testCases { - p, err := LoadConsensusParams(stateDB, testCase.height) - assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", testCase.height)) - assert.Equal(t, testCase.params, p, fmt.Sprintf(`unexpected consensus params at - height %d`, testCase.height)) - } -} - -func makeParams(blockBytes, blockTx, blockGas, txBytes, - txGas, partSize int) types.ConsensusParams { - - return types.ConsensusParams{ - BlockSize: types.BlockSize{ - MaxBytes: blockBytes, - MaxTxs: blockTx, - MaxGas: int64(blockGas), - }, - TxSize: types.TxSize{ - MaxBytes: txBytes, - MaxGas: int64(txGas), - }, - BlockGossip: types.BlockGossip{ - BlockPartSizeBytes: partSize, - }, - } -} - -func pk() []byte { - return crypto.GenPrivKeyEd25519().PubKey().Bytes() -} - -func TestApplyUpdates(t *testing.T) { - initParams := makeParams(1, 2, 3, 4, 5, 6) - - cases := [...]struct { - init types.ConsensusParams - updates abci.ConsensusParams - expected types.ConsensusParams - }{ - 0: {initParams, abci.ConsensusParams{}, initParams}, - 1: {initParams, abci.ConsensusParams{}, initParams}, - 2: {initParams, - abci.ConsensusParams{ - TxSize: &abci.TxSize{ - MaxBytes: 123, - }, - }, - makeParams(1, 2, 3, 123, 5, 6)}, - 3: {initParams, - abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - MaxTxs: 44, - MaxGas: 55, - }, - }, - makeParams(1, 44, 55, 4, 5, 6)}, - 4: {initParams, - abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - MaxTxs: 789, - }, - TxSize: &abci.TxSize{ - MaxGas: 888, - }, - BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: 2002, - }, - }, - makeParams(1, 789, 3, 4, 888, 2002)}, - } - - for i, tc := range cases { - res := tc.init.Update(&(tc.updates)) - assert.Equal(t, tc.expected, res, "case %d", i) - } -} - -func 
makeHeaderPartsResponsesValPubKeyChange(state State, height int64, - pubkey crypto.PubKey) (*types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, height) - abciResponses := &ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, - } - - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) - if !bytes.Equal(pubkey.Bytes(), val.PubKey.Bytes()) { - abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, 0), - types.TM2PB.ValidatorFromPubKeyAndPower(pubkey, 10), - }, - } - } - - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} - -func makeHeaderPartsResponsesValPowerChange(state State, height int64, - power int64) (*types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, height) - abciResponses := &ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ValidatorUpdates: nil}, - } - - // if the pubkey is new, remove the old and add the new - _, val := state.Validators.GetByIndex(0) - if val.VotingPower != power { - abciResponses.EndBlock = &abci.ResponseEndBlock{ - ValidatorUpdates: []abci.Validator{ - types.TM2PB.ValidatorFromPubKeyAndPower(val.PubKey, power), - }, - } - } - - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} - -func makeHeaderPartsResponsesParams(state State, height int64, - params types.ConsensusParams) (*types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, height) - abciResponses := &ABCIResponses{ - EndBlock: &abci.ResponseEndBlock{ConsensusParamUpdates: types.TM2PB.ConsensusParams(¶ms)}, - } - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} - -type paramsChangeTestCase struct { - height int64 - params types.ConsensusParams -} - -func makeHeaderPartsResults(state State, height int64, - results []*abci.ResponseDeliverTx) (*types.Header, types.BlockID, *ABCIResponses) { - - block := makeBlock(state, height) - abciResponses := &ABCIResponses{ - DeliverTx: results, - EndBlock: &abci.ResponseEndBlock{}, - } - return block.Header, types.BlockID{block.Hash(), types.PartSetHeader{}}, abciResponses -} diff --git a/state/store.go b/state/store.go deleted file mode 100644 index 2164d699..00000000 --- a/state/store.go +++ /dev/null @@ -1,282 +0,0 @@ -package state - -import ( - "fmt" - - abci "github.com/tendermint/abci/types" - "github.com/tendermint/tendermint/types" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" -) - -//------------------------------------------------------------------------ - -func calcValidatorsKey(height int64) []byte { - return []byte(cmn.Fmt("validatorsKey:%v", height)) -} - -func calcConsensusParamsKey(height int64) []byte { - return []byte(cmn.Fmt("consensusParamsKey:%v", height)) -} - -func calcABCIResponsesKey(height int64) []byte { - return []byte(cmn.Fmt("abciResponsesKey:%v", height)) -} - -// LoadStateFromDBOrGenesisFile loads the most recent state from the database, -// or creates a new one from the given genesisFilePath and persists the result -// to the database. 
-func LoadStateFromDBOrGenesisFile(stateDB dbm.DB, genesisFilePath string) (State, error) { - state := LoadState(stateDB) - if state.IsEmpty() { - var err error - state, err = MakeGenesisStateFromFile(genesisFilePath) - if err != nil { - return state, err - } - SaveState(stateDB, state) - } - - return state, nil -} - -// LoadStateFromDBOrGenesisDoc loads the most recent state from the database, -// or creates a new one from the given genesisDoc and persists the result -// to the database. -func LoadStateFromDBOrGenesisDoc(stateDB dbm.DB, genesisDoc *types.GenesisDoc) (State, error) { - state := LoadState(stateDB) - if state.IsEmpty() { - var err error - state, err = MakeGenesisState(genesisDoc) - if err != nil { - return state, err - } - SaveState(stateDB, state) - } - - return state, nil -} - -// LoadState loads the State from the database. -func LoadState(db dbm.DB) State { - return loadState(db, stateKey) -} - -func loadState(db dbm.DB, key []byte) (state State) { - buf := db.Get(key) - if len(buf) == 0 { - return state - } - - err := cdc.UnmarshalBinaryBare(buf, &state) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadState: Data has been corrupted or its spec has changed: - %v\n`, err)) - } - // TODO: ensure that buf is completely read. - - return state -} - -// SaveState persists the State, the ValidatorsInfo, and the ConsensusParamsInfo to the database. -func SaveState(db dbm.DB, state State) { - saveState(db, state, stateKey) -} - -func saveState(db dbm.DB, state State, key []byte) { - nextHeight := state.LastBlockHeight + 1 - saveValidatorsInfo(db, nextHeight, state.LastHeightValidatorsChanged, state.Validators) - saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) - db.SetSync(stateKey, state.Bytes()) -} - -//------------------------------------------------------------------------ - -// ABCIResponses retains the responses -// of the various ABCI calls during block processing. -// It is persisted to disk for each height before calling Commit. -type ABCIResponses struct { - DeliverTx []*abci.ResponseDeliverTx - EndBlock *abci.ResponseEndBlock -} - -// NewABCIResponses returns a new ABCIResponses -func NewABCIResponses(block *types.Block) *ABCIResponses { - resDeliverTxs := make([]*abci.ResponseDeliverTx, block.NumTxs) - if block.NumTxs == 0 { - // This makes Amino encoding/decoding consistent. - resDeliverTxs = nil - } - return &ABCIResponses{ - DeliverTx: resDeliverTxs, - } -} - -// Bytes serializes the ABCIResponse using go-amino. -func (arz *ABCIResponses) Bytes() []byte { - return cdc.MustMarshalBinaryBare(arz) -} - -func (arz *ABCIResponses) ResultsHash() []byte { - results := types.NewResults(arz.DeliverTx) - return results.Hash() -} - -// LoadABCIResponses loads the ABCIResponses for the given height from the database. -// This is useful for recovering from crashes where we called app.Commit and before we called -// s.Save(). It can also be used to produce Merkle proofs of the result of txs. 
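A minimal sketch (hypothetical helper) of the persistence pair above; note that SaveState also writes the validator set and consensus params for the next height, so a freshly loaded State can serve LoadValidators and LoadConsensusParams queries.

// persistAndReload saves a State and reads it back; LoadState returns the
// zero State when nothing has been stored yet.
func persistAndReload(db dbm.DB, st State) State {
	SaveState(db, st)
	return LoadState(db)
}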
-func LoadABCIResponses(db dbm.DB, height int64) (*ABCIResponses, error) { - buf := db.Get(calcABCIResponsesKey(height)) - if len(buf) == 0 { - return nil, ErrNoABCIResponsesForHeight{height} - } - - abciResponses := new(ABCIResponses) - err := cdc.UnmarshalBinaryBare(buf, abciResponses) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadABCIResponses: Data has been corrupted or its spec has - changed: %v\n`, err)) - } - // TODO: ensure that buf is completely read. - - return abciResponses, nil -} - -// SaveABCIResponses persists the ABCIResponses to the database. -// This is useful in case we crash after app.Commit and before s.Save(). -// Responses are indexed by height so they can also be loaded later to produce Merkle proofs. -func saveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { - db.SetSync(calcABCIResponsesKey(height), abciResponses.Bytes()) -} - -//----------------------------------------------------------------------------- - -// ValidatorsInfo represents the latest validator set, or the last height it changed -type ValidatorsInfo struct { - ValidatorSet *types.ValidatorSet - LastHeightChanged int64 -} - -// Bytes serializes the ValidatorsInfo using go-amino. -func (valInfo *ValidatorsInfo) Bytes() []byte { - return cdc.MustMarshalBinaryBare(valInfo) -} - -// LoadValidators loads the ValidatorSet for a given height. -// Returns ErrNoValSetForHeight if the validator set can't be found for this height. -func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { - valInfo := loadValidatorsInfo(db, height) - if valInfo == nil { - return nil, ErrNoValSetForHeight{height} - } - - if valInfo.ValidatorSet == nil { - valInfo2 := loadValidatorsInfo(db, valInfo.LastHeightChanged) - if valInfo2 == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find validators at height %d as - last changed from height %d`, valInfo.LastHeightChanged, height)) - } - valInfo = valInfo2 - } - - return valInfo.ValidatorSet, nil -} - -func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { - buf := db.Get(calcValidatorsKey(height)) - if len(buf) == 0 { - return nil - } - - v := new(ValidatorsInfo) - err := cdc.UnmarshalBinaryBare(buf, v) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadValidators: Data has been corrupted or its spec has changed: - %v\n`, err)) - } - // TODO: ensure that buf is completely read. - - return v -} - -// saveValidatorsInfo persists the validator set for the next block to disk. -// It should be called from s.Save(), right before the state itself is persisted. -// If the validator set did not change after processing the latest block, -// only the last height for which the validators changed is persisted. -func saveValidatorsInfo(db dbm.DB, nextHeight, changeHeight int64, valSet *types.ValidatorSet) { - valInfo := &ValidatorsInfo{ - LastHeightChanged: changeHeight, - } - if changeHeight == nextHeight { - valInfo.ValidatorSet = valSet - } - db.SetSync(calcValidatorsKey(nextHeight), valInfo.Bytes()) -} - -//----------------------------------------------------------------------------- - -// ConsensusParamsInfo represents the latest consensus params, or the last height it changed -type ConsensusParamsInfo struct { - ConsensusParams types.ConsensusParams - LastHeightChanged int64 -} - -// Bytes serializes the ConsensusParamsInfo using go-amino. 
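The ValidatorsInfo indirection above is worth a worked example (hypothetical heights): when the set is unchanged, only the back-pointer is stored, and LoadValidators follows it to the last full copy.

// indirectionDemo stores the full set at height 1, a pointer-only record at
// height 5, and resolves the latter through LastHeightChanged == 1.
func indirectionDemo(db dbm.DB, valSet *types.ValidatorSet) (*types.ValidatorSet, error) {
	saveValidatorsInfo(db, 1, 1, valSet) // changeHeight == nextHeight: full set
	saveValidatorsInfo(db, 5, 1, valSet) // unchanged since 1: back-pointer only
	return LoadValidators(db, 5)
}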
-func (params ConsensusParamsInfo) Bytes() []byte { - return cdc.MustMarshalBinaryBare(params) -} - -// LoadConsensusParams loads the ConsensusParams for a given height. -func LoadConsensusParams(db dbm.DB, height int64) (types.ConsensusParams, error) { - empty := types.ConsensusParams{} - - paramsInfo := loadConsensusParamsInfo(db, height) - if paramsInfo == nil { - return empty, ErrNoConsensusParamsForHeight{height} - } - - if paramsInfo.ConsensusParams == empty { - paramsInfo = loadConsensusParamsInfo(db, paramsInfo.LastHeightChanged) - if paramsInfo == nil { - cmn.PanicSanity(fmt.Sprintf(`Couldn't find consensus params at height %d as - last changed from height %d`, paramsInfo.LastHeightChanged, height)) - } - } - - return paramsInfo.ConsensusParams, nil -} - -func loadConsensusParamsInfo(db dbm.DB, height int64) *ConsensusParamsInfo { - buf := db.Get(calcConsensusParamsKey(height)) - if len(buf) == 0 { - return nil - } - - paramsInfo := new(ConsensusParamsInfo) - err := cdc.UnmarshalBinaryBare(buf, paramsInfo) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - cmn.Exit(cmn.Fmt(`LoadConsensusParams: Data has been corrupted or its spec has changed: - %v\n`, err)) - } - // TODO: ensure that buf is completely read. - - return paramsInfo -} - -// saveConsensusParamsInfo persists the consensus params for the next block to disk. -// It should be called from s.Save(), right before the state itself is persisted. -// If the consensus params did not change after processing the latest block, -// only the last height for which they changed is persisted. -func saveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params types.ConsensusParams) { - paramsInfo := &ConsensusParamsInfo{ - LastHeightChanged: changeHeight, - } - if changeHeight == nextHeight { - paramsInfo.ConsensusParams = params - } - db.SetSync(calcConsensusParamsKey(nextHeight), paramsInfo.Bytes()) -} diff --git a/state/txindex/indexer.go b/state/txindex/indexer.go deleted file mode 100644 index ab509f96..00000000 --- a/state/txindex/indexer.go +++ /dev/null @@ -1,58 +0,0 @@ -package txindex - -import ( - "errors" - - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/types" -) - -// TxIndexer interface defines methods to index and search transactions. -type TxIndexer interface { - - // AddBatch analyzes, indexes and stores a batch of transactions. - AddBatch(b *Batch) error - - // Index analyzes, indexes and stores a single transaction. - Index(result *types.TxResult) error - - // Get returns the transaction specified by hash or nil if the transaction is not indexed - // or stored. - Get(hash []byte) (*types.TxResult, error) - - // Search allows you to query for transactions. - Search(q *query.Query) ([]*types.TxResult, error) -} - -//---------------------------------------------------- -// Txs are written as a batch - -// Batch groups together multiple Index operations to be performed at the same time. -// NOTE: Batch is NOT thread-safe and must not be modified after starting its execution. -type Batch struct { - Ops []*types.TxResult -} - -// NewBatch creates a new Batch. -func NewBatch(n int64) *Batch { - return &Batch{ - Ops: make([]*types.TxResult, n), - } -} - -// Add or update an entry for the given result.Index. -func (b *Batch) Add(result *types.TxResult) error { - b.Ops[result.Index] = result - return nil -} - -// Size returns the total number of operations inside the batch. 
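A short sketch of the Batch API above from a consumer's point of view (e.g. the kv indexer later in this diff): allocate one slot per tx in the block and add results keyed by their Index.

// buildBatch collects per-tx results into a batch for a single AddBatch call.
func buildBatch(results []*types.TxResult) *txindex.Batch {
	b := txindex.NewBatch(int64(len(results)))
	for _, res := range results {
		b.Add(res) // stored at res.Index
	}
	return b
}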
-func (b *Batch) Size() int { - return len(b.Ops) -} - -//---------------------------------------------------- -// Errors - -// ErrorEmptyHash indicates empty hash -var ErrorEmptyHash = errors.New("Transaction hash cannot be empty") diff --git a/state/txindex/indexer_service.go b/state/txindex/indexer_service.go deleted file mode 100644 index 264be1fd..00000000 --- a/state/txindex/indexer_service.go +++ /dev/null @@ -1,73 +0,0 @@ -package txindex - -import ( - "context" - - cmn "github.com/tendermint/tmlibs/common" - - "github.com/tendermint/tendermint/types" -) - -const ( - subscriber = "IndexerService" -) - -// IndexerService connects event bus and transaction indexer together in order -// to index transactions coming from event bus. -type IndexerService struct { - cmn.BaseService - - idr TxIndexer - eventBus *types.EventBus -} - -// NewIndexerService returns a new service instance. -func NewIndexerService(idr TxIndexer, eventBus *types.EventBus) *IndexerService { - is := &IndexerService{idr: idr, eventBus: eventBus} - is.BaseService = *cmn.NewBaseService(nil, "IndexerService", is) - return is -} - -// OnStart implements cmn.Service by subscribing for all transactions -// and indexing them by tags. -func (is *IndexerService) OnStart() error { - blockHeadersCh := make(chan interface{}) - if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryNewBlockHeader, blockHeadersCh); err != nil { - return err - } - - txsCh := make(chan interface{}) - if err := is.eventBus.Subscribe(context.Background(), subscriber, types.EventQueryTx, txsCh); err != nil { - return err - } - - go func() { - for { - e, ok := <-blockHeadersCh - if !ok { - return - } - header := e.(types.EventDataNewBlockHeader).Header - batch := NewBatch(header.NumTxs) - for i := int64(0); i < header.NumTxs; i++ { - e, ok := <-txsCh - if !ok { - is.Logger.Error("Failed to index all transactions due to closed transactions channel", "height", header.Height, "numTxs", header.NumTxs, "numProcessed", i) - return - } - txResult := e.(types.EventDataTx).TxResult - batch.Add(&txResult) - } - is.idr.AddBatch(batch) - is.Logger.Info("Indexed block", "height", header.Height) - } - }() - return nil -} - -// OnStop implements cmn.Service by unsubscribing from all transactions. -func (is *IndexerService) OnStop() { - if is.eventBus.IsRunning() { - _ = is.eventBus.UnsubscribeAll(context.Background(), subscriber) - } -} diff --git a/state/txindex/kv/kv.go b/state/txindex/kv/kv.go deleted file mode 100644 index 718a55d1..00000000 --- a/state/txindex/kv/kv.go +++ /dev/null @@ -1,437 +0,0 @@ -package kv - -import ( - "bytes" - "encoding/hex" - "fmt" - "sort" - "strconv" - "strings" - "time" - - "github.com/pkg/errors" - cmn "github.com/tendermint/tmlibs/common" - dbm "github.com/tendermint/tmlibs/db" - - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/types" -) - -const ( - tagKeySeparator = "/" -) - -var _ txindex.TxIndexer = (*TxIndex)(nil) - -// TxIndex is the simplest possible indexer, backed by key-value storage (levelDB). -type TxIndex struct { - store dbm.DB - tagsToIndex []string - indexAllTags bool -} - -// NewTxIndex creates new KV indexer. -func NewTxIndex(store dbm.DB, options ...func(*TxIndex)) *TxIndex { - txi := &TxIndex{store: store, tagsToIndex: make([]string, 0), indexAllTags: false} - for _, o := range options { - o(txi) - } - return txi -} - -// IndexTags is an option for setting which tags to index. 
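Wiring the pieces together, a sketch under stated assumptions: NewTxIndex and the IndexTags option are defined just below, NewIndexerService comes from state/txindex above, query.MustParse is the libs/pubsub/query constructor used elsewhere in tendermint, and Search is defined later in this file.

// wireIndexer builds a tag-indexing TxIndex over db, feeds it from the event
// bus, and runs one height-range query against it.
func wireIndexer(db dbm.DB, eventBus *types.EventBus) ([]*types.TxResult, error) {
	txIndexer := NewTxIndex(db, IndexTags([]string{"tx.height"}))
	service := txindex.NewIndexerService(txIndexer, eventBus)
	if err := service.Start(); err != nil {
		return nil, err
	}
	defer service.Stop()
	return txIndexer.Search(query.MustParse("tx.height > 5"))
}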
-func IndexTags(tags []string) func(*TxIndex) {
-	return func(txi *TxIndex) {
-		txi.tagsToIndex = tags
-	}
-}
-
-// IndexAllTags is an option for indexing all tags.
-func IndexAllTags() func(*TxIndex) {
-	return func(txi *TxIndex) {
-		txi.indexAllTags = true
-	}
-}
-
-// Get gets a transaction from the TxIndex storage and returns it, or nil if
-// the transaction is not found.
-func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
-	if len(hash) == 0 {
-		return nil, txindex.ErrorEmptyHash
-	}
-
-	rawBytes := txi.store.Get(hash)
-	if rawBytes == nil {
-		return nil, nil
-	}
-
-	txResult := new(types.TxResult)
-	err := cdc.UnmarshalBinaryBare(rawBytes, &txResult)
-	if err != nil {
-		return nil, fmt.Errorf("Error reading TxResult: %v", err)
-	}
-
-	return txResult, nil
-}
-
-// AddBatch indexes a batch of transactions using the given list of tags.
-func (txi *TxIndex) AddBatch(b *txindex.Batch) error {
-	storeBatch := txi.store.NewBatch()
-
-	for _, result := range b.Ops {
-		hash := result.Tx.Hash()
-
-		// index tx by tags
-		for _, tag := range result.Result.Tags {
-			if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) {
-				storeBatch.Set(keyForTag(tag, result), hash)
-			}
-		}
-
-		// index tx by hash
-		rawBytes, err := cdc.MarshalBinaryBare(result)
-		if err != nil {
-			return err
-		}
-		storeBatch.Set(hash, rawBytes)
-	}
-
-	storeBatch.Write()
-	return nil
-}
-
-// Index indexes a single transaction using the given list of tags.
-func (txi *TxIndex) Index(result *types.TxResult) error {
-	b := txi.store.NewBatch()
-
-	hash := result.Tx.Hash()
-
-	// index tx by tags
-	for _, tag := range result.Result.Tags {
-		if txi.indexAllTags || cmn.StringInSlice(string(tag.Key), txi.tagsToIndex) {
-			b.Set(keyForTag(tag, result), hash)
-		}
-	}
-
-	// index tx by hash
-	rawBytes, err := cdc.MarshalBinaryBare(result)
-	if err != nil {
-		return err
-	}
-	b.Set(hash, rawBytes)
-
-	b.Write()
-	return nil
-}
-
-// Search performs a search using the given query. It breaks the query into
-// conditions (like "tx.height > 5"). For each condition, it queries the DB
-// index. Two special cases here: (1) if "tx.hash" is present, the result for
-// that hash is returned immediately; (2) for range queries it is better for
-// the client to provide both lower and upper bounds, so we are not performing
-// a full scan. Results from querying indexes are then intersected and
-// returned to the caller.
-func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
-	var hashes [][]byte
-	var hashesInitialized bool
-
-	// get a list of conditions (like "tx.height > 5")
-	conditions := q.Conditions()
-
-	// if there is a hash condition, return the result immediately
-	hash, err, ok := lookForHash(conditions)
-	if err != nil {
-		return nil, errors.Wrap(err, "error during searching for a hash in the query")
-	} else if ok {
-		res, err := txi.Get(hash)
-		if err != nil {
-			return nil, errors.Wrap(err, "error while retrieving the result")
-		}
-		if res == nil {
-			return []*types.TxResult{}, nil
-		}
-		return []*types.TxResult{res}, nil
-	}
-
-	// conditions to skip because they're handled before "everything else"
-	skipIndexes := make([]int, 0)
-
-	// if there is a height condition ("tx.height=3"), extract it for faster lookups
-	height, heightIndex := lookForHeight(conditions)
-	if heightIndex >= 0 {
-		skipIndexes = append(skipIndexes, heightIndex)
-	}
-
-	// extract ranges
-	// if both upper and lower bounds exist, it's better to get them in order,
-	// so as not to iterate over kvs that are not within range.
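-	// e.g. "account.number >= 1 AND account.number <= 5" collapses into a single
-	// queryRange{lowerBound: 1, upperBound: 5} that is matched in one pass below.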
- ranges, rangeIndexes := lookForRanges(conditions) - if len(ranges) > 0 { - skipIndexes = append(skipIndexes, rangeIndexes...) - - for _, r := range ranges { - if !hashesInitialized { - hashes = txi.matchRange(r, []byte(r.key)) - hashesInitialized = true - } else { - hashes = intersect(hashes, txi.matchRange(r, []byte(r.key))) - } - } - } - - // for all other conditions - for i, c := range conditions { - if cmn.IntInSlice(i, skipIndexes) { - continue - } - - if !hashesInitialized { - hashes = txi.match(c, startKey(c, height)) - hashesInitialized = true - } else { - hashes = intersect(hashes, txi.match(c, startKey(c, height))) - } - } - - results := make([]*types.TxResult, len(hashes)) - i := 0 - for _, h := range hashes { - results[i], err = txi.Get(h) - if err != nil { - return nil, errors.Wrapf(err, "failed to get Tx{%X}", h) - } - i++ - } - - // sort by height by default - sort.Slice(results, func(i, j int) bool { - return results[i].Height < results[j].Height - }) - - return results, nil -} - -func lookForHash(conditions []query.Condition) (hash []byte, err error, ok bool) { - for _, c := range conditions { - if c.Tag == types.TxHashKey { - decoded, err := hex.DecodeString(c.Operand.(string)) - return decoded, err, true - } - } - return -} - -func lookForHeight(conditions []query.Condition) (height int64, index int) { - for i, c := range conditions { - if c.Tag == types.TxHeightKey { - return c.Operand.(int64), i - } - } - return 0, -1 -} - -// special map to hold range conditions -// Example: account.number => queryRange{lowerBound: 1, upperBound: 5} -type queryRanges map[string]queryRange - -type queryRange struct { - key string - lowerBound interface{} // int || time.Time - includeLowerBound bool - upperBound interface{} // int || time.Time - includeUpperBound bool -} - -func (r queryRange) lowerBoundValue() interface{} { - if r.lowerBound == nil { - return nil - } - - if r.includeLowerBound { - return r.lowerBound - } else { - switch t := r.lowerBound.(type) { - case int64: - return t + 1 - case time.Time: - return t.Unix() + 1 - default: - panic("not implemented") - } - } -} - -func (r queryRange) AnyBound() interface{} { - if r.lowerBound != nil { - return r.lowerBound - } else { - return r.upperBound - } -} - -func (r queryRange) upperBoundValue() interface{} { - if r.upperBound == nil { - return nil - } - - if r.includeUpperBound { - return r.upperBound - } else { - switch t := r.upperBound.(type) { - case int64: - return t - 1 - case time.Time: - return t.Unix() - 1 - default: - panic("not implemented") - } - } -} - -func lookForRanges(conditions []query.Condition) (ranges queryRanges, indexes []int) { - ranges = make(queryRanges) - for i, c := range conditions { - if isRangeOperation(c.Op) { - r, ok := ranges[c.Tag] - if !ok { - r = queryRange{key: c.Tag} - } - switch c.Op { - case query.OpGreater: - r.lowerBound = c.Operand - case query.OpGreaterEqual: - r.includeLowerBound = true - r.lowerBound = c.Operand - case query.OpLess: - r.upperBound = c.Operand - case query.OpLessEqual: - r.includeUpperBound = true - r.upperBound = c.Operand - } - ranges[c.Tag] = r - indexes = append(indexes, i) - } - } - return ranges, indexes -} - -func isRangeOperation(op query.Operator) bool { - switch op { - case query.OpGreater, query.OpGreaterEqual, query.OpLess, query.OpLessEqual: - return true - default: - return false - } -} - -func (txi *TxIndex) match(c query.Condition, startKey []byte) (hashes [][]byte) { - if c.Op == query.OpEqual { - it := dbm.IteratePrefix(txi.store, startKey) - 
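-		// keyForTag writes keys as <tag>/<value>/<height>/<index>, so iterating
-		// over this prefix visits exactly the entries whose tag and value match.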
defer it.Close() - for ; it.Valid(); it.Next() { - hashes = append(hashes, it.Value()) - } - } else if c.Op == query.OpContains { - // XXX: doing full scan because startKey does not apply here - // For example, if startKey = "account.owner=an" and search query = "accoutn.owner CONSISTS an" - // we can't iterate with prefix "account.owner=an" because we might miss keys like "account.owner=Ulan" - it := txi.store.Iterator(nil, nil) - defer it.Close() - for ; it.Valid(); it.Next() { - if !isTagKey(it.Key()) { - continue - } - if strings.Contains(extractValueFromKey(it.Key()), c.Operand.(string)) { - hashes = append(hashes, it.Value()) - } - } - } else { - panic("other operators should be handled already") - } - return -} - -func (txi *TxIndex) matchRange(r queryRange, prefix []byte) (hashes [][]byte) { - // create a map to prevent duplicates - hashesMap := make(map[string][]byte) - - lowerBound := r.lowerBoundValue() - upperBound := r.upperBoundValue() - - it := dbm.IteratePrefix(txi.store, prefix) - defer it.Close() -LOOP: - for ; it.Valid(); it.Next() { - if !isTagKey(it.Key()) { - continue - } - switch r.AnyBound().(type) { - case int64: - v, err := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - if err != nil { - continue LOOP - } - include := true - if lowerBound != nil && v < lowerBound.(int64) { - include = false - } - if upperBound != nil && v > upperBound.(int64) { - include = false - } - if include { - hashesMap[fmt.Sprintf("%X", it.Value())] = it.Value() - } - // XXX: passing time in a ABCI Tags is not yet implemented - // case time.Time: - // v := strconv.ParseInt(extractValueFromKey(it.Key()), 10, 64) - // if v == r.upperBound { - // break - // } - } - } - hashes = make([][]byte, len(hashesMap)) - i := 0 - for _, h := range hashesMap { - hashes[i] = h - i++ - } - return -} - -/////////////////////////////////////////////////////////////////////////////// -// Keys - -func startKey(c query.Condition, height int64) []byte { - var key string - if height > 0 { - key = fmt.Sprintf("%s/%v/%d", c.Tag, c.Operand, height) - } else { - key = fmt.Sprintf("%s/%v", c.Tag, c.Operand) - } - return []byte(key) -} - -func isTagKey(key []byte) bool { - return strings.Count(string(key), tagKeySeparator) == 3 -} - -func extractValueFromKey(key []byte) string { - parts := strings.SplitN(string(key), tagKeySeparator, 3) - return parts[1] -} - -func keyForTag(tag cmn.KVPair, result *types.TxResult) []byte { - return []byte(fmt.Sprintf("%s/%s/%d/%d", tag.Key, tag.Value, result.Height, result.Index)) -} - -/////////////////////////////////////////////////////////////////////////////// -// Utils - -func intersect(as, bs [][]byte) [][]byte { - i := make([][]byte, 0, cmn.MinInt(len(as), len(bs))) - for _, a := range as { - for _, b := range bs { - if bytes.Equal(a, b) { - i = append(i, a) - } - } - } - return i -} diff --git a/state/txindex/kv/kv_test.go b/state/txindex/kv/kv_test.go deleted file mode 100644 index af35ec41..00000000 --- a/state/txindex/kv/kv_test.go +++ /dev/null @@ -1,239 +0,0 @@ -package kv - -import ( - "fmt" - "io/ioutil" - "os" - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - abci "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - db "github.com/tendermint/tmlibs/db" - - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/types" -) - -func TestTxIndex(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB()) - - tx := 
types.Tx("HELLO WORLD") - txResult := &types.TxResult{1, 0, tx, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} - hash := tx.Hash() - - batch := txindex.NewBatch(1) - if err := batch.Add(txResult); err != nil { - t.Error(err) - } - err := indexer.AddBatch(batch) - require.NoError(t, err) - - loadedTxResult, err := indexer.Get(hash) - require.NoError(t, err) - assert.Equal(t, txResult, loadedTxResult) - - tx2 := types.Tx("BYE BYE WORLD") - txResult2 := &types.TxResult{1, 0, tx2, abci.ResponseDeliverTx{Data: []byte{0}, Code: abci.CodeTypeOK, Log: "", Tags: nil}} - hash2 := tx2.Hash() - - err = indexer.Index(txResult2) - require.NoError(t, err) - - loadedTxResult2, err := indexer.Get(hash2) - require.NoError(t, err) - assert.Equal(t, txResult2, loadedTxResult2) -} - -func TestTxSearch(t *testing.T) { - allowedTags := []string{"account.number", "account.owner", "account.date"} - indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) - - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - {Key: []byte("account.owner"), Value: []byte("Ivan")}, - {Key: []byte("not_allowed"), Value: []byte("Vlad")}, - }) - hash := txResult.Tx.Hash() - - err := indexer.Index(txResult) - require.NoError(t, err) - - testCases := []struct { - q string - resultsLength int - }{ - // search by hash - {fmt.Sprintf("tx.hash = '%X'", hash), 1}, - // search by exact match (one tag) - {"account.number = 1", 1}, - // search by exact match (two tags) - {"account.number = 1 AND account.owner = 'Ivan'", 1}, - // search by exact match (two tags) - {"account.number = 1 AND account.owner = 'Vlad'", 0}, - // search by range - {"account.number >= 1 AND account.number <= 5", 1}, - // search by range (lower bound) - {"account.number >= 1", 1}, - // search by range (upper bound) - {"account.number <= 5", 1}, - // search using not allowed tag - {"not_allowed = 'boom'", 0}, - // search for not existing tx result - {"account.number >= 2 AND account.number <= 5", 0}, - // search using not existing tag - {"account.date >= TIME 2013-05-03T14:45:00Z", 0}, - // search using CONTAINS - {"account.owner CONTAINS 'an'", 1}, - // search using CONTAINS - {"account.owner CONTAINS 'Vlad'", 0}, - } - - for _, tc := range testCases { - t.Run(tc.q, func(t *testing.T) { - results, err := indexer.Search(query.MustParse(tc.q)) - assert.NoError(t, err) - - assert.Len(t, results, tc.resultsLength) - if tc.resultsLength > 0 { - assert.Equal(t, []*types.TxResult{txResult}, results) - } - }) - } -} - -func TestTxSearchOneTxWithMultipleSameTagsButDifferentValues(t *testing.T) { - allowedTags := []string{"account.number"} - indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) - - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - {Key: []byte("account.number"), Value: []byte("2")}, - }) - - err := indexer.Index(txResult) - require.NoError(t, err) - - results, err := indexer.Search(query.MustParse("account.number >= 1")) - assert.NoError(t, err) - - assert.Len(t, results, 1) - assert.Equal(t, []*types.TxResult{txResult}, results) -} - -func TestTxSearchMultipleTxs(t *testing.T) { - allowedTags := []string{"account.number"} - indexer := NewTxIndex(db.NewMemDB(), IndexTags(allowedTags)) - - // indexed first, but bigger height (to test the order of transactions) - txResult := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("1")}, - }) - txResult.Tx = types.Tx("Bob's account") - txResult.Height = 
2 - err := indexer.Index(txResult) - require.NoError(t, err) - - // indexed second, but smaller height (to test the order of transactions) - txResult2 := txResultWithTags([]cmn.KVPair{ - {Key: []byte("account.number"), Value: []byte("2")}, - }) - txResult2.Tx = types.Tx("Alice's account") - txResult2.Height = 1 - err = indexer.Index(txResult2) - require.NoError(t, err) - - results, err := indexer.Search(query.MustParse("account.number >= 1")) - assert.NoError(t, err) - - require.Len(t, results, 2) - assert.Equal(t, []*types.TxResult{txResult2, txResult}, results) -} - -func TestIndexAllTags(t *testing.T) { - indexer := NewTxIndex(db.NewMemDB(), IndexAllTags()) - - txResult := txResultWithTags([]cmn.KVPair{ - cmn.KVPair{[]byte("account.owner"), []byte("Ivan")}, - cmn.KVPair{[]byte("account.number"), []byte("1")}, - }) - - err := indexer.Index(txResult) - require.NoError(t, err) - - results, err := indexer.Search(query.MustParse("account.number >= 1")) - assert.NoError(t, err) - assert.Len(t, results, 1) - assert.Equal(t, []*types.TxResult{txResult}, results) - - results, err = indexer.Search(query.MustParse("account.owner = 'Ivan'")) - assert.NoError(t, err) - assert.Len(t, results, 1) - assert.Equal(t, []*types.TxResult{txResult}, results) -} - -func txResultWithTags(tags []cmn.KVPair) *types.TxResult { - tx := types.Tx("HELLO WORLD") - return &types.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: tags, - Fee: cmn.KI64Pair{Key: nil, Value: 0}, - }, - } -} - -func benchmarkTxIndex(txsCount int64, b *testing.B) { - tx := types.Tx("HELLO WORLD") - txResult := &types.TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: abci.ResponseDeliverTx{ - Data: []byte{0}, - Code: abci.CodeTypeOK, - Log: "", - Tags: []cmn.KVPair{}, - Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}, - }, - } - - dir, err := ioutil.TempDir("", "tx_index_db") - if err != nil { - b.Fatal(err) - } - defer os.RemoveAll(dir) // nolint: errcheck - - store := db.NewDB("tx_index", "leveldb", dir) - indexer := NewTxIndex(store) - - batch := txindex.NewBatch(txsCount) - for i := int64(0); i < txsCount; i++ { - if err := batch.Add(txResult); err != nil { - b.Fatal(err) - } - txResult.Index++ - } - - b.ResetTimer() - - for n := 0; n < b.N; n++ { - err = indexer.AddBatch(batch) - } - if err != nil { - b.Fatal(err) - } -} - -func BenchmarkTxIndex1(b *testing.B) { benchmarkTxIndex(1, b) } -func BenchmarkTxIndex500(b *testing.B) { benchmarkTxIndex(500, b) } -func BenchmarkTxIndex1000(b *testing.B) { benchmarkTxIndex(1000, b) } -func BenchmarkTxIndex2000(b *testing.B) { benchmarkTxIndex(2000, b) } -func BenchmarkTxIndex10000(b *testing.B) { benchmarkTxIndex(10000, b) } diff --git a/state/txindex/kv/wire.go b/state/txindex/kv/wire.go deleted file mode 100644 index ccca7525..00000000 --- a/state/txindex/kv/wire.go +++ /dev/null @@ -1,10 +0,0 @@ -package kv - -import ( - "github.com/tendermint/go-amino" -) - -var cdc = amino.NewCodec() - -func init() { -} diff --git a/state/txindex/null/null.go b/state/txindex/null/null.go deleted file mode 100644 index f85de2e6..00000000 --- a/state/txindex/null/null.go +++ /dev/null @@ -1,33 +0,0 @@ -package null - -import ( - "errors" - - "github.com/tendermint/tendermint/libs/pubsub/query" - "github.com/tendermint/tendermint/state/txindex" - "github.com/tendermint/tendermint/types" -) - -var _ txindex.TxIndexer = (*TxIndex)(nil) - -// TxIndex acts as a /dev/null. 
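-// It satisfies txindex.TxIndexer but stores nothing: Get reports that
-// indexing is disabled, and AddBatch, Index and Search are no-ops.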
-type TxIndex struct{}
-
-// Get on a TxIndex is disabled and returns an error when invoked.
-func (txi *TxIndex) Get(hash []byte) (*types.TxResult, error) {
-	return nil, errors.New(`Indexing is disabled (set 'tx_index = "kv"' in config)`)
-}
-
-// AddBatch is a noop and always returns nil.
-func (txi *TxIndex) AddBatch(batch *txindex.Batch) error {
-	return nil
-}
-
-// Index is a noop and always returns nil.
-func (txi *TxIndex) Index(result *types.TxResult) error {
-	return nil
-}
-
-// Search is a noop and always returns empty results.
-func (txi *TxIndex) Search(q *query.Query) ([]*types.TxResult, error) {
-	return []*types.TxResult{}, nil
-}
diff --git a/state/validation.go b/state/validation.go
deleted file mode 100644
index 84a4cc82..00000000
--- a/state/validation.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package state
-
-import (
-	"bytes"
-	"errors"
-	"fmt"
-
-	"github.com/tendermint/tendermint/types"
-	dbm "github.com/tendermint/tmlibs/db"
-)
-
-//-----------------------------------------------------
-// Validate block
-
-func validateBlock(stateDB dbm.DB, state State, block *types.Block) error {
-	// validate internal consistency
-	if err := block.ValidateBasic(); err != nil {
-		return err
-	}
-
-	// validate basic info
-	if block.ChainID != state.ChainID {
-		return fmt.Errorf("Wrong Block.Header.ChainID. Expected %v, got %v", state.ChainID, block.ChainID)
-	}
-	if block.Height != state.LastBlockHeight+1 {
-		return fmt.Errorf("Wrong Block.Header.Height. Expected %v, got %v", state.LastBlockHeight+1, block.Height)
-	}
-	/* TODO: Determine bounds for Time
-		See blockchain/reactor "stopSyncingDurationMinutes"
-
-		if !block.Time.After(lastBlockTime) {
-			return errors.New("Invalid Block.Header.Time")
-		}
-	*/
-
-	// validate prev block info
-	if !block.LastBlockID.Equals(state.LastBlockID) {
-		return fmt.Errorf("Wrong Block.Header.LastBlockID. Expected %v, got %v", state.LastBlockID, block.LastBlockID)
-	}
-	newTxs := int64(len(block.Data.Txs))
-	if block.TotalTxs != state.LastBlockTotalTx+newTxs {
-		return fmt.Errorf("Wrong Block.Header.TotalTxs. Expected %v, got %v", state.LastBlockTotalTx+newTxs, block.TotalTxs)
-	}
-
-	// validate app info
-	if !bytes.Equal(block.AppHash, state.AppHash) {
-		return fmt.Errorf("Wrong Block.Header.AppHash. Expected %X, got %X", state.AppHash, block.AppHash)
-	}
-	if !bytes.Equal(block.ConsensusHash, state.ConsensusParams.Hash()) {
-		return fmt.Errorf("Wrong Block.Header.ConsensusHash. Expected %X, got %X", state.ConsensusParams.Hash(), block.ConsensusHash)
-	}
-	if !bytes.Equal(block.LastResultsHash, state.LastResultsHash) {
-		return fmt.Errorf("Wrong Block.Header.LastResultsHash. Expected %X, got %X", state.LastResultsHash, block.LastResultsHash)
-	}
-	if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) {
-		return fmt.Errorf("Wrong Block.Header.ValidatorsHash. Expected %X, got %X", state.Validators.Hash(), block.ValidatorsHash)
-	}
-
-	// Validate block LastCommit.
-	if block.Height == 1 {
-		if len(block.LastCommit.Precommits) != 0 {
-			return errors.New("Block at height 1 (first block) should have no LastCommit precommits")
-		}
-	} else {
-		if len(block.LastCommit.Precommits) != state.LastValidators.Size() {
-			return fmt.Errorf("Invalid block commit size. Expected %v, got %v",
-				state.LastValidators.Size(), len(block.LastCommit.Precommits))
-		}
-		err := state.LastValidators.VerifyCommit(
-			state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit)
-		if err != nil {
-			return err
-		}
-	}
-
-	// TODO: Each check requires loading an old validator set.
- // We should cap the amount of evidence per block - // to prevent potential proposer DoS. - for _, ev := range block.Evidence.Evidence { - if err := VerifyEvidence(stateDB, state, ev); err != nil { - return types.NewEvidenceInvalidErr(ev, err) - } - } - - return nil -} - -// VerifyEvidence verifies the evidence fully by checking: -// - it is sufficiently recent (MaxAge) -// - it is from a key who was a validator at the given height -// - it is internally consistent -// - it was properly signed by the alleged equivocator -func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error { - height := state.LastBlockHeight - - evidenceAge := height - evidence.Height() - maxAge := state.ConsensusParams.EvidenceParams.MaxAge - if evidenceAge > maxAge { - return fmt.Errorf("Evidence from height %d is too old. Min height is %d", - evidence.Height(), height-maxAge) - } - - valset, err := LoadValidators(stateDB, evidence.Height()) - if err != nil { - // TODO: if err is just that we cant find it cuz we pruned, ignore. - // TODO: if its actually bad evidence, punish peer - return err - } - - // The address must have been an active validator at the height. - // NOTE: we will ignore evidence from H if the key was not a validator - // at H, even if it is a validator at some nearby H' - ev := evidence - height, addr := ev.Height(), ev.Address() - _, val := valset.GetByAddress(addr) - if val == nil { - return fmt.Errorf("Address %X was not a validator at height %d", addr, height) - } - - if err := evidence.Verify(state.ChainID, val.PubKey); err != nil { - return err - } - - return nil -} diff --git a/state/validation_test.go b/state/validation_test.go deleted file mode 100644 index b4695b07..00000000 --- a/state/validation_test.go +++ /dev/null @@ -1,68 +0,0 @@ -package state - -import ( - "testing" - - "github.com/stretchr/testify/require" - dbm "github.com/tendermint/tmlibs/db" - "github.com/tendermint/tmlibs/log" -) - -func TestValidateBlock(t *testing.T) { - state, _ := state(1, 1) - - blockExec := NewBlockExecutor(dbm.NewMemDB(), log.TestingLogger(), nil, nil, nil) - - // proper block must pass - block := makeBlock(state, 1) - err := blockExec.ValidateBlock(state, block) - require.NoError(t, err) - - // wrong chain fails - block = makeBlock(state, 1) - block.ChainID = "not-the-real-one" - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong height fails - block = makeBlock(state, 1) - block.Height += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong total tx fails - block = makeBlock(state, 1) - block.TotalTxs += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong blockid fails - block = makeBlock(state, 1) - block.LastBlockID.PartsHeader.Total += 10 - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong app hash fails - block = makeBlock(state, 1) - block.AppHash = []byte("wrong app hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong consensus hash fails - block = makeBlock(state, 1) - block.ConsensusHash = []byte("wrong consensus hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong results hash fails - block = makeBlock(state, 1) - block.LastResultsHash = []byte("wrong results hash") - err = blockExec.ValidateBlock(state, block) - require.Error(t, err) - - // wrong validators hash fails - block = makeBlock(state, 1) - block.ValidatorsHash = []byte("wrong validators hash") - err = 
blockExec.ValidateBlock(state, block)
-	require.Error(t, err)
-}
diff --git a/state/wire.go b/state/wire.go
deleted file mode 100644
index 3e8b544d..00000000
--- a/state/wire.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package state
-
-import (
-	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	crypto.RegisterAmino(cdc)
-}
diff --git a/test/README.md b/test/README.md
deleted file mode 100644
index fc436948..00000000
--- a/test/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# Tendermint Tests
-
-The unit tests (i.e. the `go test`s) can be run with `make test`.
-The integration tests can be run with `make test_integrations`.
-
-Running the integration tests will build a docker container with a local version of tendermint
-and run the following tests in docker containers:
-
-- go tests, with --race
-  - includes test coverage
-- app tests
-  - kvstore app over socket
-  - counter app over socket
-  - counter app over grpc
-- persistence tests
-  - crash tendermint at each of many predefined points, restart, and ensure it syncs properly with the app
-- p2p tests
-  - start a local kvstore app testnet on a docker network (requires docker version 1.10+)
-  - send a tx on each node and ensure the state root is updated on all of them
-  - crash and restart nodes one at a time and ensure they can sync back up (via fastsync)
-  - crash and restart all nodes at once and ensure they can sync back up
diff --git a/test/app/clean.sh b/test/app/clean.sh
deleted file mode 100755
index 22814f01..00000000
--- a/test/app/clean.sh
+++ /dev/null
@@ -1,3 +0,0 @@
-killall tendermint
-killall abci-cli
-rm -rf ~/.tendermint_app
diff --git a/test/app/counter_test.sh b/test/app/counter_test.sh
deleted file mode 100755
index 868f8d03..00000000
--- a/test/app/counter_test.sh
+++ /dev/null
@@ -1,141 +0,0 @@
-#! /bin/bash
-
-# ensure GRPC_BROADCAST_TX is defined before `set -u` kicks in
-if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
-    GRPC_BROADCAST_TX=""
-fi
-
-set -u
-
-#####################
-# counter over socket
-#####################
-TESTNAME=$1
-
-# Send some txs
-
-function getCode() {
-    set +u
-    R=$1
-    set -u
-    if [[ "$R" == "" ]]; then
-        echo -1
-        return
-    fi
-
-    if [[ $(echo $R | jq 'has("code")') == "true" ]]; then
-        # this won't actually work if there's an error ...
-        echo "$R" | jq ".code"
-    else
-        # protobuf auto-adds `omitempty` to everything, so an OK code and empty data/log
-        # will not even show up when marshalled into json.
-        # apparently we can use github.com/golang/protobuf/jsonpb to do the marshalling ...
-        echo 0
-    fi
-}
-
-# build grpc client if needed
-if [[ "$GRPC_BROADCAST_TX" != "" ]]; then
-    if [ -f grpc_client ]; then
-        rm grpc_client
-    fi
-    echo "... building grpc_client"
-    go build -o grpc_client grpc_client.go
-fi
-
-function sendTx() {
-    TX=$1
-    set +u
-    SHOULD_ERR=$2
-    if [ "$SHOULD_ERR" == "" ]; then
-        SHOULD_ERR=false
-    fi
-    set -u
-    if [[ "$GRPC_BROADCAST_TX" == "" ]]; then
-        RESPONSE=$(curl -s localhost:26657/broadcast_tx_commit?tx=0x"$TX")
-        IS_ERR=$(echo "$RESPONSE" | jq 'has("error")')
-        ERROR=$(echo "$RESPONSE" | jq '.error')
-        ERROR=$(echo "$ERROR" | tr -d '"') # remove surrounding quotes
-
-        RESPONSE=$(echo "$RESPONSE" | jq '.result')
-    else
-        RESPONSE=$(./grpc_client "$TX")
-        IS_ERR=false
-        ERROR=""
-    fi
-
-    echo "RESPONSE"
-    echo "$RESPONSE"
-
-    echo "$RESPONSE" | jq . &> /dev/null
-    IS_JSON=$?
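-    # jq exits non-zero when RESPONSE is not valid JSON, so treat that as an error response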
- if [[ "$IS_JSON" != "0" ]]; then - IS_ERR=true - ERROR="$RESPONSE" - fi - APPEND_TX_RESPONSE=$(echo "$RESPONSE" | jq '.deliver_tx') - APPEND_TX_CODE=$(getCode "$APPEND_TX_RESPONSE") - CHECK_TX_RESPONSE=$(echo "$RESPONSE" | jq '.check_tx') - CHECK_TX_CODE=$(getCode "$CHECK_TX_RESPONSE") - - echo "-------" - echo "TX $TX" - echo "RESPONSE $RESPONSE" - echo "ERROR $ERROR" - echo "IS_ERR $IS_ERR" - echo "----" - - if $SHOULD_ERR; then - if [[ "$IS_ERR" != "true" ]]; then - echo "Expected error sending tx ($TX)" - exit 1 - fi - else - if [[ "$IS_ERR" == "true" ]]; then - echo "Unexpected error sending tx ($TX)" - exit 1 - fi - - fi -} - -echo "... sending tx. expect no error" - -# 0 should pass once and get in block, with no error -TX=00 -sendTx $TX -if [[ $APPEND_TX_CODE != 0 ]]; then - echo "Got non-zero exit code for $TX. $RESPONSE" - exit 1 -fi - - -echo "... sending tx. expect error" - -# second time should get rejected by the mempool (return error and non-zero code) -sendTx $TX true - - -echo "... sending tx. expect no error" - -# now, TX=01 should pass, with no error -TX=01 -sendTx $TX -if [[ $APPEND_TX_CODE != 0 ]]; then - echo "Got non-zero exit code for $TX. $RESPONSE" - exit 1 -fi - -echo "... sending tx. expect no error, but invalid" - -# now, TX=03 should get in a block (passes CheckTx, no error), but is invalid -TX=03 -sendTx $TX -if [[ "$CHECK_TX_CODE" != 0 ]]; then - echo "Got non-zero exit code for checktx on $TX. $RESPONSE" - exit 1 -fi -if [[ $APPEND_TX_CODE == 0 ]]; then - echo "Got zero exit code for $TX. Should have been bad nonce. $RESPONSE" - exit 1 -fi - -echo "Passed Test: $TESTNAME" diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go deleted file mode 100644 index c55713c7..00000000 --- a/test/app/grpc_client.go +++ /dev/null @@ -1,42 +0,0 @@ -package main - -import ( - "encoding/hex" - "encoding/json" - "fmt" - "os" - - "context" - - "github.com/tendermint/tendermint/rpc/grpc" -) - -var grpcAddr = "tcp://localhost:36656" - -func main() { - args := os.Args - if len(args) == 1 { - fmt.Println("Must enter a transaction to send (hex)") - os.Exit(1) - } - tx := args[1] - txBytes, err := hex.DecodeString(tx) - if err != nil { - fmt.Println("Invalid hex", err) - os.Exit(1) - } - - clientGRPC := core_grpc.StartGRPCClient(grpcAddr) - res, err := clientGRPC.BroadcastTx(context.Background(), &core_grpc.RequestBroadcastTx{txBytes}) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - - bz, err := json.Marshal(res) - if err != nil { - fmt.Println(err) - os.Exit(1) - } - fmt.Println(string(bz)) -} diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh deleted file mode 100755 index 67f6b583..00000000 --- a/test/app/kvstore_test.sh +++ /dev/null @@ -1,84 +0,0 @@ -#! /bin/bash -set -ex - -function toHex() { - echo -n $1 | hexdump -ve '1/1 "%.2X"' | awk '{print "0x" $0}' - -} - -##################### -# kvstore with curl -##################### -TESTNAME=$1 - -# store key value pair -KEY="abcd" -VALUE="dcba" -echo $(toHex $KEY=$VALUE) -curl -s 127.0.0.1:26657/broadcast_tx_commit?tx=$(toHex $KEY=$VALUE) -echo $? -echo "" - - -########################### -# test using the abci-cli -########################### - -echo "... testing query with abci-cli" - -# we should be able to look up the key -RESPONSE=`abci-cli query \"$KEY\"` - -set +e -A=`echo $RESPONSE | grep "$VALUE"` -if [[ $? != 0 ]]; then - echo "Failed to find $VALUE for $KEY. 
Response:" - echo "$RESPONSE" - exit 1 -fi -set -e - -# we should not be able to look up the value -RESPONSE=`abci-cli query \"$VALUE\"` -set +e -A=`echo $RESPONSE | grep $VALUE` -if [[ $? == 0 ]]; then - echo "Found '$VALUE' for $VALUE when we should not have. Response:" - echo "$RESPONSE" - exit 1 -fi -set -e - -############################# -# test using the /abci_query -############################# - -echo "... testing query with /abci_query 2" - -# we should be able to look up the key -RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $KEY)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` - -set +e -A=`echo $RESPONSE | grep 'exists'` -if [[ $? != 0 ]]; then - echo "Failed to find 'exists' for $KEY. Response:" - echo "$RESPONSE" - exit 1 -fi -set -e - -# we should not be able to look up the value -RESPONSE=`curl -s "127.0.0.1:26657/abci_query?path=\"\"&data=$(toHex $VALUE)&prove=false"` -RESPONSE=`echo $RESPONSE | jq .result.response.log` -set +e -A=`echo $RESPONSE | grep 'exists'` -if [[ $? == 0 ]]; then - echo "Found 'exists' for $VALUE when we should not have. Response:" - echo "$RESPONSE" - exit 1 -fi -set -e - - -echo "Passed Test: $TESTNAME" diff --git a/test/app/test.sh b/test/app/test.sh deleted file mode 100755 index 0f77da04..00000000 --- a/test/app/test.sh +++ /dev/null @@ -1,129 +0,0 @@ -#! /bin/bash -set -ex - -#- kvstore over socket, curl -#- counter over socket, curl -#- counter over grpc, curl -#- counter over grpc, grpc - -# TODO: install everything - -export PATH="$GOBIN:$PATH" -export TMHOME=$HOME/.tendermint_app - -function kvstore_over_socket(){ - rm -rf $TMHOME - tendermint init - echo "Starting kvstore_over_socket" - abci-cli kvstore > /dev/null & - pid_kvstore=$! - tendermint node > tendermint.log & - pid_tendermint=$! - sleep 5 - - echo "running test" - bash kvstore_test.sh "KVStore over Socket" - - kill -9 $pid_kvstore $pid_tendermint -} - -# start tendermint first -function kvstore_over_socket_reorder(){ - rm -rf $TMHOME - tendermint init - echo "Starting kvstore_over_socket_reorder (ie. start tendermint first)" - tendermint node > tendermint.log & - pid_tendermint=$! - sleep 2 - abci-cli kvstore > /dev/null & - pid_kvstore=$! - sleep 5 - - echo "running test" - bash kvstore_test.sh "KVStore over Socket" - - kill -9 $pid_kvstore $pid_tendermint -} - - -function counter_over_socket() { - rm -rf $TMHOME - tendermint init - echo "Starting counter_over_socket" - abci-cli counter --serial > /dev/null & - pid_counter=$! - tendermint node > tendermint.log & - pid_tendermint=$! - sleep 5 - - echo "running test" - bash counter_test.sh "Counter over Socket" - - kill -9 $pid_counter $pid_tendermint -} - -function counter_over_grpc() { - rm -rf $TMHOME - tendermint init - echo "Starting counter_over_grpc" - abci-cli counter --serial --abci grpc > /dev/null & - pid_counter=$! - tendermint node --abci grpc > tendermint.log & - pid_tendermint=$! - sleep 5 - - echo "running test" - bash counter_test.sh "Counter over GRPC" - - kill -9 $pid_counter $pid_tendermint -} - -function counter_over_grpc_grpc() { - rm -rf $TMHOME - tendermint init - echo "Starting counter_over_grpc_grpc (ie. with grpc broadcast_tx)" - abci-cli counter --serial --abci grpc > /dev/null & - pid_counter=$! - sleep 1 - GRPC_PORT=36656 - tendermint node --abci grpc --rpc.grpc_laddr tcp://localhost:$GRPC_PORT > tendermint.log & - pid_tendermint=$! 
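-    # give tendermint and the app a few seconds to boot and connect before testing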
- sleep 5 - - echo "running test" - GRPC_BROADCAST_TX=true bash counter_test.sh "Counter over GRPC via GRPC BroadcastTx" - - kill -9 $pid_counter $pid_tendermint -} - -cd $GOPATH/src/github.com/tendermint/tendermint/test/app - -case "$1" in - "kvstore_over_socket") - kvstore_over_socket - ;; -"kvstore_over_socket_reorder") - kvstore_over_socket_reorder - ;; - "counter_over_socket") - counter_over_socket - ;; -"counter_over_grpc") - counter_over_grpc - ;; - "counter_over_grpc_grpc") - counter_over_grpc_grpc - ;; -*) - echo "Running all" - kvstore_over_socket - echo "" - kvstore_over_socket_reorder - echo "" - counter_over_socket - echo "" - counter_over_grpc - echo "" - counter_over_grpc_grpc -esac - diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile deleted file mode 100644 index bc211ea4..00000000 --- a/test/docker/Dockerfile +++ /dev/null @@ -1,38 +0,0 @@ -FROM golang:1.10 - -# Add testing deps for curl -RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list - -# Grab deps (jq, hexdump, xxd, killall) -RUN apt-get update && \ - apt-get install -y --no-install-recommends \ - jq bsdmainutils vim-common psmisc netcat curl - -# Setup tendermint repo -ENV REPO $GOPATH/src/github.com/tendermint/tendermint -ENV GOBIN $GOPATH/bin -WORKDIR $REPO - -# Install the vendored dependencies before copying code -# docker caching prevents reinstall on code change! -ADD Gopkg.toml Gopkg.toml -ADD Gopkg.lock Gopkg.lock -ADD Makefile Makefile -RUN make get_tools -RUN make get_vendor_deps - -# Install the apps -ADD scripts scripts -RUN bash scripts/install_abci_apps.sh - -# Now copy in the code -# NOTE: this will overwrite whatever is in vendor/ -COPY . $REPO - -RUN go install ./cmd/tendermint - -# expose the volume for debugging -VOLUME $REPO - -EXPOSE 26656 -EXPOSE 26657 diff --git a/test/docker/build.sh b/test/docker/build.sh deleted file mode 100644 index 39df0872..00000000 --- a/test/docker/build.sh +++ /dev/null @@ -1,3 +0,0 @@ -#! /bin/bash - -docker build -t tester -f ./test/docker/Dockerfile . diff --git a/test/p2p/README.md b/test/p2p/README.md deleted file mode 100644 index 4ee3690a..00000000 --- a/test/p2p/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Tendermint P2P Tests - -These scripts facilitate setting up and testing a local testnet using docker containers. - -Setup your own local testnet as follows. - -For consistency, we assume all commands are run from the Tendermint repository root (ie. $GOPATH/src/github.com/tendermint/tendermint). - -First, build the docker image: - -``` -docker build -t tendermint_tester -f ./test/docker/Dockerfile . -``` - -Now create the docker network: - -``` -docker network create --driver bridge --subnet 172.57.0.0/16 my_testnet -``` - -This gives us a new network with IP addresses in the rage `172.57.0.0 - 172.57.255.255`. -Peers on the network can have any IP address in this range. -For our four node network, let's pick `172.57.0.101 - 172.57.0.104`. -Since we use Tendermint's default listening port of 26656, our list of seed nodes will look like: - -``` -172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656 -``` - -Now we can start up the peers. We already have config files setup in `test/p2p/data/`. 
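-Each `machN` directory already contains the `genesis.json`, `node_key.json`, and
-`priv_validator.json` a node needs. If you want to sanity-check one (assuming you
-have `jq` installed), you can do, for example:
-
-```
-cat test/p2p/data/mach1/core/config/genesis.json | jq .chain_id
-```
-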
-Let's use a for-loop to start our peers: - -``` -for i in $(seq 1 4); do - docker run -d \ - --net=my_testnet\ - --ip="172.57.0.$((100 + $i))" \ - --name local_testnet_$i \ - --entrypoint tendermint \ - -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$i/core \ - tendermint_tester node --p2p.persistent_peers 172.57.0.101:26656,172.57.0.102:26656,172.57.0.103:26656,172.57.0.104:26656 --proxy_app=kvstore -done -``` - -If you now run `docker ps`, you'll see your containers! - -We can confirm they are making blocks by checking the `/status` message using `curl` and `jq` to pretty print the output json: - -``` -curl 172.57.0.101:26657/status | jq . -``` - - - diff --git a/test/p2p/atomic_broadcast/test.sh b/test/p2p/atomic_broadcast/test.sh deleted file mode 100644 index 4e3cc1d8..00000000 --- a/test/p2p/atomic_broadcast/test.sh +++ /dev/null @@ -1,75 +0,0 @@ -#! /bin/bash -set -u - -N=$1 - -################################################################### -# assumes peers are already synced up -# test sending txs -# for each peer: -# send a tx, wait for commit -# assert app hash on every peer reflects the post tx state -################################################################### - -echo "" -# run the test on each of them -for i in $(seq 1 "$N"); do - addr=$(test/p2p/ip.sh "$i"):26657 - - # current state - HASH1=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash) - - # - send a tx - TX=aadeadbeefbeefbeef0$i - echo "Broadcast Tx $TX" - curl -s "$addr/broadcast_tx_commit?tx=0x$TX" - echo "" - - # we need to wait another block to get the new app_hash - h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) - h2=$h1 - while [ "$h2" == "$h1" ]; do - sleep 1 - h2=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) - done - - # wait for all other peers to get to this height - minHeight=$h2 - for j in $(seq 1 "$N"); do - if [[ "$i" != "$j" ]]; then - addrJ=$(test/p2p/ip.sh "$j"):26657 - - h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height) - while [ "$h" -lt "$minHeight" ]; do - sleep 1 - h=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_block_height) - done - fi - done - - # check that hash was updated - HASH2=$(curl -s "$addr/status" | jq .result.sync_info.latest_app_hash) - if [[ "$HASH1" == "$HASH2" ]]; then - echo "Expected state hash to update from $HASH1. Got $HASH2" - exit 1 - fi - - # check we get the same new hash on all other nodes - for j in $(seq 1 "$N"); do - if [[ "$i" != "$j" ]]; then - addrJ=$(test/p2p/ip.sh "$j"):26657 - HASH3=$(curl -s "$addrJ/status" | jq .result.sync_info.latest_app_hash) - - if [[ "$HASH2" != "$HASH3" ]]; then - echo "App hash for node $j doesn't match. Got $HASH3, expected $HASH2" - exit 1 - fi - fi - done - - echo "All nodes are up to date" -done - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/basic/test.sh b/test/p2p/basic/test.sh deleted file mode 100755 index 6b356db2..00000000 --- a/test/p2p/basic/test.sh +++ /dev/null @@ -1,74 +0,0 @@ -#! /bin/bash -set -u - -N=$1 - -################################################################### -# wait for all peers to come online -# for each peer: -# wait to have N-1 peers -# wait to be at height > 1 -################################################################### - -# wait 60s per step per peer -MAX_SLEEP=60 - -# wait for everyone to come online -echo "Waiting for nodes to come online" -for i in `seq 1 $N`; do - addr=$(test/p2p/ip.sh $i):26657 - curl -s $addr/status > /dev/null - ERR=$? 
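-    # poll /status once per second, giving up after MAX_SLEEP attempts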
- COUNT=0 - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $addr/status > /dev/null - ERR=$? - COUNT=$((COUNT+1)) - if [ "$COUNT" -gt "$MAX_SLEEP" ]; then - echo "Waited too long for node $i to come online" - exit 1 - fi - done - echo "... node $i is up" -done - -echo "" -# wait for each of them to sync up -for i in `seq 1 $N`; do - addr=$(test/p2p/ip.sh $i):26657 - N_1=$(($N - 1)) - - # - assert everyone has N-1 other peers - N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` - COUNT=0 - while [ "$N_PEERS" != $N_1 ]; do - echo "Waiting for node $i to connect to all peers ..." - sleep 1 - N_PEERS=`curl -s $addr/net_info | jq '.result.peers | length'` - COUNT=$((COUNT+1)) - if [ "$COUNT" -gt "$MAX_SLEEP" ]; then - echo "Waited too long for node $i to connect to all peers" - exit 1 - fi - done - - # - assert block height is greater than 1 - BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height` - COUNT=0 - while [ "$BLOCK_HEIGHT" -le 1 ]; do - echo "Waiting for node $i to commit a block ..." - sleep 1 - BLOCK_HEIGHT=`curl -s $addr/status | jq .result.sync_info.latest_block_height` - COUNT=$((COUNT+1)) - if [ "$COUNT" -gt "$MAX_SLEEP" ]; then - echo "Waited too long for node $i to commit a block" - exit 1 - fi - done - echo "Node $i is connected to all peers and at block $BLOCK_HEIGHT" -done - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/circleci.sh b/test/p2p/circleci.sh deleted file mode 100644 index 19200afb..00000000 --- a/test/p2p/circleci.sh +++ /dev/null @@ -1,35 +0,0 @@ -#! /bin/bash -set -eux - -# Get the directory of where this script is. -SOURCE="${BASH_SOURCE[0]}" -while [ -h "$SOURCE" ] ; do SOURCE="$(readlink "$SOURCE")"; done -DIR="$( cd -P "$( dirname "$SOURCE" )" && pwd )" - -LOGS_DIR="$DIR/../logs" -echo -echo "* [$(date +"%T")] cleaning up $LOGS_DIR" -rm -rf "$LOGS_DIR" -mkdir -p "$LOGS_DIR" - -set +e -echo -echo "* [$(date +"%T")] removing run_test container" -docker rm -vf run_test -set -e - -echo -echo "* [$(date +"%T")] starting rsyslog container" -docker rm -f rsyslog || true -docker run -d -v "$LOGS_DIR:/var/log/" -p 127.0.0.1:5514:514/udp --name rsyslog voxxit/rsyslog - -set +u -if [[ "$SKIP_BUILD" == "" ]]; then - echo - echo "* [$(date +"%T")] building docker image" - bash "$DIR/../docker/build.sh" -fi - -echo -echo "* [$(date +"%T")] running p2p tests on a local docker network" -bash "$DIR/../p2p/test.sh" tester diff --git a/test/p2p/client.sh b/test/p2p/client.sh deleted file mode 100644 index fa11ce87..00000000 --- a/test/p2p/client.sh +++ /dev/null @@ -1,19 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -ID=$3 -CMD=$4 - -NAME=test_container_$ID - -echo "starting test client container with CMD=$CMD" -# run the test container on the local network -docker run -t --rm \ - -v "$GOPATH/src/github.com/tendermint/tendermint/test/p2p/:/go/src/github.com/tendermint/tendermint/test/p2p" \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh "-1") \ - --name "$NAME" \ - --entrypoint bash \ - "$DOCKER_IMAGE" $CMD diff --git a/test/p2p/data/mach1/core/config/genesis.json b/test/p2p/data/mach1/core/config/genesis.json deleted file mode 100644 index 19577db0..00000000 --- a/test/p2p/data/mach1/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": 1, - "name": "mach1" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": 1, - "name": "mach2" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": 1, - "name": "mach3" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": 1, - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach1/core/config/node_key.json b/test/p2p/data/mach1/core/config/node_key.json deleted file mode 100644 index c6d65008..00000000 --- a/test/p2p/data/mach1/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "954568A3288910", - "value": "BpYtFp8xSrudBa5aBLRuSPD72PGDAUm0dJORDL3Kd5YJbluUzRefVFrjwoHZv1yeDj2P9xkEi2L3hJCUz/qFkQ==" - } -} diff --git a/test/p2p/data/mach1/core/config/priv_validator.json b/test/p2p/data/mach1/core/config/priv_validator.json deleted file mode 100644 index 08c7c503..00000000 --- a/test/p2p/data/mach1/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "7E9D1FB08EDBAFCF116638D4C8FAFAEE2ABE1AAA", - "pub_key": { - "type": "AC26791624DE60", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "VHqgfHqM4WxcsqQMbCbRWwoylgQQqfHqblC2NvGrOJq+iTPf8WAMAm40cY8XhaTN6rkMNWmLOU44tpR66R3hFg==" - } -} diff --git a/test/p2p/data/mach2/core/config/genesis.json b/test/p2p/data/mach2/core/config/genesis.json deleted file mode 100644 index 19577db0..00000000 --- a/test/p2p/data/mach2/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": 1, - "name": "mach1" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": 1, - "name": "mach2" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": 1, - "name": "mach3" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": 1, - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach2/core/config/node_key.json b/test/p2p/data/mach2/core/config/node_key.json deleted file mode 100644 index 146a1328..00000000 --- 
a/test/p2p/data/mach2/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "954568A3288910", - "value": "uM6LDVE4wQIIUmq9rc6RxzX8zEGG4G4Jcuw15klzQopF68YfJM4bkbPSavurEcJ4nvBMusKBg2GcARFrZqnFKA==" - } -} diff --git a/test/p2p/data/mach2/core/config/priv_validator.json b/test/p2p/data/mach2/core/config/priv_validator.json deleted file mode 100644 index 8e813dff..00000000 --- a/test/p2p/data/mach2/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "8893D14FE09F1157E39CD34B98036048D51B4985", - "pub_key": { - "type": "AC26791624DE60", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "0EeInmBQL8MSnQq38zSxg47Z7R7Nmcu5a3GtWr9agUNtxTRGUyMSZYfSoqk7WdaJtxcHOx3paKJabvE9WVMYrQ==" - } -} diff --git a/test/p2p/data/mach3/core/config/genesis.json b/test/p2p/data/mach3/core/config/genesis.json deleted file mode 100644 index 19577db0..00000000 --- a/test/p2p/data/mach3/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": 1, - "name": "mach1" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": 1, - "name": "mach2" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": 1, - "name": "mach3" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": 1, - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach3/core/config/node_key.json b/test/p2p/data/mach3/core/config/node_key.json deleted file mode 100644 index 82689b8e..00000000 --- a/test/p2p/data/mach3/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "954568A3288910", - "value": "kT3orG0YkipT9rAZbvAjtGk/7Pu1ZeCE8LSUF2jz2uiSs1rdlUVi/gccRlvCRLKvrtSicOyEkmk0FHPOGS3mgg==" - } -} diff --git a/test/p2p/data/mach3/core/config/priv_validator.json b/test/p2p/data/mach3/core/config/priv_validator.json deleted file mode 100644 index 84c98b98..00000000 --- a/test/p2p/data/mach3/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "7C747D7E002932B3864E3FBE9AC04287043F66A0", - "pub_key": { - "type": "AC26791624DE60", - "value": "rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "waTkfzSfxfVW9Kmie6d2uUQkwxK6ps9u5EuGc0jXw/KuZ6xpfRNaoLRgHqV+qrP+v0uqTyKcRaWYwphbEvzRoQ==" - } -} diff --git a/test/p2p/data/mach4/core/config/genesis.json b/test/p2p/data/mach4/core/config/genesis.json deleted file mode 100644 index 19577db0..00000000 --- a/test/p2p/data/mach4/core/config/genesis.json +++ /dev/null @@ -1,39 +0,0 @@ -{ - "genesis_time": "2016-06-24T20:01:19.322Z", - "chain_id": "chain-9ujDWI", - "validators": [ - { - "pub_key": { - "type": "AC26791624DE60", - "value": "vokz3/FgDAJuNHGPF4Wkzeq5DDVpizlOOLaUeukd4RY=" - }, - "power": 1, - "name": "mach1" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "bcU0RlMjEmWH0qKpO1nWibcXBzsd6WiiWm7xPVlTGK0=" - }, - "power": 1, - "name": "mach2" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": 
"rmesaX0TWqC0YB6lfqqz/r9Lqk8inEWlmMKYWxL80aE=" - }, - "power": 1, - "name": "mach3" - }, - { - "pub_key": { - "type": "AC26791624DE60", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "power": 1, - "name": "mach4" - } - ], - "app_hash": "" -} diff --git a/test/p2p/data/mach4/core/config/node_key.json b/test/p2p/data/mach4/core/config/node_key.json deleted file mode 100644 index a0c8d391..00000000 --- a/test/p2p/data/mach4/core/config/node_key.json +++ /dev/null @@ -1,6 +0,0 @@ -{ - "priv_key": { - "type": "954568A3288910", - "value": "QIIm8/QEEawiJi3Zozv+J9b+1CufCEkGs3lxGMlRy4L4FVIXCoXJTwYIrotZtwoMqLYEqQV1hbKKJmFA3GFelw==" - } -} diff --git a/test/p2p/data/mach4/core/config/priv_validator.json b/test/p2p/data/mach4/core/config/priv_validator.json deleted file mode 100644 index 4f88045b..00000000 --- a/test/p2p/data/mach4/core/config/priv_validator.json +++ /dev/null @@ -1,14 +0,0 @@ -{ - "address": "CEBEFE3CA1363D425643EF63FC179E77A50A1E9A", - "pub_key": { - "type": "AC26791624DE60", - "value": "nryPWM7UtG3NWrirpZHdJTzXy1A3Jz/aMrwLZGHE79k=" - }, - "last_height": 0, - "last_round": 0, - "last_step": 0, - "priv_key": { - "type": "954568A3288910", - "value": "xMw+0o8CDC29qYvNvwjDztNwRw508l6TjV0pXo49KwyevI9YztS0bc1auKulkd0lPNfLUDcnP9oyvAtkYcTv2Q==" - } -} diff --git a/test/p2p/fast_sync/check_peer.sh b/test/p2p/fast_sync/check_peer.sh deleted file mode 100644 index e69f977f..00000000 --- a/test/p2p/fast_sync/check_peer.sh +++ /dev/null @@ -1,43 +0,0 @@ -#! /bin/bash -set -eu -set -o pipefail - -ID=$1 - -########################################### -# -# Wait for peer to catchup to other peers -# -########################################### - -addr=$(test/p2p/ip.sh $ID):26657 -peerID=$(( $(($ID % 4)) + 1 )) # 1->2 ... 3->4 ... 4->1 -peer_addr=$(test/p2p/ip.sh $peerID):26657 - -# get another peer's height -h1=`curl -s $peer_addr/status | jq .result.sync_info.latest_block_height` - -# get another peer's state -root1=`curl -s $peer_addr/status | jq .result.sync_info.latest_app_hash` - -echo "Other peer is on height $h1 with state $root1" -echo "Waiting for peer $ID to catch up" - -# wait for it to sync to past its previous height -set +e -set +o pipefail -h2="0" -while [[ "$h2" -lt "$(($h1+3))" ]]; do - sleep 1 - h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height` - echo "... $h2" -done - -# check the app hash -root2=`curl -s $addr/status | jq .result.sync_info.latest_app_hash` - -if [[ "$root1" != "$root2" ]]; then - echo "App hash after fast sync does not match. Got $root2; expected $root1" - exit 1 -fi -echo "... fast sync successful" diff --git a/test/p2p/fast_sync/test.sh b/test/p2p/fast_sync/test.sh deleted file mode 100644 index 8820d199..00000000 --- a/test/p2p/fast_sync/test.sh +++ /dev/null @@ -1,16 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -N=$3 -PROXY_APP=$4 - -cd $GOPATH/src/github.com/tendermint/tendermint - -# run it on each of them -for i in `seq 1 $N`; do - bash test/p2p/fast_sync/test_peer.sh $DOCKER_IMAGE $NETWORK_NAME $i $N $PROXY_APP -done - - diff --git a/test/p2p/fast_sync/test_peer.sh b/test/p2p/fast_sync/test_peer.sh deleted file mode 100644 index 08ea9deb..00000000 --- a/test/p2p/fast_sync/test_peer.sh +++ /dev/null @@ -1,38 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -ID=$3 -N=$4 -PROXY_APP=$5 - -############################################################### -# this runs on each peer: -# kill peer -# bring it back online via fast sync -# wait for it to sync and check the app hash -############################################################### - - -echo "Testing fastsync on node $ID" - -# kill peer -set +e # circle sigh :( - docker rm -vf local_testnet_$ID - set -e - - # restart peer - should have an empty blockchain - PERSISTENT_PEERS="$(test/p2p/ip_plus_id.sh 1 $DOCKER_IMAGE):26656" - for j in `seq 2 $N`; do - PERSISTENT_PEERS="$PERSISTENT_PEERS,$(test/p2p/ip_plus_id.sh $j $DOCKER_IMAGE):26656" - done - bash test/p2p/peer.sh $DOCKER_IMAGE $NETWORK_NAME $ID $PROXY_APP "--p2p.persistent_peers $PERSISTENT_PEERS --p2p.pex --rpc.unsafe" - - # wait for peer to sync and check the app hash - bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME fs_$ID "test/p2p/fast_sync/check_peer.sh $ID" - - echo "" - echo "PASS" - echo "" - diff --git a/test/p2p/ip.sh b/test/p2p/ip.sh deleted file mode 100755 index 77753f54..00000000 --- a/test/p2p/ip.sh +++ /dev/null @@ -1,5 +0,0 @@ -#! /bin/bash -set -eu - -ID=$1 -echo "172.57.0.$((100+$ID))" diff --git a/test/p2p/ip_plus_id.sh b/test/p2p/ip_plus_id.sh deleted file mode 100755 index 0d2248fe..00000000 --- a/test/p2p/ip_plus_id.sh +++ /dev/null @@ -1,7 +0,0 @@ -#! /bin/bash -set -eu - -ID=$1 -DOCKER_IMAGE=$2 -NODEID="$(docker run --rm -e TMHOME=/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core $DOCKER_IMAGE tendermint show_node_id)" -echo "$NODEID@172.57.0.$((100+$ID))" diff --git a/test/p2p/kill_all/check_peers.sh b/test/p2p/kill_all/check_peers.sh deleted file mode 100644 index 87a76811..00000000 --- a/test/p2p/kill_all/check_peers.sh +++ /dev/null @@ -1,49 +0,0 @@ -#! /bin/bash -set -eu - -NUM_OF_PEERS=$1 - -# how many attempts for each peer to catch up by height -MAX_ATTEMPTS_TO_CATCH_UP=120 - -echo "Waiting for nodes to come online" -set +e -for i in $(seq 1 "$NUM_OF_PEERS"); do - addr=$(test/p2p/ip.sh "$i"):26657 - curl -s "$addr/status" > /dev/null - ERR=$? - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s "$addr/status" > /dev/null - ERR=$? - done - echo "... node $i is up" -done -set -e - -# get the first peer's height -addr=$(test/p2p/ip.sh 1):26657 -h1=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) -echo "1st peer is on height $h1" - -echo "Waiting until other peers reporting a height higher than the 1st one" -for i in $(seq 2 "$NUM_OF_PEERS"); do - attempt=1 - hi=0 - - while [[ $hi -le $h1 ]] ; do - addr=$(test/p2p/ip.sh "$i"):26657 - hi=$(curl -s "$addr/status" | jq .result.sync_info.latest_block_height) - - echo "... peer $i is on height $hi" - - ((attempt++)) - if [ "$attempt" -ge $MAX_ATTEMPTS_TO_CATCH_UP ] ; then - echo "$attempt unsuccessful attempts were made to catch up" - curl -s "$addr/dump_consensus_state" | jq .result - exit 1 - fi - - sleep 1 - done -done diff --git a/test/p2p/kill_all/test.sh b/test/p2p/kill_all/test.sh deleted file mode 100644 index 318a1fe4..00000000 --- a/test/p2p/kill_all/test.sh +++ /dev/null @@ -1,32 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -NUM_OF_PEERS=$3 -NUM_OF_CRASHES=$4 - -cd "$GOPATH/src/github.com/tendermint/tendermint" - -############################################################### -# NUM_OF_CRASHES times: -# restart all peers -# wait for them to sync and check that they are making progress -############################################################### - -for i in $(seq 1 "$NUM_OF_CRASHES"); do - echo "" - echo "Restarting all peers! Take $i ..." - - # restart all peers - for j in $(seq 1 "$NUM_OF_PEERS"); do - docker stop "local_testnet_$j" - docker start "local_testnet_$j" - done - - bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" kill_all_$i "test/p2p/kill_all/check_peers.sh $NUM_OF_PEERS" -done - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/local_testnet_start.sh b/test/p2p/local_testnet_start.sh deleted file mode 100644 index 25b3c6d3..00000000 --- a/test/p2p/local_testnet_start.sh +++ /dev/null @@ -1,24 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -N=$3 -APP_PROXY=$4 - -set +u -PERSISTENT_PEERS=$5 -if [[ "$PERSISTENT_PEERS" != "" ]]; then - echo "PersistentPeers: $PERSISTENT_PEERS" - PERSISTENT_PEERS="--p2p.persistent_peers $PERSISTENT_PEERS" -fi -set -u - -cd "$GOPATH/src/github.com/tendermint/tendermint" - -# create docker network -docker network create --driver bridge --subnet 172.57.0.0/16 "$NETWORK_NAME" - -for i in $(seq 1 "$N"); do - bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$i" "$APP_PROXY" "$PERSISTENT_PEERS --p2p.pex --rpc.unsafe" -done diff --git a/test/p2p/local_testnet_stop.sh b/test/p2p/local_testnet_stop.sh deleted file mode 100644 index 1dace469..00000000 --- a/test/p2p/local_testnet_stop.sh +++ /dev/null @@ -1,12 +0,0 @@ -#! /bin/bash -set -u - -NETWORK_NAME=$1 -N=$2 - -for i in $(seq 1 "$N"); do - docker stop "local_testnet_$i" - docker rm -vf "local_testnet_$i" -done - -docker network rm "$NETWORK_NAME" diff --git a/test/p2p/peer.sh b/test/p2p/peer.sh deleted file mode 100644 index 15d44ff3..00000000 --- a/test/p2p/peer.sh +++ /dev/null @@ -1,27 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -ID=$3 -APP_PROXY=$4 - -set +u -NODE_FLAGS=$5 -set -u - -echo "starting tendermint peer ID=$ID" -# start tendermint container on the network -# NOTE: $NODE_FLAGS should be unescaped (no quotes). otherwise it will be -# treated as one flag. -docker run -d \ - --net="$NETWORK_NAME" \ - --ip=$(test/p2p/ip.sh "$ID") \ - --name "local_testnet_$ID" \ - --entrypoint tendermint \ - -e TMHOME="/go/src/github.com/tendermint/tendermint/test/p2p/data/mach$ID/core" \ - --log-driver=syslog \ - --log-opt syslog-address=udp://127.0.0.1:5514 \ - --log-opt syslog-facility=daemon \ - --log-opt tag="{{.Name}}" \ - "$DOCKER_IMAGE" node $NODE_FLAGS --log_level=debug --proxy_app="$APP_PROXY" diff --git a/test/p2p/persistent_peers.sh b/test/p2p/persistent_peers.sh deleted file mode 100644 index 6d3e1ed6..00000000 --- a/test/p2p/persistent_peers.sh +++ /dev/null @@ -1,13 +0,0 @@ -#! /bin/bash -set -eu - -N=$1 -DOCKER_IMAGE=$2 - -cd "$GOPATH/src/github.com/tendermint/tendermint" - -persistent_peers="$(test/p2p/ip_plus_id.sh 1 $DOCKER_IMAGE):26656" -for i in $(seq 2 $N); do - persistent_peers="$persistent_peers,$(test/p2p/ip_plus_id.sh $i $DOCKER_IMAGE):26656" -done -echo "$persistent_peers" diff --git a/test/p2p/pex/check_peer.sh b/test/p2p/pex/check_peer.sh deleted file mode 100644 index 7ae42e9b..00000000 --- a/test/p2p/pex/check_peer.sh +++ /dev/null @@ -1,17 +0,0 @@ -#! 
/bin/bash -set -u - -ID=$1 -N=$2 - -addr=$(test/p2p/ip.sh "$ID"):26657 - -echo "2. wait until peer $ID connects to other nodes using pex reactor" -peers_count="0" -while [[ "$peers_count" -lt "$((N-1))" ]]; do - sleep 1 - peers_count=$(curl -s "$addr/net_info" | jq ".result.peers | length") - echo "... peers count = $peers_count, expected = $((N-1))" -done - -echo "... successful" diff --git a/test/p2p/pex/dial_peers.sh b/test/p2p/pex/dial_peers.sh deleted file mode 100644 index 43bde48b..00000000 --- a/test/p2p/pex/dial_peers.sh +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -set -u - -N=$1 -PEERS=$2 - -cd "$GOPATH/src/github.com/tendermint/tendermint" - -echo "Waiting for nodes to come online" -for i in $(seq 1 "$N"); do - addr=$(test/p2p/ip.sh "$i"):26657 - curl -s "$addr/status" > /dev/null - ERR=$? - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s "$addr/status" > /dev/null - ERR=$? - done - echo "... node $i is up" -done - -IP=$(test/p2p/ip.sh 1) -curl "$IP:26657/dial_peers?persistent=true&peers=\\[$PEERS\\]" diff --git a/test/p2p/pex/test.sh b/test/p2p/pex/test.sh deleted file mode 100644 index ffecd651..00000000 --- a/test/p2p/pex/test.sh +++ /dev/null @@ -1,15 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -N=$3 -PROXY_APP=$4 - -cd "$GOPATH/src/github.com/tendermint/tendermint" - -echo "Test reconnecting from the address book" -bash test/p2p/pex/test_addrbook.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" - -echo "Test connecting via /dial_peers" -bash test/p2p/pex/test_dial_peers.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" diff --git a/test/p2p/pex/test_addrbook.sh b/test/p2p/pex/test_addrbook.sh deleted file mode 100644 index d54bcf42..00000000 --- a/test/p2p/pex/test_addrbook.sh +++ /dev/null @@ -1,57 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -N=$3 -PROXY_APP=$4 - -ID=1 - -echo "----------------------------------------------------------------------" -echo "Testing pex creates the addrbook and uses it if persistent_peers are not provided" -echo "(assuming peers are started with pex enabled)" - -CLIENT_NAME="pex_addrbook_$ID" - -echo "1. restart peer $ID" -docker stop "local_testnet_$ID" -# preserve addrbook.json -docker cp "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" "/tmp/addrbook.json" -set +e #CIRCLE -docker rm -vf "local_testnet_$ID" -set -e - -# NOTE that we do not provide persistent_peers -bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" -docker cp "/tmp/addrbook.json" "local_testnet_$ID:/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" -echo "with the following addrbook:" -cat /tmp/addrbook.json -# exec doesn't work on circle -# docker exec "local_testnet_$ID" cat "/go/src/github.com/tendermint/tendermint/test/p2p/data/mach1/core/config/addrbook.json" -echo "" - -# if the client runs forever, it means addrbook wasn't saved or was empty -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" - -echo "----------------------------------------------------------------------" -echo "Testing other peers connect to us if we have neither persistent_peers nor the addrbook" -echo "(assuming peers are started with pex enabled)" - -CLIENT_NAME="pex_no_addrbook_$ID" - -echo "1. 
restart peer $ID" -docker stop "local_testnet_$ID" -set +e #CIRCLE -docker rm -vf "local_testnet_$ID" -set -e - -# NOTE that we do not provide persistent_peers -bash test/p2p/peer.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$ID" "$PROXY_APP" "--p2p.pex --rpc.unsafe" - -# if the client runs forever, it means other peers have removed us from their books (which should not happen) -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$CLIENT_NAME" "test/p2p/pex/check_peer.sh $ID $N" - -echo "" -echo "PASS" -echo "" diff --git a/test/p2p/pex/test_dial_peers.sh b/test/p2p/pex/test_dial_peers.sh deleted file mode 100644 index cb6e7e18..00000000 --- a/test/p2p/pex/test_dial_peers.sh +++ /dev/null @@ -1,39 +0,0 @@ -#! /bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=$2 -N=$3 -PROXY_APP=$4 - -ID=1 - -cd $GOPATH/src/github.com/tendermint/tendermint - -echo "----------------------------------------------------------------------" -echo "Testing full network connection using one /dial_peers call" -echo "(assuming peers are started with pex enabled)" - -# stop the existing testnet and remove local network -set +e -bash test/p2p/local_testnet_stop.sh $NETWORK_NAME $N -set -e - -# start the testnet on a local network -# NOTE we re-use the same network for all tests -bash test/p2p/local_testnet_start.sh $DOCKER_IMAGE $NETWORK_NAME $N $PROXY_APP "" - -PERSISTENT_PEERS="\"$(test/p2p/ip_plus_id.sh 1 $DOCKER_IMAGE):26656\"" -for i in $(seq 2 $N); do - PERSISTENT_PEERS="$PERSISTENT_PEERS,\"$(test/p2p/ip_plus_id.sh $i $DOCKER_IMAGE):26656\"" -done -echo "$PERSISTENT_PEERS" - -# dial peers from one node -CLIENT_NAME="dial_peers" -bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/pex/dial_peers.sh $N $PERSISTENT_PEERS" - -# test basic connectivity and consensus -# start client container and check the num peers and height for all nodes -CLIENT_NAME="dial_peers_basic" -bash test/p2p/client.sh $DOCKER_IMAGE $NETWORK_NAME $CLIENT_NAME "test/p2p/basic/test.sh $N" diff --git a/test/p2p/test.sh b/test/p2p/test.sh deleted file mode 100644 index abcf2ca0..00000000 --- a/test/p2p/test.sh +++ /dev/null @@ -1,38 +0,0 @@ -#! 
/bin/bash -set -eu - -DOCKER_IMAGE=$1 -NETWORK_NAME=local_testnet -N=4 -PROXY_APP=persistent_kvstore - -cd "$GOPATH/src/github.com/tendermint/tendermint" - -# stop the existing testnet and remove local network -set +e -bash test/p2p/local_testnet_stop.sh "$NETWORK_NAME" "$N" -set -e - -PERSISTENT_PEERS=$(bash test/p2p/persistent_peers.sh $N $DOCKER_IMAGE) - -# start the testnet on a local network -# NOTE we re-use the same network for all tests -bash test/p2p/local_testnet_start.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" "$PERSISTENT_PEERS" - -# test basic connectivity and consensus -# start client container and check the num peers and height for all nodes -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" basic "test/p2p/basic/test.sh $N" - -# test atomic broadcast: -# start client container and test sending a tx to each node -bash test/p2p/client.sh "$DOCKER_IMAGE" "$NETWORK_NAME" ab "test/p2p/atomic_broadcast/test.sh $N" - -# test fast sync (from current state of network): -# for each node, kill it and readd via fast sync -bash test/p2p/fast_sync/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" - -# test killing all peers 3 times -bash test/p2p/kill_all/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" 3 - -# test pex -bash test/p2p/pex/test.sh "$DOCKER_IMAGE" "$NETWORK_NAME" "$N" "$PROXY_APP" diff --git a/test/persist/test_failure_indices.sh b/test/persist/test_failure_indices.sh deleted file mode 100644 index 4d523d94..00000000 --- a/test/persist/test_failure_indices.sh +++ /dev/null @@ -1,124 +0,0 @@ -#! /bin/bash - -export PATH="$GOBIN:$PATH" -export TMHOME=$HOME/.tendermint_persist - -rm -rf "$TMHOME" -tendermint init - -# use a unix socket so we can remove it -RPC_ADDR="$(pwd)/rpc.sock" - -TM_CMD="tendermint node --log_level=debug --rpc.laddr=unix://$RPC_ADDR" # &> tendermint_${name}.log" -DUMMY_CMD="abci-cli kvstore --persist $TMHOME/kvstore" # &> kvstore_${name}.log" - - -function start_procs(){ - name=$1 - indexToFail=$2 - echo "Starting persistent kvstore and tendermint" - if [[ "$CIRCLECI" == true ]]; then - $DUMMY_CMD & - else - $DUMMY_CMD &> "kvstore_${name}.log" & - fi - PID_DUMMY=$! - - # before starting tendermint, remove the rpc socket - rm -f $RPC_ADDR - if [[ "$indexToFail" == "" ]]; then - # run in background, dont fail - if [[ "$CIRCLECI" == true ]]; then - $TM_CMD & - else - $TM_CMD &> "tendermint_${name}.log" & - fi - PID_TENDERMINT=$! - else - # run in foreground, fail - if [[ "$CIRCLECI" == true ]]; then - FAIL_TEST_INDEX=$indexToFail $TM_CMD - else - FAIL_TEST_INDEX=$indexToFail $TM_CMD &> "tendermint_${name}.log" - fi - PID_TENDERMINT=$! - fi -} - -function kill_procs(){ - kill -9 "$PID_DUMMY" "$PID_TENDERMINT" - wait "$PID_DUMMY" - wait "$PID_TENDERMINT" -} - -# wait for port to be available -function wait_for_port() { - port=$1 - # this will succeed while port is bound - nc -z 127.0.0.1 $port - ERR=$? - i=0 - while [ "$ERR" == 0 ]; do - echo "... port $port is still bound. waiting ..." - sleep 1 - nc -z 127.0.0.1 $port - ERR=$? - i=$((i + 1)) - if [[ $i == 10 ]]; then - echo "Timed out waiting for port to be released" - exit 1 - fi - done - echo "... port $port is free!" -} - - -failsStart=0 -fails=$(grep -r "fail.Fail" --include \*.go . 
| wc -l) -failsEnd=$((fails-1)) - -for failIndex in $(seq $failsStart $failsEnd); do - echo "" - echo "* Test FailIndex $failIndex" - # test failure at failIndex - - bash $(dirname $0)/txs.sh "localhost:26657" & - start_procs 1 "$failIndex" - - # tendermint should already have exited when it hits the fail index - # but kill -9 for good measure - kill_procs - - start_procs 2 - - # wait for node to handshake and make a new block - # NOTE: --unix-socket is only available in curl v7.40+ - curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null - ERR=$? - i=0 - while [ "$ERR" != 0 ]; do - sleep 1 - curl -s --unix-socket "$RPC_ADDR" http://localhost/status > /dev/null - ERR=$? - i=$((i + 1)) - if [[ $i == 20 ]]; then - echo "Timed out waiting for tendermint to start" - exit 1 - fi - done - - # wait for a new block - h1=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height) - h2=$h1 - while [ "$h2" == "$h1" ]; do - sleep 1 - h2=$(curl -s --unix-socket "$RPC_ADDR" http://localhost/status | jq .result.sync_info.latest_block_height) - done - - kill_procs - - echo "* Passed Test for FailIndex $failIndex" - echo "" -done - -echo "Passed Test: Persistence" diff --git a/test/persist/test_simple.sh b/test/persist/test_simple.sh deleted file mode 100644 index 706e04c2..00000000 --- a/test/persist/test_simple.sh +++ /dev/null @@ -1,70 +0,0 @@ -#! /bin/bash - - -export TMHOME=$HOME/.tendermint_persist - -rm -rf $TMHOME -tendermint init - -function start_procs(){ - name=$1 - echo "Starting persistent kvstore and tendermint" - abci-cli kvstore --persist $TMHOME/kvstore &> "kvstore_${name}.log" & - PID_DUMMY=$! - tendermint node &> tendermint_${name}.log & - PID_TENDERMINT=$! - sleep 5 -} - -function kill_procs(){ - kill -9 $PID_DUMMY $PID_TENDERMINT -} - - -function send_txs(){ - # send a bunch of txs over a few blocks - echo "Sending txs" - for i in `seq 1 5`; do - for j in `seq 1 100`; do - tx=`head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"'` - curl -s 127.0.0.1:26657/broadcast_tx_async?tx=0x$tx &> /dev/null - done - sleep 1 - done -} - - -start_procs 1 -send_txs -kill_procs - -start_procs 2 - -# wait for node to handshake and make a new block -addr="localhost:26657" -curl -s $addr/status > /dev/null -ERR=$? -i=0 -while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $addr/status > /dev/null - ERR=$? - i=$(($i + 1)) - if [[ $i == 10 ]]; then - echo "Timed out waiting for tendermint to start" - exit 1 - fi -done - -# wait for a new block -h1=`curl -s $addr/status | jq .result.sync_info.latest_block_height` -h2=$h1 -while [ "$h2" == "$h1" ]; do - sleep 1 - h2=`curl -s $addr/status | jq .result.sync_info.latest_block_height` -done - -kill_procs -sleep 2 - -echo "Passed Test: Persistence" diff --git a/test/persist/txs.sh b/test/persist/txs.sh deleted file mode 100644 index 120aa8a5..00000000 --- a/test/persist/txs.sh +++ /dev/null @@ -1,23 +0,0 @@ -#! /bin/bash -set -u - -# wait till node is up, send txs -ADDR=$1 #="127.0.0.1:26657" -curl -s $ADDR/status > /dev/null -ERR=$? -while [ "$ERR" != 0 ]; do - sleep 1 - curl -s $ADDR/status > /dev/null - ERR=$? 
-done - -# send a bunch of txs over a few blocks -echo "Node is up, sending txs" -for i in $(seq 1 5); do - for _ in $(seq 1 100); do - tx=$(head -c 8 /dev/urandom | hexdump -ve '1/1 "%.2X"') - curl -s "$ADDR/broadcast_tx_async?tx=0x$tx" &> /dev/null - done - echo "sent 100" - sleep 1 -done diff --git a/test/test_cover.sh b/test/test_cover.sh deleted file mode 100644 index 59ce15b0..00000000 --- a/test/test_cover.sh +++ /dev/null @@ -1,14 +0,0 @@ -#! /bin/bash - -PKGS=$(go list github.com/tendermint/tendermint/... | grep -v /vendor/) - -set -e - -echo "mode: atomic" > coverage.txt -for pkg in ${PKGS[@]}; do - go test -v -timeout 30m -race -coverprofile=profile.out -covermode=atomic "$pkg" - if [ -f profile.out ]; then - tail -n +2 profile.out >> coverage.txt; - rm profile.out - fi -done diff --git a/types/block.go b/types/block.go deleted file mode 100644 index 6adc0c4c..00000000 --- a/types/block.go +++ /dev/null @@ -1,577 +0,0 @@ -package types - -import ( - "bytes" - "errors" - "fmt" - "strings" - "sync" - "time" - - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" - "golang.org/x/crypto/ripemd160" -) - -// Block defines the atomic unit of a Tendermint blockchain. -// TODO: add Version byte -type Block struct { - mtx sync.Mutex - *Header `json:"header"` - *Data `json:"data"` - Evidence EvidenceData `json:"evidence"` - LastCommit *Commit `json:"last_commit"` -} - -// MakeBlock returns a new block with an empty header, except what can be computed from itself. -// It populates the same set of fields validated by ValidateBasic -func MakeBlock(height int64, txs []Tx, commit *Commit) *Block { - block := &Block{ - Header: &Header{ - Height: height, - Time: time.Now(), - NumTxs: int64(len(txs)), - }, - LastCommit: commit, - Data: &Data{ - Txs: txs, - }, - } - block.fillHeader() - return block -} - -// AddEvidence appends the given evidence to the block -func (b *Block) AddEvidence(evidence []Evidence) { - b.Evidence.Evidence = append(b.Evidence.Evidence, evidence...) -} - -// ValidateBasic performs basic validation that doesn't involve state data. -// It checks the internal consistency of the block. -func (b *Block) ValidateBasic() error { - if b == nil { - return errors.New("Nil blocks are invalid") - } - b.mtx.Lock() - defer b.mtx.Unlock() - - newTxs := int64(len(b.Data.Txs)) - if b.NumTxs != newTxs { - return fmt.Errorf("Wrong Block.Header.NumTxs. Expected %v, got %v", newTxs, b.NumTxs) - } - if !bytes.Equal(b.LastCommitHash, b.LastCommit.Hash()) { - return fmt.Errorf("Wrong Block.Header.LastCommitHash. Expected %v, got %v", b.LastCommitHash, b.LastCommit.Hash()) - } - if b.Header.Height != 1 { - if err := b.LastCommit.ValidateBasic(); err != nil { - return err - } - } - if !bytes.Equal(b.DataHash, b.Data.Hash()) { - return fmt.Errorf("Wrong Block.Header.DataHash. Expected %v, got %v", b.DataHash, b.Data.Hash()) - } - if !bytes.Equal(b.EvidenceHash, b.Evidence.Hash()) { - return errors.New(cmn.Fmt("Wrong Block.Header.EvidenceHash. Expected %v, got %v", b.EvidenceHash, b.Evidence.Hash())) - } - return nil -} - -// fillHeader fills in any remaining header fields that are a function of the block data -func (b *Block) fillHeader() { - if b.LastCommitHash == nil { - b.LastCommitHash = b.LastCommit.Hash() - } - if b.DataHash == nil { - b.DataHash = b.Data.Hash() - } - if b.EvidenceHash == nil { - b.EvidenceHash = b.Evidence.Hash() - } -} - -// Hash computes and returns the block hash. -// If the block is incomplete, block hash is nil for safety. 
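A minimal, self-contained sketch of the pattern `types/block.go` uses above: per-section hashes are filled into the header lazily (`fillHeader`), and the block hash is just the hash of the completed header. The types and plain sha256 hashing below are illustrative stand-ins, not the real amino/merkle code.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// Illustrative stand-ins for the real Header/Block types: the header
// caches a hash of the block contents, fillHeader computes it lazily,
// and the block hash is simply the header hash.
type header struct {
	DataHash []byte
}

type block struct {
	header
	Txs [][]byte
}

// fillHeader fills in any hashes that are a function of the block data.
func (b *block) fillHeader() {
	if b.DataHash == nil {
		h := sha256.New()
		for _, tx := range b.Txs {
			h.Write(tx)
		}
		b.DataHash = h.Sum(nil)
	}
}

// Hash returns nil for a nil block (mirroring the safety rule above),
// otherwise the hash of the completed header.
func (b *block) Hash() []byte {
	if b == nil {
		return nil
	}
	b.fillHeader()
	sum := sha256.Sum256(b.DataHash)
	return sum[:]
}

func main() {
	b := &block{Txs: [][]byte{[]byte("foo"), []byte("bar")}}
	fmt.Printf("block hash: %X\n", b.Hash())
}
```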
-func (b *Block) Hash() cmn.HexBytes { - if b == nil { - return nil - } - b.mtx.Lock() - defer b.mtx.Unlock() - - if b == nil || b.Header == nil || b.Data == nil || b.LastCommit == nil { - return nil - } - b.fillHeader() - return b.Header.Hash() -} - -// MakePartSet returns a PartSet containing parts of a serialized block. -// This is the form in which the block is gossiped to peers. -func (b *Block) MakePartSet(partSize int) *PartSet { - if b == nil { - return nil - } - b.mtx.Lock() - defer b.mtx.Unlock() - - // We prefix the byte length, so that unmarshaling - // can easily happen via a reader. - bz, err := cdc.MarshalBinary(b) - if err != nil { - panic(err) - } - return NewPartSetFromData(bz, partSize) -} - -// HashesTo is a convenience function that checks if a block hashes to the given argument. -// Returns false if the block is nil or the hash is empty. -func (b *Block) HashesTo(hash []byte) bool { - if len(hash) == 0 { - return false - } - if b == nil { - return false - } - return bytes.Equal(b.Hash(), hash) -} - -// Size returns the size of the block in bytes. -func (b *Block) Size() int { - bz, err := cdc.MarshalBinaryBare(b) - if err != nil { - return 0 - } - return len(bz) -} - -// String returns a string representation of the block -func (b *Block) String() string { - return b.StringIndented("") -} - -// StringIndented returns a string representation of the block -func (b *Block) StringIndented(indent string) string { - if b == nil { - return "nil-Block" - } - return fmt.Sprintf(`Block{ -%s %v -%s %v -%s %v -%s %v -%s}#%v`, - indent, b.Header.StringIndented(indent+" "), - indent, b.Data.StringIndented(indent+" "), - indent, b.Evidence.StringIndented(indent+" "), - indent, b.LastCommit.StringIndented(indent+" "), - indent, b.Hash()) -} - -// StringShort returns a shortened string representation of the block -func (b *Block) StringShort() string { - if b == nil { - return "nil-Block" - } - return fmt.Sprintf("Block#%v", b.Hash()) -} - -//----------------------------------------------------------------------------- - -// Header defines the structure of a Tendermint block header -// TODO: limit header size -// NOTE: changes to the Header should be duplicated in the abci Header -type Header struct { - // basic block info - ChainID string `json:"chain_id"` - Height int64 `json:"height"` - Time time.Time `json:"time"` - NumTxs int64 `json:"num_txs"` - - // prev block info - LastBlockID BlockID `json:"last_block_id"` - TotalTxs int64 `json:"total_txs"` - - // hashes of block data - LastCommitHash cmn.HexBytes `json:"last_commit_hash"` // commit from validators from the last block - DataHash cmn.HexBytes `json:"data_hash"` // transactions - - // hashes from the app output from the prev block - ValidatorsHash cmn.HexBytes `json:"validators_hash"` // validators for the current block - ConsensusHash cmn.HexBytes `json:"consensus_hash"` // consensus params for current block - AppHash cmn.HexBytes `json:"app_hash"` // state after txs from the previous block - LastResultsHash cmn.HexBytes `json:"last_results_hash"` // root hash of all results from the txs from the previous block - - // consensus info - EvidenceHash cmn.HexBytes `json:"evidence_hash"` // evidence included in the block -} - -// Hash returns the hash of the header. -// Returns nil if ValidatorsHash is missing, -// since a Header is not valid unless there is -// a ValidatorsHash (corresponding to the validator set). 
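Since `MakePartSet` above is central to how blocks move around the network, here is a rough sketch of the chunking it performs. This is a simplification under stated assumptions: the real `PartSet` also builds a merkle proof per part, and `partSize` comes from consensus configuration; the helper below only shows the splitting of the serialized bytes.

```go
package main

import "fmt"

// makeParts cuts a serialized block into fixed-size chunks ("parts")
// so peers can gossip it piecewise. Assumes partSize > 0; merkle
// proofs per part are omitted in this sketch.
func makeParts(bz []byte, partSize int) [][]byte {
	var parts [][]byte
	for len(bz) > 0 {
		n := partSize
		if len(bz) < n {
			n = len(bz)
		}
		parts = append(parts, bz[:n])
		bz = bz[n:]
	}
	return parts
}

func main() {
	blockBytes := []byte("length-prefixed block bytes would go here")
	for i, p := range makeParts(blockBytes, 16) {
		fmt.Printf("part %d: %q\n", i, p)
	}
}
```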
-func (h *Header) Hash() cmn.HexBytes { - if h == nil || len(h.ValidatorsHash) == 0 { - return nil - } - return merkle.SimpleHashFromMap(map[string]merkle.Hasher{ - "ChainID": aminoHasher(h.ChainID), - "Height": aminoHasher(h.Height), - "Time": aminoHasher(h.Time), - "NumTxs": aminoHasher(h.NumTxs), - "TotalTxs": aminoHasher(h.TotalTxs), - "LastBlockID": aminoHasher(h.LastBlockID), - "LastCommit": aminoHasher(h.LastCommitHash), - "Data": aminoHasher(h.DataHash), - "Validators": aminoHasher(h.ValidatorsHash), - "App": aminoHasher(h.AppHash), - "Consensus": aminoHasher(h.ConsensusHash), - "Results": aminoHasher(h.LastResultsHash), - "Evidence": aminoHasher(h.EvidenceHash), - }) -} - -// StringIndented returns a string representation of the header -func (h *Header) StringIndented(indent string) string { - if h == nil { - return "nil-Header" - } - return fmt.Sprintf(`Header{ -%s ChainID: %v -%s Height: %v -%s Time: %v -%s NumTxs: %v -%s TotalTxs: %v -%s LastBlockID: %v -%s LastCommit: %v -%s Data: %v -%s Validators: %v -%s App: %v -%s Consensus: %v -%s Results: %v -%s Evidence: %v -%s}#%v`, - indent, h.ChainID, - indent, h.Height, - indent, h.Time, - indent, h.NumTxs, - indent, h.TotalTxs, - indent, h.LastBlockID, - indent, h.LastCommitHash, - indent, h.DataHash, - indent, h.ValidatorsHash, - indent, h.AppHash, - indent, h.ConsensusHash, - indent, h.LastResultsHash, - indent, h.EvidenceHash, - indent, h.Hash()) -} - -//------------------------------------- - -// Commit contains the evidence that a block was committed by a set of validators. -// NOTE: Commit is empty for height 1, but never nil. -type Commit struct { - // NOTE: The Precommits are in order of address to preserve the bonded ValidatorSet order. - // Any peer with a block can gossip precommits by index with a peer without recalculating the - // active ValidatorSet. - BlockID BlockID `json:"block_id"` - Precommits []*Vote `json:"precommits"` - - // Volatile - firstPrecommit *Vote - hash cmn.HexBytes - bitArray *cmn.BitArray -} - -// FirstPrecommit returns the first non-nil precommit in the commit. -// If all precommits are nil, it returns an empty precommit with height 0. 
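The `Hash` implementation above builds a merkle root over named header fields via `SimpleHashFromMap`. As a rough, hedge-labeled approximation (sha256 chaining instead of amino-encoded merkle leaves), the idea is: give every field a stable key, sort the keys, and fold the key/value pairs into one digest so that changing any field changes the header hash.

```go
package main

import (
	"crypto/sha256"
	"fmt"
	"sort"
)

// hashFromMap approximates the SimpleHashFromMap idea: deterministic
// ordering by key, then one digest over all pairs. The real code uses
// amino encoding and a simple merkle tree; this is a stand-in.
func hashFromMap(m map[string][]byte) []byte {
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	sort.Strings(keys)

	h := sha256.New()
	for _, k := range keys {
		h.Write([]byte(k))
		h.Write(m[k])
	}
	return h.Sum(nil)
}

func main() {
	fmt.Printf("%X\n", hashFromMap(map[string][]byte{
		"ChainID": []byte("test-chain"),
		"Height":  []byte{0, 0, 0, 3},
	}))
}
```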
-func (commit *Commit) FirstPrecommit() *Vote { - if len(commit.Precommits) == 0 { - return nil - } - if commit.firstPrecommit != nil { - return commit.firstPrecommit - } - for _, precommit := range commit.Precommits { - if precommit != nil { - commit.firstPrecommit = precommit - return precommit - } - } - return &Vote{ - Type: VoteTypePrecommit, - } -} - -// Height returns the height of the commit -func (commit *Commit) Height() int64 { - if len(commit.Precommits) == 0 { - return 0 - } - return commit.FirstPrecommit().Height -} - -// Round returns the round of the commit -func (commit *Commit) Round() int { - if len(commit.Precommits) == 0 { - return 0 - } - return commit.FirstPrecommit().Round -} - -// Type returns the vote type of the commit, which is always VoteTypePrecommit -func (commit *Commit) Type() byte { - return VoteTypePrecommit -} - -// Size returns the number of votes in the commit -func (commit *Commit) Size() int { - if commit == nil { - return 0 - } - return len(commit.Precommits) -} - -// BitArray returns a BitArray of which validators voted in this commit -func (commit *Commit) BitArray() *cmn.BitArray { - if commit.bitArray == nil { - commit.bitArray = cmn.NewBitArray(len(commit.Precommits)) - for i, precommit := range commit.Precommits { - // TODO: need to check the BlockID otherwise we could be counting conflicts, - // not just the one with +2/3 ! - commit.bitArray.SetIndex(i, precommit != nil) - } - } - return commit.bitArray -} - -// GetByIndex returns the vote corresponding to a given validator index -func (commit *Commit) GetByIndex(index int) *Vote { - return commit.Precommits[index] -} - -// IsCommit returns true if there is at least one vote -func (commit *Commit) IsCommit() bool { - return len(commit.Precommits) != 0 -} - -// ValidateBasic performs basic validation that doesn't involve state data. -func (commit *Commit) ValidateBasic() error { - if commit.BlockID.IsZero() { - return errors.New("Commit cannot be for nil block") - } - if len(commit.Precommits) == 0 { - return errors.New("No precommits in commit") - } - height, round := commit.Height(), commit.Round() - - // validate the precommits - for _, precommit := range commit.Precommits { - // It's OK for precommits to be missing. - if precommit == nil { - continue - } - // Ensure that all votes are precommits - if precommit.Type != VoteTypePrecommit { - return fmt.Errorf("Invalid commit vote. Expected precommit, got %v", - precommit.Type) - } - // Ensure that all heights are the same - if precommit.Height != height { - return fmt.Errorf("Invalid commit precommit height. Expected %v, got %v", - height, precommit.Height) - } - // Ensure that all rounds are the same - if precommit.Round != round { - return fmt.Errorf("Invalid commit precommit round. 
Expected %v, got %v", - round, precommit.Round) - } - } - return nil -} - -// Hash returns the hash of the commit -func (commit *Commit) Hash() cmn.HexBytes { - if commit.hash == nil { - bs := make([]merkle.Hasher, len(commit.Precommits)) - for i, precommit := range commit.Precommits { - bs[i] = aminoHasher(precommit) - } - commit.hash = merkle.SimpleHashFromHashers(bs) - } - return commit.hash -} - -// StringIndented returns a string representation of the commit -func (commit *Commit) StringIndented(indent string) string { - if commit == nil { - return "nil-Commit" - } - precommitStrings := make([]string, len(commit.Precommits)) - for i, precommit := range commit.Precommits { - precommitStrings[i] = precommit.String() - } - return fmt.Sprintf(`Commit{ -%s BlockID: %v -%s Precommits: %v -%s}#%v`, - indent, commit.BlockID, - indent, strings.Join(precommitStrings, "\n"+indent+" "), - indent, commit.hash) -} - -//----------------------------------------------------------------------------- - -// SignedHeader is a header along with the commits that prove it -type SignedHeader struct { - Header *Header `json:"header"` - Commit *Commit `json:"commit"` -} - -//----------------------------------------------------------------------------- - -// Data contains the set of transactions included in the block -type Data struct { - - // Txs that will be applied by state @ block.Height+1. - // NOTE: not all txs here are valid. We're just agreeing on the order first. - // This means that block.AppHash does not include these txs. - Txs Txs `json:"txs"` - - // Volatile - hash cmn.HexBytes -} - -// Hash returns the hash of the data -func (data *Data) Hash() cmn.HexBytes { - if data == nil { - return (Txs{}).Hash() - } - if data.hash == nil { - data.hash = data.Txs.Hash() // NOTE: leaves of merkle tree are TxIDs - } - return data.hash -} - -// StringIndented returns a string representation of the transactions -func (data *Data) StringIndented(indent string) string { - if data == nil { - return "nil-Data" - } - txStrings := make([]string, cmn.MinInt(len(data.Txs), 21)) - for i, tx := range data.Txs { - if i == 20 { - txStrings[i] = fmt.Sprintf("... (%v total)", len(data.Txs)) - break - } - txStrings[i] = fmt.Sprintf("Tx:%v", tx) - } - return fmt.Sprintf(`Data{ -%s %v -%s}#%v`, - indent, strings.Join(txStrings, "\n"+indent+" "), - indent, data.hash) -} - -//----------------------------------------------------------------------------- - -// EvidenceData contains any evidence of malicious wrong-doing by validators -type EvidenceData struct { - Evidence EvidenceList `json:"evidence"` - - // Volatile - hash cmn.HexBytes -} - -// Hash returns the hash of the data. -func (data *EvidenceData) Hash() cmn.HexBytes { - if data.hash == nil { - data.hash = data.Evidence.Hash() - } - return data.hash -} - -// StringIndented returns a string representation of the evidence. -func (data *EvidenceData) StringIndented(indent string) string { - if data == nil { - return "nil-Evidence" - } - evStrings := make([]string, cmn.MinInt(len(data.Evidence), 21)) - for i, ev := range data.Evidence { - if i == 20 { - evStrings[i] = fmt.Sprintf("... 
(%v total)", len(data.Evidence)) - break - } - evStrings[i] = fmt.Sprintf("Evidence:%v", ev) - } - return fmt.Sprintf(`EvidenceData{ -%s %v -%s}#%v`, - indent, strings.Join(evStrings, "\n"+indent+" "), - indent, data.hash) -} - -//-------------------------------------------------------------------------------- - -// BlockID defines the unique ID of a block as its Hash and its PartSetHeader -type BlockID struct { - Hash cmn.HexBytes `json:"hash"` - PartsHeader PartSetHeader `json:"parts"` -} - -// IsZero returns true if this is the BlockID for a nil-block -func (blockID BlockID) IsZero() bool { - return len(blockID.Hash) == 0 && blockID.PartsHeader.IsZero() -} - -// Equals returns true if the BlockID matches the given BlockID -func (blockID BlockID) Equals(other BlockID) bool { - return bytes.Equal(blockID.Hash, other.Hash) && - blockID.PartsHeader.Equals(other.PartsHeader) -} - -// Key returns a machine-readable string representation of the BlockID -func (blockID BlockID) Key() string { - bz, err := cdc.MarshalBinaryBare(blockID.PartsHeader) - if err != nil { - panic(err) - } - return string(blockID.Hash) + string(bz) -} - -// String returns a human-readable string representation of the BlockID -func (blockID BlockID) String() string { - return fmt.Sprintf(`%v:%v`, blockID.Hash, blockID.PartsHeader) -} - -//------------------------------------------------------- - -type hasher struct { - item interface{} -} - -func (h hasher) Hash() []byte { - hasher := ripemd160.New() - if h.item != nil && !cmn.IsTypedNil(h.item) && !cmn.IsEmpty(h.item) { - bz, err := cdc.MarshalBinaryBare(h.item) - if err != nil { - panic(err) - } - _, err = hasher.Write(bz) - if err != nil { - panic(err) - } - } - return hasher.Sum(nil) -} - -func aminoHash(item interface{}) []byte { - h := hasher{item} - return h.Hash() -} - -func aminoHasher(item interface{}) merkle.Hasher { - return hasher{item} -} diff --git a/types/block_meta.go b/types/block_meta.go deleted file mode 100644 index 6dd502e4..00000000 --- a/types/block_meta.go +++ /dev/null @@ -1,15 +0,0 @@ -package types - -// BlockMeta contains meta information about a block - namely, its ID and Header. -type BlockMeta struct { - BlockID BlockID `json:"block_id"` // the block hash and partsethash - Header *Header `json:"header"` // The block's Header -} - -// NewBlockMeta returns a new BlockMeta from the block and its blockParts. 
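A compact sketch of the `BlockID` idea from above, with byte-slice stand-ins for the real `Hash` and `PartSetHeader` types: equality compares both components, and `Key()` concatenates them into a string usable as a map key.

```go
package main

import (
	"bytes"
	"fmt"
)

// blockID is a simplified stand-in: a block is identified by its hash
// plus the header of the part set it was gossiped in.
type blockID struct {
	Hash          []byte
	PartSetHeader []byte
}

// Equals compares both components of the identifier.
func (id blockID) Equals(other blockID) bool {
	return bytes.Equal(id.Hash, other.Hash) &&
		bytes.Equal(id.PartSetHeader, other.PartSetHeader)
}

// Key concatenates the two parts so the pair can index a map.
func (id blockID) Key() string {
	return string(id.Hash) + string(id.PartSetHeader)
}

func main() {
	a := blockID{[]byte{0xAA}, []byte{0x01}}
	b := blockID{[]byte{0xAA}, []byte{0x01}}
	seen := map[string]bool{a.Key(): true}
	fmt.Println(a.Equals(b), seen[b.Key()]) // true true
}
```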
-func NewBlockMeta(block *Block, blockParts *PartSet) *BlockMeta { - return &BlockMeta{ - BlockID: BlockID{block.Hash(), blockParts.Header()}, - Header: block.Header, - } -} diff --git a/types/block_test.go b/types/block_test.go deleted file mode 100644 index 3c2942cd..00000000 --- a/types/block_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestValidateBlock(t *testing.T) { - txs := []Tx{Tx("foo"), Tx("bar")} - lastID := makeBlockIDRandom() - h := int64(3) - - voteSet, _, vals := randVoteSet(h-1, 1, VoteTypePrecommit, 10, 1) - commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals) - require.NoError(t, err) - - block := MakeBlock(h, txs, commit) - require.NotNil(t, block) - - // proper block must pass - err = block.ValidateBasic() - require.NoError(t, err) - - // tamper with NumTxs - block = MakeBlock(h, txs, commit) - block.NumTxs++ - err = block.ValidateBasic() - require.Error(t, err) - - // remove 1/2 the commits - block = MakeBlock(h, txs, commit) - block.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] - block.LastCommit.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with LastCommitHash - block = MakeBlock(h, txs, commit) - block.LastCommitHash = []byte("something else") - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with data - block = MakeBlock(h, txs, commit) - block.Data.Txs[0] = Tx("something else") - block.Data.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with DataHash - block = MakeBlock(h, txs, commit) - block.DataHash = cmn.RandBytes(len(block.DataHash)) - err = block.ValidateBasic() - require.Error(t, err) -} - -func makeBlockIDRandom() BlockID { - blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} - return BlockID{blockHash, blockPartsHeader} -} - -func makeBlockID(hash string, partSetSize int, partSetHash string) BlockID { - return BlockID{ - Hash: []byte(hash), - PartsHeader: PartSetHeader{ - Total: partSetSize, - Hash: []byte(partSetHash), - }, - } - -} - -var nilBytes []byte - -func TestNilHeaderHashDoesntCrash(t *testing.T) { - assert.Equal(t, []byte((*Header)(nil).Hash()), nilBytes) - assert.Equal(t, []byte((new(Header)).Hash()), nilBytes) -} - -func TestNilDataHashDoesntCrash(t *testing.T) { - assert.Equal(t, []byte((*Data)(nil).Hash()), nilBytes) - assert.Equal(t, []byte(new(Data).Hash()), nilBytes) -} diff --git a/types/canonical_json.go b/types/canonical_json.go deleted file mode 100644 index 95ade9c6..00000000 --- a/types/canonical_json.go +++ /dev/null @@ -1,115 +0,0 @@ -package types - -import ( - "time" - - "github.com/tendermint/go-amino" - cmn "github.com/tendermint/tmlibs/common" -) - -// Canonical json is amino's json for structs with fields in alphabetical order - -// TimeFormat is used for generating the sigs -const TimeFormat = amino.RFC3339Millis - -type CanonicalJSONBlockID struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"` -} - -type CanonicalJSONPartSetHeader struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - Total int `json:"total,omitempty"` -} - -type CanonicalJSONProposal struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - 
BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"` - Height int64 `json:"height"` - POLBlockID CanonicalJSONBlockID `json:"pol_block_id"` - POLRound int `json:"pol_round"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` -} - -type CanonicalJSONVote struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockID CanonicalJSONBlockID `json:"block_id"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` - VoteType byte `json:"type"` -} - -type CanonicalJSONHeartbeat struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` -} - -//----------------------------------- -// Canonicalize the structs - -func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID { - return CanonicalJSONBlockID{ - Hash: blockID.Hash, - PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader), - } -} - -func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader { - return CanonicalJSONPartSetHeader{ - psh.Hash, - psh.Total, - } -} - -func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal { - return CanonicalJSONProposal{ - ChainID: chainID, - Type: "proposal", - BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader), - Height: proposal.Height, - Timestamp: CanonicalTime(proposal.Timestamp), - POLBlockID: CanonicalBlockID(proposal.POLBlockID), - POLRound: proposal.POLRound, - Round: proposal.Round, - } -} - -func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote { - return CanonicalJSONVote{ - ChainID: chainID, - Type: "vote", - BlockID: CanonicalBlockID(vote.BlockID), - Height: vote.Height, - Round: vote.Round, - Timestamp: CanonicalTime(vote.Timestamp), - VoteType: vote.Type, - } -} - -func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat { - return CanonicalJSONHeartbeat{ - ChainID: chainID, - Type: "heartbeat", - Height: heartbeat.Height, - Round: heartbeat.Round, - Sequence: heartbeat.Sequence, - ValidatorAddress: heartbeat.ValidatorAddress, - ValidatorIndex: heartbeat.ValidatorIndex, - } -} - -func CanonicalTime(t time.Time) string { - // Note that sending time over amino resets it to - // local time, we need to force UTC here, so the - // signatures match - return t.UTC().Format(TimeFormat) -} diff --git a/types/event_buffer.go b/types/event_buffer.go deleted file mode 100644 index 18b41014..00000000 --- a/types/event_buffer.go +++ /dev/null @@ -1,50 +0,0 @@ -package types - -// Interface assertions -var _ TxEventPublisher = (*TxEventBuffer)(nil) - -// TxEventBuffer is a buffer of events, which uses a slice to temporarily store -// events. -type TxEventBuffer struct { - next TxEventPublisher - capacity int - events []EventDataTx -} - -// NewTxEventBuffer accepts a TxEventPublisher and returns a new buffer with the given -// capacity. -func NewTxEventBuffer(next TxEventPublisher, capacity int) *TxEventBuffer { - return &TxEventBuffer{ - next: next, - capacity: capacity, - events: make([]EventDataTx, 0, capacity), - } -} - -// Len returns the number of events cached. -func (b TxEventBuffer) Len() int { - return len(b.events) -} - -// PublishEventTx buffers an event to be fired upon finality. 
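`CanonicalTime` above is worth a tiny worked example. Assuming a layout close to amino's `RFC3339Millis` (the exact constant lives in go-amino), forcing UTC plus one fixed format guarantees that two machines in different time zones produce identical sign bytes for the same instant.

```go
package main

import (
	"fmt"
	"time"
)

// timeFormat approximates amino's RFC3339Millis layout; it is an
// assumption in this sketch, not the imported constant.
const timeFormat = "2006-01-02T15:04:05.000Z"

// canonicalTime forces UTC and one fixed layout so sign bytes match
// across machines regardless of local time zone.
func canonicalTime(t time.Time) string {
	return t.UTC().Format(timeFormat)
}

func main() {
	local := time.Date(2018, 6, 6, 12, 0, 0, 0, time.FixedZone("X", 3*3600))
	fmt.Println(canonicalTime(local)) // 2018-06-06T09:00:00.000Z
}
```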
-func (b *TxEventBuffer) PublishEventTx(e EventDataTx) error { - b.events = append(b.events, e) - return nil -} - -// Flush publishes events by running next.PublishEventTx on all cached events. -// Blocks. Clears cached events. -func (b *TxEventBuffer) Flush() error { - for _, e := range b.events { - err := b.next.PublishEventTx(e) - if err != nil { - return err - } - } - - // Clear out the elements and set the length to 0 - // but maintain the underlying slice's capacity. - // See Issue https://github.com/tendermint/tendermint/issues/1189 - b.events = b.events[:0] - return nil -} diff --git a/types/event_buffer_test.go b/types/event_buffer_test.go deleted file mode 100644 index 74ae9da2..00000000 --- a/types/event_buffer_test.go +++ /dev/null @@ -1,21 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -type eventBusMock struct{} - -func (eventBusMock) PublishEventTx(e EventDataTx) error { - return nil -} - -func TestEventBuffer(t *testing.T) { - b := NewTxEventBuffer(eventBusMock{}, 1) - b.PublishEventTx(EventDataTx{}) - assert.Equal(t, 1, b.Len()) - b.Flush() - assert.Equal(t, 0, b.Len()) -} diff --git a/types/event_bus.go b/types/event_bus.go deleted file mode 100644 index cb4b17d5..00000000 --- a/types/event_bus.go +++ /dev/null @@ -1,167 +0,0 @@ -package types - -import ( - "context" - "fmt" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/log" -) - -const defaultCapacity = 0 - -type EventBusSubscriber interface { - Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error - Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error - UnsubscribeAll(ctx context.Context, subscriber string) error -} - -// EventBus is a common bus for all events going through the system. All calls -// are proxied to the underlying pubsub server. All events must be published using -// EventBus to ensure correct data types. -type EventBus struct { - cmn.BaseService - pubsub *tmpubsub.Server -} - -// NewEventBus returns a new event bus. -func NewEventBus() *EventBus { - return NewEventBusWithBufferCapacity(defaultCapacity) -} - -// NewEventBusWithBufferCapacity returns a new event bus with the given buffer capacity. 
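The `Flush` implementation above relies on Go's slice-truncation trick to avoid reallocating the buffer every block. A stripped-down sketch, with a plain string payload standing in for `EventDataTx`:

```go
package main

import "fmt"

// buffer collects events in a slice; flush hands them to the next
// publisher and truncates with s = s[:0], so the backing array (and
// its capacity) is reused across flushes.
type buffer struct {
	events []string
}

func (b *buffer) publish(e string) { b.events = append(b.events, e) }

func (b *buffer) flush(next func(string)) {
	for _, e := range b.events {
		next(e)
	}
	b.events = b.events[:0] // keep capacity, drop length
}

func main() {
	b := &buffer{events: make([]string, 0, 8)}
	b.publish("tx1")
	b.publish("tx2")
	b.flush(func(e string) { fmt.Println("fired:", e) })
	fmt.Println(len(b.events), cap(b.events)) // 0 8
}
```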
-func NewEventBusWithBufferCapacity(cap int) *EventBus { - // capacity could be exposed later if needed - pubsub := tmpubsub.NewServer(tmpubsub.BufferCapacity(cap)) - b := &EventBus{pubsub: pubsub} - b.BaseService = *cmn.NewBaseService(nil, "EventBus", b) - return b -} - -func (b *EventBus) SetLogger(l log.Logger) { - b.BaseService.SetLogger(l) - b.pubsub.SetLogger(l.With("module", "pubsub")) -} - -func (b *EventBus) OnStart() error { - return b.pubsub.OnStart() -} - -func (b *EventBus) OnStop() { - b.pubsub.OnStop() -} - -func (b *EventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { - return b.pubsub.Subscribe(ctx, subscriber, query, out) -} - -func (b *EventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - return b.pubsub.Unsubscribe(ctx, subscriber, query) -} - -func (b *EventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return b.pubsub.UnsubscribeAll(ctx, subscriber) -} - -func (b *EventBus) Publish(eventType string, eventData TMEventData) error { - // no explicit deadline for publishing events - ctx := context.Background() - b.pubsub.PublishWithTags(ctx, eventData, tmpubsub.NewTagMap(map[string]string{EventTypeKey: eventType})) - return nil -} - -//--- block, tx, and vote events - -func (b *EventBus) PublishEventNewBlock(event EventDataNewBlock) error { - return b.Publish(EventNewBlock, event) -} - -func (b *EventBus) PublishEventNewBlockHeader(event EventDataNewBlockHeader) error { - return b.Publish(EventNewBlockHeader, event) -} - -func (b *EventBus) PublishEventVote(event EventDataVote) error { - return b.Publish(EventVote, event) -} - -// PublishEventTx publishes tx event with tags from Result. Note it will add -// predefined tags (EventTypeKey, TxHashKey). Existing tags with the same names -// will be overwritten. 
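Before the `PublishEventTx` implementation below, a toy model of the bus's subscribe/publish flow may help. This stand-in keys subscriptions by event type only; the real `EventBus` delegates to the tmpubsub server, which matches arbitrary tag queries.

```go
package main

import "fmt"

// bus is a toy fan-out: subscribers register a channel per event type
// and publish delivers to every matching channel. Query matching and
// contexts from the real pubsub server are omitted.
type bus struct {
	subs map[string][]chan string
}

func newBus() *bus { return &bus{subs: make(map[string][]chan string)} }

func (b *bus) subscribe(eventType string) <-chan string {
	ch := make(chan string, 1)
	b.subs[eventType] = append(b.subs[eventType], ch)
	return ch
}

func (b *bus) publish(eventType, data string) {
	for _, ch := range b.subs[eventType] {
		ch <- data
	}
}

func main() {
	b := newBus()
	ch := b.subscribe("NewBlock")
	b.publish("NewBlock", "block at height 1")
	fmt.Println(<-ch)
}
```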
-func (b *EventBus) PublishEventTx(event EventDataTx) error { - // no explicit deadline for publishing events - ctx := context.Background() - - tags := make(map[string]string) - - // validate and fill tags from tx result - for _, tag := range event.Result.Tags { - // basic validation - if len(tag.Key) == 0 { - b.Logger.Info("Got tag with an empty key (skipping)", "tag", tag, "tx", event.Tx) - continue - } - tags[string(tag.Key)] = string(tag.Value) - } - - // add predefined tags - logIfTagExists(EventTypeKey, tags, b.Logger) - tags[EventTypeKey] = EventTx - - logIfTagExists(TxHashKey, tags, b.Logger) - tags[TxHashKey] = fmt.Sprintf("%X", event.Tx.Hash()) - - logIfTagExists(TxHeightKey, tags, b.Logger) - tags[TxHeightKey] = fmt.Sprintf("%d", event.Height) - - b.pubsub.PublishWithTags(ctx, event, tmpubsub.NewTagMap(tags)) - return nil -} - -func (b *EventBus) PublishEventProposalHeartbeat(event EventDataProposalHeartbeat) error { - return b.Publish(EventProposalHeartbeat, event) -} - -//--- EventDataRoundState events - -func (b *EventBus) PublishEventNewRoundStep(event EventDataRoundState) error { - return b.Publish(EventNewRoundStep, event) -} - -func (b *EventBus) PublishEventTimeoutPropose(event EventDataRoundState) error { - return b.Publish(EventTimeoutPropose, event) -} - -func (b *EventBus) PublishEventTimeoutWait(event EventDataRoundState) error { - return b.Publish(EventTimeoutWait, event) -} - -func (b *EventBus) PublishEventNewRound(event EventDataRoundState) error { - return b.Publish(EventNewRound, event) -} - -func (b *EventBus) PublishEventCompleteProposal(event EventDataRoundState) error { - return b.Publish(EventCompleteProposal, event) -} - -func (b *EventBus) PublishEventPolka(event EventDataRoundState) error { - return b.Publish(EventPolka, event) -} - -func (b *EventBus) PublishEventUnlock(event EventDataRoundState) error { - return b.Publish(EventUnlock, event) -} - -func (b *EventBus) PublishEventRelock(event EventDataRoundState) error { - return b.Publish(EventRelock, event) -} - -func (b *EventBus) PublishEventLock(event EventDataRoundState) error { - return b.Publish(EventLock, event) -} - -func logIfTagExists(tag string, tags map[string]string, logger log.Logger) { - if value, ok := tags[tag]; ok { - logger.Error("Found predefined tag (value will be overwritten)", "tag", tag, "value", value) - } -} diff --git a/types/event_bus_test.go b/types/event_bus_test.go deleted file mode 100644 index d9b0995b..00000000 --- a/types/event_bus_test.go +++ /dev/null @@ -1,171 +0,0 @@ -package types - -import ( - "context" - "fmt" - "math/rand" - "testing" - "time" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - abci "github.com/tendermint/abci/types" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestEventBusPublishEventTx(t *testing.T) { - eventBus := NewEventBus() - err := eventBus.Start() - require.NoError(t, err) - defer eventBus.Stop() - - tx := Tx("foo") - result := abci.ResponseDeliverTx{Data: []byte("bar"), Tags: []cmn.KVPair{{[]byte("baz"), []byte("1")}}, Fee: cmn.KI64Pair{Key: []uint8{}, Value: 0}} - - txEventsCh := make(chan interface{}) - - // PublishEventTx adds all these 3 tags, so the query below should work - query := fmt.Sprintf("tm.event='Tx' AND tx.height=1 AND tx.hash='%X' AND baz=1", tx.Hash()) - err = eventBus.Subscribe(context.Background(), "test", tmquery.MustParse(query), txEventsCh) - 
require.NoError(t, err) - - done := make(chan struct{}) - go func() { - for e := range txEventsCh { - edt := e.(EventDataTx) - assert.Equal(t, int64(1), edt.Height) - assert.Equal(t, uint32(0), edt.Index) - assert.Equal(t, tx, edt.Tx) - assert.Equal(t, result, edt.Result) - close(done) - } - }() - - err = eventBus.PublishEventTx(EventDataTx{TxResult{ - Height: 1, - Index: 0, - Tx: tx, - Result: result, - }}) - assert.NoError(t, err) - - select { - case <-done: - case <-time.After(1 * time.Second): - t.Fatal("did not receive a transaction after 1 sec.") - } -} - -func BenchmarkEventBus(b *testing.B) { - benchmarks := []struct { - name string - numClients int - randQueries bool - randEvents bool - }{ - {"10Clients1Query1Event", 10, false, false}, - {"100Clients", 100, false, false}, - {"1000Clients", 1000, false, false}, - - {"10ClientsRandQueries1Event", 10, true, false}, - {"100Clients", 100, true, false}, - {"1000Clients", 1000, true, false}, - - {"10ClientsRandQueriesRandEvents", 10, true, true}, - {"100Clients", 100, true, true}, - {"1000Clients", 1000, true, true}, - - {"10Clients1QueryRandEvents", 10, false, true}, - {"100Clients", 100, false, true}, - {"1000Clients", 1000, false, true}, - } - - for _, bm := range benchmarks { - b.Run(bm.name, func(b *testing.B) { - benchmarkEventBus(bm.numClients, bm.randQueries, bm.randEvents, b) - }) - } -} - -func benchmarkEventBus(numClients int, randQueries bool, randEvents bool, b *testing.B) { - // for random* functions - rand.Seed(time.Now().Unix()) - - eventBus := NewEventBusWithBufferCapacity(0) // set buffer capacity to 0 so we are not testing cache - eventBus.Start() - defer eventBus.Stop() - - ctx := context.Background() - q := EventQueryNewBlock - - for i := 0; i < numClients; i++ { - ch := make(chan interface{}) - go func() { - for range ch { - } - }() - if randQueries { - q = randQuery() - } - eventBus.Subscribe(ctx, fmt.Sprintf("client-%d", i), q, ch) - } - - eventType := EventNewBlock - - b.ReportAllocs() - b.ResetTimer() - for i := 0; i < b.N; i++ { - if randEvents { - eventType = randEvent() - } - - eventBus.Publish(eventType, EventDataString("Gamora")) - } -} - -var events = []string{EventBond, - EventUnbond, - EventRebond, - EventDupeout, - EventFork, - EventNewBlock, - EventNewBlockHeader, - EventNewRound, - EventNewRoundStep, - EventTimeoutPropose, - EventCompleteProposal, - EventPolka, - EventUnlock, - EventLock, - EventRelock, - EventTimeoutWait, - EventVote} - -func randEvent() string { - return events[rand.Intn(len(events))] -} - -var queries = []tmpubsub.Query{EventQueryBond, - EventQueryUnbond, - EventQueryRebond, - EventQueryDupeout, - EventQueryFork, - EventQueryNewBlock, - EventQueryNewBlockHeader, - EventQueryNewRound, - EventQueryNewRoundStep, - EventQueryTimeoutPropose, - EventQueryCompleteProposal, - EventQueryPolka, - EventQueryUnlock, - EventQueryLock, - EventQueryRelock, - EventQueryTimeoutWait, - EventQueryVote} - -func randQuery() tmpubsub.Query { - return queries[rand.Intn(len(queries))] -} diff --git a/types/events.go b/types/events.go deleted file mode 100644 index 2b87297c..00000000 --- a/types/events.go +++ /dev/null @@ -1,154 +0,0 @@ -package types - -import ( - "fmt" - - amino "github.com/tendermint/go-amino" - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" - tmquery "github.com/tendermint/tendermint/libs/pubsub/query" -) - -// Reserved event types -const ( - EventBond = "Bond" - EventCompleteProposal = "CompleteProposal" - EventDupeout = "Dupeout" - EventFork = "Fork" - EventLock = "Lock" - 
EventNewBlock = "NewBlock" - EventNewBlockHeader = "NewBlockHeader" - EventNewRound = "NewRound" - EventNewRoundStep = "NewRoundStep" - EventPolka = "Polka" - EventRebond = "Rebond" - EventRelock = "Relock" - EventTimeoutPropose = "TimeoutPropose" - EventTimeoutWait = "TimeoutWait" - EventTx = "Tx" - EventUnbond = "Unbond" - EventUnlock = "Unlock" - EventVote = "Vote" - EventProposalHeartbeat = "ProposalHeartbeat" -) - -/////////////////////////////////////////////////////////////////////////////// -// ENCODING / DECODING -/////////////////////////////////////////////////////////////////////////////// - -// implements events.EventData -type TMEventData interface { - AssertIsTMEventData() - // empty interface -} - -func (_ EventDataNewBlock) AssertIsTMEventData() {} -func (_ EventDataNewBlockHeader) AssertIsTMEventData() {} -func (_ EventDataTx) AssertIsTMEventData() {} -func (_ EventDataRoundState) AssertIsTMEventData() {} -func (_ EventDataVote) AssertIsTMEventData() {} -func (_ EventDataProposalHeartbeat) AssertIsTMEventData() {} -func (_ EventDataString) AssertIsTMEventData() {} - -func RegisterEventDatas(cdc *amino.Codec) { - cdc.RegisterInterface((*TMEventData)(nil), nil) - cdc.RegisterConcrete(EventDataNewBlock{}, "tendermint/event/NewBlock", nil) - cdc.RegisterConcrete(EventDataNewBlockHeader{}, "tendermint/event/NewBlockHeader", nil) - cdc.RegisterConcrete(EventDataTx{}, "tendermint/event/Tx", nil) - cdc.RegisterConcrete(EventDataRoundState{}, "tendermint/event/RoundState", nil) - cdc.RegisterConcrete(EventDataVote{}, "tendermint/event/Vote", nil) - cdc.RegisterConcrete(EventDataProposalHeartbeat{}, "tendermint/event/ProposalHeartbeat", nil) - cdc.RegisterConcrete(EventDataString(""), "tendermint/event/ProposalString", nil) -} - -// Most event messages are basic types (a block, a transaction) -// but some (an input to a call tx or a receive) are more exotic - -type EventDataNewBlock struct { - Block *Block `json:"block"` -} - -// light weight event for benchmarking -type EventDataNewBlockHeader struct { - Header *Header `json:"header"` -} - -// All txs fire EventDataTx -type EventDataTx struct { - TxResult -} - -type EventDataProposalHeartbeat struct { - Heartbeat *Heartbeat -} - -// NOTE: This goes into the replay WAL -type EventDataRoundState struct { - Height int64 `json:"height"` - Round int `json:"round"` - Step string `json:"step"` - - // private, not exposed to websockets - RoundState interface{} `json:"-"` -} - -type EventDataVote struct { - Vote *Vote -} - -type EventDataString string - -/////////////////////////////////////////////////////////////////////////////// -// PUBSUB -/////////////////////////////////////////////////////////////////////////////// - -const ( - // EventTypeKey is a reserved key, used to specify event type in tags. - EventTypeKey = "tm.event" - // TxHashKey is a reserved key, used to specify transaction's hash. - // see EventBus#PublishEventTx - TxHashKey = "tx.hash" - // TxHeightKey is a reserved key, used to specify transaction block's height. 
- // see EventBus#PublishEventTx - TxHeightKey = "tx.height" -) - -var ( - EventQueryBond = QueryForEvent(EventBond) - EventQueryUnbond = QueryForEvent(EventUnbond) - EventQueryRebond = QueryForEvent(EventRebond) - EventQueryDupeout = QueryForEvent(EventDupeout) - EventQueryFork = QueryForEvent(EventFork) - EventQueryNewBlock = QueryForEvent(EventNewBlock) - EventQueryNewBlockHeader = QueryForEvent(EventNewBlockHeader) - EventQueryNewRound = QueryForEvent(EventNewRound) - EventQueryNewRoundStep = QueryForEvent(EventNewRoundStep) - EventQueryTimeoutPropose = QueryForEvent(EventTimeoutPropose) - EventQueryCompleteProposal = QueryForEvent(EventCompleteProposal) - EventQueryPolka = QueryForEvent(EventPolka) - EventQueryUnlock = QueryForEvent(EventUnlock) - EventQueryLock = QueryForEvent(EventLock) - EventQueryRelock = QueryForEvent(EventRelock) - EventQueryTimeoutWait = QueryForEvent(EventTimeoutWait) - EventQueryVote = QueryForEvent(EventVote) - EventQueryProposalHeartbeat = QueryForEvent(EventProposalHeartbeat) - EventQueryTx = QueryForEvent(EventTx) -) - -func EventQueryTxFor(tx Tx) tmpubsub.Query { - return tmquery.MustParse(fmt.Sprintf("%s='%s' AND %s='%X'", EventTypeKey, EventTx, TxHashKey, tx.Hash())) -} - -func QueryForEvent(eventType string) tmpubsub.Query { - return tmquery.MustParse(fmt.Sprintf("%s='%s'", EventTypeKey, eventType)) -} - -// BlockEventPublisher publishes all block related events -type BlockEventPublisher interface { - PublishEventNewBlock(block EventDataNewBlock) error - PublishEventNewBlockHeader(header EventDataNewBlockHeader) error - PublishEventTx(EventDataTx) error -} - -type TxEventPublisher interface { - PublishEventTx(EventDataTx) error -} diff --git a/types/evidence.go b/types/evidence.go deleted file mode 100644 index 10907869..00000000 --- a/types/evidence.go +++ /dev/null @@ -1,212 +0,0 @@ -package types - -import ( - "bytes" - "fmt" - - "github.com/tendermint/go-amino" - "github.com/tendermint/go-crypto" - "github.com/tendermint/tmlibs/merkle" -) - -// ErrEvidenceInvalid wraps a piece of evidence and the error denoting how or why it is invalid. -type ErrEvidenceInvalid struct { - Evidence Evidence - ErrorValue error -} - -func NewEvidenceInvalidErr(ev Evidence, err error) *ErrEvidenceInvalid { - return &ErrEvidenceInvalid{ev, err} -} - -// Error returns a string representation of the error. -func (err *ErrEvidenceInvalid) Error() string { - return fmt.Sprintf("Invalid evidence: %v. Evidence: %v", err.ErrorValue, err.Evidence) -} - -//------------------------------------------- - -// Evidence represents any provable malicious activity by a validator -type Evidence interface { - Height() int64 // height of the equivocation - Address() []byte // address of the equivocating validator - Hash() []byte // hash of the evidence - Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence - Equal(Evidence) bool // check equality of evidence - - String() string -} - -func RegisterEvidences(cdc *amino.Codec) { - cdc.RegisterInterface((*Evidence)(nil), nil) - cdc.RegisterConcrete(&DuplicateVoteEvidence{}, "tendermint/DuplicateVoteEvidence", nil) -} - -//------------------------------------------- - -// DuplicateVoteEvidence contains evidence a validator signed two conflicting votes. -type DuplicateVoteEvidence struct { - PubKey crypto.PubKey - VoteA *Vote - VoteB *Vote -} - -// String returns a string representation of the evidence. 
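The query helpers above boil down to string templates over reserved tag keys. A sketch of just the string-building side, with the parsing into a `tmpubsub.Query` (done by `tmquery.MustParse` in the real code) omitted:

```go
package main

import "fmt"

// Reserved tag keys, mirroring the constants above.
const (
	eventTypeKey = "tm.event"
	txHashKey    = "tx.hash"
)

// queryForEvent matches every event of one type.
func queryForEvent(eventType string) string {
	return fmt.Sprintf("%s='%s'", eventTypeKey, eventType)
}

// queryForTx matches the Tx event for one specific transaction hash.
func queryForTx(txHash []byte) string {
	return fmt.Sprintf("%s='Tx' AND %s='%X'", eventTypeKey, txHashKey, txHash)
}

func main() {
	fmt.Println(queryForEvent("NewBlock"))      // tm.event='NewBlock'
	fmt.Println(queryForTx([]byte{0xDE, 0xAD})) // tm.event='Tx' AND tx.hash='DEAD'
}
```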
-func (dve *DuplicateVoteEvidence) String() string {
-  return fmt.Sprintf("VoteA: %v; VoteB: %v", dve.VoteA, dve.VoteB)
-}
-
-// Height returns the height this evidence refers to.
-func (dve *DuplicateVoteEvidence) Height() int64 {
-  return dve.VoteA.Height
-}
-
-// Address returns the address of the validator.
-func (dve *DuplicateVoteEvidence) Address() []byte {
-  return dve.PubKey.Address()
-}
-
-// Hash returns the hash of the evidence.
-func (dve *DuplicateVoteEvidence) Hash() []byte {
-  return aminoHasher(dve).Hash()
-}
-
-// Verify returns an error if the two votes aren't conflicting.
-// To be conflicting, they must be from the same validator, for the same H/R/S, but for different blocks.
-func (dve *DuplicateVoteEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
-  // H/R/S must be the same
-  if dve.VoteA.Height != dve.VoteB.Height ||
-    dve.VoteA.Round != dve.VoteB.Round ||
-    dve.VoteA.Type != dve.VoteB.Type {
-    return fmt.Errorf("DuplicateVoteEvidence Error: H/R/S does not match. Got %v and %v", dve.VoteA, dve.VoteB)
-  }
-
-  // Address must be the same
-  if !bytes.Equal(dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress) {
-    return fmt.Errorf("DuplicateVoteEvidence Error: Validator addresses do not match. Got %X and %X", dve.VoteA.ValidatorAddress, dve.VoteB.ValidatorAddress)
-  }
-
-  // Index must be the same
-  if dve.VoteA.ValidatorIndex != dve.VoteB.ValidatorIndex {
-    return fmt.Errorf("DuplicateVoteEvidence Error: Validator indices do not match. Got %d and %d", dve.VoteA.ValidatorIndex, dve.VoteB.ValidatorIndex)
-  }
-
-  // BlockIDs must be different
-  if dve.VoteA.BlockID.Equals(dve.VoteB.BlockID) {
-    return fmt.Errorf("DuplicateVoteEvidence Error: BlockIDs are the same (%v) - not a real duplicate vote", dve.VoteA.BlockID)
-  }
-
-  // pubkey must match address (this should already be true, sanity check)
-  addr := dve.VoteA.ValidatorAddress
-  if !bytes.Equal(pubKey.Address(), addr) {
-    return fmt.Errorf("DuplicateVoteEvidence FAILED SANITY CHECK - address (%X) doesn't match pubkey (%v - %X)",
-      addr, pubKey, pubKey.Address())
-  }
-
-  // Signatures must be valid
-  if !pubKey.VerifyBytes(dve.VoteA.SignBytes(chainID), dve.VoteA.Signature) {
-    return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteA: %v", ErrVoteInvalidSignature)
-  }
-  if !pubKey.VerifyBytes(dve.VoteB.SignBytes(chainID), dve.VoteB.Signature) {
-    return fmt.Errorf("DuplicateVoteEvidence Error verifying VoteB: %v", ErrVoteInvalidSignature)
-  }
-
-  return nil
-}
-
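To make the Verify rules concrete, here is an editorial usage sketch (not from the deleted file). It leans on Vote from this package and on MockPV, which is defined in types/priv_validator.go further down in this diff:

func exampleDuplicateVoteEvidence() {
  val := NewMockPV()
  chainID := "example-chain"

  // Same validator, same height/round/type, different blocks:
  // exactly the combination Verify accepts as a duplicate vote.
  voteA := &Vote{ValidatorAddress: val.GetAddress(), Height: 10, Round: 2, Type: VoteTypePrevote, BlockID: BlockID{Hash: []byte("block-a")}}
  voteB := &Vote{ValidatorAddress: val.GetAddress(), Height: 10, Round: 2, Type: VoteTypePrevote, BlockID: BlockID{Hash: []byte("block-b")}}
  if err := val.SignVote(chainID, voteA); err != nil {
    panic(err)
  }
  if err := val.SignVote(chainID, voteB); err != nil {
    panic(err)
  }

  ev := &DuplicateVoteEvidence{PubKey: val.GetPubKey(), VoteA: voteA, VoteB: voteB}
  if err := ev.Verify(chainID, val.GetPubKey()); err != nil {
    panic(err) // not reached: the two votes genuinely conflict
  }
}

Changing either BlockID so the two match would instead trip the "BlockIDs are the same" branch above.

-// Equal checks if two pieces of evidence are equal.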
-func (dve *DuplicateVoteEvidence) Equal(ev Evidence) bool {
-  if _, ok := ev.(*DuplicateVoteEvidence); !ok {
-    return false
-  }
-
-  // just check their hashes
-  dveHash := aminoHasher(dve).Hash()
-  evHash := aminoHasher(ev).Hash()
-  return bytes.Equal(dveHash, evHash)
-}
-
-//-----------------------------------------------------------------
-
-// UNSTABLE
-type MockGoodEvidence struct {
-  Height_ int64
-  Address_ []byte
-}
-
-// UNSTABLE
-func NewMockGoodEvidence(height int64, _ int, address []byte) MockGoodEvidence {
-  // the validator-index argument is accepted for call-site compatibility but unused
-  return MockGoodEvidence{height, address}
-}
-
-func (e MockGoodEvidence) Height() int64 { return e.Height_ }
-func (e MockGoodEvidence) Address() []byte { return e.Address_ }
-func (e MockGoodEvidence) Hash() []byte {
-  return []byte(fmt.Sprintf("%d-%x", e.Height_, e.Address_))
-}
-func (e MockGoodEvidence) Verify(chainID string, pubKey crypto.PubKey) error { return nil }
-func (e MockGoodEvidence) Equal(ev Evidence) bool {
-  e2 := ev.(MockGoodEvidence)
-  return e.Height_ == e2.Height_ &&
-    bytes.Equal(e.Address_, e2.Address_)
-}
-func (e MockGoodEvidence) String() string {
-  return fmt.Sprintf("GoodEvidence: %d/%s", e.Height_, e.Address_)
-}
-
-// UNSTABLE
-type MockBadEvidence struct {
-  MockGoodEvidence
-}
-
-func (e MockBadEvidence) Verify(chainID string, pubKey crypto.PubKey) error {
-  return fmt.Errorf("MockBadEvidence")
-}
-func (e MockBadEvidence) Equal(ev Evidence) bool {
-  e2 := ev.(MockBadEvidence)
-  return e.Height_ == e2.Height_ &&
-    bytes.Equal(e.Address_, e2.Address_)
-}
-func (e MockBadEvidence) String() string {
-  return fmt.Sprintf("BadEvidence: %d/%s", e.Height_, e.Address_)
-}
-
-//-------------------------------------------
-
-// EvidenceList is a list of Evidence. Evidences is not a word.
-type EvidenceList []Evidence
-
-// Hash returns the simple merkle root hash of the EvidenceList.
-func (evl EvidenceList) Hash() []byte {
-  // Recursive impl.
-  // Copied from tmlibs/merkle to avoid allocations
-  switch len(evl) {
-  case 0:
-    return nil
-  case 1:
-    return evl[0].Hash()
-  default:
-    left := EvidenceList(evl[:(len(evl)+1)/2]).Hash()
-    right := EvidenceList(evl[(len(evl)+1)/2:]).Hash()
-    return merkle.SimpleHashFromTwoHashes(left, right)
-  }
-}
-
-func (evl EvidenceList) String() string {
-  s := ""
-  for _, e := range evl {
-    s += fmt.Sprintf("%s\t\t", e)
-  }
-  return s
-}
-
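The merkle helper is easiest to see with the mocks just defined. An editorial sketch (not from the deleted file); Has is defined immediately below:

func exampleEvidenceList() {
  evl := EvidenceList{
    NewMockGoodEvidence(1, 0, []byte("addr-1")),
    NewMockGoodEvidence(2, 0, []byte("addr-2")),
    NewMockGoodEvidence(3, 0, []byte("addr-3")),
  }

  // Hash splits the list at (len+1)/2 and recursively combines the two
  // sub-roots, so the root commits to every piece of evidence.
  fmt.Printf("root: %X\n", evl.Hash())

  fmt.Println(evl.Has(evl[0]))                                      // true
  fmt.Println(evl.Has(NewMockGoodEvidence(9, 0, []byte("other")))) // false
}

-// Has returns true if the evidence is in the EvidenceList.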
-func (evl EvidenceList) Has(evidence Evidence) bool {
-  for _, ev := range evl {
-    if ev.Equal(evidence) {
-      return true
-    }
-  }
-  return false
-}
diff --git a/types/evidence_test.go b/types/evidence_test.go
deleted file mode 100644
index 5bbb2a37..00000000
--- a/types/evidence_test.go
+++ /dev/null
@@ -1,74 +0,0 @@
-package types
-
-import (
-  "testing"
-
-  "github.com/stretchr/testify/assert"
-)
-
-type voteData struct {
-  vote1 *Vote
-  vote2 *Vote
-  valid bool
-}
-
-func makeVote(val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID) *Vote {
-  v := &Vote{
-    ValidatorAddress: val.GetAddress(),
-    ValidatorIndex: valIndex,
-    Height: height,
-    Round: round,
-    Type: byte(step),
-    BlockID: blockID,
-  }
-  err := val.SignVote(chainID, v)
-  if err != nil {
-    panic(err)
-  }
-  return v
-}
-
-func TestEvidence(t *testing.T) {
-  val := NewMockPV()
-  val2 := NewMockPV()
-  blockID := makeBlockID("blockhash", 1000, "partshash")
-  blockID2 := makeBlockID("blockhash2", 1000, "partshash")
-  blockID3 := makeBlockID("blockhash", 10000, "partshash")
-  blockID4 := makeBlockID("blockhash", 10000, "partshash2")
-
-  chainID := "mychain"
-
-  vote1 := makeVote(val, chainID, 0, 10, 2, 1, blockID)
-  badVote := makeVote(val, chainID, 0, 10, 2, 1, blockID)
-  err := val2.SignVote(chainID, badVote)
-  if err != nil {
-    panic(err)
-  }
-
-  cases := []voteData{
-    {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids
-    {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID3), true},
-    {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID4), true},
-    {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID), false}, // same block id, so not a duplicate vote
-    {vote1, makeVote(val, "mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id
-    {vote1, makeVote(val, chainID, 1, 10, 2, 1, blockID2), false}, // wrong val index
-    {vote1, makeVote(val, chainID, 0, 11, 2, 1, blockID2), false}, // wrong height
-    {vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round
-    {vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step
-    {vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator
-    {vote1, badVote, false}, // signed by wrong key
-  }
-
-  pubKey := val.GetPubKey()
-  for _, c := range cases {
-    ev := &DuplicateVoteEvidence{
-      VoteA: c.vote1,
-      VoteB: c.vote2,
-    }
-    if c.valid {
-      assert.Nil(t, ev.Verify(chainID, pubKey), "evidence should be valid")
-    } else {
-      assert.NotNil(t, ev.Verify(chainID, pubKey), "evidence should be invalid")
-    }
-  }
-}
diff --git a/types/genesis.go b/types/genesis.go
deleted file mode 100644
index aee8e076..00000000
--- a/types/genesis.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package types
-
-import (
-  "encoding/json"
-  "io/ioutil"
-  "time"
-
-  "github.com/tendermint/go-crypto"
-  cmn "github.com/tendermint/tmlibs/common"
-)
-
-//------------------------------------------------------------
-// core types for a genesis definition
-
-// GenesisValidator is an initial validator.
-type GenesisValidator struct {
-  PubKey crypto.PubKey `json:"pub_key"`
-  Power int64 `json:"power"`
-  Name string `json:"name"`
-}
-
-// GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set.
-type GenesisDoc struct {
-  GenesisTime time.Time `json:"genesis_time"`
-  ChainID string `json:"chain_id"`
-  ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"`
-  Validators []GenesisValidator `json:"validators"`
-  AppHash cmn.HexBytes `json:"app_hash"`
-  AppStateJSON json.RawMessage `json:"app_state,omitempty"`
-  AppOptions json.RawMessage `json:"app_options,omitempty"` // DEPRECATED
-}
-
-// AppState returns raw application state.
-// TODO: replace with AppState field during next breaking release (0.18)
-func (genDoc *GenesisDoc) AppState() json.RawMessage {
-  if len(genDoc.AppOptions) > 0 {
-    return genDoc.AppOptions
-  }
-  return genDoc.AppStateJSON
-}
-
-// SaveAs is a utility method for saving GenesisDoc as a JSON file.
-func (genDoc *GenesisDoc) SaveAs(file string) error {
-  genDocBytes, err := cdc.MarshalJSONIndent(genDoc, "", " ")
-  if err != nil {
-    return err
-  }
-  return cmn.WriteFile(file, genDocBytes, 0644)
-}
-
-// ValidatorHash returns the hash of the validator set contained in the GenesisDoc
-func (genDoc *GenesisDoc) ValidatorHash() []byte {
-  vals := make([]*Validator, len(genDoc.Validators))
-  for i, v := range genDoc.Validators {
-    vals[i] = NewValidator(v.PubKey, v.Power)
-  }
-  vset := NewValidatorSet(vals)
-  return vset.Hash()
-}
-
-// ValidateAndComplete checks that all necessary fields are present
-// and fills in defaults for optional fields left empty
-func (genDoc *GenesisDoc) ValidateAndComplete() error {
-  if genDoc.ChainID == "" {
-    return cmn.NewError("Genesis doc must include non-empty chain_id")
-  }
-
-  if genDoc.ConsensusParams == nil {
-    genDoc.ConsensusParams = DefaultConsensusParams()
-  } else {
-    if err := genDoc.ConsensusParams.Validate(); err != nil {
-      return err
-    }
-  }
-
-  if len(genDoc.Validators) == 0 {
-    return cmn.NewError("The genesis file must have at least one validator")
-  }
-
-  for _, v := range genDoc.Validators {
-    if v.Power == 0 {
-      return cmn.NewError("The genesis file cannot contain validators with no voting power: %v", v)
-    }
-  }
-
-  if genDoc.GenesisTime.IsZero() {
-    genDoc.GenesisTime = time.Now()
-  }
-
-  return nil
-}
-
-//------------------------------------------------------------
-// Make genesis state from file
-
-// GenesisDocFromJSON unmarshals JSON data into a GenesisDoc.
-func GenesisDocFromJSON(jsonBlob []byte) (*GenesisDoc, error) {
-  genDoc := GenesisDoc{}
-  err := cdc.UnmarshalJSON(jsonBlob, &genDoc)
-  if err != nil {
-    return nil, err
-  }
-
-  if err := genDoc.ValidateAndComplete(); err != nil {
-    return nil, err
-  }
-
-  return &genDoc, nil
-}
-
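An editorial sketch of the intended call sequence (not from the deleted file): construct a minimal document, let ValidateAndComplete fill in the defaults, then persist it. crypto.GenPrivKeyEd25519 is the go-crypto generator the tests below also use; the output path is hypothetical.

func exampleGenesisDoc() {
  genDoc := &GenesisDoc{
    ChainID: "example-chain",
    Validators: []GenesisValidator{
      {PubKey: crypto.GenPrivKeyEd25519().PubKey(), Power: 10, Name: "val0"},
    },
  }
  if err := genDoc.ValidateAndComplete(); err != nil {
    panic(err)
  }
  // GenesisTime and ConsensusParams are now populated with defaults.
  if err := genDoc.SaveAs("/tmp/example-genesis.json"); err != nil {
    panic(err)
  }
}

-// GenesisDocFromFile reads JSON data from a file and unmarshals it into a GenesisDoc.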
-func GenesisDocFromFile(genDocFile string) (*GenesisDoc, error) { - jsonBlob, err := ioutil.ReadFile(genDocFile) - if err != nil { - return nil, cmn.ErrorWrap(err, "Couldn't read GenesisDoc file") - } - genDoc, err := GenesisDocFromJSON(jsonBlob) - if err != nil { - return nil, cmn.ErrorWrap(err, cmn.Fmt("Error reading GenesisDoc at %v", genDocFile)) - } - return genDoc, nil -} diff --git a/types/genesis_test.go b/types/genesis_test.go deleted file mode 100644 index bed4b90f..00000000 --- a/types/genesis_test.go +++ /dev/null @@ -1,61 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/tendermint/go-crypto" -) - -func TestGenesisBad(t *testing.T) { - // test some bad ones from raw json - testCases := [][]byte{ - []byte{}, // empty - []byte{1, 1, 1, 1, 1}, // junk - []byte(`{}`), // empty - []byte(`{"chain_id":"mychain"}`), // missing validators - []byte(`{"chain_id":"mychain","validators":[]}`), // missing validators - []byte(`{"chain_id":"mychain","validators":[{}]}`), // missing validators - []byte(`{"chain_id":"mychain","validators":null}`), // missing validators - []byte(`{"chain_id":"mychain"}`), // missing validators - []byte(`{"validators":[{"pub_key":{"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":10,"name":""}]}`), // missing chain_id - } - - for _, testCase := range testCases { - _, err := GenesisDocFromJSON(testCase) - assert.Error(t, err, "expected error for empty genDoc json") - } -} - -func TestGenesisGood(t *testing.T) { - // test a good one by raw json - genDocBytes := []byte(`{"genesis_time":"0001-01-01T00:00:00Z","chain_id":"test-chain-QDKdJr","consensus_params":null,"validators":[{"pub_key":{"type":"AC26791624DE60","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="},"power":10,"name":""}],"app_hash":"","app_state":{"account_owner": "Bob"}}`) - _, err := GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for good genDoc json") - - // create a base gendoc from struct - baseGenDoc := &GenesisDoc{ - ChainID: "abc", - Validators: []GenesisValidator{{crypto.GenPrivKeyEd25519().PubKey(), 10, "myval"}}, - } - genDocBytes, err = cdc.MarshalJSON(baseGenDoc) - assert.NoError(t, err, "error marshalling genDoc") - - // test base gendoc and check consensus params were filled - genDoc, err := GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for valid genDoc json") - assert.NotNil(t, genDoc.ConsensusParams, "expected consensus params to be filled in") - - // create json with consensus params filled - genDocBytes, err = cdc.MarshalJSON(genDoc) - assert.NoError(t, err, "error marshalling genDoc") - genDoc, err = GenesisDocFromJSON(genDocBytes) - assert.NoError(t, err, "expected no error for valid genDoc json") - - // test with invalid consensus params - genDoc.ConsensusParams.BlockSize.MaxBytes = 0 - genDocBytes, err = cdc.MarshalJSON(genDoc) - assert.NoError(t, err, "error marshalling genDoc") - genDoc, err = GenesisDocFromJSON(genDocBytes) - assert.Error(t, err, "expected error for genDoc json with block size of 0") -} diff --git a/types/heartbeat.go b/types/heartbeat.go deleted file mode 100644 index 097dd22d..00000000 --- a/types/heartbeat.go +++ /dev/null @@ -1,52 +0,0 @@ -package types - -import ( - "fmt" - - "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" -) - -// Heartbeat is a simple vote-like structure so validators can -// alert others that they are alive and waiting for transactions. 
-// Note: We aren't adding ",omitempty" to Heartbeat's -// json field tags because we always want the JSON -// representation to be in its canonical form. -type Heartbeat struct { - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - Signature crypto.Signature `json:"signature"` -} - -// SignBytes returns the Heartbeat bytes for signing. -// It panics if the Heartbeat is nil. -func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat)) - if err != nil { - panic(err) - } - return bz -} - -// Copy makes a copy of the Heartbeat. -func (heartbeat *Heartbeat) Copy() *Heartbeat { - if heartbeat == nil { - return nil - } - heartbeatCopy := *heartbeat - return &heartbeatCopy -} - -// String returns a string representation of the Heartbeat. -func (heartbeat *Heartbeat) String() string { - if heartbeat == nil { - return "nil-heartbeat" - } - - return fmt.Sprintf("Heartbeat{%v:%X %v/%02d (%v) %v}", - heartbeat.ValidatorIndex, cmn.Fingerprint(heartbeat.ValidatorAddress), - heartbeat.Height, heartbeat.Round, heartbeat.Sequence, heartbeat.Signature) -} diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go deleted file mode 100644 index 3c453602..00000000 --- a/types/heartbeat_test.go +++ /dev/null @@ -1,51 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/require" - - "github.com/tendermint/go-crypto" -) - -func TestHeartbeatCopy(t *testing.T) { - hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - hbCopy := hb.Copy() - require.Equal(t, hbCopy, hb, "heartbeat copy should be the same") - hbCopy.Round = hb.Round + 10 - require.NotEqual(t, hbCopy, hb, "heartbeat copy mutation should not change original") - - var nilHb *Heartbeat - nilHbCopy := nilHb.Copy() - require.Nil(t, nilHbCopy, "copy of nil should also return nil") -} - -func TestHeartbeatString(t *testing.T) { - var nilHb *Heartbeat - require.Contains(t, nilHb.String(), "nil", "expecting a string and no panic") - - hb := &Heartbeat{ValidatorIndex: 1, Height: 11, Round: 2} - require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) }") - - var key crypto.PrivKeyEd25519 - hb.Signature = key.Sign([]byte("Tendermint")) - require.Equal(t, hb.String(), "Heartbeat{1:000000000000 11/02 (0) /FF41E371B9BF.../}") -} - -func TestHeartbeatWriteSignBytes(t *testing.T) { - - hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - bz := hb.SignBytes("0xdeadbeef") - // XXX HMMMMMMM - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":10,"round":1,"sequence":0,"validator_address":"","validator_index":1}`) - - plainHb := &Heartbeat{} - bz = plainHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":0,"round":0,"sequence":0,"validator_address":"","validator_index":0}`) - - require.Panics(t, func() { - var nilHb *Heartbeat - bz := nilHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), "null") - }) -} diff --git a/types/keys.go b/types/keys.go deleted file mode 100644 index 99255119..00000000 --- a/types/keys.go +++ /dev/null @@ -1,7 +0,0 @@ -package types - -// UNSTABLE -var ( - PeerStateKey = "ConsensusReactor.peerState" - PeerMempoolChKey = "MempoolReactor.peerMempoolCh" -) diff --git a/types/nop_event_bus.go b/types/nop_event_bus.go deleted file mode 100644 index cd1eab8c..00000000 --- 
a/types/nop_event_bus.go +++ /dev/null @@ -1,77 +0,0 @@ -package types - -import ( - "context" - - tmpubsub "github.com/tendermint/tendermint/libs/pubsub" -) - -type NopEventBus struct{} - -func (NopEventBus) Subscribe(ctx context.Context, subscriber string, query tmpubsub.Query, out chan<- interface{}) error { - return nil -} - -func (NopEventBus) Unsubscribe(ctx context.Context, subscriber string, query tmpubsub.Query) error { - return nil -} - -func (NopEventBus) UnsubscribeAll(ctx context.Context, subscriber string) error { - return nil -} - -//--- block, tx, and vote events - -func (NopEventBus) PublishEventNewBlock(block EventDataNewBlock) error { - return nil -} - -func (NopEventBus) PublishEventNewBlockHeader(header EventDataNewBlockHeader) error { - return nil -} - -func (NopEventBus) PublishEventVote(vote EventDataVote) error { - return nil -} - -func (NopEventBus) PublishEventTx(tx EventDataTx) error { - return nil -} - -//--- EventDataRoundState events - -func (NopEventBus) PublishEventNewRoundStep(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutPropose(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventTimeoutWait(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventNewRound(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventCompleteProposal(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventPolka(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventUnlock(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventRelock(rs EventDataRoundState) error { - return nil -} - -func (NopEventBus) PublishEventLock(rs EventDataRoundState) error { - return nil -} diff --git a/types/params.go b/types/params.go deleted file mode 100644 index 2df092d6..00000000 --- a/types/params.go +++ /dev/null @@ -1,155 +0,0 @@ -package types - -import ( - abci "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" -) - -const ( - MaxBlockSizeBytes = 104857600 // 100MB -) - -// ConsensusParams contains consensus critical parameters -// that determine the validity of blocks. -type ConsensusParams struct { - BlockSize `json:"block_size_params"` - TxSize `json:"tx_size_params"` - BlockGossip `json:"block_gossip_params"` - EvidenceParams `json:"evidence_params"` -} - -// BlockSize contain limits on the block size. -type BlockSize struct { - MaxBytes int `json:"max_bytes"` // NOTE: must not be 0 nor greater than 100MB - MaxTxs int `json:"max_txs"` - MaxGas int64 `json:"max_gas"` -} - -// TxSize contain limits on the tx size. -type TxSize struct { - MaxBytes int `json:"max_bytes"` - MaxGas int64 `json:"max_gas"` -} - -// BlockGossip determine consensus critical elements of how blocks are gossiped -type BlockGossip struct { - BlockPartSizeBytes int `json:"block_part_size_bytes"` // NOTE: must not be 0 -} - -// EvidenceParams determine how we handle evidence of malfeasance -type EvidenceParams struct { - MaxAge int64 `json:"max_age"` // only accept new evidence more recent than this -} - -// DefaultConsensusParams returns a default ConsensusParams. -func DefaultConsensusParams() *ConsensusParams { - return &ConsensusParams{ - DefaultBlockSize(), - DefaultTxSize(), - DefaultBlockGossip(), - DefaultEvidenceParams(), - } -} - -// DefaultBlockSize returns a default BlockSize. 
-func DefaultBlockSize() BlockSize {
-  return BlockSize{
-    MaxBytes: 22020096, // 21MB
-    MaxTxs: 100000,
-    MaxGas: -1,
-  }
-}
-
-// DefaultTxSize returns a default TxSize.
-func DefaultTxSize() TxSize {
-  return TxSize{
-    MaxBytes: 10240, // 10kB
-    MaxGas: -1,
-  }
-}
-
-// DefaultBlockGossip returns a default BlockGossip.
-func DefaultBlockGossip() BlockGossip {
-  return BlockGossip{
-    BlockPartSizeBytes: 65536, // 64kB,
-  }
-}
-
-// DefaultEvidenceParams returns a default EvidenceParams.
-func DefaultEvidenceParams() EvidenceParams {
-  return EvidenceParams{
-    MaxAge: 100000, // 27.8 hrs at 1block/s
-  }
-}
-
-// Validate validates the ConsensusParams to ensure all values
-// are within their allowed limits, and returns an error if they are not.
-func (params *ConsensusParams) Validate() error {
-  // ensure some values are greater than 0
-  if params.BlockSize.MaxBytes <= 0 {
-    return cmn.NewError("BlockSize.MaxBytes must be greater than 0. Got %d", params.BlockSize.MaxBytes)
-  }
-  if params.BlockGossip.BlockPartSizeBytes <= 0 {
-    return cmn.NewError("BlockGossip.BlockPartSizeBytes must be greater than 0. Got %d", params.BlockGossip.BlockPartSizeBytes)
-  }
-
-  // ensure blocks aren't too big
-  if params.BlockSize.MaxBytes > MaxBlockSizeBytes {
-    return cmn.NewError("BlockSize.MaxBytes is too big. %d > %d",
-      params.BlockSize.MaxBytes, MaxBlockSizeBytes)
-  }
-  return nil
-}
-
-// Hash returns a merkle hash of the parameters to store
-// in the block header
-func (params *ConsensusParams) Hash() []byte {
-  return merkle.SimpleHashFromMap(map[string]merkle.Hasher{
-    "block_gossip_part_size_bytes": aminoHasher(params.BlockGossip.BlockPartSizeBytes),
-    "block_size_max_bytes": aminoHasher(params.BlockSize.MaxBytes),
-    "block_size_max_gas": aminoHasher(params.BlockSize.MaxGas),
-    "block_size_max_txs": aminoHasher(params.BlockSize.MaxTxs),
-    "tx_size_max_bytes": aminoHasher(params.TxSize.MaxBytes),
-    "tx_size_max_gas": aminoHasher(params.TxSize.MaxGas),
-  })
-}
-
-// Update returns a copy of the params with updates from the non-zero fields of p2.
-// NOTE: must not modify the original
-func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusParams {
-  res := params // explicit copy
-
-  if params2 == nil {
-    return res
-  }
-
-  // we must defensively consider any structs may be nil
-  // XXX: it's cast city over here. It's ok because we only do int32->int
-  // but still, watch it champ.
- if params2.BlockSize != nil { - if params2.BlockSize.MaxBytes > 0 { - res.BlockSize.MaxBytes = int(params2.BlockSize.MaxBytes) - } - if params2.BlockSize.MaxTxs > 0 { - res.BlockSize.MaxTxs = int(params2.BlockSize.MaxTxs) - } - if params2.BlockSize.MaxGas > 0 { - res.BlockSize.MaxGas = params2.BlockSize.MaxGas - } - } - if params2.TxSize != nil { - if params2.TxSize.MaxBytes > 0 { - res.TxSize.MaxBytes = int(params2.TxSize.MaxBytes) - } - if params2.TxSize.MaxGas > 0 { - res.TxSize.MaxGas = params2.TxSize.MaxGas - } - } - if params2.BlockGossip != nil { - if params2.BlockGossip.BlockPartSizeBytes > 0 { - res.BlockGossip.BlockPartSizeBytes = int(params2.BlockGossip.BlockPartSizeBytes) - } - } - return res -} diff --git a/types/params_test.go b/types/params_test.go deleted file mode 100644 index f645585e..00000000 --- a/types/params_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package types - -import ( - "bytes" - "sort" - "testing" - - "github.com/stretchr/testify/assert" -) - -func newConsensusParams(blockSize, partSize int) ConsensusParams { - return ConsensusParams{ - BlockSize: BlockSize{MaxBytes: blockSize}, - BlockGossip: BlockGossip{BlockPartSizeBytes: partSize}, - } -} - -func TestConsensusParamsValidation(t *testing.T) { - testCases := []struct { - params ConsensusParams - valid bool - }{ - {newConsensusParams(1, 1), true}, - {newConsensusParams(1, 0), false}, - {newConsensusParams(0, 1), false}, - {newConsensusParams(0, 0), false}, - {newConsensusParams(0, 10), false}, - {newConsensusParams(10, -1), false}, - {newConsensusParams(47*1024*1024, 400), true}, - {newConsensusParams(10, 400), true}, - {newConsensusParams(100*1024*1024, 400), true}, - {newConsensusParams(101*1024*1024, 400), false}, - {newConsensusParams(1024*1024*1024, 400), false}, - } - for _, testCase := range testCases { - if testCase.valid { - assert.NoError(t, testCase.params.Validate(), "expected no error for valid params") - } else { - assert.Error(t, testCase.params.Validate(), "expected error for non valid params") - } - } -} - -func makeParams(blockBytes, blockTx, blockGas, txBytes, - txGas, partSize int) ConsensusParams { - - return ConsensusParams{ - BlockSize: BlockSize{ - MaxBytes: blockBytes, - MaxTxs: blockTx, - MaxGas: int64(blockGas), - }, - TxSize: TxSize{ - MaxBytes: txBytes, - MaxGas: int64(txGas), - }, - BlockGossip: BlockGossip{ - BlockPartSizeBytes: partSize, - }, - } -} - -func TestConsensusParamsHash(t *testing.T) { - params := []ConsensusParams{ - makeParams(1, 2, 3, 4, 5, 6), - makeParams(7, 2, 3, 4, 5, 6), - makeParams(1, 7, 3, 4, 5, 6), - makeParams(1, 2, 7, 4, 5, 6), - makeParams(1, 2, 3, 7, 5, 6), - makeParams(1, 2, 3, 4, 7, 6), - makeParams(1, 2, 3, 4, 5, 7), - makeParams(6, 5, 4, 3, 2, 1), - } - - hashes := make([][]byte, len(params)) - for i := range params { - hashes[i] = params[i].Hash() - } - - // make sure there are no duplicates... 
-  // sort, then check in order for matches
-  sort.Slice(hashes, func(i, j int) bool {
-    return bytes.Compare(hashes[i], hashes[j]) < 0
-  })
-  for i := 0; i < len(hashes)-1; i++ {
-    assert.NotEqual(t, hashes[i], hashes[i+1])
-  }
-}
diff --git a/types/part_set.go b/types/part_set.go
deleted file mode 100644
index 18cfe802..00000000
--- a/types/part_set.go
+++ /dev/null
@@ -1,281 +0,0 @@
-package types
-
-import (
-  "bytes"
-  "errors"
-  "fmt"
-  "io"
-  "sync"
-
-  "golang.org/x/crypto/ripemd160"
-
-  cmn "github.com/tendermint/tmlibs/common"
-  "github.com/tendermint/tmlibs/merkle"
-)
-
-var (
-  ErrPartSetUnexpectedIndex = errors.New("Error part set unexpected index")
-  ErrPartSetInvalidProof = errors.New("Error part set invalid proof")
-)
-
-type Part struct {
-  Index int `json:"index"`
-  Bytes cmn.HexBytes `json:"bytes"`
-  Proof merkle.SimpleProof `json:"proof"`
-
-  // Cache
-  hash []byte
-}
-
-func (part *Part) Hash() []byte {
-  if part.hash != nil {
-    return part.hash
-  }
-  hasher := ripemd160.New()
-  hasher.Write(part.Bytes) // nolint: errcheck, gas
-  part.hash = hasher.Sum(nil)
-  return part.hash
-}
-
-func (part *Part) String() string {
-  return part.StringIndented("")
-}
-
-func (part *Part) StringIndented(indent string) string {
-  return fmt.Sprintf(`Part{#%v
-%s Bytes: %X...
-%s Proof: %v
-%s}`,
-    part.Index,
-    indent, cmn.Fingerprint(part.Bytes),
-    indent, part.Proof.StringIndented(indent+" "),
-    indent)
-}
-
-//-------------------------------------
-
-type PartSetHeader struct {
-  Total int `json:"total"`
-  Hash cmn.HexBytes `json:"hash"`
-}
-
-func (psh PartSetHeader) String() string {
-  return fmt.Sprintf("%v:%X", psh.Total, cmn.Fingerprint(psh.Hash))
-}
-
-func (psh PartSetHeader) IsZero() bool {
-  return psh.Total == 0
-}
-
-func (psh PartSetHeader) Equals(other PartSetHeader) bool {
-  return psh.Total == other.Total && bytes.Equal(psh.Hash, other.Hash)
-}
-
-//-------------------------------------
-
-type PartSet struct {
-  total int
-  hash []byte
-
-  mtx sync.Mutex
-  parts []*Part
-  partsBitArray *cmn.BitArray
-  count int
-}
-
-// Returns an immutable, full PartSet from the data bytes.
-// The data bytes are split into "partSize" chunks, and a merkle tree is computed.
-func NewPartSetFromData(data []byte, partSize int) *PartSet {
-  // divide data into chunks of length partSize
-  total := (len(data) + partSize - 1) / partSize
-  parts := make([]*Part, total)
-  parts_ := make([]merkle.Hasher, total)
-  partsBitArray := cmn.NewBitArray(total)
-  for i := 0; i < total; i++ {
-    part := &Part{
-      Index: i,
-      Bytes: data[i*partSize : cmn.MinInt(len(data), (i+1)*partSize)],
-    }
-    parts[i] = part
-    parts_[i] = part
-    partsBitArray.SetIndex(i, true)
-  }
-  // Compute merkle proofs
-  root, proofs := merkle.SimpleProofsFromHashers(parts_)
-  for i := 0; i < total; i++ {
-    parts[i].Proof = *proofs[i]
-  }
-  return &PartSet{
-    total: total,
-    hash: root,
-    parts: parts,
-    partsBitArray: partsBitArray,
-    count: total,
-  }
-}
-
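An editorial round-trip sketch (not from the deleted file; assumes io/ioutil is imported alongside the imports above): the sender splits a blob, the receiver rebuilds it from the header alone, and AddPart checks each part's merkle proof against the header hash. NewPartSetFromHeader is defined just below.

func examplePartSetRoundTrip() {
  data := cmn.RandBytes(4096 * 3)
  ps := NewPartSetFromData(data, 4096)

  ps2 := NewPartSetFromHeader(ps.Header())
  for i := 0; i < ps.Total(); i++ {
    if _, err := ps2.AddPart(ps.GetPart(i)); err != nil {
      panic(err) // a tampered part would fail its proof here
    }
  }

  out, err := ioutil.ReadAll(ps2.GetReader())
  if err != nil || !bytes.Equal(out, data) {
    panic("round trip failed")
  }
}

-// Returns an empty PartSet ready to be populated.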
-func NewPartSetFromHeader(header PartSetHeader) *PartSet { - return &PartSet{ - total: header.Total, - hash: header.Hash, - parts: make([]*Part, header.Total), - partsBitArray: cmn.NewBitArray(header.Total), - count: 0, - } -} - -func (ps *PartSet) Header() PartSetHeader { - if ps == nil { - return PartSetHeader{} - } - return PartSetHeader{ - Total: ps.total, - Hash: ps.hash, - } -} - -func (ps *PartSet) HasHeader(header PartSetHeader) bool { - if ps == nil { - return false - } - return ps.Header().Equals(header) -} - -func (ps *PartSet) BitArray() *cmn.BitArray { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.partsBitArray.Copy() -} - -func (ps *PartSet) Hash() []byte { - if ps == nil { - return nil - } - return ps.hash -} - -func (ps *PartSet) HashesTo(hash []byte) bool { - if ps == nil { - return false - } - return bytes.Equal(ps.hash, hash) -} - -func (ps *PartSet) Count() int { - if ps == nil { - return 0 - } - return ps.count -} - -func (ps *PartSet) Total() int { - if ps == nil { - return 0 - } - return ps.total -} - -func (ps *PartSet) AddPart(part *Part) (bool, error) { - ps.mtx.Lock() - defer ps.mtx.Unlock() - - // Invalid part index - if part.Index >= ps.total { - return false, ErrPartSetUnexpectedIndex - } - - // If part already exists, return false. - if ps.parts[part.Index] != nil { - return false, nil - } - - // Check hash proof - if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { - return false, ErrPartSetInvalidProof - } - - // Add part - ps.parts[part.Index] = part - ps.partsBitArray.SetIndex(part.Index, true) - ps.count++ - return true, nil -} - -func (ps *PartSet) GetPart(index int) *Part { - ps.mtx.Lock() - defer ps.mtx.Unlock() - return ps.parts[index] -} - -func (ps *PartSet) IsComplete() bool { - return ps.count == ps.total -} - -func (ps *PartSet) GetReader() io.Reader { - if !ps.IsComplete() { - cmn.PanicSanity("Cannot GetReader() on incomplete PartSet") - } - return NewPartSetReader(ps.parts) -} - -type PartSetReader struct { - i int - parts []*Part - reader *bytes.Reader -} - -func NewPartSetReader(parts []*Part) *PartSetReader { - return &PartSetReader{ - i: 0, - parts: parts, - reader: bytes.NewReader(parts[0].Bytes), - } -} - -func (psr *PartSetReader) Read(p []byte) (n int, err error) { - readerLen := psr.reader.Len() - if readerLen >= len(p) { - return psr.reader.Read(p) - } else if readerLen > 0 { - n1, err := psr.Read(p[:readerLen]) - if err != nil { - return n1, err - } - n2, err := psr.Read(p[readerLen:]) - return n1 + n2, err - } - - psr.i++ - if psr.i >= len(psr.parts) { - return 0, io.EOF - } - psr.reader = bytes.NewReader(psr.parts[psr.i].Bytes) - return psr.Read(p) -} - -func (ps *PartSet) StringShort() string { - if ps == nil { - return "nil-PartSet" - } - ps.mtx.Lock() - defer ps.mtx.Unlock() - return fmt.Sprintf("(%v of %v)", ps.Count(), ps.Total()) -} - -func (ps *PartSet) MarshalJSON() ([]byte, error) { - if ps == nil { - return []byte("{}"), nil - } - - ps.mtx.Lock() - defer ps.mtx.Unlock() - - return cdc.MarshalJSON(struct { - CountTotal string `json:"count/total"` - PartsBitArray *cmn.BitArray `json:"parts_bit_array"` - }{ - fmt.Sprintf("%d/%d", ps.Count(), ps.Total()), - ps.partsBitArray, - }) -} diff --git a/types/part_set_test.go b/types/part_set_test.go deleted file mode 100644 index 545b4d42..00000000 --- a/types/part_set_test.go +++ /dev/null @@ -1,90 +0,0 @@ -package types - -import ( - "bytes" - "io/ioutil" - "testing" - - cmn "github.com/tendermint/tmlibs/common" -) - -const ( - testPartSize = 65536 // 
64KB
-)
-
-func TestBasicPartSet(t *testing.T) {
-  // Construct random data of size partSize * 100
-  data := cmn.RandBytes(testPartSize * 100)
-
-  partSet := NewPartSetFromData(data, testPartSize)
-  if len(partSet.Hash()) == 0 {
-    t.Error("Expected to get hash")
-  }
-  if partSet.Total() != 100 {
-    t.Errorf("Expected to get 100 parts, but got %v", partSet.Total())
-  }
-  if !partSet.IsComplete() {
-    t.Errorf("PartSet should be complete")
-  }
-
-  // Test adding parts to a new partSet.
-  partSet2 := NewPartSetFromHeader(partSet.Header())
-
-  for i := 0; i < partSet.Total(); i++ {
-    part := partSet.GetPart(i)
-    //t.Logf("\n%v", part)
-    added, err := partSet2.AddPart(part)
-    if !added || err != nil {
-      t.Errorf("Failed to add part %v, error: %v", i, err)
-    }
-  }
-
-  if !bytes.Equal(partSet.Hash(), partSet2.Hash()) {
-    t.Error("Expected to get same hash")
-  }
-  if partSet2.Total() != 100 {
-    t.Errorf("Expected to get 100 parts, but got %v", partSet2.Total())
-  }
-  if !partSet2.IsComplete() {
-    t.Errorf("Reconstructed PartSet should be complete")
-  }
-
-  // Reconstruct data, assert that they are equal.
-  data2Reader := partSet2.GetReader()
-  data2, err := ioutil.ReadAll(data2Reader)
-  if err != nil {
-    t.Errorf("Error reading data2Reader: %v", err)
-  }
-  if !bytes.Equal(data, data2) {
-    t.Errorf("Got wrong data.")
-  }
-}
-
-func TestWrongProof(t *testing.T) {
-  // Construct random data of size partSize * 100
-  data := cmn.RandBytes(testPartSize * 100)
-  partSet := NewPartSetFromData(data, testPartSize)
-
-  // Test adding a part with wrong data.
-  partSet2 := NewPartSetFromHeader(partSet.Header())
-
-  // Test adding a part with wrong trail.
-  part := partSet.GetPart(0)
-  part.Proof.Aunts[0][0] += byte(0x01)
-  added, err := partSet2.AddPart(part)
-  if added || err == nil {
-    t.Errorf("Expected to fail adding a part with bad trail.")
-  }
-
-  // Test adding a part with wrong bytes.
-  part = partSet.GetPart(1)
-  part.Bytes[0] += byte(0x01)
-  added, err = partSet2.AddPart(part)
-  if added || err == nil {
-    t.Errorf("Expected to fail adding a part with bad bytes.")
-  }
-}
diff --git a/types/priv_validator.go b/types/priv_validator.go
deleted file mode 100644
index 8759d3f9..00000000
--- a/types/priv_validator.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package types
-
-import (
-  "bytes"
-  "fmt"
-
-  "github.com/tendermint/go-crypto"
-)
-
-// PrivValidator defines the functionality of a local Tendermint validator
-// that signs votes, proposals, and heartbeats, and never double signs.
-type PrivValidator interface {
-  GetAddress() Address // redundant since .PubKey().Address()
-  GetPubKey() crypto.PubKey
-
-  SignVote(chainID string, vote *Vote) error
-  SignProposal(chainID string, proposal *Proposal) error
-  SignHeartbeat(chainID string, heartbeat *Heartbeat) error
-}
-
-//----------------------------------------
-// Misc.
-
-type PrivValidatorsByAddress []PrivValidator
-
-func (pvs PrivValidatorsByAddress) Len() int {
-  return len(pvs)
-}
-
-func (pvs PrivValidatorsByAddress) Less(i, j int) bool {
-  return bytes.Compare(pvs[i].GetAddress(), pvs[j].GetAddress()) == -1
-}
-
-func (pvs PrivValidatorsByAddress) Swap(i, j int) {
-  it := pvs[i]
-  pvs[i] = pvs[j]
-  pvs[j] = it
-}
-
-//----------------------------------------
-// MockPV
-
-// MockPV implements PrivValidator without any safety or persistence.
-// Only use it for testing.
-type MockPV struct {
-  privKey crypto.PrivKey
-}
-
-func NewMockPV() *MockPV {
-  return &MockPV{crypto.GenPrivKeyEd25519()}
-}
-
-// Implements PrivValidator.
-func (pv *MockPV) GetAddress() Address {
-  return pv.privKey.PubKey().Address()
-}
-
-// Implements PrivValidator.
-func (pv *MockPV) GetPubKey() crypto.PubKey {
-  return pv.privKey.PubKey()
-}
-
-// Implements PrivValidator.
-func (pv *MockPV) SignVote(chainID string, vote *Vote) error {
-  signBytes := vote.SignBytes(chainID)
-  sig := pv.privKey.Sign(signBytes)
-  vote.Signature = sig
-  return nil
-}
-
-// Implements PrivValidator.
-func (pv *MockPV) SignProposal(chainID string, proposal *Proposal) error {
-  signBytes := proposal.SignBytes(chainID)
-  sig := pv.privKey.Sign(signBytes)
-  proposal.Signature = sig
-  return nil
-}
-
-// SignHeartbeat signs the heartbeat without any checking. Implements PrivValidator.
-func (pv *MockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
-  sig := pv.privKey.Sign(heartbeat.SignBytes(chainID))
-  heartbeat.Signature = sig
-  return nil
-}
-
-// String returns a string representation of the MockPV.
-func (pv *MockPV) String() string {
-  return fmt.Sprintf("MockPV{%v}", pv.GetAddress())
-}
-
-// XXX: Implement.
-func (pv *MockPV) DisableChecks() {
-  // Currently this does nothing,
-  // as MockPV has no safety checks at all.
-}
diff --git a/types/proposal.go b/types/proposal.go
deleted file mode 100644
index 95008897..00000000
--- a/types/proposal.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package types
-
-import (
-  "errors"
-  "fmt"
-  "time"
-
-  "github.com/tendermint/go-crypto"
-)
-
-var (
-  ErrInvalidBlockPartSignature = errors.New("Error invalid block part signature")
-  ErrInvalidBlockPartHash = errors.New("Error invalid block part hash")
-)
-
-// Proposal defines a block proposal for the consensus.
-// It refers to the block only by its PartSetHeader.
-// It must be signed by the correct proposer for the given Height/Round
-// to be considered valid. It may depend on votes from a previous round,
-// a so-called Proof-of-Lock (POL) round, as noted in the POLRound and POLBlockID.
-type Proposal struct {
-  Height int64 `json:"height"`
-  Round int `json:"round"`
-  Timestamp time.Time `json:"timestamp"`
-  BlockPartsHeader PartSetHeader `json:"block_parts_header"`
-  POLRound int `json:"pol_round"` // -1 if null.
-  POLBlockID BlockID `json:"pol_block_id"` // zero if null.
-  Signature crypto.Signature `json:"signature"`
-}
-
-// NewProposal returns a new Proposal.
-// If there is no POLRound, polRound should be -1.
-func NewProposal(height int64, round int, blockPartsHeader PartSetHeader, polRound int, polBlockID BlockID) *Proposal {
-  return &Proposal{
-    Height: height,
-    Round: round,
-    Timestamp: time.Now().UTC(),
-    BlockPartsHeader: blockPartsHeader,
-    POLRound: polRound,
-    POLBlockID: polBlockID,
-  }
-}
-
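An editorial sketch tying MockPV and Proposal together (not from the deleted file): sign a proposal, then verify the signature over the canonical sign-bytes, mirroring TestProposalVerifySignature further below.

func exampleSignProposal() {
  privVal := NewMockPV()
  chainID := "example-chain"

  prop := NewProposal(4, 2, PartSetHeader{Total: 777, Hash: []byte("proper")}, 2, BlockID{})
  if err := privVal.SignProposal(chainID, prop); err != nil {
    panic(err)
  }

  // Anyone holding the public key can check the signature.
  ok := privVal.GetPubKey().VerifyBytes(prop.SignBytes(chainID), prop.Signature)
  fmt.Println(ok) // true
}

-// String returns a string representation of the Proposal.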
-func (p *Proposal) String() string { - return fmt.Sprintf("Proposal{%v/%v %v (%v,%v) %v @ %s}", - p.Height, p.Round, p.BlockPartsHeader, p.POLRound, - p.POLBlockID, p.Signature, CanonicalTime(p.Timestamp)) -} - -// SignBytes returns the Proposal bytes for signing -func (p *Proposal) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p)) - if err != nil { - panic(err) - } - return bz -} diff --git a/types/proposal_test.go b/types/proposal_test.go deleted file mode 100644 index 43fb7c20..00000000 --- a/types/proposal_test.go +++ /dev/null @@ -1,98 +0,0 @@ -package types - -import ( - "testing" - "time" - - "github.com/stretchr/testify/require" -) - -var testProposal *Proposal - -func init() { - var stamp, err = time.Parse(TimeFormat, "2018-02-11T07:09:22.765Z") - if err != nil { - panic(err) - } - testProposal = &Proposal{ - Height: 12345, - Round: 23456, - BlockPartsHeader: PartSetHeader{111, []byte("blockparts")}, - POLRound: -1, - Timestamp: stamp, - } -} - -func TestProposalSignable(t *testing.T) { - signBytes := testProposal.SignBytes("test_chain_id") - signStr := string(signBytes) - - expected := `{"@chain_id":"test_chain_id","@type":"proposal","block_parts_header":{"hash":"626C6F636B7061727473","total":111},"height":12345,"pol_block_id":{},"pol_round":-1,"round":23456,"timestamp":"2018-02-11T07:09:22.765Z"}` - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr) - } -} - -func TestProposalString(t *testing.T) { - str := testProposal.String() - expected := `Proposal{12345/23456 111:626C6F636B70 (-1,:0:000000000000) @ 2018-02-11T07:09:22.765Z}` - if str != expected { - t.Errorf("Got unexpected string for Proposal. Expected:\n%v\nGot:\n%v", expected, str) - } -} - -func TestProposalVerifySignature(t *testing.T) { - privVal := NewMockPV() - pubKey := privVal.GetPubKey() - - prop := NewProposal(4, 2, PartSetHeader{777, []byte("proper")}, 2, BlockID{}) - signBytes := prop.SignBytes("test_chain_id") - - // sign it - err := privVal.SignProposal("test_chain_id", prop) - require.NoError(t, err) - - // verify the same proposal - valid := pubKey.VerifyBytes(signBytes, prop.Signature) - require.True(t, valid) - - // serialize, deserialize and verify again.... 
- newProp := new(Proposal) - bs, err := cdc.MarshalBinary(prop) - require.NoError(t, err) - err = cdc.UnmarshalBinary(bs, &newProp) - require.NoError(t, err) - - // verify the transmitted proposal - newSignBytes := newProp.SignBytes("test_chain_id") - require.Equal(t, string(signBytes), string(newSignBytes)) - valid = pubKey.VerifyBytes(newSignBytes, newProp.Signature) - require.True(t, valid) -} - -func BenchmarkProposalWriteSignBytes(b *testing.B) { - for i := 0; i < b.N; i++ { - testProposal.SignBytes("test_chain_id") - } -} - -func BenchmarkProposalSign(b *testing.B) { - privVal := NewMockPV() - for i := 0; i < b.N; i++ { - err := privVal.SignProposal("test_chain_id", testProposal) - if err != nil { - b.Error(err) - } - } -} - -func BenchmarkProposalVerifySignature(b *testing.B) { - privVal := NewMockPV() - err := privVal.SignProposal("test_chain_id", testProposal) - require.Nil(b, err) - pubKey := privVal.GetPubKey() - - for i := 0; i < b.N; i++ { - pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature) - } -} diff --git a/types/protobuf.go b/types/protobuf.go deleted file mode 100644 index eb684ae7..00000000 --- a/types/protobuf.go +++ /dev/null @@ -1,221 +0,0 @@ -package types - -import ( - "bytes" - "fmt" - "reflect" - "time" - - abci "github.com/tendermint/abci/types" - crypto "github.com/tendermint/go-crypto" -) - -//------------------------------------------------------- -// Use strings to distinguish types in ABCI messages - -const ( - ABCIEvidenceTypeDuplicateVote = "duplicate/vote" - ABCIEvidenceTypeMockGood = "mock/good" -) - -const ( - ABCIPubKeyTypeEd25519 = "ed25519" - ABCIPubKeyTypeSecp256k1 = "secp256k1" -) - -//------------------------------------------------------- - -// TM2PB is used for converting Tendermint ABCI to protobuf ABCI. 
-// UNSTABLE -var TM2PB = tm2pb{} - -type tm2pb struct{} - -func (tm2pb) Header(header *Header) abci.Header { - return abci.Header{ - ChainID: header.ChainID, - Height: header.Height, - - Time: header.Time.Unix(), - NumTxs: int32(header.NumTxs), // XXX: overflow - TotalTxs: header.TotalTxs, - - LastBlockHash: header.LastBlockID.Hash, - ValidatorsHash: header.ValidatorsHash, - AppHash: header.AppHash, - - // Proposer: TODO - } -} - -// XXX: panics on unknown pubkey type -func (tm2pb) Validator(val *Validator) abci.Validator { - return abci.Validator{ - Address: val.PubKey.Address(), - PubKey: TM2PB.PubKey(val.PubKey), - Power: val.VotingPower, - } -} - -// XXX: panics on nil or unknown pubkey type -// TODO: add cases when new pubkey types are added to go-crypto -func (tm2pb) PubKey(pubKey crypto.PubKey) abci.PubKey { - switch pk := pubKey.(type) { - case crypto.PubKeyEd25519: - return abci.PubKey{ - Type: ABCIPubKeyTypeEd25519, - Data: pk[:], - } - case crypto.PubKeySecp256k1: - return abci.PubKey{ - Type: ABCIPubKeyTypeSecp256k1, - Data: pk[:], - } - default: - panic(fmt.Sprintf("unknown pubkey type: %v %v", pubKey, reflect.TypeOf(pubKey))) - } -} - -// XXX: panics on nil or unknown pubkey type -func (tm2pb) Validators(vals *ValidatorSet) []abci.Validator { - validators := make([]abci.Validator, len(vals.Validators)) - for i, val := range vals.Validators { - validators[i] = TM2PB.Validator(val) - } - return validators -} - -func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { - return &abci.ConsensusParams{ - BlockSize: &abci.BlockSize{ - - MaxBytes: int32(params.BlockSize.MaxBytes), - MaxTxs: int32(params.BlockSize.MaxTxs), - MaxGas: params.BlockSize.MaxGas, - }, - TxSize: &abci.TxSize{ - MaxBytes: int32(params.TxSize.MaxBytes), - MaxGas: params.TxSize.MaxGas, - }, - BlockGossip: &abci.BlockGossip{ - BlockPartSizeBytes: int32(params.BlockGossip.BlockPartSizeBytes), - }, - } -} - -// ABCI Evidence includes information from the past that's not included in the evidence itself -// so Evidence types stays compact. -// XXX: panics on nil or unknown pubkey type -func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence { - _, val := valSet.GetByAddress(ev.Address()) - if val == nil { - // should already have checked this - panic(val) - } - - // set type - var evType string - switch ev.(type) { - case *DuplicateVoteEvidence: - evType = ABCIEvidenceTypeDuplicateVote - case MockGoodEvidence: - // XXX: not great to have test types in production paths ... - evType = ABCIEvidenceTypeMockGood - default: - panic(fmt.Sprintf("Unknown evidence type: %v %v", ev, reflect.TypeOf(ev))) - } - - return abci.Evidence{ - Type: evType, - Validator: TM2PB.Validator(val), - Height: ev.Height(), - Time: evTime.Unix(), - TotalVotingPower: valSet.TotalVotingPower(), - } -} - -// XXX: panics on nil or unknown pubkey type -func (tm2pb) ValidatorFromPubKeyAndPower(pubkey crypto.PubKey, power int64) abci.Validator { - pubkeyABCI := TM2PB.PubKey(pubkey) - return abci.Validator{ - Address: pubkey.Address(), - PubKey: pubkeyABCI, - Power: power, - } -} - -//---------------------------------------------------------------------------- - -// PB2TM is used for converting protobuf ABCI to Tendermint ABCI. 
-// UNSTABLE
-var PB2TM = pb2tm{}
-
-type pb2tm struct{}
-
-func (pb2tm) PubKey(pubKey abci.PubKey) (crypto.PubKey, error) {
-  // TODO: define these in go-crypto and use them
-  sizeEd := 32
-  sizeSecp := 33
-  switch pubKey.Type {
-  case ABCIPubKeyTypeEd25519:
-    if len(pubKey.Data) != sizeEd {
-      return nil, fmt.Errorf("Invalid size for PubKeyEd25519. Got %d, expected %d", len(pubKey.Data), sizeEd)
-    }
-    var pk crypto.PubKeyEd25519
-    copy(pk[:], pubKey.Data)
-    return pk, nil
-  case ABCIPubKeyTypeSecp256k1:
-    if len(pubKey.Data) != sizeSecp {
-      return nil, fmt.Errorf("Invalid size for PubKeySecp256k1. Got %d, expected %d", len(pubKey.Data), sizeSecp)
-    }
-    var pk crypto.PubKeySecp256k1
-    copy(pk[:], pubKey.Data)
-    return pk, nil
-  default:
-    return nil, fmt.Errorf("Unknown pubkey type %v", pubKey.Type)
-  }
-}
-
-func (pb2tm) Validators(vals []abci.Validator) ([]*Validator, error) {
-  tmVals := make([]*Validator, len(vals))
-  for i, v := range vals {
-    pub, err := PB2TM.PubKey(v.PubKey)
-    if err != nil {
-      return nil, err
-    }
-    // If the app provided an address too, it must match.
-    // This is just a sanity check.
-    if len(v.Address) > 0 {
-      if !bytes.Equal(pub.Address(), v.Address) {
-        return nil, fmt.Errorf("Validator.Address (%X) does not match PubKey.Address (%X)",
-          v.Address, pub.Address())
-      }
-    }
-    tmVals[i] = &Validator{
-      Address: pub.Address(),
-      PubKey: pub,
-      VotingPower: v.Power,
-    }
-  }
-  return tmVals, nil
-}
-
-func (pb2tm) ConsensusParams(csp *abci.ConsensusParams) ConsensusParams {
-  return ConsensusParams{
-    BlockSize: BlockSize{
-      MaxBytes: int(csp.BlockSize.MaxBytes), // XXX
-      MaxTxs: int(csp.BlockSize.MaxTxs), // XXX
-      MaxGas: csp.BlockSize.MaxGas,
-    },
-    TxSize: TxSize{
-      MaxBytes: int(csp.TxSize.MaxBytes), // XXX
-      MaxGas: csp.TxSize.MaxGas,
-    },
-    BlockGossip: BlockGossip{
-      BlockPartSizeBytes: int(csp.BlockGossip.BlockPartSizeBytes), // XXX
-    },
-    // TODO: EvidenceParams: EvidenceParams{
-    //  MaxAge: int(csp.Evidence.MaxAge), // XXX
-    // },
-  }
-}
diff --git a/types/protobuf_test.go b/types/protobuf_test.go
deleted file mode 100644
index 9f3b3187..00000000
--- a/types/protobuf_test.go
+++ /dev/null
@@ -1,69 +0,0 @@
-package types
-
-import (
-  "testing"
-
-  "github.com/stretchr/testify/assert"
-  abci "github.com/tendermint/abci/types"
-  crypto "github.com/tendermint/go-crypto"
-)
-
-func TestABCIPubKey(t *testing.T) {
-  pkEd := crypto.GenPrivKeyEd25519().PubKey()
-  pkSecp := crypto.GenPrivKeySecp256k1().PubKey()
-  testABCIPubKey(t, pkEd, ABCIPubKeyTypeEd25519)
-  testABCIPubKey(t, pkSecp, ABCIPubKeyTypeSecp256k1)
-}
-
-func testABCIPubKey(t *testing.T, pk crypto.PubKey, typeStr string) {
-  abciPubKey := TM2PB.PubKey(pk)
-  pk2, err := PB2TM.PubKey(abciPubKey)
-  assert.Nil(t, err)
-  assert.Equal(t, pk, pk2)
-}
-
-func TestABCIValidators(t *testing.T) {
-  pkEd := crypto.GenPrivKeyEd25519().PubKey()
-
-  // correct validator
-  tmValExpected := &Validator{
-    Address: pkEd.Address(),
-    PubKey: pkEd,
-    VotingPower: 10,
-  }
-
-  tmVal := &Validator{
-    Address: pkEd.Address(),
-    PubKey: pkEd,
-    VotingPower: 10,
-  }
-
-  abciVal := TM2PB.Validator(tmVal)
-  tmVals, err := PB2TM.Validators([]abci.Validator{abciVal})
-  assert.Nil(t, err)
-  assert.Equal(t, tmValExpected, tmVals[0])
-
-  // val with address
-  tmVal.Address = pkEd.Address()
-
-  abciVal = TM2PB.Validator(tmVal)
-  tmVals, err = PB2TM.Validators([]abci.Validator{abciVal})
-  assert.Nil(t, err)
-  assert.Equal(t, tmValExpected, tmVals[0])
-
-  // val with incorrect address
-  abciVal = TM2PB.Validator(tmVal)
-  abciVal.Address =
[]byte("incorrect!") - tmVals, err = PB2TM.Validators([]abci.Validator{abciVal}) - assert.NotNil(t, err) - assert.Nil(t, tmVals) -} - -func TestABCIConsensusParams(t *testing.T) { - cp := DefaultConsensusParams() - cp.EvidenceParams.MaxAge = 0 // TODO add this to ABCI - abciCP := TM2PB.ConsensusParams(cp) - cp2 := PB2TM.ConsensusParams(abciCP) - - assert.Equal(t, *cp, cp2) -} diff --git a/types/results.go b/types/results.go deleted file mode 100644 index 326cee48..00000000 --- a/types/results.go +++ /dev/null @@ -1,70 +0,0 @@ -package types - -import ( - abci "github.com/tendermint/abci/types" - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" -) - -//----------------------------------------------------------------------------- - -// ABCIResult is the deterministic component of a ResponseDeliverTx. -// TODO: add Tags -type ABCIResult struct { - Code uint32 `json:"code"` - Data cmn.HexBytes `json:"data"` -} - -// Hash returns the canonical hash of the ABCIResult -func (a ABCIResult) Hash() []byte { - bz := aminoHash(a) - return bz -} - -// ABCIResults wraps the deliver tx results to return a proof -type ABCIResults []ABCIResult - -// NewResults creates ABCIResults from ResponseDeliverTx -func NewResults(del []*abci.ResponseDeliverTx) ABCIResults { - res := make(ABCIResults, len(del)) - for i, d := range del { - res[i] = NewResultFromResponse(d) - } - return res -} - -func NewResultFromResponse(response *abci.ResponseDeliverTx) ABCIResult { - return ABCIResult{ - Code: response.Code, - Data: response.Data, - } -} - -// Bytes serializes the ABCIResponse using wire -func (a ABCIResults) Bytes() []byte { - bz, err := cdc.MarshalBinary(a) - if err != nil { - panic(err) - } - return bz -} - -// Hash returns a merkle hash of all results -func (a ABCIResults) Hash() []byte { - return merkle.SimpleHashFromHashers(a.toHashers()) -} - -// ProveResult returns a merkle proof of one result from the set -func (a ABCIResults) ProveResult(i int) merkle.SimpleProof { - _, proofs := merkle.SimpleProofsFromHashers(a.toHashers()) - return *proofs[i] -} - -func (a ABCIResults) toHashers() []merkle.Hasher { - l := len(a) - hashers := make([]merkle.Hasher, l) - for i := 0; i < l; i++ { - hashers[i] = a[i] - } - return hashers -} diff --git a/types/results_test.go b/types/results_test.go deleted file mode 100644 index 009e2693..00000000 --- a/types/results_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package types - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestABCIResults(t *testing.T) { - a := ABCIResult{Code: 0, Data: nil} - b := ABCIResult{Code: 0, Data: []byte{}} - c := ABCIResult{Code: 0, Data: []byte("one")} - d := ABCIResult{Code: 14, Data: nil} - e := ABCIResult{Code: 14, Data: []byte("foo")} - f := ABCIResult{Code: 14, Data: []byte("bar")} - - // Nil and []byte{} should produce the same hash. - require.Equal(t, a.Hash(), a.Hash()) - require.Equal(t, b.Hash(), b.Hash()) - require.Equal(t, a.Hash(), b.Hash()) - - // a and b should be the same, don't go in results. - results := ABCIResults{a, c, d, e, f} - - // Make sure each result hashes properly. - var last []byte - for i, res := range results { - h := res.Hash() - assert.NotEqual(t, last, h, "%d", i) - last = h - } - - // Make sure that we can get a root hash from results and verify proofs. 
-  root := results.Hash()
-  assert.NotEmpty(t, root)
-
-  for i, res := range results {
-    proof := results.ProveResult(i)
-    valid := proof.Verify(i, len(results), res.Hash(), root)
-    assert.True(t, valid, "%d", i)
-  }
-}
diff --git a/types/signable.go b/types/signable.go
deleted file mode 100644
index cc649888..00000000
--- a/types/signable.go
+++ /dev/null
@@ -1,11 +0,0 @@
-package types
-
-// Signable is an interface for all signable things.
-// It typically removes signatures before serializing.
-// SignBytes returns the bytes to be signed.
-// NOTE: chainIDs are part of the SignBytes but not
-// necessarily the objects themselves.
-// NOTE: Expected to panic if there is an error marshalling.
-type Signable interface {
-  SignBytes(chainID string) []byte
-}
diff --git a/types/test_util.go b/types/test_util.go
deleted file mode 100644
index f21c2831..00000000
--- a/types/test_util.go
+++ /dev/null
@@ -1,37 +0,0 @@
-package types
-
-import "time"
-
-func MakeCommit(blockID BlockID, height int64, round int,
-  voteSet *VoteSet,
-  validators []PrivValidator) (*Commit, error) {
-
-  // all sign
-  for i := 0; i < len(validators); i++ {
-    vote := &Vote{
-      ValidatorAddress: validators[i].GetAddress(),
-      ValidatorIndex: i,
-      Height: height,
-      Round: round,
-      Type: VoteTypePrecommit,
-      BlockID: blockID,
-      Timestamp: time.Now().UTC(),
-    }
-
-    _, err := signAddVote(validators[i], vote, voteSet)
-    if err != nil {
-      return nil, err
-    }
-  }
-
-  return voteSet.MakeCommit(), nil
-}
-
-func signAddVote(privVal PrivValidator, vote *Vote, voteSet *VoteSet) (signed bool, err error) {
-  err = privVal.SignVote(voteSet.ChainID(), vote)
-  if err != nil {
-    return false, err
-  }
-  return voteSet.AddVote(vote)
-}
diff --git a/types/tx.go b/types/tx.go
deleted file mode 100644
index e7247693..00000000
--- a/types/tx.go
+++ /dev/null
@@ -1,128 +0,0 @@
-package types
-
-import (
-  "bytes"
-  "errors"
-  "fmt"
-
-  abci "github.com/tendermint/abci/types"
-  cmn "github.com/tendermint/tmlibs/common"
-  "github.com/tendermint/tmlibs/merkle"
-)
-
-// Tx is an arbitrary byte array.
-// NOTE: Tx has no types at this level, so when wire encoded it's just length-prefixed.
-// Alternatively, it may make sense to add types here and let
-// []byte be type 0x1 so we can have versioned txs if need be in the future.
-type Tx []byte
-
-// Hash computes the RIPEMD160 hash of the wire encoded transaction.
-func (tx Tx) Hash() []byte {
-  return aminoHasher(tx).Hash()
-}
-
-// String returns the hex-encoded transaction as a string.
-func (tx Tx) String() string {
-  return fmt.Sprintf("Tx{%X}", []byte(tx))
-}
-
-// Txs is a slice of Tx.
-type Txs []Tx
-
-// Hash returns the simple Merkle root hash of the transactions.
-func (txs Txs) Hash() []byte {
-  // Recursive impl.
-  // Copied from tmlibs/merkle to avoid allocations
-  switch len(txs) {
-  case 0:
-    return nil
-  case 1:
-    return txs[0].Hash()
-  default:
-    left := Txs(txs[:(len(txs)+1)/2]).Hash()
-    right := Txs(txs[(len(txs)+1)/2:]).Hash()
-    return merkle.SimpleHashFromTwoHashes(left, right)
-  }
-}
-
-// Index returns the index of this transaction in the list, or -1 if not found
-func (txs Txs) Index(tx Tx) int {
-  for i := range txs {
-    if bytes.Equal(txs[i], tx) {
-      return i
-    }
-  }
-  return -1
-}
-
-// IndexByHash returns the index of this transaction hash in the list, or -1 if not found
-func (txs Txs) IndexByHash(hash []byte) int {
-  for i := range txs {
-    if bytes.Equal(txs[i].Hash(), hash) {
-      return i
-    }
-  }
-  return -1
-}
-
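An editorial sketch of the proof flow (not from the deleted file; Proof, TxProof, and Validate are defined just below): prove the inclusion of one transaction out of a small list, then check the proof against the root hash.

func exampleTxProof() {
  txs := Txs{Tx("tx-0"), Tx("tx-1"), Tx("tx-2")}
  root := txs.Hash()

  proof := txs.Proof(1)
  if err := proof.Validate(root); err != nil {
    panic(err)
  }
  fmt.Printf("leaf %X is under root %X\n", proof.LeafHash(), root)
}

-// Proof returns a simple merkle proof for the transaction at index i.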
-// Proof returns a simple merkle proof for the transaction at index i.
-// Panics if i < 0 or i >= len(txs).
-// TODO: optimize this!
-func (txs Txs) Proof(i int) TxProof {
-    l := len(txs)
-    hashers := make([]merkle.Hasher, l)
-    for i := 0; i < l; i++ {
-        hashers[i] = txs[i]
-    }
-    root, proofs := merkle.SimpleProofsFromHashers(hashers)
-
-    return TxProof{
-        Index:    i,
-        Total:    l,
-        RootHash: root,
-        Data:     txs[i],
-        Proof:    *proofs[i],
-    }
-}
-
-// TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree.
-type TxProof struct {
-    Index, Total int
-    RootHash     cmn.HexBytes
-    Data         Tx
-    Proof        merkle.SimpleProof
-}
-
-// LeafHash returns the hash of the transaction this proof refers to.
-func (tp TxProof) LeafHash() []byte {
-    return tp.Data.Hash()
-}
-
-// Validate verifies the proof. It returns nil if the RootHash matches the dataHash argument,
-// and if the proof is internally consistent. Otherwise, it returns a sensible error.
-func (tp TxProof) Validate(dataHash []byte) error {
-    if !bytes.Equal(dataHash, tp.RootHash) {
-        return errors.New("Proof matches different data hash")
-    }
-    if tp.Index < 0 {
-        return errors.New("Proof index cannot be negative")
-    }
-    if tp.Total <= 0 {
-        return errors.New("Proof total must be positive")
-    }
-    valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash)
-    if !valid {
-        return errors.New("Proof is not internally consistent")
-    }
-    return nil
-}
-
-// TxResult contains results of executing the transaction.
-//
-// One usage is indexing transaction results.
-type TxResult struct {
-    Height int64                  `json:"height"`
-    Index  uint32                 `json:"index"`
-    Tx     Tx                     `json:"tx"`
-    Result abci.ResponseDeliverTx `json:"result"`
-}
diff --git a/types/tx_test.go b/types/tx_test.go
deleted file mode 100644
index 2a93ceb3..00000000
--- a/types/tx_test.go
+++ /dev/null
@@ -1,125 +0,0 @@
-package types
-
-import (
-    "bytes"
-    "testing"
-
-    "github.com/stretchr/testify/assert"
-
-    cmn "github.com/tendermint/tmlibs/common"
-    ctest "github.com/tendermint/tmlibs/test"
-)
-
-func makeTxs(cnt, size int) Txs {
-    txs := make(Txs, cnt)
-    for i := 0; i < cnt; i++ {
-        txs[i] = cmn.RandBytes(size)
-    }
-    return txs
-}
-
-func randInt(low, high int) int {
-    off := cmn.RandInt() % (high - low)
-    return low + off
-}
-
-func TestTxIndex(t *testing.T) {
-    assert := assert.New(t)
-    for i := 0; i < 20; i++ {
-        txs := makeTxs(15, 60)
-        for j := 0; j < len(txs); j++ {
-            tx := txs[j]
-            idx := txs.Index(tx)
-            assert.Equal(j, idx)
-        }
-        assert.Equal(-1, txs.Index(nil))
-        assert.Equal(-1, txs.Index(Tx("foodnwkf")))
-    }
-}
-
-func TestValidTxProof(t *testing.T) {
-    assert := assert.New(t)
-    cases := []struct {
-        txs Txs
-    }{
-        {Txs{{1, 4, 34, 87, 163, 1}}},
-        {Txs{{5, 56, 165, 2}, {4, 77}}},
-        {Txs{Tx("foo"), Tx("bar"), Tx("baz")}},
-        {makeTxs(20, 5)},
-        {makeTxs(7, 81)},
-        {makeTxs(61, 15)},
-    }
-
-    for h, tc := range cases {
-        txs := tc.txs
-        root := txs.Hash()
-        // make sure valid proof for every tx
-        for i := range txs {
-            leaf := txs[i]
-            leafHash := leaf.Hash()
-            proof := txs.Proof(i)
-            assert.Equal(i, proof.Index, "%d: %d", h, i)
-            assert.Equal(len(txs), proof.Total, "%d: %d", h, i)
-            assert.EqualValues(root, proof.RootHash, "%d: %d", h, i)
-            assert.EqualValues(leaf, proof.Data, "%d: %d", h, i)
-            assert.EqualValues(leafHash, proof.LeafHash(), "%d: %d", h, i)
-            assert.Nil(proof.Validate(root), "%d: %d", h, i)
-            assert.NotNil(proof.Validate([]byte("foobar")), "%d: %d", h, i)
-
-            // read-write must also work
-            var p2 TxProof
-            bin, err := cdc.MarshalBinary(proof)
-            assert.Nil(err)
-            err = cdc.UnmarshalBinary(bin, &p2)
-            if assert.Nil(err, "%d: 
%d: %+v", h, i, err) { - assert.Nil(p2.Validate(root), "%d: %d", h, i) - } - } - } -} - -func TestTxProofUnchangable(t *testing.T) { - // run the other test a bunch... - for i := 0; i < 40; i++ { - testTxProofUnchangable(t) - } -} - -func testTxProofUnchangable(t *testing.T) { - assert := assert.New(t) - - // make some proof - txs := makeTxs(randInt(2, 100), randInt(16, 128)) - root := txs.Hash() - i := randInt(0, len(txs)-1) - proof := txs.Proof(i) - - // make sure it is valid to start with - assert.Nil(proof.Validate(root)) - bin, err := cdc.MarshalBinary(proof) - assert.Nil(err) - - // try mutating the data and make sure nothing breaks - for j := 0; j < 500; j++ { - bad := ctest.MutateByteSlice(bin) - if !bytes.Equal(bad, bin) { - assertBadProof(t, root, bad, proof) - } - } -} - -// This makes sure that the proof doesn't deserialize into something valid. -func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { - var proof TxProof - err := cdc.UnmarshalBinary(bad, &proof) - if err == nil { - err = proof.Validate(root) - if err == nil { - // XXX Fix simple merkle proofs so the following is *not* OK. - // This can happen if we have a slightly different total (where the - // path ends up the same). If it is something else, we have a real - // problem. - assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good) - } - } -} diff --git a/types/validator.go b/types/validator.go deleted file mode 100644 index 46dc61d0..00000000 --- a/types/validator.go +++ /dev/null @@ -1,98 +0,0 @@ -package types - -import ( - "bytes" - "fmt" - - "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" -) - -// Volatile state for each Validator -// NOTE: The Accum is not included in Validator.Hash(); -// make sure to update that method if changes are made here -type Validator struct { - Address Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - VotingPower int64 `json:"voting_power"` - - Accum int64 `json:"accum"` -} - -func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator { - return &Validator{ - Address: pubKey.Address(), - PubKey: pubKey, - VotingPower: votingPower, - Accum: 0, - } -} - -// Creates a new copy of the validator so we can mutate accum. -// Panics if the validator is nil. -func (v *Validator) Copy() *Validator { - vCopy := *v - return &vCopy -} - -// Returns the one with higher Accum. -func (v *Validator) CompareAccum(other *Validator) *Validator { - if v == nil { - return other - } - if v.Accum > other.Accum { - return v - } else if v.Accum < other.Accum { - return other - } else { - result := bytes.Compare(v.Address, other.Address) - if result < 0 { - return v - } else if result > 0 { - return other - } else { - cmn.PanicSanity("Cannot compare identical validators") - return nil - } - } -} - -func (v *Validator) String() string { - if v == nil { - return "nil-Validator" - } - return fmt.Sprintf("Validator{%v %v VP:%v A:%v}", - v.Address, - v.PubKey, - v.VotingPower, - v.Accum) -} - -// Hash computes the unique ID of a validator with a given voting power. -// It excludes the Accum value, which changes with every round. -func (v *Validator) Hash() []byte { - return aminoHash(struct { - Address Address - PubKey crypto.PubKey - VotingPower int64 - }{ - v.Address, - v.PubKey, - v.VotingPower, - }) -} - -//---------------------------------------- -// RandValidator - -// RandValidator returns a randomized validator, useful for testing. 
-// UNSTABLE -func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) { - privVal := NewMockPV() - votePower := minPower - if randPower { - votePower += int64(cmn.RandUint32()) - } - val := NewValidator(privVal.GetPubKey(), votePower) - return val, privVal -} diff --git a/types/validator_set.go b/types/validator_set.go deleted file mode 100644 index f2fac292..00000000 --- a/types/validator_set.go +++ /dev/null @@ -1,517 +0,0 @@ -package types - -import ( - "bytes" - "fmt" - "math" - "sort" - "strings" - - cmn "github.com/tendermint/tmlibs/common" - "github.com/tendermint/tmlibs/merkle" -) - -// ValidatorSet represent a set of *Validator at a given height. -// The validators can be fetched by address or index. -// The index is in order of .Address, so the indices are fixed -// for all rounds of a given blockchain height. -// On the other hand, the .AccumPower of each validator and -// the designated .GetProposer() of a set changes every round, -// upon calling .IncrementAccum(). -// NOTE: Not goroutine-safe. -// NOTE: All get/set to validators should copy the value for safety. -type ValidatorSet struct { - // NOTE: persisted via reflect, must be exported. - Validators []*Validator `json:"validators"` - Proposer *Validator `json:"proposer"` - - // cached (unexported) - totalVotingPower int64 -} - -func NewValidatorSet(vals []*Validator) *ValidatorSet { - validators := make([]*Validator, len(vals)) - for i, val := range vals { - validators[i] = val.Copy() - } - sort.Sort(ValidatorsByAddress(validators)) - vs := &ValidatorSet{ - Validators: validators, - } - - if vals != nil { - vs.IncrementAccum(1) - } - - return vs -} - -// incrementAccum and update the proposer -func (valSet *ValidatorSet) IncrementAccum(times int) { - // Add VotingPower * times to each validator and order into heap. - validatorsHeap := cmn.NewHeap() - for _, val := range valSet.Validators { - // check for overflow both multiplication and sum - val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times))) - validatorsHeap.PushComparable(val, accumComparable{val}) - } - - // Decrement the validator with most accum times times - for i := 0; i < times; i++ { - mostest := validatorsHeap.Peek().(*Validator) - // mind underflow - mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower()) - - if i == times-1 { - valSet.Proposer = mostest - } else { - validatorsHeap.Update(mostest, accumComparable{mostest}) - } - } -} - -// Copy each validator into a new ValidatorSet -func (valSet *ValidatorSet) Copy() *ValidatorSet { - validators := make([]*Validator, len(valSet.Validators)) - for i, val := range valSet.Validators { - // NOTE: must copy, since IncrementAccum updates in place. - validators[i] = val.Copy() - } - return &ValidatorSet{ - Validators: validators, - Proposer: valSet.Proposer, - totalVotingPower: valSet.totalVotingPower, - } -} - -// HasAddress returns true if address given is in the validator set, false - -// otherwise. -func (valSet *ValidatorSet) HasAddress(address []byte) bool { - idx := sort.Search(len(valSet.Validators), func(i int) bool { - return bytes.Compare(address, valSet.Validators[i].Address) <= 0 - }) - return idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) -} - -// GetByAddress returns an index of the validator with address and validator -// itself if found. Otherwise, -1 and nil are returned. 
-func (valSet *ValidatorSet) GetByAddress(address []byte) (index int, val *Validator) {
-    idx := sort.Search(len(valSet.Validators), func(i int) bool {
-        return bytes.Compare(address, valSet.Validators[i].Address) <= 0
-    })
-    if idx < len(valSet.Validators) && bytes.Equal(valSet.Validators[idx].Address, address) {
-        return idx, valSet.Validators[idx].Copy()
-    }
-    return -1, nil
-}
-
-// GetByIndex returns the validator's address and the validator itself by index.
-// It returns nil values if index is less than 0 or greater than or equal to
-// len(ValidatorSet.Validators).
-func (valSet *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) {
-    if index < 0 || index >= len(valSet.Validators) {
-        return nil, nil
-    }
-    val = valSet.Validators[index]
-    return val.Address, val.Copy()
-}
-
-// Size returns the length of the validator set.
-func (valSet *ValidatorSet) Size() int {
-    return len(valSet.Validators)
-}
-
-// TotalVotingPower returns the sum of the voting powers of all validators.
-func (valSet *ValidatorSet) TotalVotingPower() int64 {
-    if valSet.totalVotingPower == 0 {
-        for _, val := range valSet.Validators {
-            // mind overflow
-            valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower)
-        }
-    }
-    return valSet.totalVotingPower
-}
-
-// GetProposer returns the current proposer. If the validator set is empty, nil
-// is returned.
-func (valSet *ValidatorSet) GetProposer() (proposer *Validator) {
-    if len(valSet.Validators) == 0 {
-        return nil
-    }
-    if valSet.Proposer == nil {
-        valSet.Proposer = valSet.findProposer()
-    }
-    return valSet.Proposer.Copy()
-}
-
-func (valSet *ValidatorSet) findProposer() *Validator {
-    var proposer *Validator
-    for _, val := range valSet.Validators {
-        if proposer == nil || !bytes.Equal(val.Address, proposer.Address) {
-            proposer = proposer.CompareAccum(val)
-        }
-    }
-    return proposer
-}
-
-// Hash returns the Merkle root hash built from the validators (as leaves) in
-// the set.
-func (valSet *ValidatorSet) Hash() []byte {
-    if len(valSet.Validators) == 0 {
-        return nil
-    }
-    hashers := make([]merkle.Hasher, len(valSet.Validators))
-    for i, val := range valSet.Validators {
-        hashers[i] = val
-    }
-    return merkle.SimpleHashFromHashers(hashers)
-}
-
-// Add adds val to the validator set and returns true. It returns false if val
-// is already in the set.
-func (valSet *ValidatorSet) Add(val *Validator) (added bool) {
-    val = val.Copy()
-    idx := sort.Search(len(valSet.Validators), func(i int) bool {
-        return bytes.Compare(val.Address, valSet.Validators[i].Address) <= 0
-    })
-    if idx >= len(valSet.Validators) {
-        valSet.Validators = append(valSet.Validators, val)
-        // Invalidate cache
-        valSet.Proposer = nil
-        valSet.totalVotingPower = 0
-        return true
-    } else if bytes.Equal(valSet.Validators[idx].Address, val.Address) {
-        return false
-    } else {
-        newValidators := make([]*Validator, len(valSet.Validators)+1)
-        copy(newValidators[:idx], valSet.Validators[:idx])
-        newValidators[idx] = val
-        copy(newValidators[idx+1:], valSet.Validators[idx:])
-        valSet.Validators = newValidators
-        // Invalidate cache
-        valSet.Proposer = nil
-        valSet.totalVotingPower = 0
-        return true
-    }
-}
-
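`GetByAddress`, `HasAddress`, `Add`, and `Remove` all lean on the invariant that `Validators` stays sorted by address, so lookups are binary searches via `sort.Search`. A minimal standalone sketch of the same lookup pattern (the `[][]byte` slice and the `indexOf` name are illustrative, not part of the package):

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// indexOf finds addr in a slice kept sorted by bytes.Compare, mirroring how
// GetByAddress locates a validator: sort.Search returns the first position
// whose element is >= addr, then an equality check confirms membership.
func indexOf(sorted [][]byte, addr []byte) int {
	i := sort.Search(len(sorted), func(j int) bool {
		return bytes.Compare(addr, sorted[j]) <= 0
	})
	if i < len(sorted) && bytes.Equal(sorted[i], addr) {
		return i
	}
	return -1
}

func main() {
	addrs := [][]byte{{0x01}, {0x03}, {0x07}}
	fmt.Println(indexOf(addrs, []byte{0x03})) // 1
	fmt.Println(indexOf(addrs, []byte{0x04})) // -1: not present
}
```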
-// Update updates val and returns true. It returns false if val is not present
-// in the set.
-func (valSet *ValidatorSet) Update(val *Validator) (updated bool) {
-    index, sameVal := valSet.GetByAddress(val.Address)
-    if sameVal == nil {
-        return false
-    }
-    valSet.Validators[index] = val.Copy()
-    // Invalidate cache
-    valSet.Proposer = nil
-    valSet.totalVotingPower = 0
-    return true
-}
-
-// Remove deletes the validator with the given address. It returns the removed
-// validator and true. It returns nil and false if the validator is not present
-// in the set.
-func (valSet *ValidatorSet) Remove(address []byte) (val *Validator, removed bool) {
-    idx := sort.Search(len(valSet.Validators), func(i int) bool {
-        return bytes.Compare(address, valSet.Validators[i].Address) <= 0
-    })
-    if idx >= len(valSet.Validators) || !bytes.Equal(valSet.Validators[idx].Address, address) {
-        return nil, false
-    }
-    removedVal := valSet.Validators[idx]
-    newValidators := valSet.Validators[:idx]
-    if idx+1 < len(valSet.Validators) {
-        newValidators = append(newValidators, valSet.Validators[idx+1:]...)
-    }
-    valSet.Validators = newValidators
-    // Invalidate cache
-    valSet.Proposer = nil
-    valSet.totalVotingPower = 0
-    return removedVal, true
-}
-
-// Iterate will run the given function over the set.
-func (valSet *ValidatorSet) Iterate(fn func(index int, val *Validator) bool) {
-    for i, val := range valSet.Validators {
-        stop := fn(i, val.Copy())
-        if stop {
-            break
-        }
-    }
-}
-
-// VerifyCommit verifies that +2/3 of the set signed the given commit for
-// blockID at the given height.
-func (valSet *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, height int64, commit *Commit) error {
-    if valSet.Size() != len(commit.Precommits) {
-        return fmt.Errorf("Invalid commit -- wrong set size: %v vs %v", valSet.Size(), len(commit.Precommits))
-    }
-    if height != commit.Height() {
-        return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
-    }
-
-    talliedVotingPower := int64(0)
-    round := commit.Round()
-
-    for idx, precommit := range commit.Precommits {
-        // may be nil if validator skipped.
-        if precommit == nil {
-            continue
-        }
-        if precommit.Height != height {
-            return fmt.Errorf("Invalid commit -- wrong height: %v vs %v", height, precommit.Height)
-        }
-        if precommit.Round != round {
-            return fmt.Errorf("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
-        }
-        if precommit.Type != VoteTypePrecommit {
-            return fmt.Errorf("Invalid commit -- not precommit @ index %v", idx)
-        }
-        _, val := valSet.GetByIndex(idx)
-        // Validate signature
-        precommitSignBytes := precommit.SignBytes(chainID)
-        if !val.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
-            return fmt.Errorf("Invalid commit -- invalid signature: %v", precommit)
-        }
-        if !blockID.Equals(precommit.BlockID) {
-            continue // Not an error, but doesn't count
-        }
-        // Good precommit!
-        talliedVotingPower += val.VotingPower
-    }
-
-    if talliedVotingPower > valSet.TotalVotingPower()*2/3 {
-        return nil
-    }
-    return fmt.Errorf("Invalid commit -- insufficient voting power: got %v, needed %v",
-        talliedVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
-}
-
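`VerifyCommit` accepts a commit only when the tallied power is strictly greater than two-thirds of the total; with integer division, "strictly greater than `total*2/3`" is equivalent to "at least `total*2/3 + 1`", which is why the error message reports the latter as the needed power. A quick sketch of the arithmetic (`hasQuorum` is an illustrative name, not part of the package):

```go
package main

import "fmt"

// hasQuorum reproduces the threshold check at the end of VerifyCommit:
// integer division truncates, so 100*2/3 == 66, and a strict > makes
// 67 the smallest accepting tally, i.e. total*2/3 + 1.
func hasQuorum(tallied, total int64) bool {
	return tallied > total*2/3
}

func main() {
	const total = 100
	fmt.Println(hasQuorum(66, total)) // false: 66 == 100*2/3 exactly
	fmt.Println(hasQuorum(67, total)) // true: meets the 2/3 + 1 threshold
}
```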
-// VerifyCommitAny checks whether the commit would also be valid with a
-// different validator set.
-//
-// valSet is the validator set that we know:
-// * over 2/3 of the power in the old set signed this block
-//
-// newSet is the validator set that signed this block:
-// * votes from the old set alone are sufficient for a 2/3 majority
-//   in the new set as well
-//
-// That means that:
-// * 10% of the valset can't just declare themselves kings
-// * If the validator set is 3x the old size, we need more proof to trust it
-func (valSet *ValidatorSet) VerifyCommitAny(newSet *ValidatorSet, chainID string,
-    blockID BlockID, height int64, commit *Commit) error {
-
-    if newSet.Size() != len(commit.Precommits) {
-        return cmn.NewError("Invalid commit -- wrong set size: %v vs %v", newSet.Size(), len(commit.Precommits))
-    }
-    if height != commit.Height() {
-        return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, commit.Height())
-    }
-
-    oldVotingPower := int64(0)
-    newVotingPower := int64(0)
-    seen := map[int]bool{}
-    round := commit.Round()
-
-    for idx, precommit := range commit.Precommits {
-        // first check as in VerifyCommit
-        if precommit == nil {
-            continue
-        }
-        if precommit.Height != height {
-            return cmn.NewError("Invalid commit -- wrong height: %v vs %v", height, precommit.Height)
-        }
-        if precommit.Round != round {
-            return cmn.NewError("Invalid commit -- wrong round: %v vs %v", round, precommit.Round)
-        }
-        if precommit.Type != VoteTypePrecommit {
-            return cmn.NewError("Invalid commit -- not precommit @ index %v", idx)
-        }
-        if !blockID.Equals(precommit.BlockID) {
-            continue // Not an error, but doesn't count
-        }
-
-        // we only grab by address, ignoring unknown validators
-        vi, ov := valSet.GetByAddress(precommit.ValidatorAddress)
-        if ov == nil || seen[vi] {
-            continue // missing or double vote...
-        }
-        seen[vi] = true
-
-        // Validate signature against the known (old) set
-        precommitSignBytes := precommit.SignBytes(chainID)
-        if !ov.PubKey.VerifyBytes(precommitSignBytes, precommit.Signature) {
-            return cmn.NewError("Invalid commit -- invalid signature: %v", precommit)
-        }
-        // Good precommit!
-        oldVotingPower += ov.VotingPower
-
-        // Check the new set as well
-        _, cv := newSet.GetByIndex(idx)
-        if cv.PubKey.Equals(ov.PubKey) {
-            // make sure this is properly set in the current block as well
-            newVotingPower += cv.VotingPower
-        }
-    }
-
-    if oldVotingPower <= valSet.TotalVotingPower()*2/3 {
-        return cmn.NewError("Invalid commit -- insufficient old voting power: got %v, needed %v",
-            oldVotingPower, (valSet.TotalVotingPower()*2/3 + 1))
-    } else if newVotingPower <= newSet.TotalVotingPower()*2/3 {
-        return cmn.NewError("Invalid commit -- insufficient cur voting power: got %v, needed %v",
-            newVotingPower, (newSet.TotalVotingPower()*2/3 + 1))
-    }
-    return nil
-}
-
-func (valSet *ValidatorSet) String() string {
-    return valSet.StringIndented("")
-}
-
-// StringIndented returns a verbose, indented representation of the set.
-func (valSet *ValidatorSet) StringIndented(indent string) string {
-    if valSet == nil {
-        return "nil-ValidatorSet"
-    }
-    valStrings := []string{}
-    valSet.Iterate(func(index int, val *Validator) bool {
-        valStrings = append(valStrings, val.String())
-        return false
-    })
-    return fmt.Sprintf(`ValidatorSet{
-%s  Proposer: %v
-%s  Validators:
-%s    %v
-%s}`,
-        indent, valSet.GetProposer().String(),
-        indent,
-        indent, strings.Join(valStrings, "\n"+indent+"    "),
-        indent)
-}
-
-//-------------------------------------
-// Implements sort for sorting validators by address.
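The `ValidatorsByAddress` type defined next exists so the standard library's `sort` package can order validators, which is exactly what `NewValidatorSet` does at construction time. A self-contained sketch of the same `sort.Interface` pattern, with a stripped-down stand-in for the real `Validator` type:

```go
package main

import (
	"bytes"
	"fmt"
	"sort"
)

// v is an illustrative stand-in for *Validator; only the address matters
// for ordering.
type v struct{ addr []byte }

type byAddr []v

func (s byAddr) Len() int           { return len(s) }
func (s byAddr) Less(i, j int) bool { return bytes.Compare(s[i].addr, s[j].addr) < 0 }
func (s byAddr) Swap(i, j int)      { s[i], s[j] = s[j], s[i] }

func main() {
	vals := []v{{[]byte{0x07}}, {[]byte{0x01}}, {[]byte{0x03}}}
	sort.Sort(byAddr(vals)) // same pattern NewValidatorSet uses with ValidatorsByAddress
	for _, x := range vals {
		fmt.Printf("%X ", x.addr)
	}
	fmt.Println() // 01 03 07
}
```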
- -// Sort validators by address -type ValidatorsByAddress []*Validator - -func (vs ValidatorsByAddress) Len() int { - return len(vs) -} - -func (vs ValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(vs[i].Address, vs[j].Address) == -1 -} - -func (vs ValidatorsByAddress) Swap(i, j int) { - it := vs[i] - vs[i] = vs[j] - vs[j] = it -} - -//------------------------------------- -// Use with Heap for sorting validators by accum - -type accumComparable struct { - *Validator -} - -// We want to find the validator with the greatest accum. -func (ac accumComparable) Less(o interface{}) bool { - other := o.(accumComparable).Validator - larger := ac.CompareAccum(other) - return bytes.Equal(larger.Address, ac.Address) -} - -//---------------------------------------- -// For testing - -// RandValidatorSet returns a randomized validator set, useful for testing. -// NOTE: PrivValidator are in order. -// UNSTABLE -func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []PrivValidator) { - vals := make([]*Validator, numValidators) - privValidators := make([]PrivValidator, numValidators) - for i := 0; i < numValidators; i++ { - val, privValidator := RandValidator(false, votingPower) - vals[i] = val - privValidators[i] = privValidator - } - valSet := NewValidatorSet(vals) - sort.Sort(PrivValidatorsByAddress(privValidators)) - return valSet, privValidators -} - -/////////////////////////////////////////////////////////////////////////////// -// Safe multiplication and addition/subtraction - -func safeMul(a, b int64) (int64, bool) { - if a == 0 || b == 0 { - return 0, false - } - if a == 1 { - return b, false - } - if b == 1 { - return a, false - } - if a == math.MinInt64 || b == math.MinInt64 { - return -1, true - } - c := a * b - return c, c/b != a -} - -func safeAdd(a, b int64) (int64, bool) { - if b > 0 && a > math.MaxInt64-b { - return -1, true - } else if b < 0 && a < math.MinInt64-b { - return -1, true - } - return a + b, false -} - -func safeSub(a, b int64) (int64, bool) { - if b > 0 && a < math.MinInt64+b { - return -1, true - } else if b < 0 && a > math.MaxInt64+b { - return -1, true - } - return a - b, false -} - -func safeMulClip(a, b int64) int64 { - c, overflow := safeMul(a, b) - if overflow { - if (a < 0 || b < 0) && !(a < 0 && b < 0) { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} - -func safeAddClip(a, b int64) int64 { - c, overflow := safeAdd(a, b) - if overflow { - if b < 0 { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} - -func safeSubClip(a, b int64) int64 { - c, overflow := safeSub(a, b) - if overflow { - if b > 0 { - return math.MinInt64 - } - return math.MaxInt64 - } - return c -} diff --git a/types/validator_set_test.go b/types/validator_set_test.go deleted file mode 100644 index 8db53a53..00000000 --- a/types/validator_set_test.go +++ /dev/null @@ -1,370 +0,0 @@ -package types - -import ( - "bytes" - "math" - "strings" - "testing" - "testing/quick" - "time" - - "github.com/stretchr/testify/assert" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" -) - -func TestCopy(t *testing.T) { - vset := randValidatorSet(10) - vsetHash := vset.Hash() - if len(vsetHash) == 0 { - t.Fatalf("ValidatorSet had unexpected zero hash") - } - - vsetCopy := vset.Copy() - vsetCopyHash := vsetCopy.Hash() - - if !bytes.Equal(vsetHash, vsetCopyHash) { - t.Fatalf("ValidatorSet copy had wrong hash. 
Orig: %X, Copy: %X", vsetHash, vsetCopyHash) - } -} - -func BenchmarkValidatorSetCopy(b *testing.B) { - b.StopTimer() - vset := NewValidatorSet([]*Validator{}) - for i := 0; i < 1000; i++ { - privKey := crypto.GenPrivKeyEd25519() - pubKey := privKey.PubKey() - val := NewValidator(pubKey, 0) - if !vset.Add(val) { - panic("Failed to add validator") - } - } - b.StartTimer() - - for i := 0; i < b.N; i++ { - vset.Copy() - } -} - -//------------------------------------------------------------------- - -func TestProposerSelection1(t *testing.T) { - vset := NewValidatorSet([]*Validator{ - newValidator([]byte("foo"), 1000), - newValidator([]byte("bar"), 300), - newValidator([]byte("baz"), 330), - }) - proposers := []string{} - for i := 0; i < 99; i++ { - val := vset.GetProposer() - proposers = append(proposers, string(val.Address)) - vset.IncrementAccum(1) - } - expected := `foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo foo baz bar foo foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo bar foo foo baz foo foo bar foo baz foo foo bar foo baz foo foo bar foo baz foo foo` - if expected != strings.Join(proposers, " ") { - t.Errorf("Expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " ")) - } -} - -func TestProposerSelection2(t *testing.T) { - addr0 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} - addr1 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1} - addr2 := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2} - - // when all voting power is same, we go in order of addresses - val0, val1, val2 := newValidator(addr0, 100), newValidator(addr1, 100), newValidator(addr2, 100) - valList := []*Validator{val0, val1, val2} - vals := NewValidatorSet(valList) - for i := 0; i < len(valList)*5; i++ { - ii := (i) % len(valList) - prop := vals.GetProposer() - if !bytes.Equal(prop.Address, valList[ii].Address) { - t.Fatalf("(%d): Expected %X. Got %X", i, valList[ii].Address, prop.Address) - } - vals.IncrementAccum(1) - } - - // One validator has more than the others, but not enough to propose twice in a row - *val2 = *newValidator(addr2, 400) - vals = NewValidatorSet(valList) - // vals.IncrementAccum(1) - prop := vals.GetProposer() - if !bytes.Equal(prop.Address, addr2) { - t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address) - } - vals.IncrementAccum(1) - prop = vals.GetProposer() - if !bytes.Equal(prop.Address, addr0) { - t.Fatalf("Expected smallest address to be validator. Got %X", prop.Address) - } - - // One validator has more than the others, and enough to be proposer twice in a row - *val2 = *newValidator(addr2, 401) - vals = NewValidatorSet(valList) - prop = vals.GetProposer() - if !bytes.Equal(prop.Address, addr2) { - t.Fatalf("Expected address with highest voting power to be first proposer. Got %X", prop.Address) - } - vals.IncrementAccum(1) - prop = vals.GetProposer() - if !bytes.Equal(prop.Address, addr2) { - t.Fatalf("Expected address with highest voting power to be second proposer. Got %X", prop.Address) - } - vals.IncrementAccum(1) - prop = vals.GetProposer() - if !bytes.Equal(prop.Address, addr0) { - t.Fatalf("Expected smallest address to be validator. 
Got %X", prop.Address) - } - - // each validator should be the proposer a proportional number of times - val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3) - valList = []*Validator{val0, val1, val2} - propCount := make([]int, 3) - vals = NewValidatorSet(valList) - N := 1 - for i := 0; i < 120*N; i++ { - prop := vals.GetProposer() - ii := prop.Address[19] - propCount[ii]++ - vals.IncrementAccum(1) - } - - if propCount[0] != 40*N { - t.Fatalf("Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d", 40*N, 120*N, propCount[0], 120*N) - } - if propCount[1] != 50*N { - t.Fatalf("Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d", 50*N, 120*N, propCount[1], 120*N) - } - if propCount[2] != 30*N { - t.Fatalf("Expected prop count for validator with 3/12 of voting power to be %d/%d. Got %d/%d", 30*N, 120*N, propCount[2], 120*N) - } -} - -func TestProposerSelection3(t *testing.T) { - vset := NewValidatorSet([]*Validator{ - newValidator([]byte("a"), 1), - newValidator([]byte("b"), 1), - newValidator([]byte("c"), 1), - newValidator([]byte("d"), 1), - }) - - proposerOrder := make([]*Validator, 4) - for i := 0; i < 4; i++ { - proposerOrder[i] = vset.GetProposer() - vset.IncrementAccum(1) - } - - // i for the loop - // j for the times - // we should go in order for ever, despite some IncrementAccums with times > 1 - var i, j int - for ; i < 10000; i++ { - got := vset.GetProposer().Address - expected := proposerOrder[j%4].Address - if !bytes.Equal(got, expected) { - t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match expected proposer (%X) for (%d, %d)", got, expected, i, j)) - } - - // serialize, deserialize, check proposer - b := vset.toBytes() - vset.fromBytes(b) - - computed := vset.GetProposer() // findGetProposer() - if i != 0 { - if !bytes.Equal(got, computed.Address) { - t.Fatalf(cmn.Fmt("vset.Proposer (%X) does not match computed proposer (%X) for (%d, %d)", got, computed.Address, i, j)) - } - } - - // times is usually 1 - times := 1 - mod := (cmn.RandInt() % 5) + 1 - if cmn.RandInt()%mod > 0 { - // sometimes its up to 5 - times = cmn.RandInt() % 5 - } - vset.IncrementAccum(times) - - j += times - } -} - -func newValidator(address []byte, power int64) *Validator { - return &Validator{Address: address, VotingPower: power} -} - -func randPubKey() crypto.PubKey { - var pubKey [32]byte - copy(pubKey[:], cmn.RandBytes(32)) - return crypto.PubKeyEd25519(pubKey) -} - -func randValidator_() *Validator { - val := NewValidator(randPubKey(), cmn.RandInt64()) - val.Accum = cmn.RandInt64() - return val -} - -func randValidatorSet(numValidators int) *ValidatorSet { - validators := make([]*Validator, numValidators) - for i := 0; i < numValidators; i++ { - validators[i] = randValidator_() - } - return NewValidatorSet(validators) -} - -func (valSet *ValidatorSet) toBytes() []byte { - bz, err := cdc.MarshalBinary(valSet) - if err != nil { - panic(err) - } - return bz -} - -func (valSet *ValidatorSet) fromBytes(b []byte) { - err := cdc.UnmarshalBinary(b, &valSet) - if err != nil { - // DATA HAS BEEN CORRUPTED OR THE SPEC HAS CHANGED - panic(err) - } -} - -//------------------------------------------------------------------- - -func TestValidatorSetTotalVotingPowerOverflows(t *testing.T) { - vset := NewValidatorSet([]*Validator{ - {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0}, - {Address: []byte("b"), VotingPower: math.MaxInt64, Accum: 0}, - {Address: []byte("c"), VotingPower: math.MaxInt64, Accum: 0}, - 
}) - - assert.EqualValues(t, math.MaxInt64, vset.TotalVotingPower()) -} - -func TestValidatorSetIncrementAccumOverflows(t *testing.T) { - // NewValidatorSet calls IncrementAccum(1) - vset := NewValidatorSet([]*Validator{ - // too much voting power - 0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0}, - // too big accum - 1: {Address: []byte("b"), VotingPower: 10, Accum: math.MaxInt64}, - // almost too big accum - 2: {Address: []byte("c"), VotingPower: 10, Accum: math.MaxInt64 - 5}, - }) - - assert.Equal(t, int64(0), vset.Validators[0].Accum, "0") // because we decrement val with most voting power - assert.EqualValues(t, math.MaxInt64, vset.Validators[1].Accum, "1") - assert.EqualValues(t, math.MaxInt64, vset.Validators[2].Accum, "2") -} - -func TestValidatorSetIncrementAccumUnderflows(t *testing.T) { - // NewValidatorSet calls IncrementAccum(1) - vset := NewValidatorSet([]*Validator{ - 0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: math.MinInt64}, - 1: {Address: []byte("b"), VotingPower: 1, Accum: math.MinInt64}, - }) - - vset.IncrementAccum(5) - - assert.EqualValues(t, math.MinInt64, vset.Validators[0].Accum, "0") - assert.EqualValues(t, math.MinInt64, vset.Validators[1].Accum, "1") -} - -func TestSafeMul(t *testing.T) { - f := func(a, b int64) bool { - c, overflow := safeMul(a, b) - return overflow || (!overflow && c == a*b) - } - if err := quick.Check(f, nil); err != nil { - t.Error(err) - } -} - -func TestSafeAdd(t *testing.T) { - f := func(a, b int64) bool { - c, overflow := safeAdd(a, b) - return overflow || (!overflow && c == a+b) - } - if err := quick.Check(f, nil); err != nil { - t.Error(err) - } -} - -func TestSafeMulClip(t *testing.T) { - assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MinInt64, math.MinInt64)) - assert.EqualValues(t, math.MinInt64, safeMulClip(math.MaxInt64, math.MinInt64)) - assert.EqualValues(t, math.MinInt64, safeMulClip(math.MinInt64, math.MaxInt64)) - assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MaxInt64, 2)) -} - -func TestSafeAddClip(t *testing.T) { - assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10)) - assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64)) - assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10)) -} - -func TestSafeSubClip(t *testing.T) { - assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10)) - assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64)) - assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64)) - assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10)) -} - -//------------------------------------------------------------------- - -func TestValidatorSetVerifyCommit(t *testing.T) { - privKey := crypto.GenPrivKeyEd25519() - pubKey := privKey.PubKey() - v1 := NewValidator(pubKey, 1000) - vset := NewValidatorSet([]*Validator{v1}) - - chainID := "mychainID" - blockID := BlockID{Hash: []byte("hello")} - height := int64(5) - vote := &Vote{ - ValidatorAddress: v1.Address, - ValidatorIndex: 0, - Height: height, - Round: 0, - Timestamp: time.Now().UTC(), - Type: VoteTypePrecommit, - BlockID: blockID, - } - vote.Signature = privKey.Sign(vote.SignBytes(chainID)) - commit := &Commit{ - BlockID: blockID, - Precommits: []*Vote{vote}, - } - - badChainID := "notmychainID" - badBlockID := BlockID{Hash: []byte("goodbye")} - badHeight := height + 1 - badCommit := &Commit{ - BlockID: blockID, - Precommits: []*Vote{nil}, - } - - // test some error cases - // TODO: test more cases! 
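The clip tests above pin down the saturating behavior of the safe arithmetic helpers: on overflow the result is clamped to `math.MaxInt64` or `math.MinInt64` rather than wrapping around, which keeps accumulated voting power monotone under extreme inputs. A standalone sketch of the add-with-clipping idea (`addClip` is an illustrative name mirroring `safeAddClip`):

```go
package main

import (
	"fmt"
	"math"
)

// addClip adds two int64s and saturates instead of wrapping, the same
// overflow checks safeAdd performs before safeAddClip clips the result.
func addClip(a, b int64) int64 {
	if b > 0 && a > math.MaxInt64-b {
		return math.MaxInt64 // would overflow upward: clip to max
	}
	if b < 0 && a < math.MinInt64-b {
		return math.MinInt64 // would overflow downward: clip to min
	}
	return a + b
}

func main() {
	fmt.Println(addClip(math.MaxInt64, 10) == math.MaxInt64)  // true
	fmt.Println(addClip(math.MinInt64, -10) == math.MinInt64) // true
	fmt.Println(addClip(40, 2))                               // 42
}
```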
- cases := []struct { - chainID string - blockID BlockID - height int64 - commit *Commit - }{ - {badChainID, blockID, height, commit}, - {chainID, badBlockID, height, commit}, - {chainID, blockID, badHeight, commit}, - {chainID, blockID, height, badCommit}, - } - - for i, c := range cases { - err := vset.VerifyCommit(c.chainID, c.blockID, c.height, c.commit) - assert.NotNil(t, err, i) - } - - // test a good one - err := vset.VerifyCommit(chainID, blockID, height, commit) - assert.Nil(t, err) -} diff --git a/types/vote.go b/types/vote.go deleted file mode 100644 index e4ead612..00000000 --- a/types/vote.go +++ /dev/null @@ -1,117 +0,0 @@ -package types - -import ( - "bytes" - "errors" - "fmt" - "time" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" -) - -var ( - ErrVoteUnexpectedStep = errors.New("Unexpected step") - ErrVoteInvalidValidatorIndex = errors.New("Invalid validator index") - ErrVoteInvalidValidatorAddress = errors.New("Invalid validator address") - ErrVoteInvalidSignature = errors.New("Invalid signature") - ErrVoteInvalidBlockHash = errors.New("Invalid block hash") - ErrVoteNonDeterministicSignature = errors.New("Non-deterministic signature") - ErrVoteNil = errors.New("Nil vote") -) - -type ErrVoteConflictingVotes struct { - *DuplicateVoteEvidence -} - -func (err *ErrVoteConflictingVotes) Error() string { - return fmt.Sprintf("Conflicting votes from validator %v", err.PubKey.Address()) -} - -func NewConflictingVoteError(val *Validator, voteA, voteB *Vote) *ErrVoteConflictingVotes { - return &ErrVoteConflictingVotes{ - &DuplicateVoteEvidence{ - PubKey: val.PubKey, - VoteA: voteA, - VoteB: voteB, - }, - } -} - -// Types of votes -// TODO Make a new type "VoteType" -const ( - VoteTypePrevote = byte(0x01) - VoteTypePrecommit = byte(0x02) -) - -func IsVoteTypeValid(type_ byte) bool { - switch type_ { - case VoteTypePrevote: - return true - case VoteTypePrecommit: - return true - default: - return false - } -} - -// Address is hex bytes. TODO: crypto.Address -type Address = cmn.HexBytes - -// Represents a prevote, precommit, or commit vote from validators for consensus. -type Vote struct { - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp time.Time `json:"timestamp"` - Type byte `json:"type"` - BlockID BlockID `json:"block_id"` // zero if vote is nil. 
- Signature crypto.Signature `json:"signature"` -} - -func (vote *Vote) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalVote(chainID, vote)) - if err != nil { - panic(err) - } - return bz -} - -func (vote *Vote) Copy() *Vote { - voteCopy := *vote - return &voteCopy -} - -func (vote *Vote) String() string { - if vote == nil { - return "nil-Vote" - } - var typeString string - switch vote.Type { - case VoteTypePrevote: - typeString = "Prevote" - case VoteTypePrecommit: - typeString = "Precommit" - default: - cmn.PanicSanity("Unknown vote type") - } - - return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %v @ %s}", - vote.ValidatorIndex, cmn.Fingerprint(vote.ValidatorAddress), - vote.Height, vote.Round, vote.Type, typeString, - cmn.Fingerprint(vote.BlockID.Hash), vote.Signature, - CanonicalTime(vote.Timestamp)) -} - -func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { - if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { - return ErrVoteInvalidValidatorAddress - } - - if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) { - return ErrVoteInvalidSignature - } - return nil -} diff --git a/types/vote_set.go b/types/vote_set.go deleted file mode 100644 index a60d95da..00000000 --- a/types/vote_set.go +++ /dev/null @@ -1,603 +0,0 @@ -package types - -import ( - "bytes" - "fmt" - "strings" - "sync" - - "github.com/pkg/errors" - - cmn "github.com/tendermint/tmlibs/common" -) - -// UNSTABLE -// XXX: duplicate of p2p.ID to avoid dependence between packages. -// Perhaps we can have a minimal types package containing this (and other things?) -// that both `types` and `p2p` import ? -type P2PID string - -/* - VoteSet helps collect signatures from validators at each height+round for a - predefined vote type. - - We need VoteSet to be able to keep track of conflicting votes when validators - double-sign. Yet, we can't keep track of *all* the votes seen, as that could - be a DoS attack vector. - - There are two storage areas for votes. - 1. voteSet.votes - 2. voteSet.votesByBlock - - `.votes` is the "canonical" list of votes. It always has at least one vote, - if a vote from a validator had been seen at all. Usually it keeps track of - the first vote seen, but when a 2/3 majority is found, votes for that get - priority and are copied over from `.votesByBlock`. - - `.votesByBlock` keeps track of a list of votes for a particular block. There - are two ways a &blockVotes{} gets created in `.votesByBlock`. - 1. the first vote seen by a validator was for the particular block. - 2. a peer claims to have seen 2/3 majority for the particular block. - - Since the first vote from a validator will always get added in `.votesByBlock` - , all votes in `.votes` will have a corresponding entry in `.votesByBlock`. - - When a &blockVotes{} in `.votesByBlock` reaches a 2/3 majority quorum, its - votes are copied into `.votes`. - - All this is memory bounded because conflicting votes only get added if a peer - told us to track that block, each peer only gets to tell us 1 such block, and, - there's only a limited number of peers. - - NOTE: Assumes that the sum total of voting power does not exceed MaxUInt64. 
-*/ -type VoteSet struct { - chainID string - height int64 - round int - type_ byte - valSet *ValidatorSet - - mtx sync.Mutex - votesBitArray *cmn.BitArray - votes []*Vote // Primary votes to share - sum int64 // Sum of voting power for seen votes, discounting conflicts - maj23 *BlockID // First 2/3 majority seen - votesByBlock map[string]*blockVotes // string(blockHash|blockParts) -> blockVotes - peerMaj23s map[P2PID]BlockID // Maj23 for each peer -} - -// Constructs a new VoteSet struct used to accumulate votes for given height/round. -func NewVoteSet(chainID string, height int64, round int, type_ byte, valSet *ValidatorSet) *VoteSet { - if height == 0 { - cmn.PanicSanity("Cannot make VoteSet for height == 0, doesn't make sense.") - } - return &VoteSet{ - chainID: chainID, - height: height, - round: round, - type_: type_, - valSet: valSet, - votesBitArray: cmn.NewBitArray(valSet.Size()), - votes: make([]*Vote, valSet.Size()), - sum: 0, - maj23: nil, - votesByBlock: make(map[string]*blockVotes, valSet.Size()), - peerMaj23s: make(map[P2PID]BlockID), - } -} - -func (voteSet *VoteSet) ChainID() string { - return voteSet.chainID -} - -func (voteSet *VoteSet) Height() int64 { - if voteSet == nil { - return 0 - } - return voteSet.height -} - -func (voteSet *VoteSet) Round() int { - if voteSet == nil { - return -1 - } - return voteSet.round -} - -func (voteSet *VoteSet) Type() byte { - if voteSet == nil { - return 0x00 - } - return voteSet.type_ -} - -func (voteSet *VoteSet) Size() int { - if voteSet == nil { - return 0 - } - return voteSet.valSet.Size() -} - -// Returns added=true if vote is valid and new. -// Otherwise returns err=ErrVote[ -// UnexpectedStep | InvalidIndex | InvalidAddress | -// InvalidSignature | InvalidBlockHash | ConflictingVotes ] -// Duplicate votes return added=false, err=nil. -// Conflicting votes return added=*, err=ErrVoteConflictingVotes. -// NOTE: vote should not be mutated after adding. -// NOTE: VoteSet must not be nil -// NOTE: Vote must not be nil -func (voteSet *VoteSet) AddVote(vote *Vote) (added bool, err error) { - if voteSet == nil { - cmn.PanicSanity("AddVote() on nil VoteSet") - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - - return voteSet.addVote(vote) -} - -// NOTE: Validates as much as possible before attempting to verify the signature. -func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { - if vote == nil { - return false, ErrVoteNil - } - valIndex := vote.ValidatorIndex - valAddr := vote.ValidatorAddress - blockKey := vote.BlockID.Key() - - // Ensure that validator index was set - if valIndex < 0 { - return false, errors.Wrap(ErrVoteInvalidValidatorIndex, "Index < 0") - } else if len(valAddr) == 0 { - return false, errors.Wrap(ErrVoteInvalidValidatorAddress, "Empty address") - } - - // Make sure the step matches. - if (vote.Height != voteSet.height) || - (vote.Round != voteSet.round) || - (vote.Type != voteSet.type_) { - return false, errors.Wrapf(ErrVoteUnexpectedStep, "Got %d/%d/%d, expected %d/%d/%d", - voteSet.height, voteSet.round, voteSet.type_, - vote.Height, vote.Round, vote.Type) - } - - // Ensure that signer is a validator. 
- lookupAddr, val := voteSet.valSet.GetByIndex(valIndex) - if val == nil { - return false, errors.Wrapf(ErrVoteInvalidValidatorIndex, - "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) - } - - // Ensure that the signer has the right address - if !bytes.Equal(valAddr, lookupAddr) { - return false, errors.Wrapf(ErrVoteInvalidValidatorAddress, - "vote.ValidatorAddress (%X) does not match address (%X) for vote.ValidatorIndex (%d)\nEnsure the genesis file is correct across all validators.", - valAddr, lookupAddr, valIndex) - } - - // If we already know of this vote, return false. - if existing, ok := voteSet.getVote(valIndex, blockKey); ok { - if existing.Signature.Equals(vote.Signature) { - return false, nil // duplicate - } - return false, errors.Wrapf(ErrVoteNonDeterministicSignature, "Existing vote: %v; New vote: %v", existing, vote) - } - - // Check signature. - if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil { - return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) - } - - // Add vote and get conflicting vote if any - added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) - if conflicting != nil { - return added, NewConflictingVoteError(val, conflicting, vote) - } - if !added { - cmn.PanicSanity("Expected to add non-conflicting vote") - } - return added, nil -} - -// Returns (vote, true) if vote exists for valIndex and blockKey -func (voteSet *VoteSet) getVote(valIndex int, blockKey string) (vote *Vote, ok bool) { - if existing := voteSet.votes[valIndex]; existing != nil && existing.BlockID.Key() == blockKey { - return existing, true - } - if existing := voteSet.votesByBlock[blockKey].getByIndex(valIndex); existing != nil { - return existing, true - } - return nil, false -} - -// Assumes signature is valid. -// If conflicting vote exists, returns it. -func (voteSet *VoteSet) addVerifiedVote(vote *Vote, blockKey string, votingPower int64) (added bool, conflicting *Vote) { - valIndex := vote.ValidatorIndex - - // Already exists in voteSet.votes? - if existing := voteSet.votes[valIndex]; existing != nil { - if existing.BlockID.Equals(vote.BlockID) { - cmn.PanicSanity("addVerifiedVote does not expect duplicate votes") - } else { - conflicting = existing - } - // Replace vote if blockKey matches voteSet.maj23. - if voteSet.maj23 != nil && voteSet.maj23.Key() == blockKey { - voteSet.votes[valIndex] = vote - voteSet.votesBitArray.SetIndex(valIndex, true) - } - // Otherwise don't add it to voteSet.votes - } else { - // Add to voteSet.votes and incr .sum - voteSet.votes[valIndex] = vote - voteSet.votesBitArray.SetIndex(valIndex, true) - voteSet.sum += votingPower - } - - votesByBlock, ok := voteSet.votesByBlock[blockKey] - if ok { - if conflicting != nil && !votesByBlock.peerMaj23 { - // There's a conflict and no peer claims that this block is special. - return false, conflicting - } - // We'll add the vote in a bit. - } else { - // .votesByBlock doesn't exist... - if conflicting != nil { - // ... and there's a conflicting vote. - // We're not even tracking this blockKey, so just forget it. - return false, conflicting - } - // ... and there's no conflicting vote. - // Start tracking this blockKey - votesByBlock = newBlockVotes(false, voteSet.valSet.Size()) - voteSet.votesByBlock[blockKey] = votesByBlock - // We'll add the vote in a bit. 
- } - - // Before adding to votesByBlock, see if we'll exceed quorum - origSum := votesByBlock.sum - quorum := voteSet.valSet.TotalVotingPower()*2/3 + 1 - - // Add vote to votesByBlock - votesByBlock.addVerifiedVote(vote, votingPower) - - // If we just crossed the quorum threshold and have 2/3 majority... - if origSum < quorum && quorum <= votesByBlock.sum { - // Only consider the first quorum reached - if voteSet.maj23 == nil { - maj23BlockID := vote.BlockID - voteSet.maj23 = &maj23BlockID - // And also copy votes over to voteSet.votes - for i, vote := range votesByBlock.votes { - if vote != nil { - voteSet.votes[i] = vote - } - } - } - } - - return true, conflicting -} - -// If a peer claims that it has 2/3 majority for given blockKey, call this. -// NOTE: if there are too many peers, or too much peer churn, -// this can cause memory issues. -// TODO: implement ability to remove peers too -// NOTE: VoteSet must not be nil -func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { - if voteSet == nil { - cmn.PanicSanity("SetPeerMaj23() on nil VoteSet") - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - - blockKey := blockID.Key() - - // Make sure peer hasn't already told us something. - if existing, ok := voteSet.peerMaj23s[peerID]; ok { - if existing.Equals(blockID) { - return nil // Nothing to do - } - return fmt.Errorf("SetPeerMaj23: Received conflicting blockID from peer %v. Got %v, expected %v", - peerID, blockID, existing) - } - voteSet.peerMaj23s[peerID] = blockID - - // Create .votesByBlock entry if needed. - votesByBlock, ok := voteSet.votesByBlock[blockKey] - if ok { - if votesByBlock.peerMaj23 { - return nil // Nothing to do - } - votesByBlock.peerMaj23 = true - // No need to copy votes, already there. - } else { - votesByBlock = newBlockVotes(true, voteSet.valSet.Size()) - voteSet.votesByBlock[blockKey] = votesByBlock - // No need to copy votes, no votes to copy over. 
- } - return nil -} - -func (voteSet *VoteSet) BitArray() *cmn.BitArray { - if voteSet == nil { - return nil - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.votesBitArray.Copy() -} - -func (voteSet *VoteSet) BitArrayByBlockID(blockID BlockID) *cmn.BitArray { - if voteSet == nil { - return nil - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - votesByBlock, ok := voteSet.votesByBlock[blockID.Key()] - if ok { - return votesByBlock.bitArray.Copy() - } - return nil -} - -// NOTE: if validator has conflicting votes, returns "canonical" vote -func (voteSet *VoteSet) GetByIndex(valIndex int) *Vote { - if voteSet == nil { - return nil - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.votes[valIndex] -} - -func (voteSet *VoteSet) GetByAddress(address []byte) *Vote { - if voteSet == nil { - return nil - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - valIndex, val := voteSet.valSet.GetByAddress(address) - if val == nil { - cmn.PanicSanity("GetByAddress(address) returned nil") - } - return voteSet.votes[valIndex] -} - -func (voteSet *VoteSet) HasTwoThirdsMajority() bool { - if voteSet == nil { - return false - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.maj23 != nil -} - -func (voteSet *VoteSet) IsCommit() bool { - if voteSet == nil { - return false - } - if voteSet.type_ != VoteTypePrecommit { - return false - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.maj23 != nil -} - -func (voteSet *VoteSet) HasTwoThirdsAny() bool { - if voteSet == nil { - return false - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.sum > voteSet.valSet.TotalVotingPower()*2/3 -} - -func (voteSet *VoteSet) HasAll() bool { - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.sum == voteSet.valSet.TotalVotingPower() -} - -// If there was a +2/3 majority for blockID, return blockID and true. -// Else, return the empty BlockID{} and false. -func (voteSet *VoteSet) TwoThirdsMajority() (blockID BlockID, ok bool) { - if voteSet == nil { - return BlockID{}, false - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - if voteSet.maj23 != nil { - return *voteSet.maj23, true - } - return BlockID{}, false -} - -//-------------------------------------------------------------------------------- -// Strings and JSON - -func (voteSet *VoteSet) String() string { - if voteSet == nil { - return "nil-VoteSet" - } - return voteSet.StringIndented("") -} - -func (voteSet *VoteSet) StringIndented(indent string) string { - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - voteStrings := make([]string, len(voteSet.votes)) - for i, vote := range voteSet.votes { - if vote == nil { - voteStrings[i] = "nil-Vote" - } else { - voteStrings[i] = vote.String() - } - } - return fmt.Sprintf(`VoteSet{ -%s H:%v R:%v T:%v -%s %v -%s %v -%s %v -%s}`, - indent, voteSet.height, voteSet.round, voteSet.type_, - indent, strings.Join(voteStrings, "\n"+indent+" "), - indent, voteSet.votesBitArray, - indent, voteSet.peerMaj23s, - indent) -} - -// Marshal the VoteSet to JSON. Same as String(), just in JSON, -// and without the height/round/type_ (since its already included in the votes). 
-func (voteSet *VoteSet) MarshalJSON() ([]byte, error) { - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return cdc.MarshalJSON(VoteSetJSON{ - voteSet.voteStrings(), - voteSet.bitArrayString(), - voteSet.peerMaj23s, - }) -} - -// More human readable JSON of the vote set -// NOTE: insufficient for unmarshalling from (compressed votes) -// TODO: make the peerMaj23s nicer to read (eg just the block hash) -type VoteSetJSON struct { - Votes []string `json:"votes"` - VotesBitArray string `json:"votes_bit_array"` - PeerMaj23s map[P2PID]BlockID `json:"peer_maj_23s"` -} - -// Return the bit-array of votes including -// the fraction of power that has voted like: -// "BA{29:xx__x__x_x___x__x_______xxx__} 856/1304 = 0.66" -func (voteSet *VoteSet) BitArrayString() string { - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.bitArrayString() -} - -func (voteSet *VoteSet) bitArrayString() string { - bAString := voteSet.votesBitArray.String() - voted, total, fracVoted := voteSet.sumTotalFrac() - return fmt.Sprintf("%s %d/%d = %.2f", bAString, voted, total, fracVoted) -} - -// Returns a list of votes compressed to more readable strings. -func (voteSet *VoteSet) VoteStrings() []string { - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - return voteSet.voteStrings() -} - -func (voteSet *VoteSet) voteStrings() []string { - voteStrings := make([]string, len(voteSet.votes)) - for i, vote := range voteSet.votes { - if vote == nil { - voteStrings[i] = "nil-Vote" - } else { - voteStrings[i] = vote.String() - } - } - return voteStrings -} - -func (voteSet *VoteSet) StringShort() string { - if voteSet == nil { - return "nil-VoteSet" - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - _, _, frac := voteSet.sumTotalFrac() - return fmt.Sprintf(`VoteSet{H:%v R:%v T:%v +2/3:%v(%v) %v %v}`, - voteSet.height, voteSet.round, voteSet.type_, voteSet.maj23, frac, voteSet.votesBitArray, voteSet.peerMaj23s) -} - -// return the power voted, the total, and the fraction -func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { - voted, total := voteSet.sum, voteSet.valSet.TotalVotingPower() - fracVoted := float64(voted) / float64(total) - return voted, total, fracVoted -} - -//-------------------------------------------------------------------------------- -// Commit - -func (voteSet *VoteSet) MakeCommit() *Commit { - if voteSet.type_ != VoteTypePrecommit { - cmn.PanicSanity("Cannot MakeCommit() unless VoteSet.Type is VoteTypePrecommit") - } - voteSet.mtx.Lock() - defer voteSet.mtx.Unlock() - - // Make sure we have a 2/3 majority - if voteSet.maj23 == nil { - cmn.PanicSanity("Cannot MakeCommit() unless a blockhash has +2/3") - } - - // For every validator, get the precommit - votesCopy := make([]*Vote, len(voteSet.votes)) - copy(votesCopy, voteSet.votes) - return &Commit{ - BlockID: *voteSet.maj23, - Precommits: votesCopy, - } -} - -//-------------------------------------------------------------------------------- - -/* - Votes for a particular block - There are two ways a *blockVotes gets created for a blockKey. - 1. first (non-conflicting) vote of a validator w/ blockKey (peerMaj23=false) - 2. A peer claims to have a 2/3 majority w/ blockKey (peerMaj23=true) -*/ -type blockVotes struct { - peerMaj23 bool // peer claims to have maj23 - bitArray *cmn.BitArray // valIndex -> hasVote? 
- votes []*Vote // valIndex -> *Vote - sum int64 // vote sum -} - -func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes { - return &blockVotes{ - peerMaj23: peerMaj23, - bitArray: cmn.NewBitArray(numValidators), - votes: make([]*Vote, numValidators), - sum: 0, - } -} - -func (vs *blockVotes) addVerifiedVote(vote *Vote, votingPower int64) { - valIndex := vote.ValidatorIndex - if existing := vs.votes[valIndex]; existing == nil { - vs.bitArray.SetIndex(valIndex, true) - vs.votes[valIndex] = vote - vs.sum += votingPower - } -} - -func (vs *blockVotes) getByIndex(index int) *Vote { - if vs == nil { - return nil - } - return vs.votes[index] -} - -//-------------------------------------------------------------------------------- - -// Common interface between *consensus.VoteSet and types.Commit -type VoteSetReader interface { - Height() int64 - Round() int - Type() byte - Size() int - BitArray() *cmn.BitArray - GetByIndex(int) *Vote - IsCommit() bool -} diff --git a/types/vote_set_test.go b/types/vote_set_test.go deleted file mode 100644 index d424667b..00000000 --- a/types/vote_set_test.go +++ /dev/null @@ -1,508 +0,0 @@ -package types - -import ( - "bytes" - "testing" - "time" - - crypto "github.com/tendermint/go-crypto" - cmn "github.com/tendermint/tmlibs/common" - tst "github.com/tendermint/tmlibs/test" -) - -// NOTE: privValidators are in order -func randVoteSet(height int64, round int, type_ byte, numValidators int, votingPower int64) (*VoteSet, *ValidatorSet, []PrivValidator) { - valSet, privValidators := RandValidatorSet(numValidators, votingPower) - return NewVoteSet("test_chain_id", height, round, type_, valSet), valSet, privValidators -} - -// Convenience: Return new vote with different validator address/index -func withValidator(vote *Vote, addr []byte, idx int) *Vote { - vote = vote.Copy() - vote.ValidatorAddress = addr - vote.ValidatorIndex = idx - return vote -} - -// Convenience: Return new vote with different height -func withHeight(vote *Vote, height int64) *Vote { - vote = vote.Copy() - vote.Height = height - return vote -} - -// Convenience: Return new vote with different round -func withRound(vote *Vote, round int) *Vote { - vote = vote.Copy() - vote.Round = round - return vote -} - -// Convenience: Return new vote with different type -func withType(vote *Vote, type_ byte) *Vote { - vote = vote.Copy() - vote.Type = type_ - return vote -} - -// Convenience: Return new vote with different blockHash -func withBlockHash(vote *Vote, blockHash []byte) *Vote { - vote = vote.Copy() - vote.BlockID.Hash = blockHash - return vote -} - -// Convenience: Return new vote with different blockParts -func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { - vote = vote.Copy() - vote.BlockID.PartsHeader = blockPartsHeader - return vote -} - -func TestAddVote(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) - val0 := privValidators[0] - - // t.Logf(">> %v", voteSet) - - if voteSet.GetByAddress(val0.GetAddress()) != nil { - t.Errorf("Expected GetByAddress(val0.Address) to be nil") - } - if voteSet.BitArray().GetIndex(0) { - t.Errorf("Expected BitArray.GetIndex(0) to be false") - } - blockID, ok := voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("There should be no 2/3 majority") - } - - vote := &Vote{ - ValidatorAddress: val0.GetAddress(), - ValidatorIndex: 0, // since privValidators are in order - Height: height, - Round: round, - Type: VoteTypePrevote, - 
-		Timestamp:        time.Now().UTC(),
-		BlockID:          BlockID{nil, PartSetHeader{}},
-	}
-	_, err := signAddVote(val0, vote, voteSet)
-	if err != nil {
-		t.Error(err)
-	}
-
-	if voteSet.GetByAddress(val0.GetAddress()) == nil {
-		t.Errorf("Expected GetByAddress(val0.Address) to be present")
-	}
-	if !voteSet.BitArray().GetIndex(0) {
-		t.Errorf("Expected BitArray.GetIndex(0) to be true")
-	}
-	blockID, ok = voteSet.TwoThirdsMajority()
-	if ok || !blockID.IsZero() {
-		t.Errorf("There should be no 2/3 majority")
-	}
-}
-
-func Test2_3Majority(t *testing.T) {
-	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1)
-
-	voteProto := &Vote{
-		ValidatorAddress: nil, // NOTE: must fill in
-		ValidatorIndex:   -1,  // NOTE: must fill in
-		Height:           height,
-		Round:            round,
-		Type:             VoteTypePrevote,
-		Timestamp:        time.Now().UTC(),
-		BlockID:          BlockID{nil, PartSetHeader{}},
-	}
-	// 6 out of 10 voted for nil.
-	for i := 0; i < 6; i++ {
-		vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
-		_, err := signAddVote(privValidators[i], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
-	}
-	blockID, ok := voteSet.TwoThirdsMajority()
-	if ok || !blockID.IsZero() {
-		t.Errorf("There should be no 2/3 majority")
-	}
-
-	// 7th validator voted for some blockhash
-	{
-		vote := withValidator(voteProto, privValidators[6].GetAddress(), 6)
-		_, err := signAddVote(privValidators[6], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
-		if err != nil {
-			t.Error(err)
-		}
-		blockID, ok = voteSet.TwoThirdsMajority()
-		if ok || !blockID.IsZero() {
-			t.Errorf("There should be no 2/3 majority")
-		}
-	}
-
-	// 8th validator voted for nil.
-	{
-		vote := withValidator(voteProto, privValidators[7].GetAddress(), 7)
-		_, err := signAddVote(privValidators[7], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
-		blockID, ok = voteSet.TwoThirdsMajority()
-		if !ok || !blockID.IsZero() {
-			t.Errorf("There should be 2/3 majority for nil")
-		}
-	}
-}
-
-func Test2_3MajorityRedux(t *testing.T) {
-	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 100, 1)
-
-	blockHash := crypto.CRandBytes(32)
-	blockPartsTotal := 123
-	blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)}
-
-	voteProto := &Vote{
-		ValidatorAddress: nil, // NOTE: must fill in
-		ValidatorIndex:   -1,  // NOTE: must fill in
-		Height:           height,
-		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrevote,
-		BlockID:          BlockID{blockHash, blockPartsHeader},
-	}
-
-	// 66 out of 100 voted for the block.
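-	// With 100 validators of power 1 each, +2/3 means strictly more than 66.67,
-	// i.e. at least 67 votes -- so 66 votes falls just short of a majority.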
- for i := 0; i < 66; i++ { - vote := withValidator(voteProto, privValidators[i].GetAddress(), i) - _, err := signAddVote(privValidators[i], vote, voteSet) - if err != nil { - t.Error(err) - } - } - blockID, ok := voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("There should be no 2/3 majority") - } - - // 67th validator voted for nil - { - vote := withValidator(voteProto, privValidators[66].GetAddress(), 66) - _, err := signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) - if err != nil { - t.Error(err) - } - blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("There should be no 2/3 majority: last vote added was nil") - } - } - - // 68th validator voted for a different BlockParts PartSetHeader - { - vote := withValidator(voteProto, privValidators[67].GetAddress(), 67) - blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - _, err := signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) - if err != nil { - t.Error(err) - } - blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Hash") - } - } - - // 69th validator voted for different BlockParts Total - { - vote := withValidator(voteProto, privValidators[68].GetAddress(), 68) - blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash} - _, err := signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) - if err != nil { - t.Error(err) - } - blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("There should be no 2/3 majority: last vote added had different PartSetHeader Total") - } - } - - // 70th validator voted for different BlockHash - { - vote := withValidator(voteProto, privValidators[69].GetAddress(), 69) - _, err := signAddVote(privValidators[69], withBlockHash(vote, cmn.RandBytes(32)), voteSet) - if err != nil { - t.Error(err) - } - blockID, ok = voteSet.TwoThirdsMajority() - if ok || !blockID.IsZero() { - t.Errorf("There should be no 2/3 majority: last vote added had different BlockHash") - } - } - - // 71st validator voted for the right BlockHash & BlockPartsHeader - { - vote := withValidator(voteProto, privValidators[70].GetAddress(), 70) - _, err := signAddVote(privValidators[70], vote, voteSet) - if err != nil { - t.Error(err) - } - blockID, ok = voteSet.TwoThirdsMajority() - if !ok || !blockID.Equals(BlockID{blockHash, blockPartsHeader}) { - t.Errorf("There should be 2/3 majority") - } - } -} - -func TestBadVotes(t *testing.T) { - height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 10, 1) - - voteProto := &Vote{ - ValidatorAddress: nil, - ValidatorIndex: -1, - Height: height, - Round: round, - Timestamp: time.Now().UTC(), - Type: VoteTypePrevote, - BlockID: BlockID{nil, PartSetHeader{}}, - } - - // val0 votes for nil. - { - vote := withValidator(voteProto, privValidators[0].GetAddress(), 0) - added, err := signAddVote(privValidators[0], vote, voteSet) - if !added || err != nil { - t.Errorf("Expected VoteSet.Add to succeed") - } - } - - // val0 votes again for some block. 
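-	// A second, different vote from the same validator is a conflicting vote and is
-	// rejected here; TestConflicts below shows it is only admitted (with an error)
-	// once SetPeerMaj23() tracks its BlockID.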
-	{
-		vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
-		added, err := signAddVote(privValidators[0], withBlockHash(vote, cmn.RandBytes(32)), voteSet)
-		if added || err == nil {
-			t.Errorf("Expected VoteSet.Add to fail, conflicting vote.")
-		}
-	}
-
-	// val1 votes on another height
-	{
-		vote := withValidator(voteProto, privValidators[1].GetAddress(), 1)
-		added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet)
-		if added || err == nil {
-			t.Errorf("Expected VoteSet.Add to fail, wrong height")
-		}
-	}
-
-	// val2 votes on another round
-	{
-		vote := withValidator(voteProto, privValidators[2].GetAddress(), 2)
-		added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet)
-		if added || err == nil {
-			t.Errorf("Expected VoteSet.Add to fail, wrong round")
-		}
-	}
-
-	// val3 votes with another type.
-	{
-		vote := withValidator(voteProto, privValidators[3].GetAddress(), 3)
-		added, err := signAddVote(privValidators[3], withType(vote, VoteTypePrecommit), voteSet)
-		if added || err == nil {
-			t.Errorf("Expected VoteSet.Add to fail, wrong type")
-		}
-	}
-}
-
-func TestConflicts(t *testing.T) {
-	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrevote, 4, 1)
-	blockHash1 := cmn.RandBytes(32)
-	blockHash2 := cmn.RandBytes(32)
-
-	voteProto := &Vote{
-		ValidatorAddress: nil,
-		ValidatorIndex:   -1,
-		Height:           height,
-		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrevote,
-		BlockID:          BlockID{nil, PartSetHeader{}},
-	}
-
-	// val0 votes for nil.
-	{
-		vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
-		added, err := signAddVote(privValidators[0], vote, voteSet)
-		if !added || err != nil {
-			t.Errorf("Expected VoteSet.Add to succeed")
-		}
-	}
-
-	// val0 votes again for blockHash1.
-	{
-		vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
-		added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
-		if added {
-			t.Errorf("Expected VoteSet.Add to fail, conflicting vote.")
-		}
-		if err == nil {
-			t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
-		}
-	}
-
-	// start tracking blockHash1
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash1, PartSetHeader{}})
-
-	// val0 votes again for blockHash1.
-	{
-		vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
-		added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash1), voteSet)
-		if !added {
-			t.Errorf("Expected VoteSet.Add to succeed, called SetPeerMaj23().")
-		}
-		if err == nil {
-			t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
-		}
-	}
-
-	// attempt tracking blockHash2, should fail because already set for peerA.
-	voteSet.SetPeerMaj23("peerA", BlockID{blockHash2, PartSetHeader{}})
-
-	// val0 votes again, this time for blockHash2.
-	{
-		vote := withValidator(voteProto, privValidators[0].GetAddress(), 0)
-		added, err := signAddVote(privValidators[0], withBlockHash(vote, blockHash2), voteSet)
-		if added {
-			t.Errorf("Expected VoteSet.Add to fail, duplicate SetPeerMaj23() from peerA")
-		}
-		if err == nil {
-			t.Errorf("Expected VoteSet.Add to return error, conflicting vote.")
-		}
-	}
-
-	// val1 votes for blockHash1.
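-	// With 4 validators of power 1 each, +2/3 requires at least 3 votes,
-	// so no majority of any kind is possible until a third validator votes.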
-	{
-		vote := withValidator(voteProto, privValidators[1].GetAddress(), 1)
-		added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet)
-		if !added || err != nil {
-			t.Errorf("Expected VoteSet.Add to succeed")
-		}
-	}
-
-	// check
-	if voteSet.HasTwoThirdsMajority() {
-		t.Errorf("We shouldn't have 2/3 majority yet")
-	}
-	if voteSet.HasTwoThirdsAny() {
-		t.Errorf("We shouldn't have +2/3 of votes (for anything) yet")
-	}
-
-	// val2 votes for blockHash2.
-	{
-		vote := withValidator(voteProto, privValidators[2].GetAddress(), 2)
-		added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet)
-		if !added || err != nil {
-			t.Errorf("Expected VoteSet.Add to succeed")
-		}
-	}
-
-	// check
-	if voteSet.HasTwoThirdsMajority() {
-		t.Errorf("We shouldn't have 2/3 majority yet")
-	}
-	if !voteSet.HasTwoThirdsAny() {
-		t.Errorf("We should have +2/3 of votes (for anything)")
-	}
-
-	// now attempt tracking blockHash1
-	voteSet.SetPeerMaj23("peerB", BlockID{blockHash1, PartSetHeader{}})
-
-	// val2 votes for blockHash1.
-	{
-		vote := withValidator(voteProto, privValidators[2].GetAddress(), 2)
-		added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet)
-		if !added {
-			t.Errorf("Expected VoteSet.Add to succeed")
-		}
-		if err == nil {
-			t.Errorf("Expected VoteSet.Add to return error, conflicting vote")
-		}
-	}
-
-	// check
-	if !voteSet.HasTwoThirdsMajority() {
-		t.Errorf("We should have 2/3 majority for blockHash1")
-	}
-	blockIDMaj23, _ := voteSet.TwoThirdsMajority()
-	if !bytes.Equal(blockIDMaj23.Hash, blockHash1) {
-		t.Errorf("Got the wrong 2/3 majority blockhash")
-	}
-	if !voteSet.HasTwoThirdsAny() {
-		t.Errorf("We should have +2/3 of votes (for anything)")
-	}
-}
-
-func TestMakeCommit(t *testing.T) {
-	height, round := int64(1), 0
-	voteSet, _, privValidators := randVoteSet(height, round, VoteTypePrecommit, 10, 1)
-	blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)}
-
-	voteProto := &Vote{
-		ValidatorAddress: nil,
-		ValidatorIndex:   -1,
-		Height:           height,
-		Round:            round,
-		Timestamp:        time.Now().UTC(),
-		Type:             VoteTypePrecommit,
-		BlockID:          BlockID{blockHash, blockPartsHeader},
-	}
-
-	// 6 out of 10 voted for some block.
-	for i := 0; i < 6; i++ {
-		vote := withValidator(voteProto, privValidators[i].GetAddress(), i)
-		_, err := signAddVote(privValidators[i], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
-	}
-
-	// MakeCommit should fail.
-	tst.AssertPanics(t, "Doesn't have +2/3 majority", func() { voteSet.MakeCommit() })
-
-	// 7th voted for some other block.
-	{
-		vote := withValidator(voteProto, privValidators[6].GetAddress(), 6)
-		vote = withBlockHash(vote, cmn.RandBytes(32))
-		vote = withBlockPartsHeader(vote, PartSetHeader{123, cmn.RandBytes(32)})
-
-		_, err := signAddVote(privValidators[6], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
-	}
-
-	// The 8th voted like everyone else.
-	{
-		vote := withValidator(voteProto, privValidators[7].GetAddress(), 7)
-		_, err := signAddVote(privValidators[7], vote, voteSet)
-		if err != nil {
-			t.Error(err)
-		}
-	}
-
-	commit := voteSet.MakeCommit()
-
-	// Commit should have 10 elements
-	if len(commit.Precommits) != 10 {
-		t.Errorf("Commit Precommits should have the same number of precommits as validators")
-	}
-
-	// Ensure that Commit precommits are ordered.
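-	// (Validators that did not precommit the block appear as nil entries; ValidateBasic
-	// should reject precommits that are out of order or mismatch the commit's height/round.)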
-	if err := commit.ValidateBasic(); err != nil {
-		t.Errorf("Error in Commit.ValidateBasic(): %v", err)
-	}
-}
diff --git a/types/vote_test.go b/types/vote_test.go
deleted file mode 100644
index 263d2c35..00000000
--- a/types/vote_test.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package types
-
-import (
-	"testing"
-	"time"
-
-	"github.com/stretchr/testify/require"
-)
-
-func examplePrevote() *Vote {
-	return exampleVote(VoteTypePrevote)
-}
-
-func examplePrecommit() *Vote {
-	return exampleVote(VoteTypePrecommit)
-}
-
-func exampleVote(t byte) *Vote {
-	var stamp, err = time.Parse(TimeFormat, "2017-12-25T03:00:01.234Z")
-	if err != nil {
-		panic(err)
-	}
-
-	return &Vote{
-		ValidatorAddress: []byte("addr"),
-		ValidatorIndex:   56789,
-		Height:           12345,
-		Round:            2,
-		Timestamp:        stamp,
-		Type:             t,
-		BlockID: BlockID{
-			Hash: []byte("hash"),
-			PartsHeader: PartSetHeader{
-				Total: 1000000,
-				Hash:  []byte("parts_hash"),
-			},
-		},
-	}
-}
-
-func TestVoteSignable(t *testing.T) {
-	vote := examplePrecommit()
-	signBytes := vote.SignBytes("test_chain_id")
-	signStr := string(signBytes)
-
-	expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"68617368","parts":{"hash":"70617274735F68617368","total":1000000}},"height":12345,"round":2,"timestamp":"2017-12-25T03:00:01.234Z","type":2}`
-	if signStr != expected {
-		// NOTE: when this fails, you probably want to fix up consensus/replay_test too
-		t.Errorf("Got unexpected sign string for Vote. Expected:\n%v\nGot:\n%v", expected, signStr)
-	}
-}
-
-func TestVoteString(t *testing.T) {
-	tc := []struct {
-		name string
-		in   string
-		out  string
-	}{
-		{"Precommit", examplePrecommit().String(), `Vote{56789:616464720000 12345/02/2(Precommit) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
-		{"Prevote", examplePrevote().String(), `Vote{56789:616464720000 12345/02/1(Prevote) 686173680000 @ 2017-12-25T03:00:01.234Z}`},
-	}
-
-	for _, tt := range tc {
-		tt := tt
-		t.Run(tt.name, func(st *testing.T) {
-			if tt.in != tt.out {
-				t.Errorf("Got unexpected string for Vote. Expected:\n%v\nGot:\n%v", tt.out, tt.in)
-			}
-		})
-	}
-}
-
-func TestVoteVerifySignature(t *testing.T) {
-	privVal := NewMockPV()
-	pubKey := privVal.GetPubKey()
-
-	vote := examplePrecommit()
-	signBytes := vote.SignBytes("test_chain_id")
-
-	// sign it
-	err := privVal.SignVote("test_chain_id", vote)
-	require.NoError(t, err)
-
-	// verify the same vote
-	valid := pubKey.VerifyBytes(vote.SignBytes("test_chain_id"), vote.Signature)
-	require.True(t, valid)
-
-	// serialize, deserialize and verify again....
-	precommit := new(Vote)
-	bs, err := cdc.MarshalBinary(vote)
-	require.NoError(t, err)
-	err = cdc.UnmarshalBinary(bs, &precommit)
-	require.NoError(t, err)
-
-	// verify the transmitted vote
-	newSignBytes := precommit.SignBytes("test_chain_id")
-	require.Equal(t, string(signBytes), string(newSignBytes))
-	valid = pubKey.VerifyBytes(newSignBytes, precommit.Signature)
-	require.True(t, valid)
-}
-
-func TestIsVoteTypeValid(t *testing.T) {
-	tc := []struct {
-		name string
-		in   byte
-		out  bool
-	}{
-		{"Prevote", VoteTypePrevote, true},
-		{"Precommit", VoteTypePrecommit, true},
-		{"InvalidType", byte(3), false},
-	}
-
-	for _, tt := range tc {
-		tt := tt
-		t.Run(tt.name, func(st *testing.T) {
-			if rs := IsVoteTypeValid(tt.in); rs != tt.out {
-				t.Errorf("Got unexpected Vote type. Expected:\n%v\nGot:\n%v", tt.out, rs)
-			}
-		})
-	}
-}
diff --git a/types/wire.go b/types/wire.go
deleted file mode 100644
index bd5c4497..00000000
--- a/types/wire.go
+++ /dev/null
@@ -1,12 +0,0 @@
-package types
-
-import (
-	"github.com/tendermint/go-amino"
-	"github.com/tendermint/go-crypto"
-)
-
-var cdc = amino.NewCodec()
-
-func init() {
-	crypto.RegisterAmino(cdc)
-}
diff --git a/version/version.go b/version/version.go
deleted file mode 100644
index 791da51c..00000000
--- a/version/version.go
+++ /dev/null
@@ -1,23 +0,0 @@
-package version
-
-// Version components
-const (
-	Maj = "0"
-	Min = "20"
-	Fix = "1"
-)
-
-var (
-	// Version is the current version of Tendermint
-	// Must be a string because scripts like dist.sh read this file.
-	Version = "0.20.1-dev"
-
-	// GitCommit is the current HEAD set using ldflags.
-	GitCommit string
-)
-
-func init() {
-	if GitCommit != "" {
-		Version += "-" + GitCommit
-	}
-}
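-
-// For illustration: building with e.g.
-//   go build -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=a1b2c3"
-// would yield Version == "0.20.1-dev-a1b2c3".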