From 4c4a95ca53b17dd3a73eb03669cf6013d46e1bdf Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 26 Sep 2018 14:04:44 +0400 Subject: [PATCH 01/18] config: Add ValidateBasic (#2485) * add missing options to config.toml template and docs Refs #2232 * config#ValidateBasic Refs #2232 * [config] timeouts as time.Duration, not ints Why: - native type provides better guarantees than ", in ms" comment (harder to shoot yourself in the leg) - flexibility: you can change units --- CHANGELOG_PENDING.md | 2 + cmd/tendermint/commands/root.go | 4 + config/config.go | 194 ++++++++++++++++++-------- config/config_test.go | 10 ++ config/toml.go | 40 +++--- consensus/mempool_test.go | 2 +- consensus/reactor.go | 24 ++-- consensus/state.go | 2 +- consensus/state_test.go | 4 +- docs/tendermint-core/configuration.md | 38 ++--- 10 files changed, 210 insertions(+), 110 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 26a31461..c1db6763 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -10,12 +10,14 @@ BREAKING CHANGES: * Go API - [node] Remove node.RunForever +- [config] \#2232 timeouts as time.Duration, not ints FEATURES: IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics - [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics +- [config] \#2232 added ValidateBasic method, which performs basic checks BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) diff --git a/cmd/tendermint/commands/root.go b/cmd/tendermint/commands/root.go index 3c67ddc1..89ffbe74 100644 --- a/cmd/tendermint/commands/root.go +++ b/cmd/tendermint/commands/root.go @@ -1,6 +1,7 @@ package commands import ( + "fmt" "os" "github.com/spf13/cobra" @@ -35,6 +36,9 @@ func ParseConfig() (*cfg.Config, error) { } conf.SetRoot(conf.RootDir) cfg.EnsureRoot(conf.RootDir) + if err = conf.ValidateBasic(); err != nil { + return nil, fmt.Errorf("Error in 
config file: %v", err) + } return conf, err } diff --git a/config/config.go b/config/config.go index ebb7a9ac..87a74131 100644 --- a/config/config.go +++ b/config/config.go @@ -1,6 +1,7 @@ package config import ( + "errors" "fmt" "os" "path/filepath" @@ -89,6 +90,88 @@ func (cfg *Config) SetRoot(root string) *Config { return cfg } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *Config) ValidateBasic() error { + // RPCConfig + if cfg.RPC.GRPCMaxOpenConnections < 0 { + return errors.New("[rpc] grpc_max_open_connections can't be negative") + } + if cfg.RPC.MaxOpenConnections < 0 { + return errors.New("[rpc] max_open_connections can't be negative") + } + + // P2PConfig + if cfg.P2P.MaxNumInboundPeers < 0 { + return errors.New("[p2p] max_num_inbound_peers can't be negative") + } + if cfg.P2P.MaxNumOutboundPeers < 0 { + return errors.New("[p2p] max_num_outbound_peers can't be negative") + } + if cfg.P2P.FlushThrottleTimeout < 0 { + return errors.New("[p2p] flush_throttle_timeout can't be negative") + } + if cfg.P2P.MaxPacketMsgPayloadSize < 0 { + return errors.New("[p2p] max_packet_msg_payload_size can't be negative") + } + if cfg.P2P.SendRate < 0 { + return errors.New("[p2p] send_rate can't be negative") + } + if cfg.P2P.RecvRate < 0 { + return errors.New("[p2p] recv_rate can't be negative") + } + + // MempoolConfig + if cfg.Mempool.Size < 0 { + return errors.New("[mempool] size can't be negative") + } + if cfg.Mempool.CacheSize < 0 { + return errors.New("[mempool] cache_size can't be negative") + } + + // ConsensusConfig + if cfg.Consensus.TimeoutPropose < 0 { + return errors.New("[consensus] timeout_propose can't be negative") + } + if cfg.Consensus.TimeoutProposeDelta < 0 { + return errors.New("[consensus] timeout_propose_delta can't be negative") + } + if cfg.Consensus.TimeoutPrevote < 0 { + return errors.New("[consensus] timeout_prevote can't be negative") + } + if 
cfg.Consensus.TimeoutPrevoteDelta < 0 { + return errors.New("[consensus] timeout_prevote_delta can't be negative") + } + if cfg.Consensus.TimeoutPrecommit < 0 { + return errors.New("[consensus] timeout_precommit can't be negative") + } + if cfg.Consensus.TimeoutPrecommitDelta < 0 { + return errors.New("[consensus] timeout_precommit_delta can't be negative") + } + if cfg.Consensus.TimeoutCommit < 0 { + return errors.New("[consensus] timeout_commit can't be negative") + } + if cfg.Consensus.CreateEmptyBlocksInterval < 0 { + return errors.New("[consensus] create_empty_blocks_interval can't be negative") + } + if cfg.Consensus.PeerGossipSleepDuration < 0 { + return errors.New("[consensus] peer_gossip_sleep_duration can't be negative") + } + if cfg.Consensus.PeerQueryMaj23SleepDuration < 0 { + return errors.New("[consensus] peer_query_maj23_sleep_duration can't be negative") + } + if cfg.Consensus.BlockTimeIota < 0 { + return errors.New("[consensus] blocktime_iota can't be negative") + } + + // InstrumentationConfig + if cfg.Instrumentation.MaxOpenConnections < 0 { + return errors.New("[instrumentation] max_open_connections can't be negative") + } + + return nil +} + //----------------------------------------------------------------------------- // BaseConfig @@ -301,8 +384,8 @@ type P2PConfig struct { // Maximum number of outbound peers to connect to, excluding persistent peers MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"` - // Time to wait before flushing messages out on the connection, in ms - FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"` + // Time to wait before flushing messages out on the connection + FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"` // Maximum size of a message packet payload, in bytes MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"` @@ -351,7 +434,7 @@ func DefaultP2PConfig() *P2PConfig { AddrBookStrict: true, MaxNumInboundPeers: 40, MaxNumOutboundPeers: 10, - 
FlushThrottleTimeout: 100, + FlushThrottleTimeout: 100 * time.Millisecond, MaxPacketMsgPayloadSize: 1024, // 1 kB SendRate: 5120000, // 5 mB/s RecvRate: 5120000, // 5 mB/s @@ -450,72 +533,70 @@ type ConsensusConfig struct { WalPath string `mapstructure:"wal_file"` walFile string // overrides WalPath if set - // All timeouts are in milliseconds - TimeoutPropose int `mapstructure:"timeout_propose"` - TimeoutProposeDelta int `mapstructure:"timeout_propose_delta"` - TimeoutPrevote int `mapstructure:"timeout_prevote"` - TimeoutPrevoteDelta int `mapstructure:"timeout_prevote_delta"` - TimeoutPrecommit int `mapstructure:"timeout_precommit"` - TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"` - TimeoutCommit int `mapstructure:"timeout_commit"` + TimeoutPropose time.Duration `mapstructure:"timeout_propose"` + TimeoutProposeDelta time.Duration `mapstructure:"timeout_propose_delta"` + TimeoutPrevote time.Duration `mapstructure:"timeout_prevote"` + TimeoutPrevoteDelta time.Duration `mapstructure:"timeout_prevote_delta"` + TimeoutPrecommit time.Duration `mapstructure:"timeout_precommit"` + TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"` + TimeoutCommit time.Duration `mapstructure:"timeout_commit"` // Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"` - // EmptyBlocks mode and possible interval between empty blocks in seconds - CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` - CreateEmptyBlocksInterval int `mapstructure:"create_empty_blocks_interval"` + // EmptyBlocks mode and possible interval between empty blocks + CreateEmptyBlocks bool `mapstructure:"create_empty_blocks"` + CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"` - // Reactor sleep duration parameters are in milliseconds - PeerGossipSleepDuration int `mapstructure:"peer_gossip_sleep_duration"` - PeerQueryMaj23SleepDuration int 
`mapstructure:"peer_query_maj23_sleep_duration"` + // Reactor sleep duration parameters + PeerGossipSleepDuration time.Duration `mapstructure:"peer_gossip_sleep_duration"` + PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"` - // Block time parameters in milliseconds. Corresponds to the minimum time increment between consecutive blocks. - BlockTimeIota int `mapstructure:"blocktime_iota"` + // Block time parameters. Corresponds to the minimum time increment between consecutive blocks. + BlockTimeIota time.Duration `mapstructure:"blocktime_iota"` } // DefaultConsensusConfig returns a default configuration for the consensus service func DefaultConsensusConfig() *ConsensusConfig { return &ConsensusConfig{ WalPath: filepath.Join(defaultDataDir, "cs.wal", "wal"), - TimeoutPropose: 3000, - TimeoutProposeDelta: 500, - TimeoutPrevote: 1000, - TimeoutPrevoteDelta: 500, - TimeoutPrecommit: 1000, - TimeoutPrecommitDelta: 500, - TimeoutCommit: 1000, + TimeoutPropose: 3000 * time.Millisecond, + TimeoutProposeDelta: 500 * time.Millisecond, + TimeoutPrevote: 1000 * time.Millisecond, + TimeoutPrevoteDelta: 500 * time.Millisecond, + TimeoutPrecommit: 1000 * time.Millisecond, + TimeoutPrecommitDelta: 500 * time.Millisecond, + TimeoutCommit: 1000 * time.Millisecond, SkipTimeoutCommit: false, CreateEmptyBlocks: true, - CreateEmptyBlocksInterval: 0, - PeerGossipSleepDuration: 100, - PeerQueryMaj23SleepDuration: 2000, - BlockTimeIota: 1000, + CreateEmptyBlocksInterval: 0 * time.Second, + PeerGossipSleepDuration: 100 * time.Millisecond, + PeerQueryMaj23SleepDuration: 2000 * time.Millisecond, + BlockTimeIota: 1000 * time.Millisecond, } } // TestConsensusConfig returns a configuration for testing the consensus service func TestConsensusConfig() *ConsensusConfig { cfg := DefaultConsensusConfig() - cfg.TimeoutPropose = 100 - cfg.TimeoutProposeDelta = 1 - cfg.TimeoutPrevote = 10 - cfg.TimeoutPrevoteDelta = 1 - cfg.TimeoutPrecommit = 10 - 
cfg.TimeoutPrecommitDelta = 1 - cfg.TimeoutCommit = 10 + cfg.TimeoutPropose = 100 * time.Millisecond + cfg.TimeoutProposeDelta = 1 * time.Millisecond + cfg.TimeoutPrevote = 10 * time.Millisecond + cfg.TimeoutPrevoteDelta = 1 * time.Millisecond + cfg.TimeoutPrecommit = 10 * time.Millisecond + cfg.TimeoutPrecommitDelta = 1 * time.Millisecond + cfg.TimeoutCommit = 10 * time.Millisecond cfg.SkipTimeoutCommit = true - cfg.PeerGossipSleepDuration = 5 - cfg.PeerQueryMaj23SleepDuration = 250 - cfg.BlockTimeIota = 10 + cfg.PeerGossipSleepDuration = 5 * time.Millisecond + cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond + cfg.BlockTimeIota = 10 * time.Millisecond return cfg } // MinValidVoteTime returns the minimum acceptable block time. // See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md). func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time { - return lastBlockTime. - Add(time.Duration(cfg.BlockTimeIota) * time.Millisecond) + return lastBlockTime.Add(cfg.BlockTimeIota) } // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step @@ -523,39 +604,30 @@ func (cfg *ConsensusConfig) WaitForTxs() bool { return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0 } -// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available -func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration { - return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second -} - // Propose returns the amount of time to wait for a proposal func (cfg *ConsensusConfig) Propose(round int) time.Duration { - return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Prevote returns the amount of time to wait for 
straggler votes after receiving any +2/3 prevotes func (cfg *ConsensusConfig) Prevote(round int) time.Duration { - return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits func (cfg *ConsensusConfig) Precommit(round int) time.Duration { - return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond + return time.Duration( + cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round), + ) * time.Nanosecond } // Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit). func (cfg *ConsensusConfig) Commit(t time.Time) time.Time { - return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond) -} - -// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor -func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration { - return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond -} - -// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor -func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration { - return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond + return t.Add(cfg.TimeoutCommit) } // WalFile returns the full path to the write-ahead log file diff --git a/config/config_test.go b/config/config_test.go index 6379960f..afdbed18 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,6 +2,7 @@ package config import ( "testing" + "time" "github.com/stretchr/testify/assert" ) @@ -26,3 +27,12 @@ func TestDefaultConfig(t *testing.T) { assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir()) } + +func 
TestConfigValidateBasic(t *testing.T) { + cfg := DefaultConfig() + assert.NoError(t, cfg.ValidateBasic()) + + // tamper with timeout_propose + cfg.Consensus.TimeoutPropose = -10 * time.Second + assert.Error(t, cfg.ValidateBasic()) +} diff --git a/config/toml.go b/config/toml.go index bc10590c..846b33d1 100644 --- a/config/toml.go +++ b/config/toml.go @@ -99,7 +99,7 @@ priv_validator_file = "{{ js .BaseConfig.PrivValidator }}" priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}" # Path to the JSON file containing the private key to use for node authentication in the p2p protocol -node_key_file = "{{ js .BaseConfig.NodeKey}}" +node_key_file = "{{ js .BaseConfig.NodeKey }}" # Mechanism to connect to the ABCI application: socket | grpc abci = "{{ .BaseConfig.ABCI }}" @@ -172,15 +172,15 @@ addr_book_file = "{{ js .P2P.AddrBook }}" # Set false for private or local networks addr_book_strict = {{ .P2P.AddrBookStrict }} -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }} - # Maximum number of inbound peers max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }} # Maximum number of outbound peers to connect to, excluding persistent peers max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }} +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}" + # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }} @@ -202,6 +202,13 @@ seed_mode = {{ .P2P.SeedMode }} # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private_peer_ids = "{{ .P2P.PrivatePeerIDs }}" +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }} + +# Peer connection configuration. 
+handshake_timeout = "{{ .P2P.HandshakeTimeout }}" +dial_timeout = "{{ .P2P.DialTimeout }}" + ##### mempool configuration options ##### [mempool] @@ -221,25 +228,24 @@ cache_size = {{ .Mempool.CacheSize }} wal_file = "{{ js .Consensus.WalPath }}" -# All timeouts are in milliseconds -timeout_propose = {{ .Consensus.TimeoutPropose }} -timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }} -timeout_prevote = {{ .Consensus.TimeoutPrevote }} -timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }} -timeout_precommit = {{ .Consensus.TimeoutPrecommit }} -timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }} -timeout_commit = {{ .Consensus.TimeoutCommit }} +timeout_propose = "{{ .Consensus.TimeoutPropose }}" +timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}" +timeout_prevote = "{{ .Consensus.TimeoutPrevote }}" +timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}" +timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}" +timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}" +timeout_commit = "{{ .Consensus.TimeoutCommit }}" # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }} -# EmptyBlocks mode and possible interval between empty blocks in seconds +# EmptyBlocks mode and possible interval between empty blocks create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }} -create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }} +create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}" -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }} -peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }} +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}" +peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}" ##### transactions indexer 
configuration options ##### [tx_index] diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index 950cf67d..179766fd 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -38,7 +38,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") - config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds()) + config.Consensus.CreateEmptyBlocksInterval = ensureTimeout state, privVals := randGenesisState(1, false, 10) cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication()) cs.mempool.EnableTxsAvailable() diff --git a/consensus/reactor.go b/consensus/reactor.go index 2b4bab13..16e2e7e2 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -508,7 +508,7 @@ OUTER_LOOP: // If height and round don't match, sleep. if (rs.Height != prs.Height) || (rs.Round != prs.Round) { //logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } @@ -544,7 +544,7 @@ OUTER_LOOP: } // Nothing to do. Sleep. 
- time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } } @@ -558,12 +558,12 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype if blockMeta == nil { logger.Error("Failed to load block meta", "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height()) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping", "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } // Load the part @@ -571,7 +571,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype if part == nil { logger.Error("Could not load part", "index", index, "blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader) - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } // Send the part @@ -589,7 +589,7 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype return } //logger.Info("No parts to send in catch-up, sleeping") - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) } func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) { @@ -658,7 +658,7 @@ OUTER_LOOP: sleeping = 1 } - time.Sleep(conR.conS.config.PeerGossipSleep()) + time.Sleep(conR.conS.config.PeerGossipSleepDuration) continue OUTER_LOOP } } @@ -742,7 +742,7 @@ OUTER_LOOP: Type: types.VoteTypePrevote, BlockID: maj23, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } } @@ 
-759,7 +759,7 @@ OUTER_LOOP: Type: types.VoteTypePrecommit, BlockID: maj23, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } } @@ -776,7 +776,7 @@ OUTER_LOOP: Type: types.VoteTypePrevote, BlockID: maj23, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } } @@ -795,11 +795,11 @@ OUTER_LOOP: Type: types.VoteTypePrecommit, BlockID: commit.BlockID, })) - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) } } - time.Sleep(conR.conS.config.PeerQueryMaj23Sleep()) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) continue OUTER_LOOP } diff --git a/consensus/state.go b/consensus/state.go index 12dfa4ed..35bbca0f 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -782,7 +782,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) { waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height) if waitForTxs { if cs.config.CreateEmptyBlocksInterval > 0 { - cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound) + cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound) } go cs.proposalHeartbeat(height, round) } else { diff --git a/consensus/state_test.go b/consensus/state_test.go index 32fc5fd6..4c34d9d2 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -21,8 +21,8 @@ func init() { config = ResetConfig("consensus_state_test") } -func ensureProposeTimeout(timeoutPropose int) time.Duration { - return time.Duration(timeoutPropose*2) * time.Millisecond +func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration { + return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond } /* diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index d759ab9f..c5b07497 100644 --- 
a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -115,15 +115,15 @@ addr_book_file = "addrbook.json" # Set false for private or local networks addr_book_strict = true -# Time to wait before flushing messages out on the connection, in ms -flush_throttle_timeout = 100 - # Maximum number of inbound peers max_num_inbound_peers = 40 # Maximum number of outbound peers to connect to, excluding persistent peers max_num_outbound_peers = 10 +# Time to wait before flushing messages out on the connection +flush_throttle_timeout = "100ms" + # Maximum size of a message packet payload, in bytes max_packet_msg_payload_size = 1024 @@ -145,6 +145,13 @@ seed_mode = false # Comma separated list of peer IDs to keep private (will not be gossiped to other peers) private_peer_ids = "" +# Toggle to disable guard against peers connecting from the same ip. +allow_duplicate_ip = true + +# Peer connection configuration. +handshake_timeout = "20s" +dial_timeout = "3s" + ##### mempool configuration options ##### [mempool] @@ -164,25 +171,24 @@ cache_size = 100000 wal_file = "data/cs.wal/wal" -# All timeouts are in milliseconds -timeout_propose = 3000 -timeout_propose_delta = 500 -timeout_prevote = 1000 -timeout_prevote_delta = 500 -timeout_precommit = 1000 -timeout_precommit_delta = 500 -timeout_commit = 1000 +timeout_propose = "3000ms" +timeout_propose_delta = "500ms" +timeout_prevote = "1000ms" +timeout_prevote_delta = "500ms" +timeout_precommit = "1000ms" +timeout_precommit_delta = "500ms" +timeout_commit = "1000ms" # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) skip_timeout_commit = false -# EmptyBlocks mode and possible interval between empty blocks in seconds +# EmptyBlocks mode and possible interval between empty blocks create_empty_blocks = true -create_empty_blocks_interval = 0 +create_empty_blocks_interval = "0s" -# Reactor sleep duration parameters are in milliseconds -peer_gossip_sleep_duration = 100 
-peer_query_maj23_sleep_duration = 2000 +# Reactor sleep duration parameters +peer_gossip_sleep_duration = "100ms" +peer_query_maj23_sleep_duration = "2000ms" ##### transactions indexer configuration options ##### [tx_index] From d007ade6c35d38d5441adbb87d8df37dff562df5 Mon Sep 17 00:00:00 2001 From: zramsay Date: Wed, 26 Sep 2018 17:49:20 -0400 Subject: [PATCH 02/18] add version to docs --- docs/DOCS_README.md | 3 ++- docs/README.md | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index e87ef23d..e2f22ff6 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -20,7 +20,8 @@ a private website repository has make targets consumed by a standard Jenkins tas ## README The [README.md](./README.md) is also the landing page for the documentation -on the website. +on the website. During the Jenkins build, the current commit is added to the bottom +of the README. ## Config.js diff --git a/docs/README.md b/docs/README.md index 58b3bcb6..2ecf625e 100644 --- a/docs/README.md +++ b/docs/README.md @@ -39,3 +39,7 @@ Dive deep into the spec. There's one for each Tendermint and the ABCI See [this file](./DOCS_README.md) for details of the build process and considerations when making changes. 
+ +## Version + +This documentation is built from the following commit: From 8dda3c3b28e70a0305ea297ee5d95e80f9105860 Mon Sep 17 00:00:00 2001 From: HaoyangLiu Date: Sat, 29 Sep 2018 07:23:21 +0800 Subject: [PATCH 03/18] lite: Add synchronization in lite verify (#2396) * Implement issues 2386: add synchronization in lite verify and change all Certify to Verify * Replace make(chan struct{}, 0) with make(chan struct{}) * Parameterize memroy cache size and add concurrent test * Refactor import order --- cmd/tendermint/commands/lite.go | 4 +- lite/base_verifier.go | 14 +++--- lite/base_verifier_test.go | 2 +- lite/doc.go | 4 +- lite/dynamic_verifier.go | 58 +++++++++++++++++++++---- lite/dynamic_verifier_test.go | 75 +++++++++++++++++++++++++++++---- lite/proxy/query.go | 2 +- lite/proxy/verifier.go | 4 +- lite/proxy/wrapper.go | 4 +- lite/types.go | 2 +- 10 files changed, 135 insertions(+), 34 deletions(-) diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index edad4fbb..150371d6 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -30,6 +30,7 @@ var ( nodeAddr string chainID string home string + cacheSize int ) func init() { @@ -37,6 +38,7 @@ func init() { LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address") LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID") LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory") + LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size") } func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) { @@ -69,7 +71,7 @@ func runProxy(cmd *cobra.Command, args []string) error { node := rpcclient.NewHTTP(nodeAddr, "/websocket") logger.Info("Constructing Verifier...") - cert, err := proxy.NewVerifier(chainID, home, node, logger) + cert, err := proxy.NewVerifier(chainID, home, node, 
logger, cacheSize) if err != nil { return cmn.ErrorWrap(err, "constructing Verifier") } diff --git a/lite/base_verifier.go b/lite/base_verifier.go index e60d3953..fcde01c0 100644 --- a/lite/base_verifier.go +++ b/lite/base_verifier.go @@ -12,7 +12,7 @@ var _ Verifier = (*BaseVerifier)(nil) // BaseVerifier lets us check the validity of SignedHeaders at height or // later, requiring sufficient votes (> 2/3) from the given valset. -// To certify blocks produced by a blockchain with mutable validator sets, +// To verify blocks produced by a blockchain with mutable validator sets, // use the DynamicVerifier. // TODO: Handle unbonding time. type BaseVerifier struct { @@ -40,15 +40,15 @@ func (bc *BaseVerifier) ChainID() string { } // Implements Verifier. -func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { +func (bc *BaseVerifier) Verify(signedHeader types.SignedHeader) error { - // We can't certify commits older than bc.height. + // We can't verify commits older than bc.height. if signedHeader.Height < bc.height { - return cmn.NewError("BaseVerifier height is %v, cannot certify height %v", + return cmn.NewError("BaseVerifier height is %v, cannot verify height %v", bc.height, signedHeader.Height) } - // We can't certify with the wrong validator set. + // We can't verify with the wrong validator set. if !bytes.Equal(signedHeader.ValidatorsHash, bc.valset.Hash()) { return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash()) @@ -57,7 +57,7 @@ func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { // Do basic sanity checks. err := signedHeader.ValidateBasic(bc.chainID) if err != nil { - return cmn.ErrorWrap(err, "in certify") + return cmn.ErrorWrap(err, "in verify") } // Check commit signatures. 
@@ -65,7 +65,7 @@ func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error { bc.chainID, signedHeader.Commit.BlockID, signedHeader.Height, signedHeader.Commit) if err != nil { - return cmn.ErrorWrap(err, "in certify") + return cmn.ErrorWrap(err, "in verify") } return nil diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go index dab7885f..2ef1203f 100644 --- a/lite/base_verifier_test.go +++ b/lite/base_verifier_test.go @@ -43,7 +43,7 @@ func TestBaseCert(t *testing.T) { for _, tc := range cases { sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals, []byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last) - err := cert.Certify(sh) + err := cert.Verify(sh) if tc.proper { assert.Nil(err, "%+v", err) } else { diff --git a/lite/doc.go b/lite/doc.go index 59f77056..2a0ba23e 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -54,11 +54,11 @@ validator set, and that the height of the commit is at least height (or greater). SignedHeader.Commit may be signed by a different validator set, it can get -certified with a BaseVerifier as long as sufficient signatures from the +verified with a BaseVerifier as long as sufficient signatures from the previous validator set are present in the commit. DynamicVerifier - this Verifier implements an auto-update and persistence -strategy to certify any SignedHeader of the blockchain. +strategy to verify any SignedHeader of the blockchain. ## Provider and PersistentProvider diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go index 3d1a70f2..2dee69f9 100644 --- a/lite/dynamic_verifier.go +++ b/lite/dynamic_verifier.go @@ -2,12 +2,15 @@ package lite import ( "bytes" - + "fmt" + "sync" log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" ) +const sizeOfPendingMap = 1024 + var _ Verifier = (*DynamicVerifier)(nil) // DynamicVerifier implements an auto-updating Verifier. 
It uses a @@ -21,6 +24,11 @@ type DynamicVerifier struct { trusted PersistentProvider // This is a source of new info, like a node rpc, or other import method. source Provider + + // pending map for synchronize concurrent verification requests + pendingVerifications map[int64]chan struct{} + + mtx sync.Mutex } // NewDynamicVerifier returns a new DynamicVerifier. It uses the @@ -31,10 +39,11 @@ type DynamicVerifier struct { // files.Provider. The source provider should be a client.HTTPProvider. func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provider) *DynamicVerifier { return &DynamicVerifier{ - logger: log.NewNopLogger(), - chainID: chainID, - trusted: trusted, - source: source, + logger: log.NewNopLogger(), + chainID: chainID, + trusted: trusted, + source: source, + pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap), } } @@ -56,7 +65,40 @@ func (ic *DynamicVerifier) ChainID() string { // ic.trusted and ic.source to prove the new validators. On success, it will // try to store the SignedHeader in ic.trusted if the next // validator can be sourced. -func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error { +func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error { + + // Performs synchronization for multi-threads verification at the same height. + ic.mtx.Lock() + if pending := ic.pendingVerifications[shdr.Height]; pending != nil { + ic.mtx.Unlock() + <-pending // pending is chan struct{} + } else { + pending := make(chan struct{}) + ic.pendingVerifications[shdr.Height] = pending + defer func() { + close(pending) + ic.mtx.Lock() + delete(ic.pendingVerifications, shdr.Height) + ic.mtx.Unlock() + }() + ic.mtx.Unlock() + } + //Get the exact trusted commit for h, and if it is + // equal to shdr, then don't even verify it, + // and just return nil. 
+ trustedFCSameHeight, err := ic.trusted.LatestFullCommit(ic.chainID, shdr.Height, shdr.Height) + if err == nil { + // If loading trust commit successfully, and trust commit equal to shdr, then don't verify it, + // just return nil. + if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) { + ic.logger.Info(fmt.Sprintf("Load full commit at height %d from cache, there is not need to verify.", shdr.Height)) + return nil + } + } else if !lerr.IsErrCommitNotFound(err) { + // Return error if it is not CommitNotFound error + ic.logger.Info(fmt.Sprintf("Encountered unknown error in loading full commit at height %d.", shdr.Height)) + return err + } // Get the latest known full commit <= h-1 from our trusted providers. // The full commit at h-1 contains the valset to sign for h. @@ -94,9 +136,9 @@ func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error { } } - // Certify the signed header using the matching valset. + // Verify the signed header using the matching valset. cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators) - err = cert.Certify(shdr) + err = cert.Verify(shdr) if err != nil { return err } diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 74e2d55a..401c1487 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -2,8 +2,8 @@ package lite import ( "fmt" + "sync" "testing" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -49,7 +49,7 @@ func TestInquirerValidPath(t *testing.T) { // This should fail validation: sh := fcz[count-1].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.NotNil(err) // Adding a few commits in the middle should be insufficient. @@ -57,7 +57,7 @@ func TestInquirerValidPath(t *testing.T) { err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(sh) + err = cert.Verify(sh) assert.NotNil(err) // With more info, we succeed. 
@@ -65,7 +65,7 @@ func TestInquirerValidPath(t *testing.T) { err := source.SaveFullCommit(fcz[i]) require.Nil(err) } - err = cert.Certify(sh) + err = cert.Verify(sh) assert.Nil(err, "%+v", err) } @@ -115,18 +115,18 @@ func TestInquirerVerifyHistorical(t *testing.T) { err = source.SaveFullCommit(fcz[7]) require.Nil(err, "%+v", err) sh := fcz[8].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[7].Height(), cert.LastTrustedHeight()) fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height()) require.NotNil(err, "%+v", err) assert.Equal(fc_, (FullCommit{})) - // With fcz[9] Certify will update last trusted height. + // With fcz[9] Verify will update last trusted height. err = source.SaveFullCommit(fcz[9]) require.Nil(err, "%+v", err) sh = fcz[8].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[8].Height(), cert.LastTrustedHeight()) fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height()) @@ -141,13 +141,70 @@ func TestInquirerVerifyHistorical(t *testing.T) { // Try to check an unknown seed in the past. sh = fcz[3].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[8].Height(), cert.LastTrustedHeight()) // Jump all the way forward again. sh = fcz[count-1].SignedHeader - err = cert.Certify(sh) + err = cert.Verify(sh) require.Nil(err, "%+v", err) assert.Equal(fcz[9].Height(), cert.LastTrustedHeight()) } + +func TestConcurrencyInquirerVerify(t *testing.T) { + _, require := assert.New(t), require.New(t) + trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10) + source := NewDBProvider("source", dbm.NewMemDB()) + + // Set up the validators to generate test blocks. + var vote int64 = 10 + keys := genPrivKeys(5) + nkeys := keys.Extend(1) + + // Construct a bunch of commits, each with one more height than the last. 
+ chainID := "inquiry-test" + count := 10 + consHash := []byte("special-params") + fcz := make([]FullCommit, count) + for i := 0; i < count; i++ { + vals := keys.ToValidators(vote, 0) + nextVals := nkeys.ToValidators(vote, 0) + h := int64(1 + i) + appHash := []byte(fmt.Sprintf("h=%d", h)) + resHash := []byte(fmt.Sprintf("res=%d", h)) + fcz[i] = keys.GenFullCommit( + chainID, h, nil, + vals, nextVals, + appHash, consHash, resHash, 0, len(keys)) + // Extend the keys by 1 each time. + keys = nkeys + nkeys = nkeys.Extend(1) + } + + // Initialize a Verifier with the initial state. + err := trust.SaveFullCommit(fcz[0]) + require.Nil(err) + cert := NewDynamicVerifier(chainID, trust, source) + cert.SetLogger(log.TestingLogger()) + + err = source.SaveFullCommit(fcz[7]) + err = source.SaveFullCommit(fcz[8]) + require.Nil(err, "%+v", err) + sh := fcz[8].SignedHeader + + var wg sync.WaitGroup + count = 100 + errList := make([]error, count) + for i := 0; i < count; i++ { + wg.Add(1) + go func(index int) { + errList[index] = cert.Verify(sh) + defer wg.Done() + }(i) + } + wg.Wait() + for _, err := range errList { + require.Nil(err) + } +} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 6f5a2899..84ff98b4 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -146,7 +146,7 @@ func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (t h, sh.Height) } - if err = cert.Certify(sh); err != nil { + if err = cert.Verify(sh); err != nil { return types.SignedHeader{}, err } diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index a93d30c7..b7c11f18 100644 --- a/lite/proxy/verifier.go +++ b/lite/proxy/verifier.go @@ -8,12 +8,12 @@ import ( lclient "github.com/tendermint/tendermint/lite/client" ) -func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger) (*lite.DynamicVerifier, error) { +func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) 
(*lite.DynamicVerifier, error) { logger = logger.With("module", "lite/proxy") logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client) - memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(10) + memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize) lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir)) trust := lite.NewMultiProvider( memProvider, diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 522511a8..4c0df022 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -134,10 +134,10 @@ func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) { } rpcclient.WaitForHeight(w.Client, *height, nil) res, err := w.Client.Commit(height) - // if we got it, then certify it + // if we got it, then verify it if err == nil { sh := res.SignedHeader - err = w.cert.Certify(sh) + err = w.cert.Verify(sh) } return res, err } diff --git a/lite/types.go b/lite/types.go index 7228c74a..643f5ad4 100644 --- a/lite/types.go +++ b/lite/types.go @@ -8,6 +8,6 @@ import ( // Verifier must know the current or recent set of validitors by some other // means. 
type Verifier interface { - Certify(sheader types.SignedHeader) error + Verify(sheader types.SignedHeader) error ChainID() string } From 47bc15c27a2bc8f01c6d31eca01691659871cfdb Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Sat, 29 Sep 2018 03:28:42 +0400 Subject: [PATCH 04/18] disable mempool WAL by default (#2490) --- CHANGELOG_PENDING.md | 1 + config/config.go | 2 +- docs/tendermint-core/running-in-production.md | 4 ++++ 3 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c1db6763..89ac9c13 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,6 +5,7 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config +- [config] `mempool.wal` is disabled by default * Apps diff --git a/config/config.go b/config/config.go index 87a74131..2ccb4908 100644 --- a/config/config.go +++ b/config/config.go @@ -503,7 +503,7 @@ func DefaultMempoolConfig() *MempoolConfig { Recheck: true, RecheckEmpty: true, Broadcast: true, - WalPath: filepath.Join(defaultDataDir, "mempool.wal"), + WalPath: "", // Each signature verification takes .5ms, size reduced until we implement // ABCI Recheck Size: 5000, diff --git a/docs/tendermint-core/running-in-production.md b/docs/tendermint-core/running-in-production.md index c774cd13..fb98626a 100644 --- a/docs/tendermint-core/running-in-production.md +++ b/docs/tendermint-core/running-in-production.md @@ -74,6 +74,10 @@ propose it. Clients must monitor their txs by subscribing over websockets, polling for them, or using `/broadcast_tx_commit`. In the worst case, txs can be resent from the mempool WAL manually. +For the above reasons, the `mempool.wal` is disabled by default. To enable, set +`mempool.wal_dir` to where you want the WAL to be located (e.g. +`data/mempool.wal`). 
+ ## DOS Exposure and Mitigation Validators are supposed to setup [Sentry Node From fc073746a0c12da3f1de7113c6e141638e707f77 Mon Sep 17 00:00:00 2001 From: Ismail Khoffi Date: Sat, 29 Sep 2018 01:57:29 +0200 Subject: [PATCH 05/18] privval: Switch to amino encoding in SignBytes (#2459) * switch to amino for SignBytes and add Reply with error message - currently only Vote is done * switch Reply type in socket for other messages - add error description on error * add TODOs regarding error handling * address comments from peer review session (thx @xla) - contains all changes besides the test-coverage / error'ing branches * increase test coverage: - add tests for each newly introduced error'ing code path * return error if received wrong response * add test for wrong response branches (ErrUnexpectedResponse) * update CHANGELOG_PENDING and related documentation (spec) * fix typo: s/CanonicallockID/CanonicalBlockID * fixes from review --- CHANGELOG_PENDING.md | 6 + docs/spec/blockchain/blockchain.md | 16 ++- docs/spec/blockchain/encoding.md | 24 ++-- privval/priv_validator.go | 31 ++--- privval/socket.go | 120 ++++++++++++++----- privval/socket_test.go | 185 +++++++++++++++++++++++++++-- types/canonical.go | 116 ++++++++++++++++++ types/canonical_json.go | 115 ------------------ types/heartbeat.go | 2 +- types/heartbeat_test.go | 26 ++-- types/priv_validator.go | 27 +++++ types/proposal.go | 2 +- types/proposal_test.go | 15 +-- types/vote.go | 12 +- types/vote_test.go | 10 +- 15 files changed, 490 insertions(+), 217 deletions(-) create mode 100644 types/canonical.go delete mode 100644 types/canonical_json.go diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 89ac9c13..c6346f6a 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -13,6 +13,12 @@ BREAKING CHANGES: - [node] Remove node.RunForever - [config] \#2232 timeouts as time.Duration, not ints +* Blockchain Protocol + * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of 
JSON in `SignBytes`. + * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). + +* P2P Protocol + FEATURES: IMPROVEMENTS: diff --git a/docs/spec/blockchain/blockchain.md b/docs/spec/blockchain/blockchain.md index bd4d8ddd..bd0af70a 100644 --- a/docs/spec/blockchain/blockchain.md +++ b/docs/spec/blockchain/blockchain.md @@ -401,14 +401,22 @@ must be greater than 2/3 of the total voting power of the complete validator set A vote is a signed message broadcast in the consensus for a particular block at a particular height and round. When stored in the blockchain or propagated over the network, votes are encoded in Amino. -For signing, votes are encoded in JSON, and the ChainID is included, in the form of the `CanonicalSignBytes`. +For signing, votes are represented via `CanonicalVote` and also encoded using amino (protobuf compatible) via +`Vote.SignBytes` which includes the `ChainID`. 
-We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the CanonicalSignBytes +We define a method `Verify` that returns `true` if the signature verifies against the pubkey for the `SignBytes` using the given ChainID: ```go -func (v Vote) Verify(chainID string, pubKey PubKey) bool { - return pubKey.Verify(v.Signature, CanonicalSignBytes(chainID, v)) +func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error { + if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) { + return ErrVoteInvalidValidatorAddress + } + + if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) { + return ErrVoteInvalidSignature + } + return nil } ``` diff --git a/docs/spec/blockchain/encoding.md b/docs/spec/blockchain/encoding.md index 4ad30df6..2ff024ce 100644 --- a/docs/spec/blockchain/encoding.md +++ b/docs/spec/blockchain/encoding.md @@ -298,14 +298,22 @@ Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the ### Signed Messages -Signed messages (eg. votes, proposals) in the consensus are encoded using Amino-JSON, rather than in the standard binary format -(NOTE: this is subject to change: https://github.com/tendermint/tendermint/issues/1622) +Signed messages (eg. votes, proposals) in the consensus are encoded using Amino. -When signing, the elements of a message are sorted by key and prepended with -a `@chain_id` and `@type` field. -We call this encoding the CanonicalSignBytes. For instance, CanonicalSignBytes for a vote would look -like: +When signing, the elements of a message are sorted alphabetically by key and prepended with +a `chain_id` and `type` field. +We call this encoding the SignBytes. 
For instance, SignBytes for a vote is the Amino encoding of the following struct: -```json -{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"8B01023386C371778ECB6368573E539AFC3CC860","parts":{"hash":"72DB3D959635DFF1BB567BEDAA70573392C51596","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2} +```go +type CanonicalVote struct { + ChainID string + Type string + BlockID CanonicalBlockID + Height int64 + Round int + Timestamp time.Time + VoteType byte +} ``` + +NOTE: see [#1622](https://github.com/tendermint/tendermint/issues/1622) for how field ordering will change diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 3ba0519c..8091744c 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -311,21 +311,18 @@ func (pv *FilePV) String() string { // returns the timestamp from the lastSignBytes. // returns true if the only difference in the votes is their timestamp. func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastVote, newVote types.CanonicalJSONVote - if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil { + var lastVote, newVote types.CanonicalVote + if err := cdc.UnmarshalBinary(lastSignBytes, &lastVote); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) } - if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil { + if err := cdc.UnmarshalBinary(newSignBytes, &newVote); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) } - lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp) - if err != nil { - panic(err) - } + lastTime := lastVote.Timestamp // set the times to the same value and check equality - now := types.CanonicalTime(tmtime.Now()) + now := tmtime.Now() lastVote.Timestamp = now newVote.Timestamp = now lastVoteBytes, _ := cdc.MarshalJSON(lastVote) @@ -337,25 +334,21 @@ func 
checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T // returns the timestamp from the lastSignBytes. // returns true if the only difference in the proposals is their timestamp func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { - var lastProposal, newProposal types.CanonicalJSONProposal - if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil { + var lastProposal, newProposal types.CanonicalProposal + if err := cdc.UnmarshalBinary(lastSignBytes, &lastProposal); err != nil { panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) } - if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil { + if err := cdc.UnmarshalBinary(newSignBytes, &newProposal); err != nil { panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) } - lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp) - if err != nil { - panic(err) - } - + lastTime := lastProposal.Timestamp // set the times to the same value and check equality - now := types.CanonicalTime(tmtime.Now()) + now := tmtime.Now() lastProposal.Timestamp = now newProposal.Timestamp = now - lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) - newProposalBytes, _ := cdc.MarshalJSON(newProposal) + lastProposalBytes, _ := cdc.MarshalBinary(lastProposal) + newProposalBytes, _ := cdc.MarshalBinary(newProposal) return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) } diff --git a/privval/socket.go b/privval/socket.go index d5ede471..da95f8fb 100644 --- a/privval/socket.go +++ b/privval/socket.go @@ -7,7 +7,7 @@ import ( "net" "time" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -27,9 +27,10 @@ const ( // Socket errors. 
var ( - ErrDialRetryMax = errors.New("dialed maximum retries") - ErrConnWaitTimeout = errors.New("waited for remote signer for too long") - ErrConnTimeout = errors.New("remote signer timed out") + ErrDialRetryMax = errors.New("dialed maximum retries") + ErrConnWaitTimeout = errors.New("waited for remote signer for too long") + ErrConnTimeout = errors.New("remote signer timed out") + ErrUnexpectedResponse = errors.New("received unexpected response") ) var ( @@ -150,7 +151,7 @@ func (sc *SocketPV) getPubKey() (crypto.PubKey, error) { // SignVote implements PrivValidator. func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { - err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote}) + err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) if err != nil { return err } @@ -160,7 +161,16 @@ func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { return err } - *vote = *res.(*SignVoteMsg).Vote + resp, ok := res.(*SignedVoteResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return fmt.Errorf("remote error occurred: code: %v, description: %s", + resp.Error.Code, + resp.Error.Description) + } + *vote = *resp.Vote return nil } @@ -170,7 +180,7 @@ func (sc *SocketPV) SignProposal( chainID string, proposal *types.Proposal, ) error { - err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal}) + err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) if err != nil { return err } @@ -179,8 +189,16 @@ func (sc *SocketPV) SignProposal( if err != nil { return err } - - *proposal = *res.(*SignProposalMsg).Proposal + resp, ok := res.(*SignedProposalResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return fmt.Errorf("remote error occurred: code: %v, description: %s", + resp.Error.Code, + resp.Error.Description) + } + *proposal = *resp.Proposal return nil } @@ -190,7 +208,7 @@ func (sc *SocketPV) SignHeartbeat( chainID string, heartbeat *types.Heartbeat, ) error { - err := 
writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat}) + err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat}) if err != nil { return err } @@ -199,8 +217,16 @@ func (sc *SocketPV) SignHeartbeat( if err != nil { return err } - - *heartbeat = *res.(*SignHeartbeatMsg).Heartbeat + resp, ok := res.(*SignedHeartbeatResponse) + if !ok { + return ErrUnexpectedResponse + } + if resp.Error != nil { + return fmt.Errorf("remote error occurred: code: %v, description: %s", + resp.Error.Code, + resp.Error.Description) + } + *heartbeat = *resp.Heartbeat return nil } @@ -462,22 +488,34 @@ func (rs *RemoteSigner) handleConnection(conn net.Conn) { var p crypto.PubKey p = rs.privVal.GetPubKey() res = &PubKeyMsg{p} - case *SignVoteMsg: + case *SignVoteRequest: err = rs.privVal.SignVote(rs.chainID, r.Vote) - res = &SignVoteMsg{r.Vote} - case *SignProposalMsg: + if err != nil { + res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedVoteResponse{r.Vote, nil} + } + case *SignProposalRequest: err = rs.privVal.SignProposal(rs.chainID, r.Proposal) - res = &SignProposalMsg{r.Proposal} - case *SignHeartbeatMsg: + if err != nil { + res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedProposalResponse{r.Proposal, nil} + } + case *SignHeartbeatRequest: err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) - res = &SignHeartbeatMsg{r.Heartbeat} + if err != nil { + res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &SignedHeartbeatResponse{r.Heartbeat, nil} + } default: err = fmt.Errorf("unknown msg: %v", r) } if err != nil { + // only log the error; we'll reply with an error in res rs.Logger.Error("handleConnection", "err", err) - return } err = writeMsg(conn, res) @@ -496,9 +534,12 @@ type SocketPVMsg interface{} func RegisterSocketPVMsg(cdc *amino.Codec) { cdc.RegisterInterface((*SocketPVMsg)(nil), nil) cdc.RegisterConcrete(&PubKeyMsg{}, 
"tendermint/socketpv/PubKeyMsg", nil) - cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil) - cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil) - cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil) + cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/socketpv/SignVoteRequest", nil) + cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/socketpv/SignedVoteResponse", nil) + cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/socketpv/SignProposalRequest", nil) + cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/socketpv/SignedProposalResponse", nil) + cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/socketpv/SignHeartbeatRequest", nil) + cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/socketpv/SignedHeartbeatResponse", nil) } // PubKeyMsg is a PrivValidatorSocket message containing the public key. @@ -506,21 +547,44 @@ type PubKeyMsg struct { PubKey crypto.PubKey } -// SignVoteMsg is a PrivValidatorSocket message containing a vote. -type SignVoteMsg struct { +// SignVoteRequest is a PrivValidatorSocket message containing a vote. +type SignVoteRequest struct { Vote *types.Vote } -// SignProposalMsg is a PrivValidatorSocket message containing a Proposal. -type SignProposalMsg struct { +// SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potenial error message. +type SignedVoteResponse struct { + Vote *types.Vote + Error *RemoteSignerError +} + +// SignProposalRequest is a PrivValidatorSocket message containing a Proposal. +type SignProposalRequest struct { Proposal *types.Proposal } -// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat. -type SignHeartbeatMsg struct { +type SignedProposalResponse struct { + Proposal *types.Proposal + Error *RemoteSignerError +} + +// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat. 
+type SignHeartbeatRequest struct { Heartbeat *types.Heartbeat } +type SignedHeartbeatResponse struct { + Heartbeat *types.Heartbeat + Error *RemoteSignerError +} + +// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply. +type RemoteSignerError struct { + // TODO(ismail): create an enum of known errors + Code int + Description string +} + func readMsg(r io.Reader) (msg SocketPVMsg, err error) { const maxSocketPVMsgSize = 1024 * 10 _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) diff --git a/privval/socket_test.go b/privval/socket_test.go index 461ce3f8..84e721be 100644 --- a/privval/socket_test.go +++ b/privval/socket_test.go @@ -20,7 +20,7 @@ import ( func TestSocketPVAddress(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ) defer sc.Stop() defer rs.Stop() @@ -40,7 +40,7 @@ func TestSocketPVAddress(t *testing.T) { func TestSocketPVPubKey(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ) defer sc.Stop() defer rs.Stop() @@ -59,7 +59,7 @@ func TestSocketPVPubKey(t *testing.T) { func TestSocketPVProposal(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ts = time.Now() privProposal = &types.Proposal{Timestamp: ts} @@ -76,7 +76,7 @@ func TestSocketPVProposal(t *testing.T) { func TestSocketPVVote(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV()) ts = time.Now() vType = types.VoteTypePrecommit @@ -94,7 +94,7 @@ func TestSocketPVVote(t *testing.T) { func TestSocketPVHeartbeat(t *testing.T) { var ( chainID = cmn.RandStr(12) - sc, rs = testSetupSocketPair(t, chainID) + sc, rs = testSetupSocketPair(t, 
chainID, types.NewMockPV()) want = &types.Heartbeat{} have = &types.Heartbeat{} @@ -231,14 +231,163 @@ func TestRemoteSignerRetry(t *testing.T) { } } +func TestRemoteSignVoteErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + + ts = time.Now() + vType = types.VoteTypePrecommit + vote = &types.Vote{Timestamp: ts, Type: vType} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedVoteResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignVote(chainID, vote) + require.Error(t, err) + err = sc.SignVote(chainID, vote) + require.Error(t, err) +} + +func TestRemoteSignProposalErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + + ts = time.Now() + proposal = &types.Proposal{Timestamp: ts} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := *res.(*SignedProposalResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignProposal(chainID, proposal) + require.Error(t, err) + + err = sc.SignProposal(chainID, proposal) + require.Error(t, err) +} + +func TestRemoteSignHeartbeatErrors(t *testing.T) { + var ( + chainID = cmn.RandStr(12) + sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV()) + hb = &types.Heartbeat{} + ) + defer sc.Stop() + defer rs.Stop() + + err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: hb}) + require.NoError(t, err) + + res, err := readMsg(sc.conn) + require.NoError(t, err) + + resp := 
*res.(*SignedHeartbeatResponse) + require.NotNil(t, resp.Error) + require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error()) + + err = rs.privVal.SignHeartbeat(chainID, hb) + require.Error(t, err) + + err = sc.SignHeartbeat(chainID, hb) + require.Error(t, err) +} + +func TestErrUnexpectedResponse(t *testing.T) { + var ( + addr = testFreeAddr(t) + logger = log.TestingLogger() + chainID = cmn.RandStr(12) + readyc = make(chan struct{}) + errc = make(chan error, 1) + + rs = NewRemoteSigner( + logger, + chainID, + addr, + types.NewMockPV(), + ed25519.GenPrivKey(), + ) + sc = NewSocketPV( + logger, + addr, + ed25519.GenPrivKey(), + ) + ) + + testStartSocketPV(t, readyc, sc) + defer sc.Stop() + RemoteSignerConnDeadline(time.Millisecond)(rs) + RemoteSignerConnRetries(1e6)(rs) + + // we do not want to Start() the remote signer here and instead use the connection to + // reply with intentionally wrong replies below: + rsConn, err := rs.connect() + defer rsConn.Close() + require.NoError(t, err) + require.NotNil(t, rsConn) + <-readyc + + // Heartbeat: + go func(errc chan error) { + errc <- sc.SignHeartbeat(chainID, &types.Heartbeat{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) + + // Proposal: + go func(errc chan error) { + errc <- sc.SignProposal(chainID, &types.Proposal{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) + + // Vote: + go func(errc chan error) { + errc <- sc.SignVote(chainID, &types.Vote{}) + }(errc) + // read request and write wrong response: + go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn) + err = <-errc + require.Error(t, err) + require.Equal(t, err, ErrUnexpectedResponse) +} + func testSetupSocketPair( t 
*testing.T, chainID string, + privValidator types.PrivValidator, ) (*SocketPV, *RemoteSigner) { var ( addr = testFreeAddr(t) logger = log.TestingLogger() - privVal = types.NewMockPV() + privVal = privValidator readyc = make(chan struct{}) rs = NewRemoteSigner( logger, @@ -254,12 +403,7 @@ func testSetupSocketPair( ) ) - go func(sc *SocketPV) { - require.NoError(t, sc.Start()) - assert.True(t, sc.IsRunning()) - - readyc <- struct{}{} - }(sc) + testStartSocketPV(t, readyc, sc) RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnRetries(1e6)(rs) @@ -272,6 +416,23 @@ func testSetupSocketPair( return sc, rs } +func testReadWriteResponse(t *testing.T, resp SocketPVMsg, rsConn net.Conn) { + _, err := readMsg(rsConn) + require.NoError(t, err) + + err = writeMsg(rsConn, resp) + require.NoError(t, err) +} + +func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *SocketPV) { + go func(sc *SocketPV) { + require.NoError(t, sc.Start()) + assert.True(t, sc.IsRunning()) + + readyc <- struct{}{} + }(sc) +} + // testFreeAddr claims a free port so we don't block on listener being ready. func testFreeAddr(t *testing.T) string { ln, err := net.Listen("tcp", "127.0.0.1:0") diff --git a/types/canonical.go b/types/canonical.go new file mode 100644 index 00000000..cdf0bd7b --- /dev/null +++ b/types/canonical.go @@ -0,0 +1,116 @@ +package types + +import ( + "time" + + cmn "github.com/tendermint/tendermint/libs/common" + tmtime "github.com/tendermint/tendermint/types/time" +) + +// Canonical* wraps the structs in types for amino encoding them for use in SignBytes / the Signable interface. 
+ +// TimeFormat is used for generating the sigs +const TimeFormat = time.RFC3339Nano + +type CanonicalBlockID struct { + Hash cmn.HexBytes `json:"hash,omitempty"` + PartsHeader CanonicalPartSetHeader `json:"parts,omitempty"` +} + +type CanonicalPartSetHeader struct { + Hash cmn.HexBytes `json:"hash,omitempty"` + Total int `json:"total,omitempty"` +} + +type CanonicalProposal struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + BlockPartsHeader CanonicalPartSetHeader `json:"block_parts_header"` + Height int64 `json:"height"` + POLBlockID CanonicalBlockID `json:"pol_block_id"` + POLRound int `json:"pol_round"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` +} + +type CanonicalVote struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + BlockID CanonicalBlockID `json:"block_id"` + Height int64 `json:"height"` + Round int `json:"round"` + Timestamp time.Time `json:"timestamp"` + VoteType byte `json:"type"` +} + +type CanonicalHeartbeat struct { + ChainID string `json:"@chain_id"` + Type string `json:"@type"` + Height int64 `json:"height"` + Round int `json:"round"` + Sequence int `json:"sequence"` + ValidatorAddress Address `json:"validator_address"` + ValidatorIndex int `json:"validator_index"` +} + +//----------------------------------- +// Canonicalize the structs + +func CanonicalizeBlockID(blockID BlockID) CanonicalBlockID { + return CanonicalBlockID{ + Hash: blockID.Hash, + PartsHeader: CanonicalizePartSetHeader(blockID.PartsHeader), + } +} + +func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader { + return CanonicalPartSetHeader{ + psh.Hash, + psh.Total, + } +} + +func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal { + return CanonicalProposal{ + ChainID: chainID, + Type: "proposal", + BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader), + Height: proposal.Height, + Timestamp: proposal.Timestamp, + POLBlockID: 
CanonicalizeBlockID(proposal.POLBlockID), + POLRound: proposal.POLRound, + Round: proposal.Round, + } +} + +func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote { + return CanonicalVote{ + ChainID: chainID, + Type: "vote", + BlockID: CanonicalizeBlockID(vote.BlockID), + Height: vote.Height, + Round: vote.Round, + Timestamp: vote.Timestamp, + VoteType: vote.Type, + } +} + +func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat { + return CanonicalHeartbeat{ + ChainID: chainID, + Type: "heartbeat", + Height: heartbeat.Height, + Round: heartbeat.Round, + Sequence: heartbeat.Sequence, + ValidatorAddress: heartbeat.ValidatorAddress, + ValidatorIndex: heartbeat.ValidatorIndex, + } +} + +// CanonicalTime can be used to stringify time in a canonical way. +func CanonicalTime(t time.Time) string { + // Note that sending time over amino resets it to + // local time, we need to force UTC here, so the + // signatures match + return tmtime.Canonical(t).Format(TimeFormat) +} diff --git a/types/canonical_json.go b/types/canonical_json.go deleted file mode 100644 index d8399ff1..00000000 --- a/types/canonical_json.go +++ /dev/null @@ -1,115 +0,0 @@ -package types - -import ( - "time" - - cmn "github.com/tendermint/tendermint/libs/common" - tmtime "github.com/tendermint/tendermint/types/time" -) - -// Canonical json is amino's json for structs with fields in alphabetical order - -// TimeFormat is used for generating the sigs -const TimeFormat = time.RFC3339Nano - -type CanonicalJSONBlockID struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"` -} - -type CanonicalJSONPartSetHeader struct { - Hash cmn.HexBytes `json:"hash,omitempty"` - Total int `json:"total,omitempty"` -} - -type CanonicalJSONProposal struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"` - Height int64 `json:"height"` - 
POLBlockID CanonicalJSONBlockID `json:"pol_block_id"` - POLRound int `json:"pol_round"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` -} - -type CanonicalJSONVote struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - BlockID CanonicalJSONBlockID `json:"block_id"` - Height int64 `json:"height"` - Round int `json:"round"` - Timestamp string `json:"timestamp"` - VoteType byte `json:"type"` -} - -type CanonicalJSONHeartbeat struct { - ChainID string `json:"@chain_id"` - Type string `json:"@type"` - Height int64 `json:"height"` - Round int `json:"round"` - Sequence int `json:"sequence"` - ValidatorAddress Address `json:"validator_address"` - ValidatorIndex int `json:"validator_index"` -} - -//----------------------------------- -// Canonicalize the structs - -func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID { - return CanonicalJSONBlockID{ - Hash: blockID.Hash, - PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader), - } -} - -func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader { - return CanonicalJSONPartSetHeader{ - psh.Hash, - psh.Total, - } -} - -func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal { - return CanonicalJSONProposal{ - ChainID: chainID, - Type: "proposal", - BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader), - Height: proposal.Height, - Timestamp: CanonicalTime(proposal.Timestamp), - POLBlockID: CanonicalBlockID(proposal.POLBlockID), - POLRound: proposal.POLRound, - Round: proposal.Round, - } -} - -func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote { - return CanonicalJSONVote{ - ChainID: chainID, - Type: "vote", - BlockID: CanonicalBlockID(vote.BlockID), - Height: vote.Height, - Round: vote.Round, - Timestamp: CanonicalTime(vote.Timestamp), - VoteType: vote.Type, - } -} - -func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat { - return CanonicalJSONHeartbeat{ - ChainID: chainID, - 
Type: "heartbeat", - Height: heartbeat.Height, - Round: heartbeat.Round, - Sequence: heartbeat.Sequence, - ValidatorAddress: heartbeat.ValidatorAddress, - ValidatorIndex: heartbeat.ValidatorIndex, - } -} - -func CanonicalTime(t time.Time) string { - // Note that sending time over amino resets it to - // local time, we need to force UTC here, so the - // signatures match - return tmtime.Canonical(t).Format(TimeFormat) -} diff --git a/types/heartbeat.go b/types/heartbeat.go index 151f1b0b..de03d5cc 100644 --- a/types/heartbeat.go +++ b/types/heartbeat.go @@ -23,7 +23,7 @@ type Heartbeat struct { // SignBytes returns the Heartbeat bytes for signing. // It panics if the Heartbeat is nil. func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat)) + bz, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, heartbeat)) if err != nil { panic(err) } diff --git a/types/heartbeat_test.go b/types/heartbeat_test.go index ce9e4923..550bcc73 100644 --- a/types/heartbeat_test.go +++ b/types/heartbeat_test.go @@ -34,19 +34,27 @@ func TestHeartbeatString(t *testing.T) { } func TestHeartbeatWriteSignBytes(t *testing.T) { + chainID := "test_chain_id" - hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} - bz := hb.SignBytes("0xdeadbeef") - // XXX HMMMMMMM - require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"10","round":"1","sequence":"0","validator_address":"","validator_index":"1"}`) + { + testHeartbeat := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} + signBytes := testHeartbeat.SignBytes(chainID) + expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") + } - plainHb := &Heartbeat{} - bz = plainHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), 
`{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"0","round":"0","sequence":"0","validator_address":"","validator_index":"0"}`) + { + testHeartbeat := &Heartbeat{} + signBytes := testHeartbeat.SignBytes(chainID) + expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat") + } require.Panics(t, func() { var nilHb *Heartbeat - bz := nilHb.SignBytes("0xdeadbeef") - require.Equal(t, string(bz), "null") + signBytes := nilHb.SignBytes(chainID) + require.Equal(t, string(signBytes), "null") }) } diff --git a/types/priv_validator.go b/types/priv_validator.go index 1642be41..25be5220 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -2,6 +2,7 @@ package types import ( "bytes" + "errors" "fmt" "github.com/tendermint/tendermint/crypto" @@ -103,3 +104,29 @@ func (pv *MockPV) DisableChecks() { // Currently this does nothing, // as MockPV has no safety checks at all. } + +type erroringMockPV struct { + *MockPV +} + +var ErroringMockPVErr = errors.New("erroringMockPV always returns an error") + +// Implements PrivValidator. +func (pv *erroringMockPV) SignVote(chainID string, vote *Vote) error { + return ErroringMockPVErr +} + +// Implements PrivValidator. +func (pv *erroringMockPV) SignProposal(chainID string, proposal *Proposal) error { + return ErroringMockPVErr +} + +// signHeartbeat signs the heartbeat without any checking. +func (pv *erroringMockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error { + return ErroringMockPVErr +} + +// NewErroringMockPV returns a MockPV that fails on each signing request. Again, for testing only. 
+func NewErroringMockPV() *erroringMockPV { + return &erroringMockPV{&MockPV{ed25519.GenPrivKey()}} +} diff --git a/types/proposal.go b/types/proposal.go index 97e0dca3..a2bc8e36 100644 --- a/types/proposal.go +++ b/types/proposal.go @@ -52,7 +52,7 @@ func (p *Proposal) String() string { // SignBytes returns the Proposal bytes for signing func (p *Proposal) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p)) + bz, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, p)) if err != nil { panic(err) } diff --git a/types/proposal_test.go b/types/proposal_test.go index 7396fb76..5f943308 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -24,17 +24,12 @@ func init() { } func TestProposalSignable(t *testing.T) { - signBytes := testProposal.SignBytes("test_chain_id") - signStr := string(signBytes) + chainID := "test_chain_id" + signBytes := testProposal.SignBytes(chainID) - expected := `{"@chain_id":"test_chain_id","@type":"proposal","block_parts_header":{"hash":"626C6F636B7061727473","total":"111"},"height":"12345","pol_block_id":{},"pol_round":"-1","round":"23456","timestamp":"2018-02-11T07:09:22.765Z"}` - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. Expected:\n%v\nGot:\n%v", expected, signStr) - } - - if signStr != expected { - t.Errorf("Got unexpected sign string for Proposal. 
Expected:\n%v\nGot:\n%v", expected, signStr) - } + expected, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, testProposal)) + require.NoError(t, err) + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Proposal") } func TestProposalString(t *testing.T) { diff --git a/types/vote.go b/types/vote.go index 4a90a718..ba2f1dfe 100644 --- a/types/vote.go +++ b/types/vote.go @@ -6,7 +6,7 @@ import ( "fmt" "time" - crypto "github.com/tendermint/tendermint/crypto" + "github.com/tendermint/tendermint/crypto" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -77,7 +77,7 @@ type Vote struct { } func (vote *Vote) SignBytes(chainID string) []byte { - bz, err := cdc.MarshalJSON(CanonicalVote(chainID, vote)) + bz, err := cdc.MarshalBinary(CanonicalizeVote(chainID, vote)) if err != nil { panic(err) } @@ -104,8 +104,12 @@ func (vote *Vote) String() string { } return fmt.Sprintf("Vote{%v:%X %v/%02d/%v(%v) %X %X @ %s}", - vote.ValidatorIndex, cmn.Fingerprint(vote.ValidatorAddress), - vote.Height, vote.Round, vote.Type, typeString, + vote.ValidatorIndex, + cmn.Fingerprint(vote.ValidatorAddress), + vote.Height, + vote.Round, + vote.Type, + typeString, cmn.Fingerprint(vote.BlockID.Hash), cmn.Fingerprint(vote.Signature), CanonicalTime(vote.Timestamp)) diff --git a/types/vote_test.go b/types/vote_test.go index dd7663e5..d0c41a06 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -46,13 +46,11 @@ func exampleVote(t byte) *Vote { func TestVoteSignable(t *testing.T) { vote := examplePrecommit() signBytes := vote.SignBytes("test_chain_id") - signStr := string(signBytes) - expected := `{"@chain_id":"test_chain_id","@type":"vote","block_id":{"hash":"8B01023386C371778ECB6368573E539AFC3CC860","parts":{"hash":"72DB3D959635DFF1BB567BEDAA70573392C51596","total":"1000000"}},"height":"12345","round":"2","timestamp":"2017-12-25T03:00:01.234Z","type":2}` - if signStr != expected { - // NOTE: when this fails, you probably want to fix up consensus/replay_test too - 
t.Errorf("Got unexpected sign string for Vote. Expected:\n%v\nGot:\n%v", expected, signStr) - } + expected, err := cdc.MarshalBinary(CanonicalizeVote("test_chain_id", vote)) + require.NoError(t, err) + + require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Vote.") } func TestVoteVerifySignature(t *testing.T) { From 71a34adfe58e654e36583a5db3b0ad4d78e3c0b3 Mon Sep 17 00:00:00 2001 From: Joon Date: Sat, 29 Sep 2018 09:03:19 +0900 Subject: [PATCH 06/18] General Merkle Proof (#2298) * first commit finalize rebase add protoc_merkle to Makefile * in progress * fix kvstore * fix tests * remove iavl dependency * fix tx_test * fix test_abci_cli fix test_apps * fix test_apps * fix test_cover * rm rebase residue * address comment in progress * finalize rebase --- Makefile | 4 +- abci/cmd/abci-cli/abci-cli.go | 5 +- abci/example/code/code.go | 1 + abci/example/kvstore/kvstore.go | 3 +- abci/tests/test_cli/ex1.abci.out | 4 + abci/types/types.pb.go | 817 +++++++++++---------------- abci/types/types.proto | 3 +- abci/types/typespb_test.go | 1 + crypto/merkle/compile.sh | 6 + crypto/merkle/merkle.pb.go | 792 ++++++++++++++++++++++++++ crypto/merkle/merkle.proto | 30 + crypto/merkle/proof.go | 132 +++++ crypto/merkle/proof_key_path.go | 107 ++++ crypto/merkle/proof_key_path_test.go | 41 ++ crypto/merkle/proof_simple_value.go | 91 +++ crypto/merkle/simple_proof.go | 53 +- crypto/merkle/simple_tree_test.go | 59 +- crypto/merkle/wire.go | 12 + docs/app-dev/app-development.md | 68 ++- lite/errors/errors.go | 21 + lite/proxy/proof.go | 14 + lite/proxy/query.go | 128 ++--- lite/proxy/query_test.go | 98 ++-- lite/proxy/wrapper.go | 7 +- rpc/client/httpclient.go | 2 +- rpc/client/localclient.go | 2 +- rpc/client/mock/abci.go | 23 +- rpc/client/mock/abci_test.go | 16 +- rpc/client/mock/client.go | 5 +- rpc/client/rpc_test.go | 14 +- rpc/client/types.go | 9 +- rpc/core/abci.go | 16 +- test/app/kvstore_test.sh | 2 +- types/block.go | 1 - types/part_set.go | 2 +- 
types/results_test.go | 4 +- types/tx.go | 17 +- types/tx_test.go | 6 +- 38 files changed, 1867 insertions(+), 749 deletions(-) create mode 100644 crypto/merkle/compile.sh create mode 100644 crypto/merkle/merkle.pb.go create mode 100644 crypto/merkle/merkle.proto create mode 100644 crypto/merkle/proof.go create mode 100644 crypto/merkle/proof_key_path.go create mode 100644 crypto/merkle/proof_key_path_test.go create mode 100644 crypto/merkle/proof_simple_value.go create mode 100644 crypto/merkle/wire.go create mode 100644 lite/proxy/proof.go diff --git a/Makefile b/Makefile index ffc72c46..73bd67b0 100644 --- a/Makefile +++ b/Makefile @@ -35,7 +35,7 @@ install: ######################################## ### Protobuf -protoc_all: protoc_libs protoc_abci protoc_grpc +protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc %.pb.go: %.proto ## If you get the following error, @@ -137,6 +137,8 @@ grpc_dbserver: protoc_grpc: rpc/grpc/types.pb.go +protoc_merkle: crypto/merkle/merkle.pb.go + ######################################## ### Testing diff --git a/abci/cmd/abci-cli/abci-cli.go b/abci/cmd/abci-cli/abci-cli.go index b7b8e7d7..50972ec3 100644 --- a/abci/cmd/abci-cli/abci-cli.go +++ b/abci/cmd/abci-cli/abci-cli.go @@ -22,6 +22,7 @@ import ( servertest "github.com/tendermint/tendermint/abci/tests/server" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/abci/version" + "github.com/tendermint/tendermint/crypto/merkle" ) // client is a global variable so it can be reused by the console @@ -100,7 +101,7 @@ type queryResponse struct { Key []byte Value []byte Height int64 - Proof []byte + Proof *merkle.Proof } func Execute() error { @@ -748,7 +749,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) { fmt.Printf("-> value.hex: %X\n", rsp.Query.Value) } if rsp.Query.Proof != nil { - fmt.Printf("-> proof: %X\n", rsp.Query.Proof) + fmt.Printf("-> proof: %#v\n", rsp.Query.Proof) } } } diff --git a/abci/example/code/code.go 
b/abci/example/code/code.go index 94e9d015..988b2a93 100644 --- a/abci/example/code/code.go +++ b/abci/example/code/code.go @@ -6,4 +6,5 @@ const ( CodeTypeEncodingError uint32 = 1 CodeTypeBadNonce uint32 = 2 CodeTypeUnauthorized uint32 = 3 + CodeTypeUnknownError uint32 = 4 ) diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index c1554cc5..9523bf74 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -81,7 +81,7 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx { app.state.Size += 1 tags := []cmn.KVPair{ - {Key: []byte("app.creator"), Value: []byte("jae")}, + {Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")}, {Key: []byte("app.key"), Value: key}, } return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags} @@ -114,6 +114,7 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type } return } else { + resQuery.Key = reqQuery.Data value := app.state.db.Get(prefixKey(reqQuery.Data)) resQuery.Value = value if value != nil { diff --git a/abci/tests/test_cli/ex1.abci.out b/abci/tests/test_cli/ex1.abci.out index 5d4c196d..0cdd43df 100644 --- a/abci/tests/test_cli/ex1.abci.out +++ b/abci/tests/test_cli/ex1.abci.out @@ -28,6 +28,8 @@ -> code: OK -> log: exists -> height: 0 +-> key: abc +-> key.hex: 616263 -> value: abc -> value.hex: 616263 @@ -42,6 +44,8 @@ -> code: OK -> log: exists -> height: 0 +-> key: def +-> key.hex: 646566 -> value: xyz -> value.hex: 78797A diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 3c7f81ab..427315df 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -9,6 +9,7 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/golang/protobuf/ptypes/timestamp" +import merkle "github.com/tendermint/tendermint/crypto/merkle" import common "github.com/tendermint/tendermint/libs/common" import time "time" @@ -60,7 +61,7 @@ func (m *Request) 
Reset() { *m = Request{} } func (m *Request) String() string { return proto.CompactTextString(m) } func (*Request) ProtoMessage() {} func (*Request) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{0} + return fileDescriptor_types_03c41ca87033c976, []int{0} } func (m *Request) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -482,7 +483,7 @@ func (m *RequestEcho) Reset() { *m = RequestEcho{} } func (m *RequestEcho) String() string { return proto.CompactTextString(m) } func (*RequestEcho) ProtoMessage() {} func (*RequestEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{1} + return fileDescriptor_types_03c41ca87033c976, []int{1} } func (m *RequestEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -528,7 +529,7 @@ func (m *RequestFlush) Reset() { *m = RequestFlush{} } func (m *RequestFlush) String() string { return proto.CompactTextString(m) } func (*RequestFlush) ProtoMessage() {} func (*RequestFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{2} + return fileDescriptor_types_03c41ca87033c976, []int{2} } func (m *RequestFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -568,7 +569,7 @@ func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (m *RequestInfo) String() string { return proto.CompactTextString(m) } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{3} + return fileDescriptor_types_03c41ca87033c976, []int{3} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -617,7 +618,7 @@ func (m *RequestSetOption) Reset() { *m = RequestSetOption{} } func (m *RequestSetOption) String() string { return proto.CompactTextString(m) } func (*RequestSetOption) ProtoMessage() {} func (*RequestSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{4} + return 
fileDescriptor_types_03c41ca87033c976, []int{4} } func (m *RequestSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -675,7 +676,7 @@ func (m *RequestInitChain) Reset() { *m = RequestInitChain{} } func (m *RequestInitChain) String() string { return proto.CompactTextString(m) } func (*RequestInitChain) ProtoMessage() {} func (*RequestInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{5} + return fileDescriptor_types_03c41ca87033c976, []int{5} } func (m *RequestInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -753,7 +754,7 @@ func (m *RequestQuery) Reset() { *m = RequestQuery{} } func (m *RequestQuery) String() string { return proto.CompactTextString(m) } func (*RequestQuery) ProtoMessage() {} func (*RequestQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{6} + return fileDescriptor_types_03c41ca87033c976, []int{6} } func (m *RequestQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -825,7 +826,7 @@ func (m *RequestBeginBlock) Reset() { *m = RequestBeginBlock{} } func (m *RequestBeginBlock) String() string { return proto.CompactTextString(m) } func (*RequestBeginBlock) ProtoMessage() {} func (*RequestBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{7} + return fileDescriptor_types_03c41ca87033c976, []int{7} } func (m *RequestBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -893,7 +894,7 @@ func (m *RequestCheckTx) Reset() { *m = RequestCheckTx{} } func (m *RequestCheckTx) String() string { return proto.CompactTextString(m) } func (*RequestCheckTx) ProtoMessage() {} func (*RequestCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{8} + return fileDescriptor_types_03c41ca87033c976, []int{8} } func (m *RequestCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -940,7 +941,7 @@ func (m *RequestDeliverTx) Reset() { 
*m = RequestDeliverTx{} } func (m *RequestDeliverTx) String() string { return proto.CompactTextString(m) } func (*RequestDeliverTx) ProtoMessage() {} func (*RequestDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{9} + return fileDescriptor_types_03c41ca87033c976, []int{9} } func (m *RequestDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -987,7 +988,7 @@ func (m *RequestEndBlock) Reset() { *m = RequestEndBlock{} } func (m *RequestEndBlock) String() string { return proto.CompactTextString(m) } func (*RequestEndBlock) ProtoMessage() {} func (*RequestEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{10} + return fileDescriptor_types_03c41ca87033c976, []int{10} } func (m *RequestEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1033,7 +1034,7 @@ func (m *RequestCommit) Reset() { *m = RequestCommit{} } func (m *RequestCommit) String() string { return proto.CompactTextString(m) } func (*RequestCommit) ProtoMessage() {} func (*RequestCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{11} + return fileDescriptor_types_03c41ca87033c976, []int{11} } func (m *RequestCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1086,7 +1087,7 @@ func (m *Response) Reset() { *m = Response{} } func (m *Response) String() string { return proto.CompactTextString(m) } func (*Response) ProtoMessage() {} func (*Response) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{12} + return fileDescriptor_types_03c41ca87033c976, []int{12} } func (m *Response) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1539,7 +1540,7 @@ func (m *ResponseException) Reset() { *m = ResponseException{} } func (m *ResponseException) String() string { return proto.CompactTextString(m) } func (*ResponseException) ProtoMessage() {} func (*ResponseException) Descriptor() ([]byte, []int) { - return 
fileDescriptor_types_8495fed925debe52, []int{13} + return fileDescriptor_types_03c41ca87033c976, []int{13} } func (m *ResponseException) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1586,7 +1587,7 @@ func (m *ResponseEcho) Reset() { *m = ResponseEcho{} } func (m *ResponseEcho) String() string { return proto.CompactTextString(m) } func (*ResponseEcho) ProtoMessage() {} func (*ResponseEcho) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{14} + return fileDescriptor_types_03c41ca87033c976, []int{14} } func (m *ResponseEcho) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1632,7 +1633,7 @@ func (m *ResponseFlush) Reset() { *m = ResponseFlush{} } func (m *ResponseFlush) String() string { return proto.CompactTextString(m) } func (*ResponseFlush) ProtoMessage() {} func (*ResponseFlush) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{15} + return fileDescriptor_types_03c41ca87033c976, []int{15} } func (m *ResponseFlush) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1675,7 +1676,7 @@ func (m *ResponseInfo) Reset() { *m = ResponseInfo{} } func (m *ResponseInfo) String() string { return proto.CompactTextString(m) } func (*ResponseInfo) ProtoMessage() {} func (*ResponseInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{16} + return fileDescriptor_types_03c41ca87033c976, []int{16} } func (m *ResponseInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1747,7 +1748,7 @@ func (m *ResponseSetOption) Reset() { *m = ResponseSetOption{} } func (m *ResponseSetOption) String() string { return proto.CompactTextString(m) } func (*ResponseSetOption) ProtoMessage() {} func (*ResponseSetOption) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{17} + return fileDescriptor_types_03c41ca87033c976, []int{17} } func (m *ResponseSetOption) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1809,7 
+1810,7 @@ func (m *ResponseInitChain) Reset() { *m = ResponseInitChain{} } func (m *ResponseInitChain) String() string { return proto.CompactTextString(m) } func (*ResponseInitChain) ProtoMessage() {} func (*ResponseInitChain) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{18} + return fileDescriptor_types_03c41ca87033c976, []int{18} } func (m *ResponseInitChain) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1855,23 +1856,23 @@ func (m *ResponseInitChain) GetValidators() []ValidatorUpdate { type ResponseQuery struct { Code uint32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // bytes data = 2; // use "value" instead. - Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` - Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` - Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` - Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` - Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` - Proof []byte `protobuf:"bytes,8,opt,name=proof,proto3" json:"proof,omitempty"` - Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Log string `protobuf:"bytes,3,opt,name=log,proto3" json:"log,omitempty"` + Info string `protobuf:"bytes,4,opt,name=info,proto3" json:"info,omitempty"` + Index int64 `protobuf:"varint,5,opt,name=index,proto3" json:"index,omitempty"` + Key []byte `protobuf:"bytes,6,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,7,opt,name=value,proto3" json:"value,omitempty"` + Proof *merkle.Proof `protobuf:"bytes,8,opt,name=proof" json:"proof,omitempty"` + Height int64 `protobuf:"varint,9,opt,name=height,proto3" json:"height,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + 
XXX_sizecache int32 `json:"-"` } func (m *ResponseQuery) Reset() { *m = ResponseQuery{} } func (m *ResponseQuery) String() string { return proto.CompactTextString(m) } func (*ResponseQuery) ProtoMessage() {} func (*ResponseQuery) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{19} + return fileDescriptor_types_03c41ca87033c976, []int{19} } func (m *ResponseQuery) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1942,7 +1943,7 @@ func (m *ResponseQuery) GetValue() []byte { return nil } -func (m *ResponseQuery) GetProof() []byte { +func (m *ResponseQuery) GetProof() *merkle.Proof { if m != nil { return m.Proof } @@ -1967,7 +1968,7 @@ func (m *ResponseBeginBlock) Reset() { *m = ResponseBeginBlock{} } func (m *ResponseBeginBlock) String() string { return proto.CompactTextString(m) } func (*ResponseBeginBlock) ProtoMessage() {} func (*ResponseBeginBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{20} + return fileDescriptor_types_03c41ca87033c976, []int{20} } func (m *ResponseBeginBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2020,7 +2021,7 @@ func (m *ResponseCheckTx) Reset() { *m = ResponseCheckTx{} } func (m *ResponseCheckTx) String() string { return proto.CompactTextString(m) } func (*ResponseCheckTx) ProtoMessage() {} func (*ResponseCheckTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{21} + return fileDescriptor_types_03c41ca87033c976, []int{21} } func (m *ResponseCheckTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2115,7 +2116,7 @@ func (m *ResponseDeliverTx) Reset() { *m = ResponseDeliverTx{} } func (m *ResponseDeliverTx) String() string { return proto.CompactTextString(m) } func (*ResponseDeliverTx) ProtoMessage() {} func (*ResponseDeliverTx) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{22} + return fileDescriptor_types_03c41ca87033c976, []int{22} } func (m 
*ResponseDeliverTx) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2206,7 +2207,7 @@ func (m *ResponseEndBlock) Reset() { *m = ResponseEndBlock{} } func (m *ResponseEndBlock) String() string { return proto.CompactTextString(m) } func (*ResponseEndBlock) ProtoMessage() {} func (*ResponseEndBlock) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{23} + return fileDescriptor_types_03c41ca87033c976, []int{23} } func (m *ResponseEndBlock) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2268,7 +2269,7 @@ func (m *ResponseCommit) Reset() { *m = ResponseCommit{} } func (m *ResponseCommit) String() string { return proto.CompactTextString(m) } func (*ResponseCommit) ProtoMessage() {} func (*ResponseCommit) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{24} + return fileDescriptor_types_03c41ca87033c976, []int{24} } func (m *ResponseCommit) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2318,7 +2319,7 @@ func (m *ConsensusParams) Reset() { *m = ConsensusParams{} } func (m *ConsensusParams) String() string { return proto.CompactTextString(m) } func (*ConsensusParams) ProtoMessage() {} func (*ConsensusParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{25} + return fileDescriptor_types_03c41ca87033c976, []int{25} } func (m *ConsensusParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2376,7 +2377,7 @@ func (m *BlockSize) Reset() { *m = BlockSize{} } func (m *BlockSize) String() string { return proto.CompactTextString(m) } func (*BlockSize) ProtoMessage() {} func (*BlockSize) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{26} + return fileDescriptor_types_03c41ca87033c976, []int{26} } func (m *BlockSize) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2432,7 +2433,7 @@ func (m *EvidenceParams) Reset() { *m = EvidenceParams{} } func (m *EvidenceParams) String() string { 
return proto.CompactTextString(m) } func (*EvidenceParams) ProtoMessage() {} func (*EvidenceParams) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{27} + return fileDescriptor_types_03c41ca87033c976, []int{27} } func (m *EvidenceParams) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2480,7 +2481,7 @@ func (m *LastCommitInfo) Reset() { *m = LastCommitInfo{} } func (m *LastCommitInfo) String() string { return proto.CompactTextString(m) } func (*LastCommitInfo) ProtoMessage() {} func (*LastCommitInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{28} + return fileDescriptor_types_03c41ca87033c976, []int{28} } func (m *LastCommitInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2553,7 +2554,7 @@ func (m *Header) Reset() { *m = Header{} } func (m *Header) String() string { return proto.CompactTextString(m) } func (*Header) ProtoMessage() {} func (*Header) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{29} + return fileDescriptor_types_03c41ca87033c976, []int{29} } func (m *Header) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2699,7 +2700,7 @@ func (m *BlockID) Reset() { *m = BlockID{} } func (m *BlockID) String() string { return proto.CompactTextString(m) } func (*BlockID) ProtoMessage() {} func (*BlockID) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{30} + return fileDescriptor_types_03c41ca87033c976, []int{30} } func (m *BlockID) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2754,7 +2755,7 @@ func (m *PartSetHeader) Reset() { *m = PartSetHeader{} } func (m *PartSetHeader) String() string { return proto.CompactTextString(m) } func (*PartSetHeader) ProtoMessage() {} func (*PartSetHeader) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{31} + return fileDescriptor_types_03c41ca87033c976, []int{31} } func (m *PartSetHeader) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2811,7 +2812,7 @@ func (m *Validator) Reset() { *m = Validator{} } func (m *Validator) String() string { return proto.CompactTextString(m) } func (*Validator) ProtoMessage() {} func (*Validator) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{32} + return fileDescriptor_types_03c41ca87033c976, []int{32} } func (m *Validator) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2867,7 +2868,7 @@ func (m *ValidatorUpdate) Reset() { *m = ValidatorUpdate{} } func (m *ValidatorUpdate) String() string { return proto.CompactTextString(m) } func (*ValidatorUpdate) ProtoMessage() {} func (*ValidatorUpdate) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{33} + return fileDescriptor_types_03c41ca87033c976, []int{33} } func (m *ValidatorUpdate) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2923,7 +2924,7 @@ func (m *VoteInfo) Reset() { *m = VoteInfo{} } func (m *VoteInfo) String() string { return proto.CompactTextString(m) } func (*VoteInfo) ProtoMessage() {} func (*VoteInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{34} + return fileDescriptor_types_03c41ca87033c976, []int{34} } func (m *VoteInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2978,7 +2979,7 @@ func (m *PubKey) Reset() { *m = PubKey{} } func (m *PubKey) String() string { return proto.CompactTextString(m) } func (*PubKey) ProtoMessage() {} func (*PubKey) Descriptor() ([]byte, []int) { - return fileDescriptor_types_8495fed925debe52, []int{35} + return fileDescriptor_types_03c41ca87033c976, []int{35} } func (m *PubKey) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -3036,7 +3037,7 @@ func (m *Evidence) Reset() { *m = Evidence{} } func (m *Evidence) String() string { return proto.CompactTextString(m) } func (*Evidence) ProtoMessage() {} func (*Evidence) Descriptor() ([]byte, []int) { - return 
fileDescriptor_types_8495fed925debe52, []int{36} + return fileDescriptor_types_03c41ca87033c976, []int{36} } func (m *Evidence) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -4347,7 +4348,7 @@ func (this *ResponseQuery) Equal(that interface{}) bool { if !bytes.Equal(this.Value, that1.Value) { return false } - if !bytes.Equal(this.Proof, that1.Proof) { + if !this.Proof.Equal(that1.Proof) { return false } if this.Height != that1.Height { @@ -6377,11 +6378,15 @@ func (m *ResponseQuery) MarshalTo(dAtA []byte) (int, error) { i = encodeVarintTypes(dAtA, i, uint64(len(m.Value))) i += copy(dAtA[i:], m.Value) } - if len(m.Proof) > 0 { + if m.Proof != nil { dAtA[i] = 0x42 i++ - i = encodeVarintTypes(dAtA, i, uint64(len(m.Proof))) - i += copy(dAtA[i:], m.Proof) + i = encodeVarintTypes(dAtA, i, uint64(m.Proof.Size())) + n31, err := m.Proof.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n31 } if m.Height != 0 { dAtA[i] = 0x48 @@ -6590,11 +6595,11 @@ func (m *ResponseEndBlock) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.ConsensusParamUpdates.Size())) - n31, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) + n32, err := m.ConsensusParamUpdates.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n31 + i += n32 } if len(m.Tags) > 0 { for _, msg := range m.Tags { @@ -6660,21 +6665,21 @@ func (m *ConsensusParams) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.BlockSize.Size())) - n32, err := m.BlockSize.MarshalTo(dAtA[i:]) + n33, err := m.BlockSize.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n32 + i += n33 } if m.EvidenceParams != nil { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.EvidenceParams.Size())) - n33, err := m.EvidenceParams.MarshalTo(dAtA[i:]) + n34, err := m.EvidenceParams.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n33 + i += n34 } if m.XXX_unrecognized != nil { i += copy(dAtA[i:], 
m.XXX_unrecognized) @@ -6806,11 +6811,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x1a i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n34, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n35, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n34 + i += n35 if m.NumTxs != 0 { dAtA[i] = 0x20 i++ @@ -6824,11 +6829,11 @@ func (m *Header) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x32 i++ i = encodeVarintTypes(dAtA, i, uint64(m.LastBlockId.Size())) - n35, err := m.LastBlockId.MarshalTo(dAtA[i:]) + n36, err := m.LastBlockId.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n35 + i += n36 if len(m.LastCommitHash) > 0 { dAtA[i] = 0x3a i++ @@ -6913,11 +6918,11 @@ func (m *BlockID) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = encodeVarintTypes(dAtA, i, uint64(m.PartsHeader.Size())) - n36, err := m.PartsHeader.MarshalTo(dAtA[i:]) + n37, err := m.PartsHeader.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n36 + i += n37 if m.XXX_unrecognized != nil { i += copy(dAtA[i:], m.XXX_unrecognized) } @@ -7006,11 +7011,11 @@ func (m *ValidatorUpdate) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.PubKey.Size())) - n37, err := m.PubKey.MarshalTo(dAtA[i:]) + n38, err := m.PubKey.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n37 + i += n38 if m.Power != 0 { dAtA[i] = 0x10 i++ @@ -7040,11 +7045,11 @@ func (m *VoteInfo) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0xa i++ i = encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n38, err := m.Validator.MarshalTo(dAtA[i:]) + n39, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n38 + i += n39 if m.SignedLastBlock { dAtA[i] = 0x10 i++ @@ -7118,11 +7123,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x12 i++ i = 
encodeVarintTypes(dAtA, i, uint64(m.Validator.Size())) - n39, err := m.Validator.MarshalTo(dAtA[i:]) + n40, err := m.Validator.MarshalTo(dAtA[i:]) if err != nil { return 0, err } - i += n39 + i += n40 if m.Height != 0 { dAtA[i] = 0x18 i++ @@ -7131,11 +7136,11 @@ func (m *Evidence) MarshalTo(dAtA []byte) (int, error) { dAtA[i] = 0x22 i++ i = encodeVarintTypes(dAtA, i, uint64(github_com_gogo_protobuf_types.SizeOfStdTime(m.Time))) - n40, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) + n41, err := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Time, dAtA[i:]) if err != nil { return 0, err } - i += n40 + i += n41 if m.TotalVotingPower != 0 { dAtA[i] = 0x28 i++ @@ -7586,10 +7591,8 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { for i := 0; i < v17; i++ { this.Value[i] = byte(r.Intn(256)) } - v18 := r.Intn(100) - this.Proof = make([]byte, v18) - for i := 0; i < v18; i++ { - this.Proof[i] = byte(r.Intn(256)) + if r.Intn(10) != 0 { + this.Proof = merkle.NewPopulatedProof(r, easy) } this.Height = int64(r.Int63()) if r.Intn(2) == 0 { @@ -7604,11 +7607,11 @@ func NewPopulatedResponseQuery(r randyTypes, easy bool) *ResponseQuery { func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock { this := &ResponseBeginBlock{} if r.Intn(10) != 0 { - v19 := r.Intn(5) - this.Tags = make([]common.KVPair, v19) - for i := 0; i < v19; i++ { - v20 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v20 + v18 := r.Intn(5) + this.Tags = make([]common.KVPair, v18) + for i := 0; i < v18; i++ { + v19 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v19 } } if !easy && r.Intn(10) != 0 { @@ -7620,9 +7623,9 @@ func NewPopulatedResponseBeginBlock(r randyTypes, easy bool) *ResponseBeginBlock func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this := &ResponseCheckTx{} this.Code = uint32(r.Uint32()) - v21 := r.Intn(100) - this.Data = make([]byte, v21) - for i := 0; i < v21; i++ { + v20 
:= r.Intn(100) + this.Data = make([]byte, v20) + for i := 0; i < v20; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -7636,11 +7639,11 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v22 := r.Intn(5) - this.Tags = make([]common.KVPair, v22) - for i := 0; i < v22; i++ { - v23 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v23 + v21 := r.Intn(5) + this.Tags = make([]common.KVPair, v21) + for i := 0; i < v21; i++ { + v22 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v22 } } if !easy && r.Intn(10) != 0 { @@ -7652,9 +7655,9 @@ func NewPopulatedResponseCheckTx(r randyTypes, easy bool) *ResponseCheckTx { func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this := &ResponseDeliverTx{} this.Code = uint32(r.Uint32()) - v24 := r.Intn(100) - this.Data = make([]byte, v24) - for i := 0; i < v24; i++ { + v23 := r.Intn(100) + this.Data = make([]byte, v23) + for i := 0; i < v23; i++ { this.Data[i] = byte(r.Intn(256)) } this.Log = string(randStringTypes(r)) @@ -7668,11 +7671,11 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { this.GasUsed *= -1 } if r.Intn(10) != 0 { - v25 := r.Intn(5) - this.Tags = make([]common.KVPair, v25) - for i := 0; i < v25; i++ { - v26 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v26 + v24 := r.Intn(5) + this.Tags = make([]common.KVPair, v24) + for i := 0; i < v24; i++ { + v25 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v25 } } if !easy && r.Intn(10) != 0 { @@ -7684,22 +7687,22 @@ func NewPopulatedResponseDeliverTx(r randyTypes, easy bool) *ResponseDeliverTx { func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { this := &ResponseEndBlock{} if r.Intn(10) != 0 { - v27 := r.Intn(5) - this.ValidatorUpdates = make([]ValidatorUpdate, v27) - for i := 0; i < v27; i++ { - v28 := NewPopulatedValidatorUpdate(r, easy) - 
this.ValidatorUpdates[i] = *v28 + v26 := r.Intn(5) + this.ValidatorUpdates = make([]ValidatorUpdate, v26) + for i := 0; i < v26; i++ { + v27 := NewPopulatedValidatorUpdate(r, easy) + this.ValidatorUpdates[i] = *v27 } } if r.Intn(10) != 0 { this.ConsensusParamUpdates = NewPopulatedConsensusParams(r, easy) } if r.Intn(10) != 0 { - v29 := r.Intn(5) - this.Tags = make([]common.KVPair, v29) - for i := 0; i < v29; i++ { - v30 := common.NewPopulatedKVPair(r, easy) - this.Tags[i] = *v30 + v28 := r.Intn(5) + this.Tags = make([]common.KVPair, v28) + for i := 0; i < v28; i++ { + v29 := common.NewPopulatedKVPair(r, easy) + this.Tags[i] = *v29 } } if !easy && r.Intn(10) != 0 { @@ -7710,9 +7713,9 @@ func NewPopulatedResponseEndBlock(r randyTypes, easy bool) *ResponseEndBlock { func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { this := &ResponseCommit{} - v31 := r.Intn(100) - this.Data = make([]byte, v31) - for i := 0; i < v31; i++ { + v30 := r.Intn(100) + this.Data = make([]byte, v30) + for i := 0; i < v30; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7770,11 +7773,11 @@ func NewPopulatedLastCommitInfo(r randyTypes, easy bool) *LastCommitInfo { this.Round *= -1 } if r.Intn(10) != 0 { - v32 := r.Intn(5) - this.Votes = make([]VoteInfo, v32) - for i := 0; i < v32; i++ { - v33 := NewPopulatedVoteInfo(r, easy) - this.Votes[i] = *v33 + v31 := r.Intn(5) + this.Votes = make([]VoteInfo, v31) + for i := 0; i < v31; i++ { + v32 := NewPopulatedVoteInfo(r, easy) + this.Votes[i] = *v32 } } if !easy && r.Intn(10) != 0 { @@ -7790,8 +7793,8 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { if r.Intn(2) == 0 { this.Height *= -1 } - v34 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v34 + v33 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v33 this.NumTxs = int64(r.Int63()) if r.Intn(2) == 0 { this.NumTxs *= -1 @@ -7800,51 +7803,51 @@ func NewPopulatedHeader(r randyTypes, 
easy bool) *Header { if r.Intn(2) == 0 { this.TotalTxs *= -1 } - v35 := NewPopulatedBlockID(r, easy) - this.LastBlockId = *v35 - v36 := r.Intn(100) - this.LastCommitHash = make([]byte, v36) - for i := 0; i < v36; i++ { + v34 := NewPopulatedBlockID(r, easy) + this.LastBlockId = *v34 + v35 := r.Intn(100) + this.LastCommitHash = make([]byte, v35) + for i := 0; i < v35; i++ { this.LastCommitHash[i] = byte(r.Intn(256)) } - v37 := r.Intn(100) - this.DataHash = make([]byte, v37) - for i := 0; i < v37; i++ { + v36 := r.Intn(100) + this.DataHash = make([]byte, v36) + for i := 0; i < v36; i++ { this.DataHash[i] = byte(r.Intn(256)) } - v38 := r.Intn(100) - this.ValidatorsHash = make([]byte, v38) - for i := 0; i < v38; i++ { + v37 := r.Intn(100) + this.ValidatorsHash = make([]byte, v37) + for i := 0; i < v37; i++ { this.ValidatorsHash[i] = byte(r.Intn(256)) } - v39 := r.Intn(100) - this.NextValidatorsHash = make([]byte, v39) - for i := 0; i < v39; i++ { + v38 := r.Intn(100) + this.NextValidatorsHash = make([]byte, v38) + for i := 0; i < v38; i++ { this.NextValidatorsHash[i] = byte(r.Intn(256)) } - v40 := r.Intn(100) - this.ConsensusHash = make([]byte, v40) - for i := 0; i < v40; i++ { + v39 := r.Intn(100) + this.ConsensusHash = make([]byte, v39) + for i := 0; i < v39; i++ { this.ConsensusHash[i] = byte(r.Intn(256)) } - v41 := r.Intn(100) - this.AppHash = make([]byte, v41) - for i := 0; i < v41; i++ { + v40 := r.Intn(100) + this.AppHash = make([]byte, v40) + for i := 0; i < v40; i++ { this.AppHash[i] = byte(r.Intn(256)) } - v42 := r.Intn(100) - this.LastResultsHash = make([]byte, v42) - for i := 0; i < v42; i++ { + v41 := r.Intn(100) + this.LastResultsHash = make([]byte, v41) + for i := 0; i < v41; i++ { this.LastResultsHash[i] = byte(r.Intn(256)) } - v43 := r.Intn(100) - this.EvidenceHash = make([]byte, v43) - for i := 0; i < v43; i++ { + v42 := r.Intn(100) + this.EvidenceHash = make([]byte, v42) + for i := 0; i < v42; i++ { this.EvidenceHash[i] = byte(r.Intn(256)) } - v44 := 
r.Intn(100) - this.ProposerAddress = make([]byte, v44) - for i := 0; i < v44; i++ { + v43 := r.Intn(100) + this.ProposerAddress = make([]byte, v43) + for i := 0; i < v43; i++ { this.ProposerAddress[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7855,13 +7858,13 @@ func NewPopulatedHeader(r randyTypes, easy bool) *Header { func NewPopulatedBlockID(r randyTypes, easy bool) *BlockID { this := &BlockID{} - v45 := r.Intn(100) - this.Hash = make([]byte, v45) - for i := 0; i < v45; i++ { + v44 := r.Intn(100) + this.Hash = make([]byte, v44) + for i := 0; i < v44; i++ { this.Hash[i] = byte(r.Intn(256)) } - v46 := NewPopulatedPartSetHeader(r, easy) - this.PartsHeader = *v46 + v45 := NewPopulatedPartSetHeader(r, easy) + this.PartsHeader = *v45 if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) } @@ -7874,9 +7877,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { if r.Intn(2) == 0 { this.Total *= -1 } - v47 := r.Intn(100) - this.Hash = make([]byte, v47) - for i := 0; i < v47; i++ { + v46 := r.Intn(100) + this.Hash = make([]byte, v46) + for i := 0; i < v46; i++ { this.Hash[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7887,9 +7890,9 @@ func NewPopulatedPartSetHeader(r randyTypes, easy bool) *PartSetHeader { func NewPopulatedValidator(r randyTypes, easy bool) *Validator { this := &Validator{} - v48 := r.Intn(100) - this.Address = make([]byte, v48) - for i := 0; i < v48; i++ { + v47 := r.Intn(100) + this.Address = make([]byte, v47) + for i := 0; i < v47; i++ { this.Address[i] = byte(r.Intn(256)) } this.Power = int64(r.Int63()) @@ -7904,8 +7907,8 @@ func NewPopulatedValidator(r randyTypes, easy bool) *Validator { func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { this := &ValidatorUpdate{} - v49 := NewPopulatedPubKey(r, easy) - this.PubKey = *v49 + v48 := NewPopulatedPubKey(r, easy) + this.PubKey = *v48 this.Power = int64(r.Int63()) if r.Intn(2) == 0 { this.Power *= -1 @@ 
-7918,8 +7921,8 @@ func NewPopulatedValidatorUpdate(r randyTypes, easy bool) *ValidatorUpdate { func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { this := &VoteInfo{} - v50 := NewPopulatedValidator(r, easy) - this.Validator = *v50 + v49 := NewPopulatedValidator(r, easy) + this.Validator = *v49 this.SignedLastBlock = bool(bool(r.Intn(2) == 0)) if !easy && r.Intn(10) != 0 { this.XXX_unrecognized = randUnrecognizedTypes(r, 3) @@ -7930,9 +7933,9 @@ func NewPopulatedVoteInfo(r randyTypes, easy bool) *VoteInfo { func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { this := &PubKey{} this.Type = string(randStringTypes(r)) - v51 := r.Intn(100) - this.Data = make([]byte, v51) - for i := 0; i < v51; i++ { + v50 := r.Intn(100) + this.Data = make([]byte, v50) + for i := 0; i < v50; i++ { this.Data[i] = byte(r.Intn(256)) } if !easy && r.Intn(10) != 0 { @@ -7944,14 +7947,14 @@ func NewPopulatedPubKey(r randyTypes, easy bool) *PubKey { func NewPopulatedEvidence(r randyTypes, easy bool) *Evidence { this := &Evidence{} this.Type = string(randStringTypes(r)) - v52 := NewPopulatedValidator(r, easy) - this.Validator = *v52 + v51 := NewPopulatedValidator(r, easy) + this.Validator = *v51 this.Height = int64(r.Int63()) if r.Intn(2) == 0 { this.Height *= -1 } - v53 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) - this.Time = *v53 + v52 := github_com_gogo_protobuf_types.NewPopulatedStdTime(r, easy) + this.Time = *v52 this.TotalVotingPower = int64(r.Int63()) if r.Intn(2) == 0 { this.TotalVotingPower *= -1 @@ -7981,9 +7984,9 @@ func randUTF8RuneTypes(r randyTypes) rune { return rune(ru + 61) } func randStringTypes(r randyTypes) string { - v54 := r.Intn(100) - tmps := make([]rune, v54) - for i := 0; i < v54; i++ { + v53 := r.Intn(100) + tmps := make([]rune, v53) + for i := 0; i < v53; i++ { tmps[i] = randUTF8RuneTypes(r) } return string(tmps) @@ -8005,11 +8008,11 @@ func randFieldTypes(dAtA []byte, r randyTypes, fieldNumber int, wire int) []byte switch wire { 
case 0: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) - v55 := r.Int63() + v54 := r.Int63() if r.Intn(2) == 0 { - v55 *= -1 + v54 *= -1 } - dAtA = encodeVarintPopulateTypes(dAtA, uint64(v55)) + dAtA = encodeVarintPopulateTypes(dAtA, uint64(v54)) case 1: dAtA = encodeVarintPopulateTypes(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -8035,9 +8038,6 @@ func encodeVarintPopulateTypes(dAtA []byte, v uint64) []byte { return dAtA } func (m *Request) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -8050,9 +8050,6 @@ func (m *Request) Size() (n int) { } func (m *Request_Echo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Echo != nil { @@ -8062,9 +8059,6 @@ func (m *Request_Echo) Size() (n int) { return n } func (m *Request_Flush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Flush != nil { @@ -8074,9 +8068,6 @@ func (m *Request_Flush) Size() (n int) { return n } func (m *Request_Info) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Info != nil { @@ -8086,9 +8077,6 @@ func (m *Request_Info) Size() (n int) { return n } func (m *Request_SetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.SetOption != nil { @@ -8098,9 +8086,6 @@ func (m *Request_SetOption) Size() (n int) { return n } func (m *Request_InitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.InitChain != nil { @@ -8110,9 +8095,6 @@ func (m *Request_InitChain) Size() (n int) { return n } func (m *Request_Query) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Query != nil { @@ -8122,9 +8104,6 @@ func (m *Request_Query) Size() (n int) { return n } func (m *Request_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.BeginBlock != nil { @@ -8134,9 +8113,6 @@ func 
(m *Request_BeginBlock) Size() (n int) { return n } func (m *Request_CheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CheckTx != nil { @@ -8146,9 +8122,6 @@ func (m *Request_CheckTx) Size() (n int) { return n } func (m *Request_EndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.EndBlock != nil { @@ -8158,9 +8131,6 @@ func (m *Request_EndBlock) Size() (n int) { return n } func (m *Request_Commit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Commit != nil { @@ -8170,9 +8140,6 @@ func (m *Request_Commit) Size() (n int) { return n } func (m *Request_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.DeliverTx != nil { @@ -8182,9 +8149,6 @@ func (m *Request_DeliverTx) Size() (n int) { return n } func (m *RequestEcho) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Message) @@ -8198,9 +8162,6 @@ func (m *RequestEcho) Size() (n int) { } func (m *RequestFlush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.XXX_unrecognized != nil { @@ -8210,9 +8171,6 @@ func (m *RequestFlush) Size() (n int) { } func (m *RequestInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Version) @@ -8226,9 +8184,6 @@ func (m *RequestInfo) Size() (n int) { } func (m *RequestSetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Key) @@ -8246,9 +8201,6 @@ func (m *RequestSetOption) Size() (n int) { } func (m *RequestInitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = github_com_gogo_protobuf_types.SizeOfStdTime(m.Time) @@ -8278,9 +8230,6 @@ func (m *RequestInitChain) Size() (n int) { } func (m *RequestQuery) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Data) @@ -8304,9 +8253,6 @@ func (m *RequestQuery) Size() (n int) { } func (m *RequestBeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Hash) @@ 
-8330,9 +8276,6 @@ func (m *RequestBeginBlock) Size() (n int) { } func (m *RequestCheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Tx) @@ -8346,9 +8289,6 @@ func (m *RequestCheckTx) Size() (n int) { } func (m *RequestDeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Tx) @@ -8362,9 +8302,6 @@ func (m *RequestDeliverTx) Size() (n int) { } func (m *RequestEndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Height != 0 { @@ -8377,9 +8314,6 @@ func (m *RequestEndBlock) Size() (n int) { } func (m *RequestCommit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.XXX_unrecognized != nil { @@ -8389,9 +8323,6 @@ func (m *RequestCommit) Size() (n int) { } func (m *Response) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Value != nil { @@ -8404,9 +8335,6 @@ func (m *Response) Size() (n int) { } func (m *Response_Exception) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Exception != nil { @@ -8416,9 +8344,6 @@ func (m *Response_Exception) Size() (n int) { return n } func (m *Response_Echo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Echo != nil { @@ -8428,9 +8353,6 @@ func (m *Response_Echo) Size() (n int) { return n } func (m *Response_Flush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Flush != nil { @@ -8440,9 +8362,6 @@ func (m *Response_Flush) Size() (n int) { return n } func (m *Response_Info) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Info != nil { @@ -8452,9 +8371,6 @@ func (m *Response_Info) Size() (n int) { return n } func (m *Response_SetOption) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.SetOption != nil { @@ -8464,9 +8380,6 @@ func (m *Response_SetOption) Size() (n int) { return n } func (m *Response_InitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.InitChain != nil { @@ -8476,9 
+8389,6 @@ func (m *Response_InitChain) Size() (n int) { return n } func (m *Response_Query) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Query != nil { @@ -8488,9 +8398,6 @@ func (m *Response_Query) Size() (n int) { return n } func (m *Response_BeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.BeginBlock != nil { @@ -8500,9 +8407,6 @@ func (m *Response_BeginBlock) Size() (n int) { return n } func (m *Response_CheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.CheckTx != nil { @@ -8512,9 +8416,6 @@ func (m *Response_CheckTx) Size() (n int) { return n } func (m *Response_DeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.DeliverTx != nil { @@ -8524,9 +8425,6 @@ func (m *Response_DeliverTx) Size() (n int) { return n } func (m *Response_EndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.EndBlock != nil { @@ -8536,9 +8434,6 @@ func (m *Response_EndBlock) Size() (n int) { return n } func (m *Response_Commit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Commit != nil { @@ -8548,9 +8443,6 @@ func (m *Response_Commit) Size() (n int) { return n } func (m *ResponseException) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Error) @@ -8564,9 +8456,6 @@ func (m *ResponseException) Size() (n int) { } func (m *ResponseEcho) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Message) @@ -8580,9 +8469,6 @@ func (m *ResponseEcho) Size() (n int) { } func (m *ResponseFlush) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.XXX_unrecognized != nil { @@ -8592,9 +8478,6 @@ func (m *ResponseFlush) Size() (n int) { } func (m *ResponseInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Data) @@ -8619,9 +8502,6 @@ func (m *ResponseInfo) Size() (n int) { } func (m *ResponseSetOption) Size() (n int) { - if m == nil { - return 0 - } var l 
int _ = l if m.Code != 0 { @@ -8642,9 +8522,6 @@ func (m *ResponseSetOption) Size() (n int) { } func (m *ResponseInitChain) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.ConsensusParams != nil { @@ -8664,9 +8541,6 @@ func (m *ResponseInitChain) Size() (n int) { } func (m *ResponseQuery) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8691,8 +8565,8 @@ func (m *ResponseQuery) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } - l = len(m.Proof) - if l > 0 { + if m.Proof != nil { + l = m.Proof.Size() n += 1 + l + sovTypes(uint64(l)) } if m.Height != 0 { @@ -8705,9 +8579,6 @@ func (m *ResponseQuery) Size() (n int) { } func (m *ResponseBeginBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.Tags) > 0 { @@ -8723,9 +8594,6 @@ func (m *ResponseBeginBlock) Size() (n int) { } func (m *ResponseCheckTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8762,9 +8630,6 @@ func (m *ResponseCheckTx) Size() (n int) { } func (m *ResponseDeliverTx) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Code != 0 { @@ -8801,9 +8666,6 @@ func (m *ResponseDeliverTx) Size() (n int) { } func (m *ResponseEndBlock) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if len(m.ValidatorUpdates) > 0 { @@ -8829,9 +8691,6 @@ func (m *ResponseEndBlock) Size() (n int) { } func (m *ResponseCommit) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Data) @@ -8845,9 +8704,6 @@ func (m *ResponseCommit) Size() (n int) { } func (m *ConsensusParams) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.BlockSize != nil { @@ -8865,9 +8721,6 @@ func (m *ConsensusParams) Size() (n int) { } func (m *BlockSize) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.MaxBytes != 0 { @@ -8883,9 +8736,6 @@ func (m *BlockSize) Size() (n int) { } func (m *EvidenceParams) Size() (n int) { - if m == nil { 
- return 0 - } var l int _ = l if m.MaxAge != 0 { @@ -8898,9 +8748,6 @@ func (m *EvidenceParams) Size() (n int) { } func (m *LastCommitInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Round != 0 { @@ -8919,9 +8766,6 @@ func (m *LastCommitInfo) Size() (n int) { } func (m *Header) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.ChainID) @@ -8984,9 +8828,6 @@ func (m *Header) Size() (n int) { } func (m *BlockID) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Hash) @@ -9002,9 +8843,6 @@ func (m *BlockID) Size() (n int) { } func (m *PartSetHeader) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l if m.Total != 0 { @@ -9021,9 +8859,6 @@ func (m *PartSetHeader) Size() (n int) { } func (m *Validator) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Address) @@ -9040,9 +8875,6 @@ func (m *Validator) Size() (n int) { } func (m *ValidatorUpdate) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.PubKey.Size() @@ -9057,9 +8889,6 @@ func (m *ValidatorUpdate) Size() (n int) { } func (m *VoteInfo) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = m.Validator.Size() @@ -9074,9 +8903,6 @@ func (m *VoteInfo) Size() (n int) { } func (m *PubKey) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -9094,9 +8920,6 @@ func (m *PubKey) Size() (n int) { } func (m *Evidence) Size() (n int) { - if m == nil { - return 0 - } var l int _ = l l = len(m.Type) @@ -11907,7 +11730,7 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Proof", wireType) } - var byteLen int + var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { return ErrIntOverflowTypes @@ -11917,21 +11740,23 @@ func (m *ResponseQuery) Unmarshal(dAtA []byte) error { } b := dAtA[iNdEx] iNdEx++ - byteLen |= (int(b) & 0x7F) << shift + msglen |= (int(b) & 0x7F) 
<< shift if b < 0x80 { break } } - if byteLen < 0 { + if msglen < 0 { return ErrInvalidLengthTypes } - postIndex := iNdEx + byteLen + postIndex := iNdEx + msglen if postIndex > l { return io.ErrUnexpectedEOF } - m.Proof = append(m.Proof[:0], dAtA[iNdEx:postIndex]...) if m.Proof == nil { - m.Proof = []byte{} + m.Proof = &merkle.Proof{} + } + if err := m.Proof.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err } iNdEx = postIndex case 9: @@ -14503,140 +14328,142 @@ var ( ErrIntOverflowTypes = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_8495fed925debe52) } +func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_03c41ca87033c976) } func init() { - golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_8495fed925debe52) + golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_types_03c41ca87033c976) } -var fileDescriptor_types_8495fed925debe52 = []byte{ - // 2062 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x6f, 0x23, 0x49, - 0x15, 0x4f, 0xdb, 0x8e, 0xed, 0x7e, 0x49, 0xec, 0x4c, 0x25, 0x93, 0x78, 0x0c, 0x24, 0xa3, 0x06, - 0x76, 0x13, 0x36, 0x9b, 0xac, 0xb2, 0x2c, 0xca, 0xec, 0x2c, 0x2b, 0xc5, 0x33, 0x03, 0x89, 0x76, - 0x81, 0xd0, 0x33, 0x13, 0x2e, 0x48, 0xad, 0xb2, 0xbb, 0x62, 0xb7, 0xc6, 0xee, 0xee, 0xed, 0x2e, - 0x67, 0x9d, 0x39, 0x72, 0xde, 0xc3, 0x1e, 0x90, 0xf8, 0x0a, 0x7c, 0x01, 0x24, 0x8e, 0x9c, 0xd0, - 0x1e, 0x11, 0x02, 0x71, 0x1b, 0x20, 0x88, 0x03, 0x7c, 0x02, 0x8e, 0xa8, 0x5e, 0x55, 0xf5, 0xbf, - 0xb4, 0x47, 0x33, 0xc3, 0x8d, 0x4b, 0xab, 0xab, 0xde, 0x7b, 0x55, 0xf5, 0x5e, 0xbd, 0xf7, 0x7e, - 0xef, 0x15, 0x6c, 0xd0, 0xfe, 0xc0, 0x3b, 0xe0, 0x57, 0x21, 0x8b, 0xe5, 0x77, 0x3f, 0x8c, 0x02, - 0x1e, 0x90, 0x45, 0x1c, 0x74, 0xdf, 0x1d, 0x7a, 0x7c, 0x34, 0xed, 0xef, 0x0f, 0x82, 0xc9, 0xc1, - 0x30, 0x18, 0x06, 0x07, 0x48, 0xed, 0x4f, 0x2f, 0x70, 0x84, 
0x03, 0xfc, 0x93, 0x52, 0xdd, 0xed, - 0x61, 0x10, 0x0c, 0xc7, 0x2c, 0xe5, 0xe2, 0xde, 0x84, 0xc5, 0x9c, 0x4e, 0x42, 0xc5, 0x70, 0x94, - 0x59, 0x8f, 0x33, 0xdf, 0x65, 0xd1, 0xc4, 0xf3, 0x79, 0xf6, 0x77, 0xec, 0xf5, 0xe3, 0x83, 0x41, - 0x30, 0x99, 0x04, 0x7e, 0xf6, 0x40, 0xd6, 0xef, 0x6b, 0xd0, 0xb0, 0xd9, 0x67, 0x53, 0x16, 0x73, - 0xb2, 0x03, 0x35, 0x36, 0x18, 0x05, 0x9d, 0xca, 0x5d, 0x63, 0x67, 0xe9, 0x90, 0xec, 0x4b, 0x3e, - 0x45, 0x7d, 0x34, 0x18, 0x05, 0x27, 0x0b, 0x36, 0x72, 0x90, 0x77, 0x60, 0xf1, 0x62, 0x3c, 0x8d, - 0x47, 0x9d, 0x2a, 0xb2, 0xae, 0xe5, 0x59, 0x7f, 0x20, 0x48, 0x27, 0x0b, 0xb6, 0xe4, 0x11, 0xcb, - 0x7a, 0xfe, 0x45, 0xd0, 0xa9, 0x95, 0x2d, 0x7b, 0xea, 0x5f, 0xe0, 0xb2, 0x82, 0x83, 0x1c, 0x01, - 0xc4, 0x8c, 0x3b, 0x41, 0xc8, 0xbd, 0xc0, 0xef, 0x2c, 0x22, 0xff, 0x66, 0x9e, 0xff, 0x31, 0xe3, - 0x3f, 0x41, 0xf2, 0xc9, 0x82, 0x6d, 0xc6, 0x7a, 0x20, 0x24, 0x3d, 0xdf, 0xe3, 0xce, 0x60, 0x44, - 0x3d, 0xbf, 0x53, 0x2f, 0x93, 0x3c, 0xf5, 0x3d, 0xfe, 0x40, 0x90, 0x85, 0xa4, 0xa7, 0x07, 0x42, - 0x95, 0xcf, 0xa6, 0x2c, 0xba, 0xea, 0x34, 0xca, 0x54, 0xf9, 0xa9, 0x20, 0x09, 0x55, 0x90, 0x87, - 0xdc, 0x87, 0xa5, 0x3e, 0x1b, 0x7a, 0xbe, 0xd3, 0x1f, 0x07, 0x83, 0x67, 0x9d, 0x26, 0x8a, 0x74, - 0xf2, 0x22, 0x3d, 0xc1, 0xd0, 0x13, 0xf4, 0x93, 0x05, 0x1b, 0xfa, 0xc9, 0x88, 0x1c, 0x42, 0x73, - 0x30, 0x62, 0x83, 0x67, 0x0e, 0x9f, 0x75, 0x4c, 0x94, 0xbc, 0x9d, 0x97, 0x7c, 0x20, 0xa8, 0x4f, - 0x66, 0x27, 0x0b, 0x76, 0x63, 0x20, 0x7f, 0xc9, 0x07, 0x60, 0x32, 0xdf, 0x55, 0xdb, 0x2d, 0xa1, - 0xd0, 0x46, 0xe1, 0x5e, 0x7c, 0x57, 0x6f, 0xd6, 0x64, 0xea, 0x9f, 0xec, 0x43, 0x5d, 0xdc, 0xb5, - 0xc7, 0x3b, 0xcb, 0x28, 0xb3, 0x5e, 0xd8, 0x08, 0x69, 0x27, 0x0b, 0xb6, 0xe2, 0x12, 0xe6, 0x73, - 0xd9, 0xd8, 0xbb, 0x64, 0x91, 0x38, 0xdc, 0x5a, 0x99, 0xf9, 0x1e, 0x4a, 0x3a, 0x1e, 0xcf, 0x74, - 0xf5, 0xa0, 0xd7, 0x80, 0xc5, 0x4b, 0x3a, 0x9e, 0x32, 0xeb, 0x6d, 0x58, 0xca, 0x78, 0x0a, 0xe9, - 0x40, 0x63, 0xc2, 0xe2, 0x98, 0x0e, 0x59, 0xc7, 0xb8, 0x6b, 0xec, 0x98, 0xb6, 0x1e, 0x5a, 0x2d, - 
0x58, 0xce, 0xfa, 0x49, 0x46, 0x50, 0xf8, 0x82, 0x10, 0xbc, 0x64, 0x51, 0x2c, 0x1c, 0x40, 0x09, - 0xaa, 0xa1, 0xf5, 0x21, 0xac, 0x16, 0x9d, 0x80, 0xac, 0x42, 0xf5, 0x19, 0xbb, 0x52, 0x9c, 0xe2, - 0x97, 0xac, 0xab, 0x03, 0xa1, 0x17, 0x9b, 0xb6, 0x3a, 0xdd, 0x97, 0x95, 0x44, 0x38, 0xf1, 0x03, - 0x72, 0x04, 0x35, 0x11, 0x48, 0x28, 0xbd, 0x74, 0xd8, 0xdd, 0x97, 0x51, 0xb6, 0xaf, 0xa3, 0x6c, - 0xff, 0x89, 0x8e, 0xb2, 0x5e, 0xf3, 0xab, 0x17, 0xdb, 0x0b, 0x5f, 0xfe, 0x75, 0xdb, 0xb0, 0x51, - 0x82, 0xdc, 0x11, 0x57, 0x49, 0x3d, 0xdf, 0xf1, 0x5c, 0xb5, 0x4f, 0x03, 0xc7, 0xa7, 0x2e, 0x39, - 0x86, 0xd5, 0x41, 0xe0, 0xc7, 0xcc, 0x8f, 0xa7, 0xb1, 0x13, 0xd2, 0x88, 0x4e, 0x62, 0x15, 0x25, - 0xfa, 0xe2, 0x1e, 0x68, 0xf2, 0x19, 0x52, 0xed, 0xf6, 0x20, 0x3f, 0x41, 0x3e, 0x02, 0xb8, 0xa4, - 0x63, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x4e, 0xed, 0x6e, 0x35, 0x23, 0x7c, 0xae, 0x09, 0x4f, 0x43, - 0x97, 0x72, 0xd6, 0xab, 0x89, 0x93, 0xd9, 0x19, 0x7e, 0xf2, 0x16, 0xb4, 0x69, 0x18, 0x3a, 0x31, - 0xa7, 0x9c, 0x39, 0xfd, 0x2b, 0xce, 0x62, 0x8c, 0xa4, 0x65, 0x7b, 0x85, 0x86, 0xe1, 0x63, 0x31, - 0xdb, 0x13, 0x93, 0x96, 0x9b, 0xdc, 0x03, 0x3a, 0x39, 0x21, 0x50, 0x73, 0x29, 0xa7, 0x68, 0x8d, - 0x65, 0x1b, 0xff, 0xc5, 0x5c, 0x48, 0xf9, 0x48, 0xe9, 0x88, 0xff, 0x64, 0x03, 0xea, 0x23, 0xe6, - 0x0d, 0x47, 0x1c, 0xd5, 0xaa, 0xda, 0x6a, 0x24, 0x0c, 0x1f, 0x46, 0xc1, 0x25, 0xc3, 0x38, 0x6f, - 0xda, 0x72, 0x60, 0xfd, 0xd3, 0x80, 0x5b, 0x37, 0x02, 0x43, 0xac, 0x3b, 0xa2, 0xf1, 0x48, 0xef, - 0x25, 0xfe, 0xc9, 0x3b, 0x62, 0x5d, 0xea, 0xb2, 0x48, 0xe5, 0x9f, 0x15, 0xa5, 0xf1, 0x09, 0x4e, - 0x2a, 0x45, 0x15, 0x0b, 0x79, 0x04, 0xab, 0x63, 0x1a, 0x73, 0x47, 0xfa, 0xaf, 0x83, 0xf9, 0xa5, - 0x9a, 0x8b, 0xa9, 0x4f, 0xa9, 0xf6, 0x73, 0xe1, 0x56, 0x4a, 0xbc, 0x35, 0xce, 0xcd, 0x92, 0x13, - 0x58, 0xef, 0x5f, 0x3d, 0xa7, 0x3e, 0xf7, 0x7c, 0xe6, 0xdc, 0xb0, 0x79, 0x5b, 0x2d, 0xf5, 0xe8, - 0xd2, 0x73, 0x99, 0x3f, 0xd0, 0xc6, 0x5e, 0x4b, 0x44, 0x92, 0xcb, 0x88, 0xad, 0xbb, 0xd0, 0xca, - 0x47, 0x31, 0x69, 0x41, 0x85, 0xcf, 
0x94, 0x86, 0x15, 0x3e, 0xb3, 0xac, 0xc4, 0x03, 0x93, 0x50, - 0xba, 0xc1, 0xb3, 0x0b, 0xed, 0x42, 0x58, 0x67, 0xcc, 0x6d, 0x64, 0xcd, 0x6d, 0xb5, 0x61, 0x25, - 0x17, 0xcd, 0xd6, 0x17, 0x8b, 0xd0, 0xb4, 0x59, 0x1c, 0x0a, 0x67, 0x22, 0x47, 0x60, 0xb2, 0xd9, - 0x80, 0xc9, 0x44, 0x6a, 0x14, 0xd2, 0x94, 0xe4, 0x79, 0xa4, 0xe9, 0x22, 0xa0, 0x13, 0x66, 0xb2, - 0x9b, 0x03, 0x81, 0xb5, 0xa2, 0x50, 0x16, 0x05, 0xf6, 0xf2, 0x28, 0xb0, 0x5e, 0xe0, 0x2d, 0xc0, - 0xc0, 0x6e, 0x0e, 0x06, 0x8a, 0x0b, 0xe7, 0x70, 0xe0, 0x5e, 0x09, 0x0e, 0x14, 0x8f, 0x3f, 0x07, - 0x08, 0xee, 0x95, 0x00, 0x41, 0xe7, 0xc6, 0x5e, 0xa5, 0x48, 0xb0, 0x97, 0x47, 0x82, 0xa2, 0x3a, - 0x05, 0x28, 0xf8, 0xa8, 0x0c, 0x0a, 0xee, 0x14, 0x64, 0xe6, 0x62, 0xc1, 0xfb, 0x37, 0xb0, 0x60, - 0xa3, 0x20, 0x5a, 0x02, 0x06, 0xf7, 0x72, 0x59, 0x1a, 0x4a, 0x75, 0x2b, 0x4f, 0xd3, 0xe4, 0x7b, - 0x37, 0x71, 0x64, 0xb3, 0x78, 0xb5, 0x65, 0x40, 0x72, 0x50, 0x00, 0x92, 0xdb, 0xc5, 0x53, 0x16, - 0x90, 0x24, 0xc5, 0x83, 0x5d, 0x11, 0xf7, 0x05, 0x4f, 0x13, 0x39, 0x82, 0x45, 0x51, 0x10, 0xa9, - 0x84, 0x2d, 0x07, 0xd6, 0x8e, 0xc8, 0x44, 0xa9, 0x7f, 0xbd, 0x04, 0x3b, 0xd0, 0xe9, 0x33, 0xde, - 0x65, 0xfd, 0xca, 0x48, 0x65, 0x31, 0xa2, 0xb3, 0x59, 0xcc, 0x54, 0x59, 0x2c, 0x03, 0x29, 0x95, - 0x1c, 0xa4, 0x90, 0xef, 0xc0, 0x2d, 0x4c, 0x23, 0x68, 0x17, 0x27, 0x97, 0xd6, 0xda, 0x82, 0x20, - 0x0d, 0x22, 0xf3, 0xdb, 0xbb, 0xb0, 0x96, 0xe1, 0x15, 0x29, 0x16, 0x53, 0x58, 0x0d, 0x83, 0x77, - 0x35, 0xe1, 0x3e, 0x0e, 0xc3, 0x13, 0x1a, 0x8f, 0xac, 0x1f, 0xa5, 0xfa, 0xa7, 0x70, 0x45, 0xa0, - 0x36, 0x08, 0x5c, 0xa9, 0xd6, 0x8a, 0x8d, 0xff, 0x02, 0xc2, 0xc6, 0xc1, 0x10, 0x77, 0x35, 0x6d, - 0xf1, 0x2b, 0xb8, 0x92, 0x48, 0x31, 0x65, 0x48, 0x58, 0xbf, 0x34, 0xd2, 0xf5, 0x52, 0x04, 0x2b, - 0x03, 0x1b, 0xe3, 0x7f, 0x01, 0x9b, 0xca, 0xeb, 0x81, 0x8d, 0xf5, 0x1b, 0x23, 0xbd, 0x91, 0x04, - 0x46, 0xde, 0x4c, 0x45, 0xe1, 0x1c, 0x9e, 0xef, 0xb2, 0x19, 0x06, 0x7c, 0xd5, 0x96, 0x03, 0x8d, - 0xf0, 0x75, 0x34, 0x73, 0x1e, 0xe1, 0x1b, 0x38, 0x27, 0x07, 0x0a, 0x7e, 
0x82, 0x0b, 0x8c, 0xc4, - 0x65, 0x5b, 0x0e, 0x32, 0xd9, 0xd3, 0xcc, 0x65, 0xcf, 0x33, 0x20, 0x37, 0x63, 0x94, 0x7c, 0x08, - 0x35, 0x4e, 0x87, 0xc2, 0x84, 0xc2, 0x0a, 0xad, 0x7d, 0x59, 0x2f, 0xef, 0x7f, 0x72, 0x7e, 0x46, - 0xbd, 0xa8, 0xb7, 0x21, 0xb4, 0xff, 0xf7, 0x8b, 0xed, 0x96, 0xe0, 0xd9, 0x0b, 0x26, 0x1e, 0x67, - 0x93, 0x90, 0x5f, 0xd9, 0x28, 0x63, 0xfd, 0xd9, 0x10, 0xb9, 0x3b, 0x17, 0xbb, 0xa5, 0xb6, 0xd0, - 0x0e, 0x5a, 0xc9, 0xc0, 0xec, 0xab, 0xd9, 0xe7, 0x1b, 0x00, 0x43, 0x1a, 0x3b, 0x9f, 0x53, 0x9f, - 0x33, 0x57, 0x19, 0xc9, 0x1c, 0xd2, 0xf8, 0x67, 0x38, 0x21, 0x6a, 0x12, 0x41, 0x9e, 0xc6, 0xcc, - 0x45, 0x6b, 0x55, 0xed, 0xc6, 0x90, 0xc6, 0x4f, 0x63, 0xe6, 0x26, 0x7a, 0x35, 0xde, 0x40, 0xaf, - 0xbf, 0x64, 0x1c, 0x2f, 0x05, 0xae, 0xff, 0x07, 0xcd, 0xfe, 0x65, 0x08, 0x44, 0xce, 0x27, 0x3f, - 0x72, 0x0a, 0xb7, 0x12, 0xf7, 0x76, 0xa6, 0xe8, 0xf6, 0xda, 0x1f, 0x5e, 0x1e, 0x15, 0xab, 0x97, - 0xf9, 0xe9, 0x98, 0xfc, 0x18, 0x36, 0x0b, 0xc1, 0x99, 0x2c, 0x58, 0x79, 0x69, 0x8c, 0xde, 0xce, - 0xc7, 0xa8, 0x5e, 0x4f, 0xeb, 0x5a, 0x7d, 0x03, 0x5d, 0xbf, 0x25, 0xca, 0x93, 0x6c, 0xca, 0x2e, - 0xbb, 0x2d, 0xeb, 0x17, 0x06, 0xb4, 0x0b, 0x87, 0x21, 0x07, 0x00, 0x32, 0xe3, 0xc5, 0xde, 0x73, - 0x5d, 0x2a, 0xaf, 0xaa, 0x83, 0xa3, 0xc9, 0x1e, 0x7b, 0xcf, 0x99, 0x6d, 0xf6, 0xf5, 0x2f, 0xf9, - 0x18, 0xda, 0x4c, 0x15, 0x4c, 0x3a, 0x25, 0x55, 0x72, 0xd8, 0xa1, 0xcb, 0x29, 0xa5, 0x6d, 0x8b, - 0xe5, 0xc6, 0xd6, 0x31, 0x98, 0xc9, 0xba, 0xe4, 0x6b, 0x60, 0x4e, 0xe8, 0x4c, 0x95, 0xb1, 0xb2, - 0x00, 0x6a, 0x4e, 0xe8, 0x0c, 0x2b, 0x58, 0xb2, 0x09, 0x0d, 0x41, 0x1c, 0x52, 0xb9, 0x43, 0xd5, - 0xae, 0x4f, 0xe8, 0xec, 0x87, 0x34, 0xb6, 0x76, 0xa1, 0x95, 0xdf, 0x44, 0xb3, 0x6a, 0x48, 0x91, - 0xac, 0xc7, 0x43, 0x66, 0x3d, 0x86, 0x56, 0xbe, 0x52, 0x14, 0x89, 0x24, 0x0a, 0xa6, 0xbe, 0x8b, - 0x8c, 0x8b, 0xb6, 0x1c, 0x88, 0x36, 0xf1, 0x32, 0x90, 0x57, 0x97, 0x2d, 0x0d, 0xcf, 0x03, 0xce, - 0x32, 0xf5, 0xa5, 0xe4, 0xb1, 0xfe, 0x58, 0x83, 0xba, 0x2c, 0x5b, 0xc9, 0x5b, 0x99, 0x4e, 0x01, - 0x31, 0xa9, 
0xb7, 0x74, 0xfd, 0x62, 0xbb, 0x81, 0xe9, 0xfb, 0xf4, 0x61, 0xda, 0x36, 0xa4, 0x89, - 0xaa, 0x92, 0xab, 0xaa, 0x75, 0x8f, 0x52, 0x7d, 0xed, 0x1e, 0x65, 0x13, 0x1a, 0xfe, 0x74, 0xe2, - 0xf0, 0x59, 0x8c, 0xb1, 0x56, 0xb5, 0xeb, 0xfe, 0x74, 0xf2, 0x64, 0x16, 0x0b, 0x9b, 0xf2, 0x80, - 0xd3, 0x31, 0x92, 0x64, 0xb0, 0x35, 0x71, 0x42, 0x10, 0x8f, 0x60, 0x25, 0x83, 0x72, 0x9e, 0xab, - 0x4a, 0xa8, 0x56, 0xf6, 0xc6, 0x4f, 0x1f, 0x2a, 0x75, 0x97, 0x12, 0xd4, 0x3b, 0x75, 0xc9, 0x4e, - 0xbe, 0x24, 0x47, 0x70, 0x94, 0x19, 0x3a, 0x53, 0x75, 0x0b, 0x68, 0x14, 0x07, 0x10, 0xee, 0x26, - 0x59, 0x64, 0xba, 0x6e, 0x8a, 0x09, 0x24, 0xbe, 0x0d, 0xed, 0x14, 0x5f, 0x24, 0x8b, 0x29, 0x57, - 0x49, 0xa7, 0x91, 0xf1, 0x3d, 0x58, 0xf7, 0xd9, 0x8c, 0x3b, 0x45, 0x6e, 0x40, 0x6e, 0x22, 0x68, - 0xe7, 0x79, 0x89, 0x6f, 0x43, 0x2b, 0x0d, 0x48, 0xe4, 0x5d, 0x92, 0x8d, 0x51, 0x32, 0x8b, 0x6c, - 0x77, 0xa0, 0x99, 0xa0, 0xfb, 0x32, 0x32, 0x34, 0xa8, 0x04, 0xf5, 0xa4, 0x5e, 0x88, 0x58, 0x3c, - 0x1d, 0x73, 0xb5, 0xc8, 0x0a, 0xf2, 0x60, 0xbd, 0x60, 0xcb, 0x79, 0xe4, 0xfd, 0x26, 0xac, 0x24, - 0x71, 0x80, 0x7c, 0x2d, 0xe4, 0x5b, 0xd6, 0x93, 0xc8, 0xb4, 0x0b, 0xab, 0x61, 0x14, 0x84, 0x41, - 0xcc, 0x22, 0x87, 0xba, 0x6e, 0xc4, 0xe2, 0xb8, 0xd3, 0x96, 0xeb, 0xe9, 0xf9, 0x63, 0x39, 0x6d, - 0xfd, 0x1c, 0x1a, 0xca, 0xfa, 0xa5, 0xed, 0xd3, 0xf7, 0x61, 0x39, 0xa4, 0x91, 0x38, 0x53, 0xb6, - 0x89, 0xd2, 0x45, 0xec, 0x19, 0x8d, 0x44, 0xd7, 0x9c, 0xeb, 0xa5, 0x96, 0x90, 0x5f, 0x4e, 0x59, - 0xf7, 0x60, 0x25, 0xc7, 0x23, 0xc2, 0x00, 0x9d, 0x42, 0x87, 0x01, 0x0e, 0x92, 0x9d, 0x2b, 0xe9, - 0xce, 0xd6, 0x7d, 0x30, 0x13, 0x43, 0x8b, 0x5a, 0x4b, 0xeb, 0x61, 0x28, 0xdb, 0xc9, 0x21, 0x02, - 0x74, 0xf0, 0x39, 0x8b, 0x54, 0x7d, 0x25, 0x07, 0xd6, 0x53, 0x68, 0x17, 0xf2, 0x29, 0xd9, 0x83, - 0x46, 0x38, 0xed, 0x3b, 0xba, 0xaf, 0x4f, 0x3b, 0xc1, 0xb3, 0x69, 0xff, 0x13, 0x76, 0xa5, 0x3b, - 0xc1, 0x10, 0x47, 0xe9, 0xb2, 0x95, 0xec, 0xb2, 0x63, 0x68, 0xea, 0xd0, 0x24, 0xdf, 0x05, 0x33, - 0xf1, 0x91, 0x42, 0x02, 0x4b, 0xb6, 0x56, 0x8b, 
0xa6, 0x8c, 0xe2, 0xaa, 0x63, 0x6f, 0xe8, 0x33, - 0xd7, 0x49, 0xe3, 0x01, 0xf7, 0x68, 0xda, 0x6d, 0x49, 0xf8, 0x54, 0x3b, 0xbf, 0xf5, 0x1e, 0xd4, - 0xe5, 0xd9, 0x84, 0x7d, 0xc4, 0xca, 0xba, 0xfc, 0x14, 0xff, 0xa5, 0x99, 0xf6, 0x4f, 0x06, 0x34, - 0x75, 0x8a, 0x2a, 0x15, 0xca, 0x1d, 0xba, 0xf2, 0xaa, 0x87, 0x9e, 0xd7, 0x9b, 0xeb, 0x2c, 0x52, - 0x7b, 0xed, 0x2c, 0xb2, 0x07, 0x44, 0x26, 0x8b, 0xcb, 0x80, 0x7b, 0xfe, 0xd0, 0x91, 0xb6, 0x96, - 0x59, 0x63, 0x15, 0x29, 0xe7, 0x48, 0x38, 0x13, 0xf3, 0x87, 0x5f, 0x2c, 0x42, 0xfb, 0xb8, 0xf7, - 0xe0, 0xf4, 0x38, 0x0c, 0xc7, 0xde, 0x80, 0x62, 0xcd, 0x7b, 0x00, 0x35, 0xac, 0xea, 0x4b, 0xde, - 0x13, 0xbb, 0x65, 0xed, 0x25, 0x39, 0x84, 0x45, 0x2c, 0xee, 0x49, 0xd9, 0xb3, 0x62, 0xb7, 0xb4, - 0xcb, 0x14, 0x9b, 0xc8, 0xf2, 0xff, 0xe6, 0xeb, 0x62, 0xb7, 0xac, 0xd5, 0x24, 0x1f, 0x83, 0x99, - 0x96, 0xe5, 0xf3, 0xde, 0x18, 0xbb, 0x73, 0x9b, 0x4e, 0x21, 0x9f, 0x56, 0x43, 0xf3, 0x9e, 0xca, - 0xba, 0x73, 0xbb, 0x33, 0x72, 0x04, 0x0d, 0x5d, 0x25, 0x96, 0xbf, 0x02, 0x76, 0xe7, 0x34, 0x84, - 0xc2, 0x3c, 0xb2, 0xd2, 0x2e, 0x7b, 0xaa, 0xec, 0x96, 0x76, 0xad, 0xe4, 0x03, 0xa8, 0x2b, 0xd8, - 0x2f, 0x7d, 0x09, 0xec, 0x96, 0xb7, 0x75, 0x42, 0xc9, 0xb4, 0xd7, 0x98, 0xf7, 0x9c, 0xda, 0x9d, - 0xdb, 0x5e, 0x93, 0x63, 0x80, 0x4c, 0x75, 0x3d, 0xf7, 0x9d, 0xb4, 0x3b, 0xbf, 0x6d, 0x26, 0xf7, - 0xa1, 0x99, 0x3e, 0x85, 0x94, 0xbf, 0x7c, 0x76, 0xe7, 0x75, 0xb2, 0xbd, 0xaf, 0xff, 0xe7, 0xef, - 0x5b, 0xc6, 0xaf, 0xaf, 0xb7, 0x8c, 0xdf, 0x5e, 0x6f, 0x19, 0x5f, 0x5d, 0x6f, 0x19, 0x7f, 0xb8, - 0xde, 0x32, 0xfe, 0x76, 0xbd, 0x65, 0xfc, 0xee, 0x1f, 0x5b, 0x46, 0xbf, 0x8e, 0xee, 0xff, 0xfe, - 0x7f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xef, 0x95, 0x6c, 0x08, 0xac, 0x17, 0x00, 0x00, +var fileDescriptor_types_03c41ca87033c976 = []byte{ + // 2089 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x58, 0x4f, 0x73, 0x1b, 0x49, + 0x15, 0xf7, 0x48, 0xb2, 0xa4, 0x79, 0xb6, 0x25, 0xa7, 0x93, 0xd8, 0x8a, 0x00, 0x3b, 0x35, 0x0b, 
+ 0xbb, 0x36, 0xeb, 0x95, 0xb7, 0xbc, 0x2c, 0xe5, 0x6c, 0x96, 0xad, 0xb2, 0x92, 0x80, 0x5d, 0xbb, + 0x80, 0x99, 0x24, 0xe6, 0x42, 0xd5, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x22, 0xcd, 0xcc, 0xce, 0xb4, + 0xbc, 0x72, 0x8e, 0x9c, 0xf7, 0xb0, 0x07, 0xaa, 0xf8, 0x0a, 0x7c, 0x04, 0x8e, 0x1c, 0x28, 0x6a, + 0x8f, 0x14, 0x05, 0xc5, 0x2d, 0x80, 0x29, 0x0e, 0xf0, 0x09, 0x38, 0x52, 0xfd, 0xba, 0x7b, 0xfe, + 0x79, 0x14, 0x36, 0xe1, 0xc6, 0x45, 0xea, 0xee, 0xf7, 0x5e, 0x77, 0xbf, 0x37, 0xef, 0xbd, 0xdf, + 0x7b, 0x0d, 0x1b, 0x74, 0x30, 0xf4, 0xf6, 0xf9, 0x65, 0xc8, 0x62, 0xf9, 0xdb, 0x0b, 0xa3, 0x80, + 0x07, 0x64, 0x19, 0x27, 0xdd, 0x77, 0x46, 0x1e, 0x1f, 0xcf, 0x06, 0xbd, 0x61, 0x30, 0xdd, 0x1f, + 0x05, 0xa3, 0x60, 0x1f, 0xa9, 0x83, 0xd9, 0x39, 0xce, 0x70, 0x82, 0x23, 0x29, 0xd5, 0xdd, 0x1e, + 0x05, 0xc1, 0x68, 0xc2, 0x52, 0x2e, 0xee, 0x4d, 0x59, 0xcc, 0xe9, 0x34, 0x54, 0x0c, 0x87, 0x99, + 0xfd, 0x38, 0xf3, 0x5d, 0x16, 0x4d, 0x3d, 0x9f, 0x67, 0x87, 0x13, 0x6f, 0x10, 0xef, 0x0f, 0x83, + 0xe9, 0x34, 0xf0, 0xb3, 0x17, 0xea, 0xde, 0xff, 0xaf, 0x92, 0xc3, 0xe8, 0x32, 0xe4, 0xc1, 0xfe, + 0x94, 0x45, 0xcf, 0x26, 0x4c, 0xfd, 0x49, 0x61, 0xeb, 0x77, 0x35, 0x68, 0xd8, 0xec, 0xd3, 0x19, + 0x8b, 0x39, 0xd9, 0x81, 0x1a, 0x1b, 0x8e, 0x83, 0x4e, 0xe5, 0xae, 0xb1, 0xb3, 0x72, 0x40, 0x7a, + 0xf2, 0x10, 0x45, 0x7d, 0x34, 0x1c, 0x07, 0xc7, 0x4b, 0x36, 0x72, 0x90, 0xb7, 0x61, 0xf9, 0x7c, + 0x32, 0x8b, 0xc7, 0x9d, 0x2a, 0xb2, 0xde, 0xcc, 0xb3, 0x7e, 0x5f, 0x90, 0x8e, 0x97, 0x6c, 0xc9, + 0x23, 0xb6, 0xf5, 0xfc, 0xf3, 0xa0, 0x53, 0x2b, 0xdb, 0xf6, 0xc4, 0x3f, 0xc7, 0x6d, 0x05, 0x07, + 0x39, 0x04, 0x88, 0x19, 0x77, 0x82, 0x90, 0x7b, 0x81, 0xdf, 0x59, 0x46, 0xfe, 0xcd, 0x3c, 0xff, + 0x63, 0xc6, 0x7f, 0x8c, 0xe4, 0xe3, 0x25, 0xdb, 0x8c, 0xf5, 0x44, 0x48, 0x7a, 0xbe, 0xc7, 0x9d, + 0xe1, 0x98, 0x7a, 0x7e, 0xa7, 0x5e, 0x26, 0x79, 0xe2, 0x7b, 0xfc, 0x81, 0x20, 0x0b, 0x49, 0x4f, + 0x4f, 0x84, 0x2a, 0x9f, 0xce, 0x58, 0x74, 0xd9, 0x69, 0x94, 0xa9, 0xf2, 0x13, 0x41, 0x12, 0xaa, + 0x20, 0x0f, 0xb9, 0x0f, 0x2b, 0x03, 
0x36, 0xf2, 0x7c, 0x67, 0x30, 0x09, 0x86, 0xcf, 0x3a, 0x4d, + 0x14, 0xe9, 0xe4, 0x45, 0xfa, 0x82, 0xa1, 0x2f, 0xe8, 0xc7, 0x4b, 0x36, 0x0c, 0x92, 0x19, 0x39, + 0x80, 0xe6, 0x70, 0xcc, 0x86, 0xcf, 0x1c, 0x3e, 0xef, 0x98, 0x28, 0x79, 0x3b, 0x2f, 0xf9, 0x40, + 0x50, 0x9f, 0xcc, 0x8f, 0x97, 0xec, 0xc6, 0x50, 0x0e, 0xc9, 0xfb, 0x60, 0x32, 0xdf, 0x55, 0xc7, + 0xad, 0xa0, 0xd0, 0x46, 0xe1, 0xbb, 0xf8, 0xae, 0x3e, 0xac, 0xc9, 0xd4, 0x98, 0xf4, 0xa0, 0x2e, + 0x1c, 0xc5, 0xe3, 0x9d, 0x55, 0x94, 0xb9, 0x55, 0x38, 0x08, 0x69, 0xc7, 0x4b, 0xb6, 0xe2, 0x12, + 0xe6, 0x73, 0xd9, 0xc4, 0xbb, 0x60, 0x91, 0xb8, 0xdc, 0xcd, 0x32, 0xf3, 0x3d, 0x94, 0x74, 0xbc, + 0x9e, 0xe9, 0xea, 0x49, 0xbf, 0x01, 0xcb, 0x17, 0x74, 0x32, 0x63, 0xd6, 0x5b, 0xb0, 0x92, 0xf1, + 0x14, 0xd2, 0x81, 0xc6, 0x94, 0xc5, 0x31, 0x1d, 0xb1, 0x8e, 0x71, 0xd7, 0xd8, 0x31, 0x6d, 0x3d, + 0xb5, 0x5a, 0xb0, 0x9a, 0xf5, 0x93, 0x8c, 0xa0, 0xf0, 0x05, 0x21, 0x78, 0xc1, 0xa2, 0x58, 0x38, + 0x80, 0x12, 0x54, 0x53, 0xeb, 0x03, 0x58, 0x2f, 0x3a, 0x01, 0x59, 0x87, 0xea, 0x33, 0x76, 0xa9, + 0x38, 0xc5, 0x90, 0xdc, 0x52, 0x17, 0x42, 0x2f, 0x36, 0x6d, 0x75, 0xbb, 0x2f, 0x2a, 0x89, 0x70, + 0xe2, 0x07, 0xe4, 0x10, 0x6a, 0x22, 0x0a, 0x51, 0x7a, 0xe5, 0xa0, 0xdb, 0x93, 0x21, 0xda, 0xd3, + 0x21, 0xda, 0x7b, 0xa2, 0x43, 0xb4, 0xdf, 0xfc, 0xf2, 0xc5, 0xf6, 0xd2, 0x17, 0x7f, 0xd9, 0x36, + 0x6c, 0x94, 0x20, 0x77, 0xc4, 0xa7, 0xa4, 0x9e, 0xef, 0x78, 0xae, 0x3a, 0xa7, 0x81, 0xf3, 0x13, + 0x97, 0x1c, 0xc1, 0xfa, 0x30, 0xf0, 0x63, 0xe6, 0xc7, 0xb3, 0xd8, 0x09, 0x69, 0x44, 0xa7, 0xb1, + 0x8a, 0x12, 0xfd, 0xe1, 0x1e, 0x68, 0xf2, 0x29, 0x52, 0xed, 0xf6, 0x30, 0xbf, 0x40, 0x3e, 0x04, + 0xb8, 0xa0, 0x13, 0xcf, 0xa5, 0x3c, 0x88, 0xe2, 0x4e, 0xed, 0x6e, 0x35, 0x23, 0x7c, 0xa6, 0x09, + 0x4f, 0x43, 0x97, 0x72, 0xd6, 0xaf, 0x89, 0x9b, 0xd9, 0x19, 0x7e, 0xf2, 0x26, 0xb4, 0x69, 0x18, + 0x3a, 0x31, 0xa7, 0x9c, 0x39, 0x83, 0x4b, 0xce, 0x62, 0x8c, 0xa4, 0x55, 0x7b, 0x8d, 0x86, 0xe1, + 0x63, 0xb1, 0xda, 0x17, 0x8b, 0x96, 0x9b, 0x7c, 0x07, 0x74, 0x72, 0x42, 
0xa0, 0xe6, 0x52, 0x4e, + 0xd1, 0x1a, 0xab, 0x36, 0x8e, 0xc5, 0x5a, 0x48, 0xf9, 0x58, 0xe9, 0x88, 0x63, 0xb2, 0x01, 0xf5, + 0x31, 0xf3, 0x46, 0x63, 0x8e, 0x6a, 0x55, 0x6d, 0x35, 0x13, 0x86, 0x0f, 0xa3, 0xe0, 0x82, 0x61, + 0x9c, 0x37, 0x6d, 0x39, 0xb1, 0xfe, 0x61, 0xc0, 0x8d, 0x6b, 0x81, 0x21, 0xf6, 0x1d, 0xd3, 0x78, + 0xac, 0xcf, 0x12, 0x63, 0xf2, 0xb6, 0xd8, 0x97, 0xba, 0x2c, 0x52, 0xf9, 0x67, 0x4d, 0x69, 0x7c, + 0x8c, 0x8b, 0x4a, 0x51, 0xc5, 0x42, 0x1e, 0xc1, 0xfa, 0x84, 0xc6, 0xdc, 0x91, 0xfe, 0xeb, 0x60, + 0x7e, 0xa9, 0xe6, 0x62, 0xea, 0x13, 0xaa, 0xfd, 0x5c, 0xb8, 0x95, 0x12, 0x6f, 0x4d, 0x72, 0xab, + 0xe4, 0x18, 0x6e, 0x0d, 0x2e, 0x9f, 0x53, 0x9f, 0x7b, 0x3e, 0x73, 0xae, 0xd9, 0xbc, 0xad, 0xb6, + 0x7a, 0x74, 0xe1, 0xb9, 0xcc, 0x1f, 0x6a, 0x63, 0xdf, 0x4c, 0x44, 0x92, 0x8f, 0x11, 0x5b, 0x77, + 0xa1, 0x95, 0x8f, 0x62, 0xd2, 0x82, 0x0a, 0x9f, 0x2b, 0x0d, 0x2b, 0x7c, 0x6e, 0x59, 0x89, 0x07, + 0x26, 0xa1, 0x74, 0x8d, 0x67, 0x17, 0xda, 0x85, 0xb0, 0xce, 0x98, 0xdb, 0xc8, 0x9a, 0xdb, 0x6a, + 0xc3, 0x5a, 0x2e, 0x9a, 0xad, 0xcf, 0x97, 0xa1, 0x69, 0xb3, 0x38, 0x14, 0xce, 0x44, 0x0e, 0xc1, + 0x64, 0xf3, 0x21, 0x93, 0x89, 0xd4, 0x28, 0xa4, 0x29, 0xc9, 0xf3, 0x48, 0xd3, 0x45, 0x40, 0x27, + 0xcc, 0x64, 0x37, 0x07, 0x02, 0x37, 0x8b, 0x42, 0x59, 0x14, 0xd8, 0xcb, 0xa3, 0xc0, 0xad, 0x02, + 0x6f, 0x01, 0x06, 0x76, 0x73, 0x30, 0x50, 0xdc, 0x38, 0x87, 0x03, 0xf7, 0x4a, 0x70, 0xa0, 0x78, + 0xfd, 0x05, 0x40, 0x70, 0xaf, 0x04, 0x08, 0x3a, 0xd7, 0xce, 0x2a, 0x45, 0x82, 0xbd, 0x3c, 0x12, + 0x14, 0xd5, 0x29, 0x40, 0xc1, 0x87, 0x65, 0x50, 0x70, 0xa7, 0x20, 0xb3, 0x10, 0x0b, 0xde, 0xbb, + 0x86, 0x05, 0x1b, 0x05, 0xd1, 0x12, 0x30, 0xb8, 0x97, 0xcb, 0xd2, 0x50, 0xaa, 0x5b, 0x79, 0x9a, + 0x26, 0xdf, 0xbd, 0x8e, 0x23, 0x9b, 0xc5, 0x4f, 0x5b, 0x06, 0x24, 0xfb, 0x05, 0x20, 0xb9, 0x5d, + 0xbc, 0x65, 0x01, 0x49, 0x52, 0x3c, 0xd8, 0x15, 0x71, 0x5f, 0xf0, 0x34, 0x91, 0x23, 0x58, 0x14, + 0x05, 0x91, 0x4a, 0xd8, 0x72, 0x62, 0xed, 0x88, 0x4c, 0x94, 0xfa, 0xd7, 0x4b, 0xb0, 0x03, 0x9d, + 0x3e, 0xe3, 
0x5d, 0xd6, 0x2f, 0x8d, 0x54, 0x16, 0x23, 0x3a, 0x9b, 0xc5, 0x4c, 0x95, 0xc5, 0x32, + 0x90, 0x52, 0xc9, 0x41, 0x0a, 0xf9, 0x36, 0xdc, 0xc0, 0x34, 0x82, 0x76, 0x71, 0x72, 0x69, 0xad, + 0x2d, 0x08, 0xd2, 0x20, 0x32, 0xbf, 0xbd, 0x03, 0x37, 0x33, 0xbc, 0x22, 0xc5, 0x62, 0x0a, 0xab, + 0x61, 0xf0, 0xae, 0x27, 0xdc, 0x47, 0x61, 0x78, 0x4c, 0xe3, 0xb1, 0xf5, 0xc3, 0x54, 0xff, 0x14, + 0xae, 0x08, 0xd4, 0x86, 0x81, 0x2b, 0xd5, 0x5a, 0xb3, 0x71, 0x2c, 0x20, 0x6c, 0x12, 0x8c, 0xf0, + 0x54, 0xd3, 0x16, 0x43, 0xc1, 0x95, 0x44, 0x8a, 0x29, 0x43, 0xc2, 0xfa, 0x85, 0x91, 0xee, 0x97, + 0x22, 0x58, 0x19, 0xd8, 0x18, 0xff, 0x0b, 0xd8, 0x54, 0x5e, 0x0d, 0x6c, 0xac, 0xdf, 0x1a, 0xe9, + 0x17, 0x49, 0x60, 0xe4, 0xf5, 0x54, 0x14, 0xce, 0xe1, 0xf9, 0x2e, 0x9b, 0x63, 0xc0, 0x57, 0x6d, + 0x39, 0xd1, 0x08, 0x5f, 0x47, 0x33, 0xe7, 0x11, 0xbe, 0x81, 0x6b, 0x72, 0x42, 0xde, 0x40, 0xf8, + 0x09, 0xce, 0x55, 0x24, 0xae, 0xf5, 0x54, 0x99, 0x7b, 0x2a, 0x16, 0x6d, 0x49, 0xcb, 0x24, 0x53, + 0x33, 0x97, 0x4c, 0x4f, 0x81, 0x5c, 0x0f, 0x59, 0xf2, 0x01, 0xd4, 0x38, 0x1d, 0x09, 0x8b, 0x0a, + 0xa3, 0xb4, 0x7a, 0xb2, 0xf6, 0xee, 0x7d, 0x7c, 0x76, 0x4a, 0xbd, 0xa8, 0xbf, 0x21, 0x8c, 0xf1, + 0xaf, 0x17, 0xdb, 0x2d, 0xc1, 0xb3, 0x17, 0x4c, 0x3d, 0xce, 0xa6, 0x21, 0xbf, 0xb4, 0x51, 0xc6, + 0xfa, 0x93, 0x21, 0x52, 0x79, 0x2e, 0x94, 0x4b, 0x4d, 0xa3, 0xfd, 0xb5, 0x92, 0x41, 0xdd, 0xaf, + 0x66, 0xae, 0x6f, 0x00, 0x8c, 0x68, 0xec, 0x7c, 0x46, 0x7d, 0xce, 0x5c, 0x65, 0x33, 0x73, 0x44, + 0xe3, 0x9f, 0xe2, 0x82, 0x28, 0x51, 0x04, 0x79, 0x16, 0x33, 0x17, 0x8d, 0x57, 0xb5, 0x1b, 0x23, + 0x1a, 0x3f, 0x8d, 0x99, 0x9b, 0xe8, 0xd5, 0x78, 0x0d, 0xbd, 0xfe, 0x9c, 0xf1, 0xc3, 0x14, 0xc7, + 0xfe, 0x1f, 0x34, 0xfb, 0xa7, 0x21, 0x00, 0x3a, 0x9f, 0x0b, 0xc9, 0x09, 0xdc, 0x48, 0xbc, 0xdd, + 0x99, 0x61, 0x14, 0x68, 0x7f, 0x78, 0x79, 0x90, 0xac, 0x5f, 0xe4, 0x97, 0x63, 0xf2, 0x23, 0xd8, + 0x2c, 0xc4, 0x6a, 0xb2, 0x61, 0xe5, 0xa5, 0x21, 0x7b, 0x3b, 0x1f, 0xb2, 0x7a, 0x3f, 0xad, 0x6b, + 0xf5, 0x35, 0x74, 0xfd, 0xa6, 0xa8, 0x56, 0xb2, 
0x19, 0xbc, 0xec, 0x6b, 0x59, 0x3f, 0x37, 0xa0, + 0x5d, 0xb8, 0x0c, 0xd9, 0x07, 0x90, 0x09, 0x30, 0xf6, 0x9e, 0xeb, 0xca, 0x79, 0x5d, 0x5d, 0x1c, + 0x4d, 0xf6, 0xd8, 0x7b, 0xce, 0x6c, 0x73, 0xa0, 0x87, 0xe4, 0x23, 0x68, 0x33, 0x55, 0x3f, 0xe9, + 0x0c, 0x55, 0xc9, 0x41, 0x89, 0xae, 0xae, 0x94, 0xb6, 0x2d, 0x96, 0x9b, 0x5b, 0x47, 0x60, 0x26, + 0xfb, 0x92, 0xaf, 0x81, 0x39, 0xa5, 0x73, 0x55, 0xd5, 0xca, 0x7a, 0xa8, 0x39, 0xa5, 0x73, 0x2c, + 0x68, 0xc9, 0x26, 0x34, 0x04, 0x71, 0x44, 0xe5, 0x09, 0x55, 0xbb, 0x3e, 0xa5, 0xf3, 0x1f, 0xd0, + 0xd8, 0xda, 0x85, 0x56, 0xfe, 0x10, 0xcd, 0xaa, 0x11, 0x46, 0xb2, 0x1e, 0x8d, 0x98, 0xf5, 0x18, + 0x5a, 0xf9, 0xc2, 0x51, 0x64, 0x9b, 0x28, 0x98, 0xf9, 0x2e, 0x32, 0x2e, 0xdb, 0x72, 0x22, 0xba, + 0xc6, 0x8b, 0x40, 0x7e, 0xba, 0x6c, 0xa5, 0x78, 0x16, 0x70, 0x96, 0x29, 0x37, 0x25, 0x8f, 0xf5, + 0x87, 0x1a, 0xd4, 0x65, 0x15, 0x4b, 0xde, 0xcc, 0x34, 0x0e, 0x08, 0x51, 0xfd, 0x95, 0xab, 0x17, + 0xdb, 0x0d, 0xcc, 0xe6, 0x27, 0x0f, 0xd3, 0x2e, 0x22, 0x4d, 0x54, 0x95, 0x5c, 0x91, 0xad, 0x5b, + 0x96, 0xea, 0x2b, 0xb7, 0x2c, 0x9b, 0xd0, 0xf0, 0x67, 0x53, 0x87, 0xcf, 0x63, 0x8c, 0xb5, 0xaa, + 0x5d, 0xf7, 0x67, 0xd3, 0x27, 0xf3, 0x58, 0xd8, 0x94, 0x07, 0x9c, 0x4e, 0x90, 0x24, 0x83, 0xad, + 0x89, 0x0b, 0x82, 0x78, 0x08, 0x6b, 0x19, 0xd0, 0xf3, 0x5c, 0x55, 0x51, 0xb5, 0xb2, 0x5f, 0xfc, + 0xe4, 0xa1, 0x52, 0x77, 0x25, 0x01, 0xc1, 0x13, 0x97, 0xec, 0xe4, 0x2b, 0x74, 0xc4, 0x4a, 0x99, + 0xb0, 0x33, 0x45, 0xb8, 0x40, 0x4a, 0x71, 0x01, 0xe1, 0x6e, 0x92, 0xa5, 0x89, 0x2c, 0x4d, 0xb1, + 0x80, 0xc4, 0xb7, 0xa0, 0x9d, 0xc2, 0x8d, 0x64, 0x31, 0xe5, 0x2e, 0xe9, 0x32, 0x32, 0xbe, 0x0b, + 0xb7, 0x7c, 0x36, 0xe7, 0x4e, 0x91, 0x1b, 0x90, 0x9b, 0x08, 0xda, 0x59, 0x5e, 0xe2, 0x5b, 0xd0, + 0x4a, 0x03, 0x12, 0x79, 0x57, 0x64, 0x9f, 0x94, 0xac, 0x22, 0xdb, 0x1d, 0x68, 0x26, 0x60, 0xbf, + 0x8a, 0x0c, 0x0d, 0x2a, 0x31, 0x3e, 0x29, 0x1f, 0x22, 0x16, 0xcf, 0x26, 0x5c, 0x6d, 0xb2, 0x86, + 0x3c, 0x58, 0x3e, 0xd8, 0x72, 0x1d, 0x79, 0xdf, 0x80, 0xb5, 0x24, 0x0e, 0x90, 0xaf, 
0x85, 0x7c, + 0xab, 0x7a, 0x11, 0x99, 0x76, 0x61, 0x3d, 0x8c, 0x82, 0x30, 0x88, 0x59, 0xe4, 0x50, 0xd7, 0x8d, + 0x58, 0x1c, 0x77, 0xda, 0x72, 0x3f, 0xbd, 0x7e, 0x24, 0x97, 0xad, 0x9f, 0x41, 0x43, 0x59, 0xbf, + 0xb4, 0x9b, 0xfa, 0x1e, 0xac, 0x86, 0x34, 0x12, 0x77, 0xca, 0xf6, 0x54, 0xba, 0xa6, 0x3d, 0xa5, + 0x91, 0x68, 0xa2, 0x73, 0xad, 0xd5, 0x0a, 0xf2, 0xcb, 0x25, 0xeb, 0x1e, 0xac, 0xe5, 0x78, 0x44, + 0x18, 0xa0, 0x53, 0xe8, 0x30, 0xc0, 0x49, 0x72, 0x72, 0x25, 0x3d, 0xd9, 0xba, 0x0f, 0x66, 0x62, + 0x68, 0x51, 0x7a, 0x69, 0x3d, 0x0c, 0x65, 0x3b, 0x39, 0xc5, 0x76, 0x31, 0xf8, 0x8c, 0x45, 0xaa, + 0xdc, 0x92, 0x13, 0xeb, 0x29, 0xb4, 0x0b, 0xf9, 0x94, 0xec, 0x41, 0x23, 0x9c, 0x0d, 0x1c, 0xdd, + 0xe6, 0xa7, 0x8d, 0xe1, 0xe9, 0x6c, 0xf0, 0x31, 0xbb, 0xd4, 0x8d, 0x61, 0x88, 0xb3, 0x74, 0xdb, + 0x4a, 0x76, 0xdb, 0x09, 0x34, 0x75, 0x68, 0x92, 0xef, 0x80, 0x99, 0xf8, 0x48, 0x21, 0x81, 0x25, + 0x47, 0xab, 0x4d, 0x53, 0x46, 0xf1, 0xa9, 0x63, 0x6f, 0xe4, 0x33, 0xd7, 0x49, 0xe3, 0x01, 0xcf, + 0x68, 0xda, 0x6d, 0x49, 0xf8, 0x44, 0x3b, 0xbf, 0xf5, 0x2e, 0xd4, 0xe5, 0xdd, 0x84, 0x7d, 0xc4, + 0xce, 0xba, 0x1a, 0x15, 0xe3, 0xd2, 0x4c, 0xfb, 0x47, 0x03, 0x9a, 0x3a, 0x45, 0x95, 0x0a, 0xe5, + 0x2e, 0x5d, 0xf9, 0xaa, 0x97, 0x5e, 0xd4, 0xaa, 0xeb, 0x2c, 0x52, 0x7b, 0xe5, 0x2c, 0xb2, 0x07, + 0x44, 0x26, 0x8b, 0x8b, 0x80, 0x7b, 0xfe, 0xc8, 0x91, 0xb6, 0x96, 0x59, 0x63, 0x1d, 0x29, 0x67, + 0x48, 0x38, 0x15, 0xeb, 0x07, 0x9f, 0x2f, 0x43, 0xfb, 0xa8, 0xff, 0xe0, 0xe4, 0x28, 0x0c, 0x27, + 0xde, 0x90, 0x62, 0x09, 0xbc, 0x0f, 0x35, 0x2c, 0xf2, 0x4b, 0x9e, 0x17, 0xbb, 0x65, 0xdd, 0x26, + 0x39, 0x80, 0x65, 0xac, 0xf5, 0x49, 0xd9, 0x2b, 0x63, 0xb7, 0xb4, 0xe9, 0x14, 0x87, 0xc8, 0x6e, + 0xe0, 0xfa, 0x63, 0x63, 0xb7, 0xac, 0xf3, 0x24, 0x1f, 0x81, 0x99, 0x56, 0xe9, 0x8b, 0x9e, 0x1c, + 0xbb, 0x0b, 0x7b, 0x50, 0x21, 0x9f, 0x56, 0x43, 0x8b, 0x5e, 0xce, 0xba, 0x0b, 0x9b, 0x35, 0x72, + 0x08, 0x0d, 0x5d, 0x25, 0x96, 0x3f, 0x0a, 0x76, 0x17, 0xf4, 0x87, 0xc2, 0x3c, 0xb2, 0xf0, 0x2e, + 0x7b, 0xb9, 0xec, 0x96, 
0x36, 0xb1, 0xe4, 0x7d, 0xa8, 0x2b, 0xd8, 0x2f, 0x7d, 0x18, 0xec, 0x96, + 0x77, 0x79, 0x42, 0xc9, 0xb4, 0xf5, 0x58, 0xf4, 0xba, 0xda, 0x5d, 0xd8, 0x6d, 0x93, 0x23, 0x80, + 0x4c, 0x75, 0xbd, 0xf0, 0xd9, 0xb4, 0xbb, 0xb8, 0x8b, 0x26, 0xf7, 0xa1, 0x99, 0xbe, 0x8c, 0x94, + 0x3f, 0x84, 0x76, 0x17, 0x35, 0xb6, 0xfd, 0xaf, 0xff, 0xfb, 0x6f, 0x5b, 0xc6, 0xaf, 0xae, 0xb6, + 0x8c, 0x5f, 0x5f, 0x6d, 0x19, 0x5f, 0x5e, 0x6d, 0x19, 0xbf, 0xbf, 0xda, 0x32, 0xfe, 0x7a, 0xb5, + 0x65, 0xfc, 0xe6, 0xef, 0x5b, 0xc6, 0xa0, 0x8e, 0xee, 0xff, 0xde, 0x7f, 0x02, 0x00, 0x00, 0xff, + 0xff, 0x51, 0x4f, 0x34, 0x66, 0xf8, 0x17, 0x00, 0x00, } diff --git a/abci/types/types.proto b/abci/types/types.proto index d23ac513..b62162c4 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -6,6 +6,7 @@ package types; import "github.com/gogo/protobuf/gogoproto/gogo.proto"; import "google/protobuf/timestamp.proto"; import "github.com/tendermint/tendermint/libs/common/types.proto"; +import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto"; // This file is copied from http://github.com/tendermint/abci // NOTE: When using custom types, mind the warnings. @@ -154,7 +155,7 @@ message ResponseQuery { int64 index = 5; bytes key = 6; bytes value = 7; - bytes proof = 8; + merkle.Proof proof = 8; int64 height = 9; } diff --git a/abci/types/typespb_test.go b/abci/types/typespb_test.go index 5da925e1..0ae0fea0 100644 --- a/abci/types/typespb_test.go +++ b/abci/types/typespb_test.go @@ -14,6 +14,7 @@ import fmt "fmt" import math "math" import _ "github.com/gogo/protobuf/gogoproto" import _ "github.com/golang/protobuf/ptypes/timestamp" +import _ "github.com/tendermint/tendermint/crypto/merkle" import _ "github.com/tendermint/tendermint/libs/common" // Reference imports to suppress errors if they are not otherwise used. diff --git a/crypto/merkle/compile.sh b/crypto/merkle/compile.sh new file mode 100644 index 00000000..8e4c739f --- /dev/null +++ b/crypto/merkle/compile.sh @@ -0,0 +1,6 @@ +#! 
/bin/bash + +protoc --gogo_out=. -I $GOPATH/src/ -I . -I $GOPATH/src/github.com/gogo/protobuf/protobuf merkle.proto +echo "--> adding nolint declarations to protobuf generated files" +awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new +mv merkle.pb.go.new merkle.pb.go diff --git a/crypto/merkle/merkle.pb.go b/crypto/merkle/merkle.pb.go new file mode 100644 index 00000000..75e1b08c --- /dev/null +++ b/crypto/merkle/merkle.pb.go @@ -0,0 +1,792 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: crypto/merkle/merkle.proto + +package merkle + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import bytes "bytes" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing nessecary data +// for example neighbouring node hash +type ProofOp struct { + Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` + Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"` + Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ProofOp) Reset() { *m = ProofOp{} } +func (m *ProofOp) String() string { return proto.CompactTextString(m) } +func (*ProofOp) ProtoMessage() {} +func (*ProofOp) Descriptor() ([]byte, []int) { + return fileDescriptor_merkle_5d3f6051907285da, []int{0} +} +func (m *ProofOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *ProofOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProofOp.Merge(dst, src) +} +func (m *ProofOp) XXX_Size() int { + return m.Size() +} +func (m *ProofOp) XXX_DiscardUnknown() { + xxx_messageInfo_ProofOp.DiscardUnknown(m) +} + +var xxx_messageInfo_ProofOp proto.InternalMessageInfo + +func (m *ProofOp) GetType() string { + if m != nil { + return m.Type + } + return "" +} + +func (m *ProofOp) GetKey() []byte { + if m != nil { + return m.Key + } + return nil +} + +func (m *ProofOp) GetData() []byte { + if m != nil { + return m.Data + } + return nil +} + +// Proof is Merkle proof defined by the list of ProofOps +type Proof struct { + Ops []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"` + XXX_NoUnkeyedLiteral struct{} 
`json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Proof) Reset() { *m = Proof{} } +func (m *Proof) String() string { return proto.CompactTextString(m) } +func (*Proof) ProtoMessage() {} +func (*Proof) Descriptor() ([]byte, []int) { + return fileDescriptor_merkle_5d3f6051907285da, []int{1} +} +func (m *Proof) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_Proof.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalTo(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (dst *Proof) XXX_Merge(src proto.Message) { + xxx_messageInfo_Proof.Merge(dst, src) +} +func (m *Proof) XXX_Size() int { + return m.Size() +} +func (m *Proof) XXX_DiscardUnknown() { + xxx_messageInfo_Proof.DiscardUnknown(m) +} + +var xxx_messageInfo_Proof proto.InternalMessageInfo + +func (m *Proof) GetOps() []ProofOp { + if m != nil { + return m.Ops + } + return nil +} + +func init() { + proto.RegisterType((*ProofOp)(nil), "merkle.ProofOp") + proto.RegisterType((*Proof)(nil), "merkle.Proof") +} +func (this *ProofOp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ProofOp) + if !ok { + that2, ok := that.(ProofOp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Type != that1.Type { + return false + } + if !bytes.Equal(this.Key, that1.Key) { + return false + } + if !bytes.Equal(this.Data, that1.Data) { + return false + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (this *Proof) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Proof) + if !ok { + that2, ok := that.(Proof) + if ok { + that1 = &that2 + } else { + 
return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Ops) != len(that1.Ops) { + return false + } + for i := range this.Ops { + if !this.Ops[i].Equal(&that1.Ops[i]) { + return false + } + } + if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { + return false + } + return true +} +func (m *ProofOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Type) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type))) + i += copy(dAtA[i:], m.Type) + } + if len(m.Key) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Data) > 0 { + dAtA[i] = 0x1a + i++ + i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data))) + i += copy(dAtA[i:], m.Data) + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func (m *Proof) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Proof) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Ops) > 0 { + for _, msg := range m.Ops { + dAtA[i] = 0xa + i++ + i = encodeVarintMerkle(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + +func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedProofOp(r 
randyMerkle, easy bool) *ProofOp { + this := &ProofOp{} + this.Type = string(randStringMerkle(r)) + v1 := r.Intn(100) + this.Key = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Key[i] = byte(r.Intn(256)) + } + v2 := r.Intn(100) + this.Data = make([]byte, v2) + for i := 0; i < v2; i++ { + this.Data[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkle(r, 4) + } + return this +} + +func NewPopulatedProof(r randyMerkle, easy bool) *Proof { + this := &Proof{} + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Ops = make([]ProofOp, v3) + for i := 0; i < v3; i++ { + v4 := NewPopulatedProofOp(r, easy) + this.Ops[i] = *v4 + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedMerkle(r, 2) + } + return this +} + +type randyMerkle interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneMerkle(r randyMerkle) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringMerkle(r randyMerkle) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneMerkle(r) + } + return string(tmps) +} +func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + dAtA = 
append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *ProofOp) Size() (n int) { + var l int + _ = l + l = len(m.Type) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + l = len(m.Key) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + l = len(m.Data) + if l > 0 { + n += 1 + l + sovMerkle(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Proof) Size() (n int) { + var l int + _ = l + if len(m.Ops) > 0 { + for _, e := range m.Ops { + l = e.Size() + n += 1 + l + sovMerkle(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func sovMerkle(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozMerkle(x uint64) (n int) { + return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ProofOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 
0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProofOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Type = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...) + if m.Data == nil { + m.Data = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkle(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkle + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Proof) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Proof: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowMerkle + } + if iNdEx >= l { + return 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthMerkle + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ops = append(m.Ops, ProofOp{}) + if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipMerkle(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthMerkle + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipMerkle(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthMerkle + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowMerkle + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipMerkle(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_5d3f6051907285da) } + +var fileDescriptor_merkle_5d3f6051907285da = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c, + 0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25, + 0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, + 0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0, + 0x2c, 0x88, 0x36, 0x25, 0x67, 0x2e, 0xf6, 0x80, 0xa2, 0xfc, 0xfc, 0x34, 0xff, 0x02, 0x21, 0x21, + 0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48, + 0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x04, 0xa9, + 0x4a, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x06, 0x0b, 0x81, 0xd9, 0x4a, 0x06, 0x5c, 0xac, 0x60, 0x43, + 0x84, 0xd4, 0xb9, 0x98, 0xf3, 0x0b, 0x8a, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xf8, 0xf5, + 0xa0, 0x0e, 0x84, 0x5a, 0xe0, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x85, 0x93, 0xc8, + 0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 
0xc7, 0x78, 0xe1, 0x91, 0x1c, + 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x49, 0x6c, 0x60, 0x37, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff, + 0xb9, 0x2b, 0x0f, 0xd1, 0xe8, 0x00, 0x00, 0x00, +} diff --git a/crypto/merkle/merkle.proto b/crypto/merkle/merkle.proto new file mode 100644 index 00000000..8a6c467d --- /dev/null +++ b/crypto/merkle/merkle.proto @@ -0,0 +1,30 @@ +syntax = "proto3"; +package merkle; + +// For more information on gogo.proto, see: +// https://github.com/gogo/protobuf/blob/master/extensions.md +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +option (gogoproto.marshaler_all) = true; +option (gogoproto.unmarshaler_all) = true; +option (gogoproto.sizer_all) = true; + +option (gogoproto.populate_all) = true; +option (gogoproto.equal_all) = true; + +//---------------------------------------- +// Message types + +// ProofOp defines an operation used for calculating Merkle root +// The data could be arbitrary format, providing necessary data +// for example neighbouring node hash +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} + +// Proof is Merkle proof defined by the list of ProofOps +message Proof { + repeated ProofOp ops = 1 [(gogoproto.nullable)=false]; +} diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go new file mode 100644 index 00000000..7da89495 --- /dev/null +++ b/crypto/merkle/proof.go @@ -0,0 +1,132 @@ +package merkle + +import ( + "bytes" + + cmn "github.com/tendermint/tmlibs/common" +) + +//---------------------------------------- +// ProofOp gets converted to an instance of ProofOperator: + +// ProofOperator is a layer for calculating intermediate Merkle root +// Run() takes a list of bytes because it can be more than one +// for example in range proofs +// ProofOp() defines custom encoding which can be decoded later with +// OpDecoder +type ProofOperator interface { + Run([][]byte) ([][]byte, error) + GetKey() []byte + ProofOp() ProofOp +} + +//---------------------------------------- +// 
Operations on a list of ProofOperators + +// ProofOperators is a slice of ProofOperator(s) +// Each operator will be applied to the input value sequentially +// and the last Merkle root will be verified with already known data +type ProofOperators []ProofOperator + +func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) { + return poz.Verify(root, keypath, [][]byte{value}) +} + +func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) { + keys, err := KeyPathToKeys(keypath) + if err != nil { + return + } + + for i, op := range poz { + key := op.GetKey() + if len(key) != 0 { + if !bytes.Equal(keys[0], key) { + return cmn.NewError("Key mismatch on operation #%d: expected %+v but %+v", i, []byte(keys[0]), []byte(key)) + } + keys = keys[1:] + } + args, err = op.Run(args) + if err != nil { + return + } + } + if !bytes.Equal(root, args[0]) { + return cmn.NewError("Calculated root hash is invalid: expected %+v but %+v", root, args[0]) + } + if len(keys) != 0 { + return cmn.NewError("Keypath not consumed all") + } + return nil +} + +//---------------------------------------- +// ProofRuntime - main entrypoint + +type OpDecoder func(ProofOp) (ProofOperator, error) + +type ProofRuntime struct { + decoders map[string]OpDecoder +} + +func NewProofRuntime() *ProofRuntime { + return &ProofRuntime{ + decoders: make(map[string]OpDecoder), + } +} + +func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) { + _, ok := prt.decoders[typ] + if ok { + panic("already registered for type " + typ) + } + prt.decoders[typ] = dec +} + +func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { + decoder := prt.decoders[pop.Type] + if decoder == nil { + return nil, cmn.NewError("unrecognized proof type %v", pop.Type) + } + return decoder(pop) +} + +func (prt *ProofRuntime) DecodeProof(proof *Proof) (poz ProofOperators, err error) { + poz = ProofOperators(nil) + for _, pop := range proof.Ops { 
operator, err := prt.Decode(pop) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding a proof operator") + } + poz = append(poz, operator) + } + return +} + +func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) { + return prt.Verify(proof, root, keypath, [][]byte{value}) +} + +// TODO In the long run we'll need a method of classification of ops, +// whether existence or absence or perhaps a third? +func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) { + return prt.Verify(proof, root, keypath, nil) +} + +func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) { + poz, err := prt.DecodeProof(proof) + if err != nil { + return cmn.ErrorWrap(err, "decoding proof") + } + return poz.Verify(root, keypath, args) +} + +// DefaultProofRuntime only knows about Simple value +// proofs. +// To use e.g. IAVL proofs, register op-decoders as +// defined in the IAVL package. +func DefaultProofRuntime() (prt *ProofRuntime) { + prt = NewProofRuntime() + prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder) + return +} diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go new file mode 100644 index 00000000..d74dac51 --- /dev/null +++ b/crypto/merkle/proof_key_path.go @@ -0,0 +1,107 @@ +package merkle + +import ( + "encoding/hex" + "fmt" + "net/url" + "strings" + + cmn "github.com/tendermint/tendermint/libs/common" +) + +/* + + For generalized Merkle proofs, each layer of the proof may require an + optional key. The key may be encoded either by URL-encoding or + (upper-case) hex-encoding. + TODO: In the future, more encodings may be supported, like base32 (e.g. 
+ /32:) + + For example, for a Cosmos-SDK application where the first two proof layers + are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys + might look like: + + 0: []byte("App") + 1: []byte("IBC") + 2: []byte{0x01, 0x02, 0x03} + + Assuming that we know that the first two layers are always ASCII texts, we + probably want to use URLEncoding for those, whereas the third layer will + require HEX encoding for efficient representation. + + kp := new(KeyPath) + kp.AppendKey([]byte("App"), KeyEncodingURL) + kp.AppendKey([]byte("IBC"), KeyEncodingURL) + kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) + kp.String() // Should return "/App/IBC/x:010203" + + NOTE: All encodings *MUST* work compatibly, such that you can choose to use + whatever encoding, and the decoded keys will always be the same. In other + words, it's just as good to encode all three keys using URL encoding or HEX + encoding... it just wouldn't be optimal in terms of readability or space + efficiency. + + NOTE: Punycode will never be supported here, because not all values can be + decoded. For example, no string decodes to the string "xn--blah" in + Punycode. 
+ +*/ + +type keyEncoding int + +const ( + KeyEncodingURL keyEncoding = iota + KeyEncodingHex + KeyEncodingMax +) + +type Key struct { + name []byte + enc keyEncoding +} + +type KeyPath []Key + +func (pth KeyPath) AppendKey(key []byte, enc keyEncoding) KeyPath { + return append(pth, Key{key, enc}) +} + +func (pth KeyPath) String() string { + res := "" + for _, key := range pth { + switch key.enc { + case KeyEncodingURL: + res += "/" + url.PathEscape(string(key.name)) + case KeyEncodingHex: + res += "/x:" + fmt.Sprintf("%X", key.name) + default: + panic("unexpected key encoding type") + } + } + return res +} + +func KeyPathToKeys(path string) (keys [][]byte, err error) { + if path == "" || path[0] != '/' { + return nil, cmn.NewError("key path string must start with a forward slash '/'") + } + parts := strings.Split(path[1:], "/") + keys = make([][]byte, len(parts)) + for i, part := range parts { + if strings.HasPrefix(part, "x:") { + hexPart := part[2:] + key, err := hex.DecodeString(hexPart) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part) + } + keys[i] = key + } else { + key, err := url.PathUnescape(part) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part) + } + keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes... 
+ } + } + return keys, nil +} diff --git a/crypto/merkle/proof_key_path_test.go b/crypto/merkle/proof_key_path_test.go new file mode 100644 index 00000000..48fda303 --- /dev/null +++ b/crypto/merkle/proof_key_path_test.go @@ -0,0 +1,41 @@ +package merkle + +import ( + "math/rand" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestKeyPath(t *testing.T) { + var path KeyPath + keys := make([][]byte, 10) + alphanum := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ" + + for d := 0; d < 1e4; d++ { + path = nil + + for i := range keys { + enc := keyEncoding(rand.Intn(int(KeyEncodingMax))) + keys[i] = make([]byte, rand.Uint32()%20) + switch enc { + case KeyEncodingURL: + for j := range keys[i] { + keys[i][j] = alphanum[rand.Intn(len(alphanum))] + } + case KeyEncodingHex: + rand.Read(keys[i]) + default: + panic("Unexpected encoding") + } + path = path.AppendKey(keys[i], enc) + } + + res, err := KeyPathToKeys(path.String()) + require.Nil(t, err) + + for i, key := range keys { + require.Equal(t, key, res[i]) + } + } +} diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go new file mode 100644 index 00000000..28935e2c --- /dev/null +++ b/crypto/merkle/proof_simple_value.go @@ -0,0 +1,91 @@ +package merkle + +import ( + "bytes" + "fmt" + + "github.com/tendermint/tendermint/crypto/tmhash" + cmn "github.com/tendermint/tendermint/libs/common" +) + +const ProofOpSimpleValue = "simple:v" + +// SimpleValueOp takes a key and a single value as argument and +// produces the root hash. The corresponding tree structure is +// the SimpleMap tree. SimpleMap takes a Hasher, and currently +// Tendermint uses aminoHasher. SimpleValueOp should support +// the hash function as used in aminoHasher. TODO support +// additional hash functions here as options/args to this +// operator. +// +// If the produced root hash matches the expected hash, the +// proof is good. +type SimpleValueOp struct { + // Encoded in ProofOp.Key. 
+ key []byte + + // To encode in ProofOp.Data + Proof *SimpleProof `json:"simple-proof"` +} + +var _ ProofOperator = SimpleValueOp{} + +func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp { + return SimpleValueOp{ + key: key, + Proof: proof, + } +} + +func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) { + if pop.Type != ProofOpSimpleValue { + return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue) + } + var op SimpleValueOp // a bit strange as we'll discard this, but it works. + err := cdc.UnmarshalBinary(pop.Data, &op) + if err != nil { + return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp") + } + return NewSimpleValueOp(pop.Key, op.Proof), nil +} + +func (op SimpleValueOp) ProofOp() ProofOp { + bz := cdc.MustMarshalBinary(op) + return ProofOp{ + Type: ProofOpSimpleValue, + Key: op.key, + Data: bz, + } +} + +func (op SimpleValueOp) String() string { + return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey()) +} + +func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) { + if len(args) != 1 { + return nil, cmn.NewError("expected 1 arg, got %v", len(args)) + } + value := args[0] + hasher := tmhash.New() + hasher.Write(value) // does not error + vhash := hasher.Sum(nil) + + // Wrap to hash the KVPair. 
+ hasher = tmhash.New() + encodeByteSlice(hasher, []byte(op.key)) // does not error + encodeByteSlice(hasher, []byte(vhash)) // does not error + kvhash := hasher.Sum(nil) + + if !bytes.Equal(kvhash, op.Proof.LeafHash) { + return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash) + } + + return [][]byte{ + op.Proof.ComputeRootHash(), + }, nil +} + +func (op SimpleValueOp) GetKey() []byte { + return op.key +} diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index 2541b6d3..306505fc 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -2,12 +2,24 @@ package merkle import ( "bytes" + "errors" "fmt" + + cmn "github.com/tendermint/tendermint/libs/common" ) -// SimpleProof represents a simple merkle proof. +// SimpleProof represents a simple Merkle proof. +// NOTE: The convention for proofs is to include leaf hashes but to +// exclude the root hash. +// This convention is implemented across IAVL range proofs as well. +// Keep this consistent unless there's a very good reason to change +// everything. This also affects the generalized proof system as +// well. type SimpleProof struct { - Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. + Total int `json:"total"` // Total number of items. + Index int `json:"index"` // Index of item to prove. + LeafHash []byte `json:"leaf_hash"` // Hash of item value. + Aunts [][]byte `json:"aunts"` // Hashes from leaf's sibling to a root's child. } // SimpleProofsFromHashers computes inclusion proof for given items. 
@@ -18,7 +30,10 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP proofs = make([]*SimpleProof, len(items)) for i, trail := range trails { proofs[i] = &SimpleProof{ - Aunts: trail.FlattenAunts(), + Total: len(items), + Index: i, + LeafHash: trail.Hash, + Aunts: trail.FlattenAunts(), } } return @@ -49,11 +64,33 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[strin return } -// Verify that leafHash is a leaf hash of the simple-merkle-tree -// which hashes to rootHash. -func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool { - computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts) - return computedHash != nil && bytes.Equal(computedHash, rootHash) +// Verify that the SimpleProof proves the root hash. +// Check sp.Index/sp.Total manually if needed +func (sp *SimpleProof) Verify(rootHash []byte, leafHash []byte) error { + if sp.Total < 0 { + return errors.New("Proof total must be positive") + } + if sp.Index < 0 { + return errors.New("Proof index cannot be negative") + } + if !bytes.Equal(sp.LeafHash, leafHash) { + return cmn.NewError("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash) + } + computedHash := sp.ComputeRootHash() + if !bytes.Equal(computedHash, rootHash) { + return cmn.NewError("invalid root hash: wanted %X got %X", rootHash, computedHash) + } + return nil +} + +// Compute the root hash given a leaf hash. Does not verify the result. +func (sp *SimpleProof) ComputeRootHash() []byte { + return computeHashFromAunts( + sp.Index, + sp.Total, + sp.LeafHash, + sp.Aunts, + ) } // String implements the stringer interface for SimpleProof. 
diff --git a/crypto/merkle/simple_tree_test.go b/crypto/merkle/simple_tree_test.go index e2dccd3b..b299aba7 100644 --- a/crypto/merkle/simple_tree_test.go +++ b/crypto/merkle/simple_tree_test.go @@ -1,13 +1,13 @@ package merkle import ( - "bytes" + "testing" + + "github.com/stretchr/testify/require" cmn "github.com/tendermint/tendermint/libs/common" . "github.com/tendermint/tendermint/libs/test" - "testing" - "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -30,60 +30,43 @@ func TestSimpleProof(t *testing.T) { rootHash2, proofs := SimpleProofsFromHashers(items) - if !bytes.Equal(rootHash, rootHash2) { - t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2) - } + require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2) // For each item, check the trail. for i, item := range items { itemHash := item.Hash() proof := proofs[i] - // Verify success - ok := proof.Verify(i, total, itemHash, rootHash) - if !ok { - t.Errorf("Verification failed for index %v.", i) - } + // Check total/index + require.Equal(t, proof.Index, i, "Unmatched indicies: %d vs %d", proof.Index, i) - // Wrong item index should make it fail - { - ok = proof.Verify((i+1)%total, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong index %v.", i) - } - } + require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total) + + // Verify success + err := proof.Verify(rootHash, itemHash) + require.NoError(t, err, "Verificatior failed: %v.", err) // Trail too long should make it fail origAunts := proof.Aunts proof.Aunts = append(proof.Aunts, cmn.RandBytes(32)) - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } + err = proof.Verify(rootHash, itemHash) + require.Error(t, err, "Expected verification to fail for wrong trail length") + proof.Aunts = origAunts // Trail too short should make it fail proof.Aunts = 
proof.Aunts[0 : len(proof.Aunts)-1] - { - ok = proof.Verify(i, total, itemHash, rootHash) - if ok { - t.Errorf("Expected verification to fail for wrong trail length.") - } - } + err = proof.Verify(rootHash, itemHash) + require.Error(t, err, "Expected verification to fail for wrong trail length") + proof.Aunts = origAunts // Mutating the itemHash should make it fail. - ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash) - if ok { - t.Errorf("Expected verification to fail for mutated leaf hash") - } + err = proof.Verify(rootHash, MutateByteSlice(itemHash)) + require.Error(t, err, "Expected verification to fail for mutated leaf hash") // Mutating the rootHash should make it fail. - ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash)) - if ok { - t.Errorf("Expected verification to fail for mutated root hash") - } + err = proof.Verify(MutateByteSlice(rootHash), itemHash) + require.Error(t, err, "Expected verification to fail for mutated root hash") } } diff --git a/crypto/merkle/wire.go b/crypto/merkle/wire.go new file mode 100644 index 00000000..c20ec9aa --- /dev/null +++ b/crypto/merkle/wire.go @@ -0,0 +1,12 @@ +package merkle + +import ( + "github.com/tendermint/go-amino" +) + +var cdc *amino.Codec + +func init() { + cdc = amino.NewCodec() + cdc.Seal() +} diff --git a/docs/app-dev/app-development.md b/docs/app-dev/app-development.md index 3aaebb23..2618bb1e 100644 --- a/docs/app-dev/app-development.md +++ b/docs/app-dev/app-development.md @@ -431,17 +431,30 @@ Note: these query formats are subject to change! 
In go: ``` -func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { - if reqQuery.Prove { - value, proof, exists := app.state.Proof(reqQuery.Data) - resQuery.Index = -1 // TODO make Proof return index - resQuery.Key = reqQuery.Data - resQuery.Value = value - resQuery.Proof = proof - if exists { - resQuery.Log = "exists" - } else { - resQuery.Log = "does not exist" + func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) { + if reqQuery.Prove { + value, proof, exists := app.state.GetWithProof(reqQuery.Data) + resQuery.Index = -1 // TODO make Proof return index + resQuery.Key = reqQuery.Data + resQuery.Value = value + resQuery.Proof = proof + if exists { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } else { + index, value, exists := app.state.Get(reqQuery.Data) + resQuery.Index = int64(index) + resQuery.Value = value + if exists { + resQuery.Log = "exists" + } else { + resQuery.Log = "does not exist" + } + return + } } return } else { @@ -461,22 +474,25 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type In Java: ``` -ResponseQuery requestQuery(RequestQuery req) { - final boolean isProveQuery = req.getProve(); - final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); + ResponseQuery requestQuery(RequestQuery req) { + final boolean isProveQuery = req.getProve(); + final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder(); + byte[] queryData = req.getData().toByteArray(); - if (isProveQuery) { - com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray()); - final byte[] proofAsByteArray = proofResult.getAsByteArray(); - - responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray)); - responseBuilder.setKey(req.getData()); - responseBuilder.setValue(ByteString.copyFrom(proofResult.getData())); - responseBuilder.setLog(result.getLogValue()); - } else { - 
byte[] queryData = req.getData().toByteArray(); - - final com.app.example.QueryResult result = generateQueryResult(queryData); + if (isProveQuery) { + com.app.example.QueryResultWithProof result = generateQueryResultWithProof(queryData); + responseBuilder.setIndex(result.getLeftIndex()); + responseBuilder.setKey(req.getData()); + responseBuilder.setValue(result.getValueOrNull(0)); + responseBuilder.setHeight(result.getHeight()); + responseBuilder.setProof(result.getProof()); + responseBuilder.setLog(result.getLogValue()); + } else { + com.app.example.QueryResult result = generateQueryResult(queryData); + responseBuilder.setIndex(result.getIndex()); + responseBuilder.setValue(result.getValue()); + responseBuilder.setLog(result.getLogValue()); + } responseBuilder.setIndex(result.getIndex()); responseBuilder.setValue(ByteString.copyFrom(result.getValue())); diff --git a/lite/errors/errors.go b/lite/errors/errors.go index 61426b23..59b6380d 100644 --- a/lite/errors/errors.go +++ b/lite/errors/errors.go @@ -41,6 +41,12 @@ func (e errUnknownValidators) Error() string { e.chainID, e.height) } +type errEmptyTree struct{} + +func (e errEmptyTree) Error() string { + return "Tree is empty" +} + //---------------------------------------- // Methods for above error types @@ -110,3 +116,18 @@ func IsErrUnknownValidators(err error) bool { } return false } + +//----------------- +// ErrEmptyTree + +func ErrEmptyTree() error { + return cmn.ErrorWrap(errEmptyTree{}, "") +} + +func IsErrEmptyTree(err error) bool { + if err_, ok := err.(cmn.Error); ok { + _, ok := err_.Data().(errEmptyTree) + return ok + } + return false +} diff --git a/lite/proxy/proof.go b/lite/proxy/proof.go new file mode 100644 index 00000000..452dee27 --- /dev/null +++ b/lite/proxy/proof.go @@ -0,0 +1,14 @@ +package proxy + +import ( + "github.com/tendermint/tendermint/crypto/merkle" +) + +func defaultProofRuntime() *merkle.ProofRuntime { + prt := merkle.NewProofRuntime() + prt.RegisterOpDecoder( + 
merkle.ProofOpSimpleValue, + merkle.SimpleValueOpDecoder, + ) + return prt +} diff --git a/lite/proxy/query.go b/lite/proxy/query.go index 84ff98b4..3acf826b 100644 --- a/lite/proxy/query.go +++ b/lite/proxy/query.go @@ -3,127 +3,95 @@ package proxy import ( "fmt" - "github.com/pkg/errors" - cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/lite" + lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" ) -// KeyProof represents a proof of existence or absence of a single key. -// Copied from iavl repo. TODO -type KeyProof interface { - // Verify verfies the proof is valid. To verify absence, - // the value should be nil. - Verify(key, value, root []byte) error - - // Root returns the root hash of the proof. - Root() []byte - - // Serialize itself - Bytes() []byte -} - // GetWithProof will query the key on the given node, and verify it has // a valid proof, as defined by the Verifier. // // If there is any error in checking, returns an error. 
-// If val is non-empty, proof should be KeyExistsProof -// If val is empty, proof should be KeyMissingProof -func GetWithProof(key []byte, reqHeight int64, node rpcclient.Client, +func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rpcclient.Client, cert lite.Verifier) ( - val cmn.HexBytes, height int64, proof KeyProof, err error) { + val cmn.HexBytes, height int64, proof *merkle.Proof, err error) { if reqHeight < 0 { - err = errors.Errorf("Height cannot be negative") + err = cmn.NewError("Height cannot be negative") return } - _resp, proof, err := GetWithProofOptions("/key", key, - rpcclient.ABCIQueryOptions{Height: int64(reqHeight)}, + res, err := GetWithProofOptions(prt, "/key", key, + rpcclient.ABCIQueryOptions{Height: int64(reqHeight), Prove: true}, node, cert) - if _resp != nil { - resp := _resp.Response - val, height = resp.Value, resp.Height + if err != nil { + return } + + resp := res.Response + val, height = resp.Value, resp.Height return val, height, proof, err } -// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions -func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOptions, +// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions. +// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store. +func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts rpcclient.ABCIQueryOptions, node rpcclient.Client, cert lite.Verifier) ( - *ctypes.ResultABCIQuery, KeyProof, error) { + *ctypes.ResultABCIQuery, error) { - _resp, err := node.ABCIQueryWithOptions(path, key, opts) + if !opts.Prove { + return nil, cmn.NewError("require ABCIQueryOptions.Prove to be true") + } + + res, err := node.ABCIQueryWithOptions(path, key, opts) if err != nil { - return nil, nil, err + return nil, err } - resp := _resp.Response + resp := res.Response - // make sure the proof is the proper height + // Validate the response, e.g. 
height. if resp.IsErr() { - err = errors.Errorf("Query error for key %d: %d", key, resp.Code) - return nil, nil, err + err = cmn.NewError("Query error for key %d: %d", key, resp.Code) + return nil, err } - if len(resp.Key) == 0 || len(resp.Proof) == 0 { - return nil, nil, ErrNoData() + + if len(resp.Key) == 0 || resp.Proof == nil { + return nil, lerr.ErrEmptyTree() } if resp.Height == 0 { - return nil, nil, errors.New("Height returned is zero") + return nil, cmn.NewError("Height returned is zero") } // AppHash for height H is in header H+1 signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert) if err != nil { - return nil, nil, err - } - - _ = signedHeader - return &ctypes.ResultABCIQuery{Response: resp}, nil, nil - - /* // TODO refactor so iavl stuff is not in tendermint core - // https://github.com/tendermint/tendermint/issues/1183 - if len(resp.Value) > 0 { - // The key was found, construct a proof of existence. - proof, err := iavl.ReadKeyProof(resp.Proof) - if err != nil { - return nil, nil, errors.Wrap(err, "Error reading proof") - } - - eproof, ok := proof.(*iavl.KeyExistsProof) - if !ok { - return nil, nil, errors.New("Expected KeyExistsProof for non-empty value") - } - - // Validate the proof against the certified header to ensure data integrity. - err = eproof.Verify(resp.Key, resp.Value, signedHeader.AppHash) - if err != nil { - return nil, nil, errors.Wrap(err, "Couldn't verify proof") - } - return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil - } - - // The key wasn't found, construct a proof of non-existence. - proof, err := iavl.ReadKeyProof(resp.Proof) - if err != nil { - return nil, nil, errors.Wrap(err, "Error reading proof") - } - - aproof, ok := proof.(*iavl.KeyAbsentProof) - if !ok { - return nil, nil, errors.New("Expected KeyAbsentProof for empty Value") + return nil, err } // Validate the proof against the certified header to ensure data integrity. 
- err = aproof.Verify(resp.Key, nil, signedHeader.AppHash) - if err != nil { - return nil, nil, errors.Wrap(err, "Couldn't verify proof") + if resp.Value != nil { + // Value exists + // XXX How do we encode the key into a string... + err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, string(resp.Key), resp.Value) + if err != nil { + return nil, cmn.ErrorWrap(err, "Couldn't verify value proof") + } + return &ctypes.ResultABCIQuery{Response: resp}, nil + } else { + // Value absent + // Validate the proof against the certified header to ensure data integrity. + // XXX How do we encode the key into a string... + err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key)) + if err != nil { + return nil, cmn.ErrorWrap(err, "Couldn't verify absence proof") + } + return &ctypes.ResultABCIQuery{Response: resp}, nil } - return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData() - */ } // GetCertifiedCommit gets the signed header for a given height and certifies diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 7f759cc6..0e30d755 100644 --- a/lite/proxy/query_test.go +++ b/lite/proxy/query_test.go @@ -4,12 +4,12 @@ import ( "fmt" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/lite" certclient "github.com/tendermint/tendermint/lite/client" nm "github.com/tendermint/tendermint/node" @@ -20,6 +20,7 @@ import ( var node *nm.Node var chainID = "tendermint_test" // TODO use from config. +var waitForEventTimeout = 5 * time.Second // TODO fix tests!! 
@@ -38,70 +39,87 @@ func kvstoreTx(k, v []byte) []byte { return []byte(fmt.Sprintf("%s=%s", k, v)) } +// TODO: enable it after general proof format has been adapted +// in abci/examples/kvstore.go func _TestAppProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) + prt := defaultProofRuntime() cl := client.NewLocal(node) client.WaitForHeight(cl, 1, nil) + // This sets up our trust on the node based on some past point. + source := certclient.NewProvider(chainID, cl) + seed, err := source.LatestFullCommit(chainID, 1, 1) + require.NoError(err, "%#v", err) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + + // Wait for tx confirmation. + done := make(chan int64) + go func() { + evtTyp := types.EventTx + _, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout) + require.Nil(err, "%#v", err) + close(done) + }() + + // Submit a transaction. k := []byte("my-key") v := []byte("my-value") - tx := kvstoreTx(k, v) br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.DeliverTx.Code) brh := br.Height - // This sets up our trust on the node based on some past point. - source := certclient.NewProvider(chainID, cl) - seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) - require.NoError(err, "%+v", err) - cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) - - client.WaitForHeight(cl, 3, nil) + // Fetch latest after tx commit. + <-done latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) rootHash := latest.SignedHeader.AppHash + if rootHash == nil { + // Fetch one block later, AppHash hasn't been committed yet. + // TODO find a way to avoid doing this. 
+ client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil) + latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1) + require.NoError(err, "%#v", err) + rootHash = latest.SignedHeader.AppHash + } + require.NotNil(rootHash) // verify a query before the tx block has no data (and valid non-exist proof) - bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) - fmt.Println(bs, height, proof, err) - require.NotNil(err) - require.True(IsErrNoData(err), err.Error()) + bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert) + require.NoError(err, "%#v", err) + // require.NotNil(proof) + // TODO: Ensure that *some* keys will be there, ensuring that proof is nil, + // (currently there's a race condition) + // and ensure that proof proves absence of k. require.Nil(bs) // but given that block it is good - bs, height, proof, err = GetWithProof(k, brh, cl, cert) - require.NoError(err, "%+v", err) + bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert) + require.NoError(err, "%#v", err) require.NotNil(proof) - require.True(height >= int64(latest.Height())) + require.Equal(height, brh) - // Alexis there is a bug here, somehow the above code gives us rootHash = nil - // and proof.Verify doesn't care, while proofNotExists.Verify fails. - // I am hacking this in to make it pass, but please investigate further. - rootHash = proof.Root() - - //err = wire.ReadBinaryBytes(bs, &data) - //require.NoError(err, "%+v", err) assert.EqualValues(v, bs) - err = proof.Verify(k, bs, rootHash) - assert.NoError(err, "%+v", err) + err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding + assert.NoError(err, "%#v", err) // Test non-existing key. 
missing := []byte("my-missing-key") - bs, _, proof, err = GetWithProof(missing, 0, cl, cert) - require.True(IsErrNoData(err)) + bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert) + require.NoError(err) require.Nil(bs) require.NotNil(proof) - err = proof.Verify(missing, nil, rootHash) - assert.NoError(err, "%+v", err) - err = proof.Verify(k, nil, rootHash) - assert.Error(err) + err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding + assert.NoError(err, "%#v", err) + err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding + assert.Error(err, "%#v", err) } -func _TestTxProofs(t *testing.T) { +func TestTxProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) cl := client.NewLocal(node) @@ -109,15 +127,15 @@ func _TestTxProofs(t *testing.T) { tx := kvstoreTx([]byte("key-a"), []byte("value-a")) br, err := cl.BroadcastTxCommit(tx) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.DeliverTx.Code) brh := br.Height source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) - require.NoError(err, "%+v", err) - cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) + require.NoError(err, "%#v", err) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) // First let's make sure a bogus transaction hash returns a valid non-existence proof. key := types.Tx([]byte("bogus")).Hash() @@ -128,12 +146,12 @@ func _TestTxProofs(t *testing.T) { // Now let's check with the real tx hash. 
key = types.Tx(tx).Hash() res, err = cl.Tx(key, true) - require.NoError(err, "%+v", err) + require.NoError(err, "%#v", err) require.NotNil(res) err = res.Proof.Validate(key) - assert.NoError(err, "%+v", err) + assert.NoError(err, "%#v", err) commit, err := GetCertifiedCommit(br.Height, cl, cert) - require.Nil(err, "%+v", err) + require.Nil(err, "%#v", err) require.Equal(res.Proof.RootHash, commit.Header.DataHash) } diff --git a/lite/proxy/wrapper.go b/lite/proxy/wrapper.go index 4c0df022..7ddb3b8a 100644 --- a/lite/proxy/wrapper.go +++ b/lite/proxy/wrapper.go @@ -3,6 +3,7 @@ package proxy import ( cmn "github.com/tendermint/tendermint/libs/common" + "github.com/tendermint/tendermint/crypto/merkle" "github.com/tendermint/tendermint/lite" rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -15,6 +16,7 @@ var _ rpcclient.Client = Wrapper{} type Wrapper struct { rpcclient.Client cert *lite.DynamicVerifier + prt *merkle.ProofRuntime } // SecureClient uses a given Verifier to wrap an connection to an untrusted @@ -22,7 +24,8 @@ type Wrapper struct { // // If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { - wrap := Wrapper{c, cert} + prt := defaultProofRuntime() + wrap := Wrapper{c, cert, prt} // TODO: no longer possible as no more such interface exposed.... 
// if we wrap http client, then we can swap out the event switch to filter // if hc, ok := c.(*rpcclient.HTTP); ok { @@ -36,7 +39,7 @@ func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper { func (w Wrapper) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert) + res, err := GetWithProofOptions(w.prt, path, data, opts, w.Client, w.cert) return res, err } diff --git a/rpc/client/httpclient.go b/rpc/client/httpclient.go index a9c64f5d..a1b59ffa 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/httpclient.go @@ -75,7 +75,7 @@ func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuer func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.rpc.Call("abci_query", - map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, + map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, result) if err != nil { return nil, errors.Wrap(err, "ABCIQuery") diff --git a/rpc/client/localclient.go b/rpc/client/localclient.go index b3c5e309..8d89b715 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/localclient.go @@ -61,7 +61,7 @@ func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) + return core.ABCIQuery(path, data, opts.Height, opts.Prove) } func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index 022e4f36..3a0ed79c 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ 
-31,10 +31,18 @@ func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQu } func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - q := a.App.Query(abci.RequestQuery{Data: data, Path: path, Height: opts.Height, Prove: opts.Trusted}) + q := a.App.Query(abci.RequestQuery{ + Data: data, + Path: path, + Height: opts.Height, + Prove: opts.Prove, + }) return &ctypes.ResultABCIQuery{q}, nil } +// NOTE: Caller should call a.App.Commit() separately, +// this function does not actually wait for a commit. +// TODO: Make it wait for a commit and set res.Height appropriately. func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { res := ctypes.ResultBroadcastTxCommit{} res.CheckTx = a.App.CheckTx(tx) @@ -42,6 +50,7 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit return &res, nil } res.DeliverTx = a.App.DeliverTx(tx) + res.Height = -1 // TODO return &res, nil } @@ -86,7 +95,7 @@ func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQ } func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted}) + res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove}) if err != nil { return nil, err } @@ -133,10 +142,10 @@ func NewABCIRecorder(client client.ABCIClient) *ABCIRecorder { } type QueryArgs struct { - Path string - Data cmn.HexBytes - Height int64 - Trusted bool + Path string + Data cmn.HexBytes + Height int64 + Prove bool } func (r *ABCIRecorder) addCall(call Call) { @@ -161,7 +170,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts res, err := r.Client.ABCIQueryWithOptions(path, data, opts) r.addCall(Call{ Name: "abci_query", - Args: QueryArgs{path, data, opts.Height, 
opts.Trusted}, + Args: QueryArgs{path, data, opts.Height, opts.Prove}, Response: res, Error: err, }) diff --git a/rpc/client/mock/abci_test.go b/rpc/client/mock/abci_test.go index 327ec9e7..ca220c84 100644 --- a/rpc/client/mock/abci_test.go +++ b/rpc/client/mock/abci_test.go @@ -51,7 +51,7 @@ func TestABCIMock(t *testing.T) { assert.Equal("foobar", err.Error()) // query always returns the response - _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) + _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Prove: false}) query := _query.Response require.Nil(err) require.NotNil(query) @@ -98,7 +98,7 @@ func TestABCIRecorder(t *testing.T) { _, err := r.ABCIInfo() assert.Nil(err, "expected no err on info") - _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Trusted: false}) + _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Prove: false}) assert.NotNil(err, "expected error on query") require.Equal(2, len(r.Calls)) @@ -122,7 +122,7 @@ func TestABCIRecorder(t *testing.T) { require.True(ok) assert.Equal("path", qa.Path) assert.EqualValues("data", qa.Data) - assert.False(qa.Trusted) + assert.False(qa.Prove) // now add some broadcasts (should all err) txs := []types.Tx{{1}, {2}, {3}} @@ -173,9 +173,17 @@ func TestABCIApp(t *testing.T) { require.NotNil(res.DeliverTx) assert.True(res.DeliverTx.IsOK()) + // commit + // TODO: This may not be necessary in the future + if res.Height == -1 { + m.App.Commit() + } + // check the key - _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Trusted: true}) + _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Prove: true}) qres := _qres.Response require.Nil(err) assert.EqualValues(value, qres.Value) + + // XXX Check proof } diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index c5787849..ef2d4f19 100644 --- 
a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -1,3 +1,5 @@ +package mock + /* package mock returns a Client implementation that accepts various (mock) implementations of the various methods. @@ -11,7 +13,6 @@ For real clients, you probably want the "http" package. If you want to directly call a tendermint node in process, you can use the "local" package. */ -package mock import ( "reflect" @@ -87,7 +88,7 @@ func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue } func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { - return core.ABCIQuery(path, data, opts.Height, opts.Trusted) + return core.ABCIQuery(path, data, opts.Height, opts.Prove) } func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 767ae684..602525b5 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -166,10 +166,10 @@ func TestAppCalls(t *testing.T) { if err := client.WaitForHeight(c, apph, nil); err != nil { t.Error(err) } - _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) + _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: false}) qres := _qres.Response if assert.Nil(err) && assert.True(qres.IsOK()) { - // assert.Equal(k, data.GetKey()) // only returned for proofs + assert.Equal(k, qres.Key) assert.EqualValues(v, qres.Value) } @@ -221,10 +221,12 @@ func TestAppCalls(t *testing.T) { assert.Equal(block.Block.LastCommit, commit2.Commit) // and we got a proof that works! 
- _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) + _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: true}) pres := _pres.Response assert.Nil(err) assert.True(pres.IsOK()) + + // XXX Test proof } } @@ -310,7 +312,7 @@ func TestTx(t *testing.T) { // time to verify the proof proof := ptx.Proof if tc.prove && assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } } } @@ -348,7 +350,7 @@ func TestTxSearch(t *testing.T) { // time to verify the proof proof := ptx.Proof if assert.EqualValues(t, tx, proof.Data) { - assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) + assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash)) } // query by height @@ -362,7 +364,7 @@ func TestTxSearch(t *testing.T) { require.Len(t, result.Txs, 0) // we query using a tag (see kvstore application) - result, err = c.TxSearch("app.creator='jae'", false, 1, 30) + result, err = c.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 30) require.Nil(t, err, "%+v", err) if len(result.Txs) == 0 { t.Fatal("expected a lot of transactions") diff --git a/rpc/client/types.go b/rpc/client/types.go index 89bd2f98..6a23fa45 100644 --- a/rpc/client/types.go +++ b/rpc/client/types.go @@ -3,10 +3,9 @@ package client // ABCIQueryOptions can be used to provide options for ABCIQuery call other // than the DefaultABCIQueryOptions. type ABCIQueryOptions struct { - Height int64 - Trusted bool + Height int64 + Prove bool } -// DefaultABCIQueryOptions are latest height (0) and trusted equal to false -// (which will result in a proof being returned). -var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} +// DefaultABCIQueryOptions are latest height (0) and prove false. 
+var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false} diff --git a/rpc/core/abci.go b/rpc/core/abci.go index 9c7af92c..47219563 100644 --- a/rpc/core/abci.go +++ b/rpc/core/abci.go @@ -1,8 +1,6 @@ package core import ( - "fmt" - abci "github.com/tendermint/tendermint/abci/types" cmn "github.com/tendermint/tendermint/libs/common" ctypes "github.com/tendermint/tendermint/rpc/core/types" @@ -12,7 +10,7 @@ import ( // Query the application for some information. // // ```shell -// curl 'localhost:26657/abci_query?path=""&data="abcd"&trusted=false' +// curl 'localhost:26657/abci_query?path=""&data="abcd"&prove=false' // ``` // // ```go @@ -47,18 +45,14 @@ import ( // |-----------+--------+---------+----------+------------------------------------------------| // | path | string | false | false | Path to the data ("/a/b/c") | // | data | []byte | false | true | Data | -// | height | int64 | 0 | false | Height (0 means latest) | -// | trusted | bool | false | false | Does not include a proof of the data inclusion | -func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { - if height < 0 { - return nil, fmt.Errorf("height must be non-negative") - } - +// | height | int64 | 0 | false | Height (0 means latest) | +// | prove | bool | false | false | Includes proof if true | +func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) { resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ Path: path, Data: data, Height: height, - Prove: !trusted, + Prove: prove, }) if err != nil { return nil, err diff --git a/test/app/kvstore_test.sh b/test/app/kvstore_test.sh index 67f6b583..034e2887 100755 --- a/test/app/kvstore_test.sh +++ b/test/app/kvstore_test.sh @@ -41,7 +41,7 @@ set -e # we should not be able to look up the value RESPONSE=`abci-cli query \"$VALUE\"` set +e -A=`echo $RESPONSE | grep $VALUE` +A=`echo $RESPONSE | grep \"value: $VALUE\"` if [[ $? 
== 0 ]]; then echo "Found '$VALUE' for $VALUE when we should not have. Response:" echo "$RESPONSE" diff --git a/types/block.go b/types/block.go index 14f97548..5610cc79 100644 --- a/types/block.go +++ b/types/block.go @@ -709,7 +709,6 @@ func (h hasher) Hash() []byte { } } return hasher.Sum(nil) - } func aminoHash(item interface{}) []byte { diff --git a/types/part_set.go b/types/part_set.go index f6d7f6b6..8c8151ba 100644 --- a/types/part_set.go +++ b/types/part_set.go @@ -190,7 +190,7 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) { } // Check hash proof - if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { + if part.Proof.Verify(ps.Hash(), part.Hash()) != nil { return false, ErrPartSetInvalidProof } diff --git a/types/results_test.go b/types/results_test.go index 8cbe319f..80803385 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -38,8 +38,8 @@ func TestABCIResults(t *testing.T) { for i, res := range results { proof := results.ProveResult(i) - valid := proof.Verify(i, len(results), res.Hash(), root) - assert.True(t, valid, "%d", i) + valid := proof.Verify(root, res.Hash()) + assert.NoError(t, valid, "%d", i) } } diff --git a/types/tx.go b/types/tx.go index 489f0b23..41fc310f 100644 --- a/types/tx.go +++ b/types/tx.go @@ -77,8 +77,6 @@ func (txs Txs) Proof(i int) TxProof { root, proofs := merkle.SimpleProofsFromHashers(hashers) return TxProof{ - Index: i, - Total: l, RootHash: root, Data: txs[i], Proof: *proofs[i], @@ -87,10 +85,9 @@ func (txs Txs) Proof(i int) TxProof { // TxProof represents a Merkle proof of the presence of a transaction in the Merkle tree. type TxProof struct { - Index, Total int - RootHash cmn.HexBytes - Data Tx - Proof merkle.SimpleProof + RootHash cmn.HexBytes + Data Tx + Proof merkle.SimpleProof } // LeadHash returns the hash of the this proof refers to. 
@@ -104,14 +101,14 @@ func (tp TxProof) Validate(dataHash []byte) error { if !bytes.Equal(dataHash, tp.RootHash) { return errors.New("Proof matches different data hash") } - if tp.Index < 0 { + if tp.Proof.Index < 0 { return errors.New("Proof index cannot be negative") } - if tp.Total <= 0 { + if tp.Proof.Total <= 0 { return errors.New("Proof total must be positive") } - valid := tp.Proof.Verify(tp.Index, tp.Total, tp.LeafHash(), tp.RootHash) - if !valid { + valid := tp.Proof.Verify(tp.RootHash, tp.LeafHash()) + if valid != nil { return errors.New("Proof is not internally consistent") } return nil diff --git a/types/tx_test.go b/types/tx_test.go index df7a7449..9fb8ff34 100644 --- a/types/tx_test.go +++ b/types/tx_test.go @@ -69,8 +69,8 @@ func TestValidTxProof(t *testing.T) { leaf := txs[i] leafHash := leaf.Hash() proof := txs.Proof(i) - assert.Equal(t, i, proof.Index, "%d: %d", h, i) - assert.Equal(t, len(txs), proof.Total, "%d: %d", h, i) + assert.Equal(t, i, proof.Proof.Index, "%d: %d", h, i) + assert.Equal(t, len(txs), proof.Proof.Total, "%d: %d", h, i) assert.EqualValues(t, root, proof.RootHash, "%d: %d", h, i) assert.EqualValues(t, leaf, proof.Data, "%d: %d", h, i) assert.EqualValues(t, leafHash, proof.LeafHash(), "%d: %d", h, i) @@ -128,7 +128,7 @@ func assertBadProof(t *testing.T, root []byte, bad []byte, good TxProof) { // This can happen if we have a slightly different total (where the // path ends up the same). If it is something else, we have a real // problem. 
- assert.NotEqual(t, proof.Total, good.Total, "bad: %#v\ngood: %#v", proof, good) + assert.NotEqual(t, proof.Proof.Total, good.Proof.Total, "bad: %#v\ngood: %#v", proof, good) } } } From f36ed7e7ffb83d4a28bc38eab5686e17238e6be1 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Fri, 28 Sep 2018 23:32:13 -0400 Subject: [PATCH 07/18] General Merkle Follow Up (#2510) * tmlibs -> libs * update changelog * address some comments from review of #2298 --- CHANGELOG_PENDING.md | 16 ++++++++++++---- README.md | 11 ++++++----- config/config.go | 2 +- crypto/merkle/proof.go | 24 +++++++++++++----------- crypto/merkle/proof_key_path.go | 6 +++++- crypto/merkle/proof_simple_value.go | 2 +- libs/common/types.pb.go | 4 ++-- lite/doc.go | 2 +- lite/dynamic_verifier.go | 6 +++--- 9 files changed, 44 insertions(+), 29 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index c6346f6a..bf381dce 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,21 +5,29 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config -- [config] `mempool.wal` is disabled by default + * [config] `mempool.wal` is disabled by default + * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default + behaviour to `prove=false` + * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer). * Apps + * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just + arbitrary bytes * Go API -- [node] Remove node.RunForever -- [config] \#2232 timeouts as time.Duration, not ints + * [node] Remove node.RunForever + * [config] \#2232 timeouts as time.Duration, not ints + * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove` + * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`. 
+ * [crypto/merkle & lite] \#2298 Various changes to accommodate General Merkle trees * Blockchain Protocol * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`. - * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). * P2P Protocol FEATURES: +- [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together IMPROVEMENTS: - [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics diff --git a/README.md b/README.md index 2e4146f4..069f9f13 100644 --- a/README.md +++ b/README.md @@ -118,11 +118,12 @@ CHANGELOG even if they don't lead to MINOR version bumps: - rpc/client - config - node -- libs/bech32 -- libs/common -- libs/db -- libs/errors -- libs/log +- libs + - bech32 + - common + - db + - errors + - log Exported objects in these packages that are not covered by the versioning scheme are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any diff --git a/config/config.go b/config/config.go index 2ccb4908..619c0410 100644 --- a/config/config.go +++ b/config/config.go @@ -20,7 +20,7 @@ const ( // generate the config.toml. Please reflect any changes // made here in the defaultConfigTemplate constant in // config/toml.go -// NOTE: tmlibs/cli must know to look in the config dir! +// NOTE: libs/cli must know to look in the config dir! 
var ( DefaultTendermintDir = ".tendermint" defaultConfigDir = "config" diff --git a/crypto/merkle/proof.go b/crypto/merkle/proof.go index 7da89495..3059ed3b 100644 --- a/crypto/merkle/proof.go +++ b/crypto/merkle/proof.go @@ -3,17 +3,19 @@ package merkle import ( "bytes" - cmn "github.com/tendermint/tmlibs/common" + cmn "github.com/tendermint/tendermint/libs/common" ) //---------------------------------------- // ProofOp gets converted to an instance of ProofOperator: -// ProofOperator is a layer for calculating intermediate Merkle root -// Run() takes a list of bytes because it can be more than one -// for example in range proofs -// ProofOp() defines custom encoding which can be decoded later with -// OpDecoder +// ProofOperator is a layer for calculating intermediate Merkle roots +// when a series of Merkle trees are chained together. +// Run() takes leaf values from a tree and returns the Merkle +// root for the corresponding tree. It takes and returns a list of bytes +// to allow multiple leaves to be part of a single proof, for instance in a range proof. +// ProofOp() encodes the ProofOperator in a generic way so it can later be +// decoded with OpDecoder. type ProofOperator interface { Run([][]byte) ([][]byte, error) GetKey() []byte @@ -23,8 +25,8 @@ type ProofOperator interface { //---------------------------------------- // Operations on a list of ProofOperators -// ProofOperators is a slice of ProofOperator(s) -// Each operator will be applied to the input value sequencially +// ProofOperators is a slice of ProofOperator(s). 
+// Each operator will be applied to the input value sequentially // and the last Merkle root will be verified with already known data type ProofOperators []ProofOperator @@ -91,8 +93,8 @@ func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) { return decoder(pop) } -func (prt *ProofRuntime) DecodeProof(proof *Proof) (poz ProofOperators, err error) { - poz = ProofOperators(nil) +func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) { + var poz ProofOperators for _, pop := range proof.Ops { operator, err := prt.Decode(pop) if err != nil { @@ -100,7 +102,7 @@ func (prt *ProofRuntime) DecodeProof(proof *Proof) (poz ProofOperators, err erro } poz = append(poz, operator) } - return + return poz, nil } func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) { diff --git a/crypto/merkle/proof_key_path.go b/crypto/merkle/proof_key_path.go index d74dac51..aec93e82 100644 --- a/crypto/merkle/proof_key_path.go +++ b/crypto/merkle/proof_key_path.go @@ -35,6 +35,8 @@ import ( kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingURL) kp.String() // Should return "/App/IBC/x:010203" + NOTE: Key paths must begin with a `/`. + NOTE: All encodings *MUST* work compatibly, such that you can choose to use whatever encoding, and the decoded keys will always be the same. In other words, it's just as good to encode all three keys using URL encoding or HEX @@ -52,7 +54,7 @@ type keyEncoding int const ( KeyEncodingURL keyEncoding = iota KeyEncodingHex - KeyEncodingMax + KeyEncodingMax // Number of known encodings. Used for testing ) type Key struct { @@ -81,6 +83,8 @@ func (pth KeyPath) String() string { return res } +// Decode a path to a list of keys. Path must begin with `/`. +// Each key must use a known encoding. 
func KeyPathToKeys(path string) (keys [][]byte, err error) { if path == "" || path[0] != '/' { return nil, cmn.NewError("key path string must start with a forward slash '/'") diff --git a/crypto/merkle/proof_simple_value.go b/crypto/merkle/proof_simple_value.go index 28935e2c..5b7b5232 100644 --- a/crypto/merkle/proof_simple_value.go +++ b/crypto/merkle/proof_simple_value.go @@ -25,7 +25,7 @@ type SimpleValueOp struct { key []byte // To encode in ProofOp.Data - Proof *SimpleProof `json:"simple-proof"` + Proof *SimpleProof `json:"simple_proof"` } var _ ProofOperator = SimpleValueOp{} diff --git a/libs/common/types.pb.go b/libs/common/types.pb.go index 9cd62273..716d28a0 100644 --- a/libs/common/types.pb.go +++ b/libs/common/types.pb.go @@ -26,7 +26,7 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package -// Define these here for compatibility but use tmlibs/common.KVPair. +// Define these here for compatibility but use libs/common.KVPair. type KVPair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` @@ -82,7 +82,7 @@ func (m *KVPair) GetValue() []byte { return nil } -// Define these here for compatibility but use tmlibs/common.KI64Pair. +// Define these here for compatibility but use libs/common.KI64Pair. type KI64Pair struct { Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` Value int64 `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"` diff --git a/lite/doc.go b/lite/doc.go index 2a0ba23e..00dcce68 100644 --- a/lite/doc.go +++ b/lite/doc.go @@ -88,7 +88,7 @@ type PersistentProvider interface { } ``` -* DBProvider - persistence provider for use with any tmlibs/DB. +* DBProvider - persistence provider for use with any libs/DB. * MultiProvider - combine multiple providers. 
The suggested use for local light clients is client.NewHTTPProvider(...) for diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go index 2dee69f9..6a772091 100644 --- a/lite/dynamic_verifier.go +++ b/lite/dynamic_verifier.go @@ -4,6 +4,7 @@ import ( "bytes" "fmt" "sync" + log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" @@ -25,10 +26,9 @@ type DynamicVerifier struct { // This is a source of new info, like a node rpc, or other import method. source Provider - // pending map for synchronize concurrent verification requests + // pending map to synchronize concurrent verification requests + mtx sync.Mutex pendingVerifications map[int64]chan struct{} - - mtx sync.Mutex } // NewDynamicVerifier returns a new DynamicVerifier. It uses the From ead9fc0179ac773f93dd044b10b9c3ba48269e0f Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 30 Sep 2018 12:35:52 -0400 Subject: [PATCH 08/18] Docs cleanup (#2522) * minor doc cleanup * docs/tools: link fixes and readme * docs/networks: networks/local/README.md * docs: update vuepress config * docs: fixes from review --- docs/.vuepress/config.js | 6 +- docs/README.md | 46 ++-- docs/introduction/README.md | 15 ++ docs/introduction/introduction.md | 2 + docs/introduction/quick-start.md | 36 +-- docs/introduction/what-is-tendermint.md | 332 ++++++++++++++++++++++++ docs/networks/README.md | 9 + docs/networks/deploy-testnets.md | 23 +- docs/networks/docker-compose.md | 85 ++++++ docs/networks/terraform-and-ansible.md | 8 +- docs/spec/abci/README.md | 2 +- docs/spec/abci/abci.md | 6 +- docs/tendermint-core/README.md | 4 + docs/tools/README.md | 4 + docs/tools/benchmarking.md | 2 +- docs/tools/monitoring.md | 8 +- networks/local/README.md | 4 + networks/remote/README.md | 2 +- 18 files changed, 504 insertions(+), 90 deletions(-) create mode 100644 docs/introduction/README.md create mode 100644 docs/introduction/what-is-tendermint.md 
create mode 100644 docs/networks/README.md create mode 100644 docs/networks/docker-compose.md create mode 100644 docs/tendermint-core/README.md create mode 100644 docs/tools/README.md diff --git a/docs/.vuepress/config.js b/docs/.vuepress/config.js index 892ea204..b4e2c3fa 100644 --- a/docs/.vuepress/config.js +++ b/docs/.vuepress/config.js @@ -11,12 +11,12 @@ module.exports = { nav: [{ text: "Back to Tendermint", link: "https://tendermint.com" }], sidebar: [ { - title: "Getting Started", + title: "Introduction", collapsable: false, children: [ "/introduction/quick-start", "/introduction/install", - "/introduction/introduction" + "/introduction/what-is-tendermint" ] }, { @@ -48,7 +48,7 @@ module.exports = { title: "Networks", collapsable: false, children: [ - "/networks/deploy-testnets", + "/networks/docker-compose", "/networks/terraform-and-ansible", ] }, diff --git a/docs/README.md b/docs/README.md index 2ecf625e..15ce74e3 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,43 +1,27 @@ # Tendermint -Welcome to the Tendermint Core documentation! Below you'll find an -overview of the documentation. +Welcome to the Tendermint Core documentation! -Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state -transition machine - written in any programming language - and securely -replicates it on many machines. In other words, a blockchain. +Tendermint Core is a blockchain application platform; it provides the equivalent +of a web-server, database, and supporting libraries for blockchain applications +written in any programming language. Like a web-server serving web applications, +Tendermint serves blockchain applications. -Tendermint requires an application running over the Application Blockchain -Interface (ABCI) - and comes packaged with an example application to do so. +More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT) +State Machine Replication (SMR) for arbitrary deterministic, finite state machines. 
+For more background, see [What is +Tendermint?](introduction/what-is-tendermint.md). -## Getting Started +To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -Here you'll find quick start guides and links to more advanced "get up and running" -documentation. +To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci). -## Core +For more details on using Tendermint, see the respective documentation for +[Tendermint Core](tendermint-core), [benchmarking and monitoring](tools), and [network deployments](networks). -Details about the core functionality and configuration of Tendermint. +## Contribute -## Tools - -Benchmarking and monitoring tools. - -## Networks - -Setting up testnets manually or automated, local or in the cloud. - -## Apps - -Building appplications with the ABCI. - -## Specification - -Dive deep into the spec. There's one for each Tendermint and the ABCI - -## Edit the Documentation - -See [this file](./DOCS_README.md) for details of the build process and +To contribute to the documentation, see [this file](./DOCS_README.md) for details of the build process and considerations when making changes. ## Version diff --git a/docs/introduction/README.md b/docs/introduction/README.md new file mode 100644 index 00000000..ad9a93dd --- /dev/null +++ b/docs/introduction/README.md @@ -0,0 +1,15 @@ +# Introduction + +## Quick Start + +Get Tendermint up-and-running quickly with the [quick-start guide](quick-start.md)! + +## Install + +Detailed [installation instructions](install.md). + +## What is Tendermint? + +Dive into [what Tendermint is and why](what-is-tendermint.md)! + + diff --git a/docs/introduction/introduction.md b/docs/introduction/introduction.md index 389bf965..f80a159c 100644 --- a/docs/introduction/introduction.md +++ b/docs/introduction/introduction.md @@ -1,5 +1,7 @@ # What is Tendermint? +DEPRECATED! 
See [What is Tendermint?](what-is-tendermint.md). + Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, diff --git a/docs/introduction/quick-start.md b/docs/introduction/quick-start.md index c10ba10a..05facadf 100644 --- a/docs/introduction/quick-start.md +++ b/docs/introduction/quick-start.md @@ -1,4 +1,4 @@ -# Tendermint +# Quick Start ## Overview @@ -9,45 +9,21 @@ works and want to get started right away, continue. ### Quick Install -On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/fFfOR), like so: +To quickly get Tendermint installed on a fresh +Ubuntu 16.04 machine, use [this script](https://git.io/fFfOR). + +WARNING: do not run this on your local machine. ``` curl -L https://git.io/fFfOR | bash source ~/.profile ``` -WARNING: do not run the above on your local machine. - The script is also used to facilitate cluster deployment below. ### Manual Install -Requires: - -- `go` minimum version 1.10 -- `$GOPATH` environment variable must be set -- `$GOPATH/bin` must be on your `$PATH` (see [here](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)) - -To install Tendermint, run: - -``` -go get github.com/tendermint/tendermint -cd $GOPATH/src/github.com/tendermint/tendermint -make get_tools && make get_vendor_deps -make install -``` - -Note that `go get` may return an error but it can be ignored. - -Confirm installation: - -``` -$ tendermint version -0.23.0 -``` - -Note: see the [releases page](https://github.com/tendermint/tendermint/releases) and the latest version -should match what you see above. 
+For manual installation, see the [install instructions](install.md) ## Initialization diff --git a/docs/introduction/what-is-tendermint.md b/docs/introduction/what-is-tendermint.md new file mode 100644 index 00000000..389bf965 --- /dev/null +++ b/docs/introduction/what-is-tendermint.md @@ -0,0 +1,332 @@ +# What is Tendermint? + +Tendermint is software for securely and consistently replicating an +application on many machines. By securely, we mean that Tendermint works +even if up to 1/3 of machines fail in arbitrary ways. By consistently, +we mean that every non-faulty machine sees the same transaction log and +computes the same state. Secure and consistent replication is a +fundamental problem in distributed systems; it plays a critical role in +the fault tolerance of a broad range of applications, from currencies, +to elections, to infrastructure orchestration, and beyond. + +The ability to tolerate machines failing in arbitrary ways, including +becoming malicious, is known as Byzantine fault tolerance (BFT). The +theory of BFT is decades old, but software implementations have only +became popular recently, due largely to the success of "blockchain +technology" like Bitcoin and Ethereum. Blockchain technology is just a +reformalization of BFT in a more modern setting, with emphasis on +peer-to-peer networking and cryptographic authentication. The name +derives from the way transactions are batched in blocks, where each +block contains a cryptographic hash of the previous one, forming a +chain. In practice, the blockchain data structure actually optimizes BFT +design. + +Tendermint consists of two chief technical components: a blockchain +consensus engine and a generic application interface. The consensus +engine, called Tendermint Core, ensures that the same transactions are +recorded on every machine in the same order. 
The application interface, +called the Application BlockChain Interface (ABCI), enables the +transactions to be processed in any programming language. Unlike other +blockchain and consensus solutions, which come pre-packaged with built +in state machines (like a fancy key-value store, or a quirky scripting +language), developers can use Tendermint for BFT state machine +replication of applications written in whatever programming language and +development environment is right for them. + +Tendermint is designed to be easy-to-use, simple-to-understand, highly +performant, and useful for a wide variety of distributed applications. + +## Tendermint vs. X + +Tendermint is broadly similar to two classes of software. The first +class consists of distributed key-value stores, like Zookeeper, etcd, +and consul, which use non-BFT consensus. The second class is known as +"blockchain technology", and consists of both cryptocurrencies like +Bitcoin and Ethereum, and alternative distributed ledger designs like +Hyperledger's Burrow. + +### Zookeeper, etcd, consul + +Zookeeper, etcd, and consul are all implementations of a key-value store +atop a classical, non-BFT consensus algorithm. Zookeeper uses a version +of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use +the Raft consensus algorithm, which is much younger and simpler. A +typical cluster contains 3-5 machines, and can tolerate crash failures +in up to 1/2 of the machines, but even a single Byzantine fault can +destroy the system. + +Each offering provides a slightly different implementation of a +featureful key-value store, but all are generally focused around +providing basic services to distributed systems, such as dynamic +configuration, service discovery, locking, leader-election, and so on. 
+ +Tendermint is in essence similar software, but with two key differences: + +- It is Byzantine Fault Tolerant, meaning it can only tolerate up to a + 1/3 of failures, but those failures can include arbitrary behaviour - + including hacking and malicious attacks. - It does not specify a + particular application, like a fancy key-value store. Instead, it + focuses on arbitrary state machine replication, so developers can build + the application logic that's right for them, from key-value store to + cryptocurrency to e-voting platform and beyond. + +The layout of this Tendermint website content is also ripped directly +and without shame from [consul.io](https://www.consul.io/) and the other +[Hashicorp sites](https://www.hashicorp.com/#tools). + +### Bitcoin, Ethereum, etc. + +Tendermint emerged in the tradition of cryptocurrencies like Bitcoin, +Ethereum, etc. with the goal of providing a more efficient and secure +consensus algorithm than Bitcoin's Proof of Work. In the early days, +Tendermint had a simple currency built in, and to participate in +consensus, users had to "bond" units of the currency into a security +deposit which could be revoked if they misbehaved -this is what made +Tendermint a Proof-of-Stake algorithm. + +Since then, Tendermint has evolved to be a general purpose blockchain +consensus engine that can host arbitrary application states. That means +it can be used as a plug-and-play replacement for the consensus engines +of other blockchain software. So one can take the current Ethereum code +base, whether in Rust, or Go, or Haskell, and run it as a ABCI +application using Tendermint consensus. Indeed, [we did that with +Ethereum](https://github.com/cosmos/ethermint). And we plan to do +the same for Bitcoin, ZCash, and various other deterministic +applications as well. + +Another example of a cryptocurrency application built on Tendermint is +[the Cosmos network](http://cosmos.network). 
+ +### Other Blockchain Projects + +[Fabric](https://github.com/hyperledger/fabric) takes a similar approach +to Tendermint, but is more opinionated about how the state is managed, +and requires that all application behaviour runs in potentially many +docker containers, modules it calls "chaincode". It uses an +implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf). +from a team at IBM that is [augmented to handle potentially +non-deterministic +chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf) It is +possible to implement this docker-based behaviour as a ABCI app in +Tendermint, though extending Tendermint to handle non-determinism +remains for future work. + +[Burrow](https://github.com/hyperledger/burrow) is an implementation of +the Ethereum Virtual Machine and Ethereum transaction mechanics, with +additional features for a name-registry, permissions, and native +contracts, and an alternative blockchain API. It uses Tendermint as its +consensus engine, and provides a particular application state. + +## ABCI Overview + +The [Application BlockChain Interface +(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci) +allows for Byzantine Fault Tolerant replication of applications +written in any programming language. + +### Motivation + +Thus far, all blockchains "stacks" (such as +[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic +design. That is, each blockchain stack is a single program that handles +all the concerns of a decentralized ledger; this includes P2P +connectivity, the "mempool" broadcasting of transactions, consensus on +the most recent block, account balances, Turing-complete contracts, +user-level permissions, etc. + +Using a monolithic architecture is typically bad practice in computer +science. It makes it difficult to reuse components of the code, and +attempts to do so result in complex maintenance procedures for forks of +the codebase. 
This is especially true when the codebase is not modular +in design and suffers from "spaghetti code". + +Another problem with monolithic design is that it limits you to the +language of the blockchain stack (or vice versa). In the case of +Ethereum which supports a Turing-complete bytecode virtual-machine, it +limits you to languages that compile down to that bytecode; today, those +are Serpent and Solidity. + +In contrast, our approach is to decouple the consensus engine and P2P +layers from the details of the application state of the particular +blockchain application. We do this by abstracting away the details of +the application to an interface, which is implemented as a socket +protocol. + +Thus we have an interface, the Application BlockChain Interface (ABCI), +and its primary implementation, the Tendermint Socket Protocol (TSP, or +Teaspoon). + +### Intro to ABCI + +[Tendermint Core](https://github.com/tendermint/tendermint) (the +"consensus engine") communicates with the application via a socket +protocol that satisfies the ABCI. + +To draw an analogy, lets talk about a well-known cryptocurrency, +Bitcoin. Bitcoin is a cryptocurrency blockchain where each node +maintains a fully audited Unspent Transaction Output (UTXO) database. If +one wanted to create a Bitcoin-like system on top of ABCI, Tendermint +Core would be responsible for + +- Sharing blocks and transactions between nodes +- Establishing a canonical/immutable order of transactions + (the blockchain) + +The application will be responsible for + +- Maintaining the UTXO database +- Validating cryptographic signatures of transactions +- Preventing transactions from spending non-existent transactions +- Allowing clients to query the UTXO database. + +Tendermint is able to decompose the blockchain design by offering a very +simple API (ie. the ABCI) between the application process and consensus +process. 
+ +The ABCI consists of 3 primary message types that get delivered from the +core to the application. The application replies with corresponding +response messages. + +The messages are specified here: [ABCI Message +Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types). + +The **DeliverTx** message is the work horse of the application. Each +transaction in the blockchain is delivered with this message. The +application needs to validate each transaction received with the +**DeliverTx** message against the current state, application protocol, +and the cryptographic credentials of the transaction. A validated +transaction then needs to update the application state — by binding a +value into a key values store, or by updating the UTXO database, for +instance. + +The **CheckTx** message is similar to **DeliverTx**, but it's only for +validating transactions. Tendermint Core's mempool first checks the +validity of a transaction with **CheckTx**, and only relays valid +transactions to its peers. For instance, an application may check an +incrementing sequence number in the transaction and return an error upon +**CheckTx** if the sequence number is old. Alternatively, they might use +a capabilities based system that requires capabilities to be renewed +with every transaction. + +The **Commit** message is used to compute a cryptographic commitment to +the current application state, to be placed into the next block header. +This has some handy properties. Inconsistencies in updating that state +will now appear as blockchain forks which catches a whole class of +programming errors. This also simplifies the development of secure +lightweight clients, as Merkle-hash proofs can be verified by checking +against the block hash, and that the block hash is signed by a quorum. + +There can be multiple ABCI socket connections to an application. 
+ +Tendermint Core creates three ABCI connections to the application; one +for the validation of transactions when broadcasting in the mempool, one +for the consensus engine to run block proposals, and one more for +querying the application state. + +It's probably evident that application designers need to very carefully +design their message handlers to create a blockchain that does anything +useful, but this architecture provides a place to start. The diagram +below illustrates the flow of messages via ABCI. + +![](../imgs/abci.png) + +## A Note on Determinism + +The logic for blockchain transaction processing must be deterministic. +If the application logic weren't deterministic, consensus would not be +reached among the Tendermint Core replica nodes. + +Solidity on Ethereum is a great language of choice for blockchain +applications because, among other reasons, it is a completely +deterministic programming language. However, it's also possible to +create deterministic applications using existing popular languages like +Java, C++, Python, or Go. Game programmers and blockchain developers are +already familiar with creating deterministic programs by avoiding +sources of non-determinism such as: + +- random number generators (without deterministic seeding) +- race conditions on threads (or avoiding threads altogether) +- the system clock +- uninitialized memory (in unsafe programming languages like C + or C++) +- [floating point + arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/) +- language features that are random (e.g. map iteration in Go) + +While programmers can avoid non-determinism by being careful, it is also +possible to create a special linter or static analyzer for each language +to check for determinism. In the future we may work with partners to +create such tools. + +## Consensus Overview + +Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus +protocol. 
The protocol follows a simple state machine that looks like +this: + +![](../imgs/consensus_logic.png) + +Participants in the protocol are called **validators**; they take turns +proposing blocks of transactions and voting on them. Blocks are +committed in a chain, with one block at each **height**. A block may +fail to be committed, in which case the protocol moves to the next +**round**, and a new validator gets to propose a block for that height. +Two stages of voting are required to successfully commit a block; we +call them **pre-vote** and **pre-commit**. A block is committed when +more than 2/3 of validators pre-commit for the same block in the same +round. + +There is a picture of a couple doing the polka because validators are +doing something like a polka dance. When more than two-thirds of the +validators pre-vote for the same block, we call that a **polka**. Every +pre-commit must be justified by a polka in the same round. + +Validators may fail to commit a block for a number of reasons; the +current proposer may be offline, or the network may be slow. Tendermint +allows them to establish that a validator should be skipped. Validators +wait a small amount of time to receive a complete proposal block from +the proposer before voting to move to the next round. This reliance on a +timeout is what makes Tendermint a weakly synchronous protocol, rather +than an asynchronous one. However, the rest of the protocol is +asynchronous, and validators only make progress after hearing from more +than two-thirds of the validator set. A simplifying element of +Tendermint is that it uses the same mechanism to commit a block as it +does to skip to the next round. + +Assuming less than one-third of the validators are Byzantine, Tendermint +guarantees that safety will never be violated - that is, validators will +never commit conflicting blocks at the same height. 
To do this it +introduces a few **locking** rules which modulate which paths can be +followed in the flow diagram. Once a validator precommits a block, it is +locked on that block. Then, + +1. it must prevote for the block it is locked on +2. it can only unlock, and precommit for a new block, if there is a + polka for that block in a later round + +## Stake + +In many systems, not all validators will have the same "weight" in the +consensus protocol. Thus, we are not so much interested in one-third or +two-thirds of the validators, but in those proportions of the total +voting power, which may not be uniformly distributed across individual +validators. + +Since Tendermint can replicate arbitrary applications, it is possible to +define a currency, and denominate the voting power in that currency. +When voting power is denominated in a native currency, the system is +often referred to as Proof-of-Stake. Validators can be forced, by logic +in the application, to "bond" their currency holdings in a security +deposit that can be destroyed if they're found to misbehave in the +consensus protocol. This adds an economic element to the security of the +protocol, allowing one to quantify the cost of violating the assumption +that less than one-third of voting power is Byzantine. + +The [Cosmos Network](https://cosmos.network) is designed to use this +Proof-of-Stake mechanism across an array of cryptocurrencies implemented +as ABCI applications. + +The following diagram is Tendermint in a (technical) nutshell. [See here +for high resolution +version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf). + +![](../imgs/tm-transaction-flow.png) diff --git a/docs/networks/README.md b/docs/networks/README.md new file mode 100644 index 00000000..b1ba2712 --- /dev/null +++ b/docs/networks/README.md @@ -0,0 +1,9 @@ +# Networks + +Use [Docker Compose](docker-compose.md) to spin up Tendermint testnets on your +local machine. 
+ +Use [Terraform and Ansible](terraform-and-ansible.md) to deploy Tendermint +testnets to the cloud. + +See the `tendermint testnet --help` command for more help initializing testnets. diff --git a/docs/networks/deploy-testnets.md b/docs/networks/deploy-testnets.md index 35732f9b..4df6916b 100644 --- a/docs/networks/deploy-testnets.md +++ b/docs/networks/deploy-testnets.md @@ -1,8 +1,8 @@ # Deploy a Testnet -Now that we've seen how ABCI works, and even played with a few -applications on a single validator node, it's time to deploy a test -network to four validator nodes. +DEPRECATED DOCS! + +See [Networks](../networks). ## Manual Deployments @@ -21,17 +21,16 @@ Here are the steps to setting up a testnet manually: 3. Generate a private key and a node key for each validator using `tendermint init` 4. Compile a list of public keys for each validator into a - `genesis.json` file and replace the existing file with it. -5. Run - `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` on each node, where `< peer addresses >` is a comma separated - list of the ID@IP:PORT combination for each node. The default port for - Tendermint is `26656`. The ID of a node can be obtained by running - `tendermint show_node_id` command. Thus, if the IP addresses of your nodes - were `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command - would look like: + new `genesis.json` file and replace the existing file with it. +5. Get the node IDs of any peers you want other peers to connect to by + running `tendermint show_node_id` on the relevant machine +6. Set the `p2p.persistent_peers` in the config for all nodes to the comma + separated list of `ID@IP:PORT` for all nodes. Default port is 26656. 
+ +Then start the node ``` -tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656 +tendermint node --proxy_app=kvstore ``` After a few seconds, all the nodes should connect to each other and diff --git a/docs/networks/docker-compose.md b/docs/networks/docker-compose.md new file mode 100644 index 00000000..a1924eb9 --- /dev/null +++ b/docs/networks/docker-compose.md @@ -0,0 +1,85 @@ +# Docker Compose + +With Docker Compose, we can spin up local testnets in a single command: + +``` +make localnet-start +``` + +## Requirements + +- [Install tendermint](/docs/install.md) +- [Install docker](https://docs.docker.com/engine/installation/) +- [Install docker-compose](https://docs.docker.com/compose/install/) + +## Build + +Build the `tendermint` binary and the `tendermint/localnode` docker image. + +Note the binary will be mounted into the container so it can be updated without +rebuilding the image. + +``` +cd $GOPATH/src/github.com/tendermint/tendermint + +# Build the linux binary in ./build +make build-linux + +# Build tendermint/localnode image +make build-docker-localnode +``` + + +## Run a testnet + +To start a 4 node testnet run: + +``` +make localnet-start +``` + +The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the host. +This file creates a 4-node network using the localnode image. +The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively. 
+ +To update the binary, just rebuild it and restart the nodes: + +``` +make build-linux +make localnet-stop +make localnet-start +``` + +## Configuration + +The `make localnet-start` creates files for a 4-node testnet in `./build` by calling the `tendermint testnet` command. + +The `./build` directory is mounted to the `/tendermint` mount point to attach the binary and config files to the container. + +For instance, to create a single node testnet: + +``` +cd $GOPATH/src/github.com/tendermint/tendermint + +# Clear the build folder +rm -rf ./build + +# Build binary +make build-linux + +# Create configuration +docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1 + +#Run the node +docker run -v `pwd`/build:/tendermint tendermint/localnode + +``` + +## Logging + +Log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen. + +## Special binaries + +If you have multiple binaries with different names, you can specify which one to run with the BINARY environment variable. The path of the binary is relative to the attached volume. + diff --git a/docs/networks/terraform-and-ansible.md b/docs/networks/terraform-and-ansible.md index 5a4b9c53..c08ade17 100644 --- a/docs/networks/terraform-and-ansible.md +++ b/docs/networks/terraform-and-ansible.md @@ -29,7 +29,7 @@ export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub" These will be used by both `terraform` and `ansible`. -### Terraform +## Terraform This step will create four Digital Ocean droplets. First, go to the correct directory: @@ -49,7 +49,7 @@ and you will get a list of IP addresses that belong to your droplets. With the droplets created and running, let's setup Ansible. 
-### Ansible +## Ansible The playbooks in [the ansible directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible) @@ -144,7 +144,7 @@ Peek at the logs with the status role: ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml ``` -### Logging +## Logging The crudest way is the status role described above. You can also ship logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana) @@ -160,7 +160,7 @@ go get github.com/mheese/journalbeat ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345 ``` -### Cleanup +## Cleanup To remove your droplets, run: diff --git a/docs/spec/abci/README.md b/docs/spec/abci/README.md index c0956db6..02e369bf 100644 --- a/docs/spec/abci/README.md +++ b/docs/spec/abci/README.md @@ -1,7 +1,7 @@ # ABCI ABCI is the interface between Tendermint (a state-machine replication engine) -and an application (the actual state machine). It consists of a set of +and your application (the actual state machine). It consists of a set of *methods*, where each method has a corresponding `Request` and `Response` message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*` messages and receiving the `Response*` messages in return. 
diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index a1217098..0e9b3d78 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -7,9 +7,9 @@ file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.pro ABCI methods are split across 3 separate ABCI *connections*: -- `Consensus Connection: InitChain, BeginBlock, DeliverTx, EndBlock, Commit` -- `Mempool Connection: CheckTx` -- `Info Connection: Info, SetOption, Query` +- `Consensus Connection`: `InitChain, BeginBlock, DeliverTx, EndBlock, Commit` +- `Mempool Connection`: `CheckTx` +- `Info Connection`: `Info, SetOption, Query` The `Consensus Connection` is driven by a consensus protocol and is responsible for block execution. diff --git a/docs/tendermint-core/README.md b/docs/tendermint-core/README.md new file mode 100644 index 00000000..7f5dc677 --- /dev/null +++ b/docs/tendermint-core/README.md @@ -0,0 +1,4 @@ +# Tendermint Core + +See the side-bar for details on the various features of Tendermint Core. + diff --git a/docs/tools/README.md b/docs/tools/README.md new file mode 100644 index 00000000..b08416bb --- /dev/null +++ b/docs/tools/README.md @@ -0,0 +1,4 @@ +# Tools + +Tendermint comes with some tools for [benchmarking](benchmarking.md) +and [monitoring](monitoring.md). diff --git a/docs/tools/benchmarking.md b/docs/tools/benchmarking.md index 691d3b6e..e17c2856 100644 --- a/docs/tools/benchmarking.md +++ b/docs/tools/benchmarking.md @@ -20,7 +20,7 @@ Blocks/sec 0.818 0.386 1 9 ## Quick Start -[Install Tendermint](../introduction/install) +[Install Tendermint](../introduction/install.md) This currently is setup to work on tendermint's develop branch. Please ensure you are on that. (If not, update `tendermint` and `tmlibs` in gopkg.toml to use the master branch.) 
diff --git a/docs/tools/monitoring.md b/docs/tools/monitoring.md index bd0105c8..c0fa94c0 100644 --- a/docs/tools/monitoring.md +++ b/docs/tools/monitoring.md @@ -33,21 +33,21 @@ docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657 ### Using Binaries -[Install Tendermint](https://github.com/tendermint/tendermint#install) +[Install Tendermint](../introduction/install.md). -then run: +Start a Tendermint node: ``` tendermint init tendermint node --proxy_app=kvstore ``` +In another window, run the monitor: + ``` tm-monitor localhost:26657 ``` -with the last command being in a seperate window. - ## Usage ``` diff --git a/networks/local/README.md b/networks/local/README.md index 09a0b12c..8d429969 100644 --- a/networks/local/README.md +++ b/networks/local/README.md @@ -1,5 +1,9 @@ # Local Cluster with Docker Compose +DEPRECATED! + +See the [docs](https://tendermint.com/docs/networks/docker-compose.html). + ## Requirements - [Install tendermint](/docs/install.md) diff --git a/networks/remote/README.md b/networks/remote/README.md index 2094fcc9..4c035be8 100644 --- a/networks/remote/README.md +++ b/networks/remote/README.md @@ -1,3 +1,3 @@ # Remote Cluster with Terraform and Ansible -See the [docs](/docs/terraform-and-ansible.md) +See the [docs](https://tendermint.com/docs/networks/terraform-and-ansible.html). From 69c7aa77bcc84cb92aadaa91023a32ec3951184b Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Sun, 30 Sep 2018 10:26:14 -0700 Subject: [PATCH 09/18] clist: speedup Next by removing defers (#2511) This change doubles the speed of the mempool's reaping. 
Before: BenchmarkReap-8 5000 365390 ns/op 122887 B/op After: BenchmarkReap-8 10000 158274 ns/op 122882 B/op --- libs/clist/clist.go | 6 ++--- mempool/bench_test.go | 55 +++++++++++++++++++++++++++++++++++++++++ mempool/mempool_test.go | 29 ---------------------- 3 files changed, 58 insertions(+), 32 deletions(-) create mode 100644 mempool/bench_test.go diff --git a/libs/clist/clist.go b/libs/clist/clist.go index c69d3d5f..393bdf73 100644 --- a/libs/clist/clist.go +++ b/libs/clist/clist.go @@ -113,9 +113,9 @@ func (e *CElement) NextWaitChan() <-chan struct{} { // Nonblocking, may return nil if at the end. func (e *CElement) Next() *CElement { e.mtx.RLock() - defer e.mtx.RUnlock() - - return e.next + val := e.next + e.mtx.RUnlock() + return val } // Nonblocking, may return nil if at the end. diff --git a/mempool/bench_test.go b/mempool/bench_test.go new file mode 100644 index 00000000..68b033ca --- /dev/null +++ b/mempool/bench_test.go @@ -0,0 +1,55 @@ +package mempool + +import ( + "encoding/binary" + "testing" + + "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/proxy" +) + +func BenchmarkReap(b *testing.B) { + app := kvstore.NewKVStoreApplication() + cc := proxy.NewLocalClientCreator(app) + mempool := newMempoolWithApp(cc) + + size := 10000 + for i := 0; i < size; i++ { + tx := make([]byte, 8) + binary.BigEndian.PutUint64(tx, uint64(i)) + mempool.CheckTx(tx, nil) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + mempool.ReapMaxBytesMaxGas(100000000, 10000000) + } +} + +func BenchmarkCacheInsertTime(b *testing.B) { + cache := newMapTxCache(b.N) + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Push(txs[i]) + } +} + +// This benchmark is probably skewed, since we actually will be removing +// txs in parallel, which may cause some overhead due to mutex locking. 
+func BenchmarkCacheRemoveTime(b *testing.B) { + cache := newMapTxCache(b.N) + txs := make([][]byte, b.N) + for i := 0; i < b.N; i++ { + txs[i] = make([]byte, 8) + binary.BigEndian.PutUint64(txs[i], uint64(i)) + cache.Push(txs[i]) + } + b.ResetTimer() + for i := 0; i < b.N; i++ { + cache.Remove(txs[i]) + } +} diff --git a/mempool/mempool_test.go b/mempool/mempool_test.go index 4f66da36..5aabd00e 100644 --- a/mempool/mempool_test.go +++ b/mempool/mempool_test.go @@ -399,35 +399,6 @@ func TestMempoolCloseWAL(t *testing.T) { require.Equal(t, 1, len(m3), "expecting the wal match in") } -func BenchmarkCacheInsertTime(b *testing.B) { - cache := newMapTxCache(b.N) - txs := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - txs[i] = make([]byte, 8) - binary.BigEndian.PutUint64(txs[i], uint64(i)) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Push(txs[i]) - } -} - -// This benchmark is probably skewed, since we actually will be removing -// txs in parallel, which may cause some overhead due to mutex locking. 
-func BenchmarkCacheRemoveTime(b *testing.B) { - cache := newMapTxCache(b.N) - txs := make([][]byte, b.N) - for i := 0; i < b.N; i++ { - txs[i] = make([]byte, 8) - binary.BigEndian.PutUint64(txs[i], uint64(i)) - cache.Push(txs[i]) - } - b.ResetTimer() - for i := 0; i < b.N; i++ { - cache.Remove(txs[i]) - } -} - func checksumIt(data []byte) string { h := md5.New() h.Write(data) From 52e21cebcfe65522f629b457e39b9dc8b2c30297 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 30 Sep 2018 13:28:34 -0400 Subject: [PATCH 10/18] remove some xxx comments and the config.mempool.recheck_empty (#2505) * remove some XXX * config: remove Mempool.RecheckEmpty * docs: remove recheck_empty --- CHANGELOG_PENDING.md | 1 + config/config.go | 14 ++++++-------- config/toml.go | 1 - consensus/types/round_state.go | 4 ++-- docs/spec/reactors/mempool/config.md | 11 +++-------- docs/tendermint-core/configuration.md | 7 +++---- mempool/mempool.go | 4 +--- node/node.go | 1 - privval/priv_validator.go | 6 ++++-- types/params.go | 2 -- types/vote.go | 4 ++-- 11 files changed, 22 insertions(+), 33 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bf381dce..bca7ba47 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,6 +5,7 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config + * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyways) * [config] `mempool.wal` is disabled by default * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` diff --git a/config/config.go b/config/config.go index 619c0410..8ff80005 100644 --- a/config/config.go +++ b/config/config.go @@ -488,20 +488,18 @@ func DefaultFuzzConnConfig() *FuzzConnConfig { // MempoolConfig defines the configuration options for the Tendermint mempool type MempoolConfig struct { - RootDir string `mapstructure:"home"` - Recheck bool `mapstructure:"recheck"` - 
RecheckEmpty bool `mapstructure:"recheck_empty"` - Broadcast bool `mapstructure:"broadcast"` - WalPath string `mapstructure:"wal_dir"` - Size int `mapstructure:"size"` - CacheSize int `mapstructure:"cache_size"` + RootDir string `mapstructure:"home"` + Recheck bool `mapstructure:"recheck"` + Broadcast bool `mapstructure:"broadcast"` + WalPath string `mapstructure:"wal_dir"` + Size int `mapstructure:"size"` + CacheSize int `mapstructure:"cache_size"` } // DefaultMempoolConfig returns a default configuration for the Tendermint mempool func DefaultMempoolConfig() *MempoolConfig { return &MempoolConfig{ Recheck: true, - RecheckEmpty: true, Broadcast: true, WalPath: "", // Each signature verification takes .5ms, size reduced until we implement diff --git a/config/toml.go b/config/toml.go index 846b33d1..ddfe5f05 100644 --- a/config/toml.go +++ b/config/toml.go @@ -213,7 +213,6 @@ dial_timeout = "{{ .P2P.DialTimeout }}" [mempool] recheck = {{ .Mempool.Recheck }} -recheck_empty = {{ .Mempool.RecheckEmpty }} broadcast = {{ .Mempool.Broadcast }} wal_dir = "{{ js .Mempool.WalPath }}" diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index c22880c2..d3f6468b 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -107,8 +107,8 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { // RoundStateEvent returns the H/R/S of the RoundState as an event. func (rs *RoundState) RoundStateEvent() types.EventDataRoundState { - // XXX: copy the RoundState - // if we want to avoid this, we may need synchronous events after all + // copy the RoundState. 
+ // TODO: if we want to avoid this, we may need synchronous events after all rsCopy := *rs edrs := types.EventDataRoundState{ Height: rs.Height, diff --git a/docs/spec/reactors/mempool/config.md b/docs/spec/reactors/mempool/config.md index 3e3c0d37..4fb756fa 100644 --- a/docs/spec/reactors/mempool/config.md +++ b/docs/spec/reactors/mempool/config.md @@ -6,23 +6,21 @@ as command-line flags, but they can also be passed in as environmental variables or in the config.toml file. The following are all equivalent: -Flag: `--mempool.recheck_empty=false` +Flag: `--mempool.recheck=false` -Environment: `TM_MEMPOOL_RECHECK_EMPTY=false` +Environment: `TM_MEMPOOL_RECHECK=false` Config: ``` [mempool] -recheck_empty = false +recheck = false ``` ## Recheck `--mempool.recheck=false` (default: true) -`--mempool.recheck_empty=false` (default: true) - Recheck determines if the mempool rechecks all pending transactions after a block was committed. Once a block is committed, the mempool removes all valid transactions @@ -31,9 +29,6 @@ that were successfully included in the block. If `recheck` is true, then it will rerun CheckTx on all remaining transactions with the new block state. -If the block contained no transactions, it will skip the -recheck unless `recheck_empty` is true. - ## Broadcast `--mempool.broadcast=false` (default: true) diff --git a/docs/tendermint-core/configuration.md b/docs/tendermint-core/configuration.md index c5b07497..8b3c3c22 100644 --- a/docs/tendermint-core/configuration.md +++ b/docs/tendermint-core/configuration.md @@ -156,7 +156,6 @@ dial_timeout = "3s" [mempool] recheck = true -recheck_empty = true broadcast = true wal_dir = "data/mempool.wal" @@ -203,15 +202,15 @@ indexer = "kv" # Comma-separated list of tags to index (by default the only tag is "tx.hash") # # You can also index transactions by height by adding "tx.height" tag here. -# +# # It's recommended to index only a subset of tags due to possible memory # bloat. 
This is, of course, depends on the indexer's DB and the volume of # transactions. index_tags = "" # When set to true, tells indexer to index all tags (predefined tags: -# "tx.hash", "tx.height" and all tags from DeliverTx responses). -# +# "tx.hash", "tx.height" and all tags from DeliverTx responses). +# # Note this may be not desirable (see the comment above). IndexTags has a # precedence over IndexAllTags (i.e. when given both, IndexTags will be # indexed). diff --git a/mempool/mempool.go b/mempool/mempool.go index 2096912f..db5f6160 100644 --- a/mempool/mempool.go +++ b/mempool/mempool.go @@ -513,9 +513,7 @@ func (mem *Mempool) Update( // Remove transactions that are already in txs. goodTxs := mem.filterTxs(txsMap) // Recheck mempool txs if any txs were committed in the block - // NOTE/XXX: in some apps a tx could be invalidated due to EndBlock, - // so we really still do need to recheck, but this is for debugging - if mem.config.Recheck && (mem.config.RecheckEmpty || len(goodTxs) > 0) { + if mem.config.Recheck && len(goodTxs) > 0 { mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height) mem.recheckTxs(goodTxs) // At this point, mem.txs are being rechecked. diff --git a/node/node.go b/node/node.go index bba4dbda..9f9e3636 100644 --- a/node/node.go +++ b/node/node.go @@ -359,7 +359,6 @@ func NewNode(config *cfg.Config, // Filter peers by addr or pubkey with an ABCI query. // If the query return code is OK, add peer. - // XXX: Query format subject to change if config.FilterPeers { connFilters = append( connFilters, diff --git a/privval/priv_validator.go b/privval/priv_validator.go index 8091744c..e606b826 100644 --- a/privval/priv_validator.go +++ b/privval/priv_validator.go @@ -38,14 +38,16 @@ func voteToStep(vote *types.Vote) int8 { // FilePV implements PrivValidator using data persisted to disk // to prevent double signing. // NOTE: the directory containing the pv.filePath must already exist. 
+// It includes the LastSignature and LastSignBytes so we don't lose the signature +// if the process crashes after signing but before the resulting consensus message is processed. type FilePV struct { Address types.Address `json:"address"` PubKey crypto.PubKey `json:"pub_key"` LastHeight int64 `json:"last_height"` LastRound int `json:"last_round"` LastStep int8 `json:"last_step"` - LastSignature []byte `json:"last_signature,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? - LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? + LastSignature []byte `json:"last_signature,omitempty"` + LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` PrivKey crypto.PrivKey `json:"priv_key"` // For persistence. diff --git a/types/params.go b/types/params.go index a7301d06..014694cc 100644 --- a/types/params.go +++ b/types/params.go @@ -99,8 +99,6 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar } // we must defensively consider any structs may be nil - // XXX: it's cast city over here. It's ok because we only do int32->int - // but still, watch it champ. if params2.BlockSize != nil { res.BlockSize.MaxBytes = params2.BlockSize.MaxBytes res.BlockSize.MaxGas = params2.BlockSize.MaxGas diff --git a/types/vote.go b/types/vote.go index ba2f1dfe..5a31f0e2 100644 --- a/types/vote.go +++ b/types/vote.go @@ -61,8 +61,8 @@ func IsVoteTypeValid(type_ byte) bool { } } -// Address is hex bytes. TODO: crypto.Address -type Address = cmn.HexBytes +// Address is hex bytes. +type Address = crypto.Address // Represents a prevote, precommit, or commit vote from validators for consensus. 
type Vote struct { From ccd04587ff3f8806af7f3af10a187520e4defd29 Mon Sep 17 00:00:00 2001 From: Ethan Buchman Date: Sun, 30 Sep 2018 15:08:01 -0400 Subject: [PATCH 11/18] docs/spec/abci: consensus params and general merkle (#2524) * docs: links to dirs need a slash * docs/spec/abci: consensus params and general merkle --- docs/DOCS_README.md | 2 ++ docs/README.md | 4 ++-- docs/spec/abci/abci.md | 49 +++++++++++++++++++++++++++++++++++++++--- docs/spec/abci/apps.md | 32 +++++++++++++++++++++++++-- 4 files changed, 80 insertions(+), 7 deletions(-) diff --git a/docs/DOCS_README.md b/docs/DOCS_README.md index e2f22ff6..a7671c36 100644 --- a/docs/DOCS_README.md +++ b/docs/DOCS_README.md @@ -35,6 +35,8 @@ of the sidebar. **NOTE:** Strongly consider the existing links - both within this directory and to the website docs - when moving or deleting files. +Links to directories *MUST* end in a `/`. + Relative links should be used nearly everywhere, having discovered and weighed the following: ### Relative diff --git a/docs/README.md b/docs/README.md index 15ce74e3..c3293547 100644 --- a/docs/README.md +++ b/docs/README.md @@ -14,10 +14,10 @@ Tendermint?](introduction/what-is-tendermint.md). To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md). -To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci). +To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci/). For more details on using Tendermint, see the respective documentation for -[Tendermint Core](tendermint-core), [benchmarking and monitoring](tools), and [network deployments](networks). +[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/). 
## Contribute diff --git a/docs/spec/abci/abci.md b/docs/spec/abci/abci.md index 0e9b3d78..1306128f 100644 --- a/docs/spec/abci/abci.md +++ b/docs/spec/abci/abci.md @@ -190,9 +190,9 @@ Commit are included in the header of the next block. of Path. - `Path (string)`: Path of request, like an HTTP GET path. Can be used with or in liue of Data. - - Apps MUST interpret '/store' as a query by key on the + - Apps MUST interpret '/store' as a query by key on the underlying store. The key SHOULD be specified in the Data field. - - Apps SHOULD allow queries over specific types like + - Apps SHOULD allow queries over specific types like '/accounts/...' or '/votes/...' - `Height (int64)`: The block height for which you want the query (default=0 returns data for the latest committed block). Note @@ -209,7 +209,7 @@ Commit are included in the header of the next block. - `Index (int64)`: The index of the key in the tree. - `Key ([]byte)`: The key of the matching data. - `Value ([]byte)`: The value of the matching data. - - `Proof ([]byte)`: Serialized proof for the data, if requested, to be + - `Proof (Proof)`: Serialized proof for the value data, if requested, to be verified against the `AppHash` for the given Height. - `Height (int64)`: The block height from which data was derived. Note that this is the height of the block containing the @@ -218,6 +218,8 @@ Commit are included in the header of the next block. - **Usage**: - Query for data from the application at current or past height. - Optionally return Merkle proof. + - Merkle proof includes self-describing `type` field to support many types + of Merkle trees and encoding formats. ### BeginBlock @@ -413,3 +415,44 @@ Commit are included in the header of the next block. - `Round (int32)`: Commit round. - `Votes ([]VoteInfo)`: List of validators addresses in the last validator set with their voting power and whether or not they signed a vote. 
+ +### ConsensusParams + +- **Fields**: + - `BlockSize (BlockSize)`: Parameters limiting the size of a block. + - `EvidenceParams (EvidenceParams)`: Parameters limiting the validity of + evidence of byzantine behaviour. + +### BlockSize + +- **Fields**: + - `MaxBytes (int64)`: Max size of a block, in bytes. + - `MaxGas (int64)`: Max sum of `GasWanted` in a proposed block. + - NOTE: blocks that violate this may be committed if there are Byzantine proposers. + It's the application's responsibility to handle this when processing a + block! + +### EvidenceParams + +- **Fields**: + - `MaxAge (int64)`: Max age of evidence, in blocks. Evidence older than this + is considered stale and ignored. + - This should correspond with an app's "unbonding period" or other + similar mechanism for handling Nothing-At-Stake attacks. + - NOTE: this should change to time (instead of blocks)! + +### Proof + +- **Fields**: + - `Ops ([]ProofOp)`: List of chained Merkle proofs, of possibly different types + - The Merkle root of one op is the value being proven in the next op. + - The Merkle root of the final op should equal the ultimate root hash being + verified against. + +### ProofOp + +- **Fields**: + - `Type (string)`: Type of Merkle proof and how it's encoded. + - `Key ([]byte)`: Key in the Merkle tree that this proof is for. + - `Data ([]byte)`: Encoded Merkle proof for the key. + diff --git a/docs/spec/abci/apps.md b/docs/spec/abci/apps.md index a8f37771..acf2c4e6 100644 --- a/docs/spec/abci/apps.md +++ b/docs/spec/abci/apps.md @@ -247,8 +247,12 @@ Must have `0 < MaxAge`. ### Updates -The application may set the consensus params during InitChain, and update them during -EndBlock. +The application may set the ConsensusParams during InitChain, and update them during +EndBlock. If the ConsensusParams is empty, it will be ignored. Each field +that is not empty will be applied in full. 
For instance, if updating the +BlockSize.MaxBytes, applications must also set the other BlockSize fields (like +BlockSize.MaxGas), even if they are unchanged, as they will otherwise cause the +value to be updated to 0. #### InitChain @@ -312,6 +316,30 @@ their state as follows: For instance, this allows an application's lite-client to verify proofs of absence in the application state, something which is much less efficient to do using the block hash. +Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees, +where the leaves of one tree are the root hashes of others. To support this, and +the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure: + +``` +message Proof { + repeated ProofOp ops +} + +message ProofOp { + string type = 1; + bytes key = 2; + bytes data = 3; +} +``` + +Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`. +This allows ABCI to support many different kinds of Merkle trees, encoding +formats, and proofs (eg. of presence and absence) just by varying the `type`. +The `data` contains the actual encoded proof, encoded according to the `type`. +When verifying the full proof, the root hash for one ProofOp is the value being +verified for the next ProofOp in the list. The root hash of the final ProofOp in +the list should match the `AppHash` being verified against. 
+ ### Peer Filtering When Tendermint connects to a peer, it sends two queries to the ABCI application From 32e274cff09fcaf6ad3ac4db1693aed28de52daa Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Mon, 1 Oct 2018 16:38:35 +0400 Subject: [PATCH 12/18] config: Refactor ValidateBasic (#2503) * timeouts as time.Duration are also breaking for old configs * split BaseConfig#ValidateBasic into smaller methods --- CHANGELOG_PENDING.md | 5 +- config/config.go | 184 ++++++++++++++++++++++++++----------------- 2 files changed, 113 insertions(+), 76 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index bca7ba47..0c867730 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -5,12 +5,13 @@ Special thanks to external contributors on this release: BREAKING CHANGES: * CLI/RPC/Config + * [config] \#2232 timeouts as time.Duration, not ints * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyways) * [config] `mempool.wal` is disabled by default * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` - * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer). - + * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain a error message (returned by the remote signer) + * Apps * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes diff --git a/config/config.go b/config/config.go index 8ff80005..8f3d6d18 100644 --- a/config/config.go +++ b/config/config.go @@ -1,11 +1,12 @@ package config import ( - "errors" "fmt" "os" "path/filepath" "time" + + "github.com/pkg/errors" ) const ( @@ -93,83 +94,22 @@ func (cfg *Config) SetRoot(root string) *Config { // ValidateBasic performs basic validation (checking param bounds, etc.) and // returns an error if any check fails. 
func (cfg *Config) ValidateBasic() error { - // RPCConfig - if cfg.RPC.GRPCMaxOpenConnections < 0 { - return errors.New("[rpc] grpc_max_open_connections can't be negative") + if err := cfg.RPC.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [rpc] section") } - if cfg.RPC.MaxOpenConnections < 0 { - return errors.New("[rpc] max_open_connections can't be negative") + if err := cfg.P2P.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [p2p] section") } - - // P2PConfig - if cfg.P2P.MaxNumInboundPeers < 0 { - return errors.New("[p2p] max_num_inbound_peers can't be negative") + if err := cfg.Mempool.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [mempool] section") } - if cfg.P2P.MaxNumOutboundPeers < 0 { - return errors.New("[p2p] max_num_outbound_peers can't be negative") + if err := cfg.Consensus.ValidateBasic(); err != nil { + return errors.Wrap(err, "Error in [consensus] section") } - if cfg.P2P.FlushThrottleTimeout < 0 { - return errors.New("[p2p] flush_throttle_timeout can't be negative") - } - if cfg.P2P.MaxPacketMsgPayloadSize < 0 { - return errors.New("[p2p] max_packet_msg_payload_size can't be negative") - } - if cfg.P2P.SendRate < 0 { - return errors.New("[p2p] send_rate can't be negative") - } - if cfg.P2P.RecvRate < 0 { - return errors.New("[p2p] recv_rate can't be negative") - } - - // MempoolConfig - if cfg.Mempool.Size < 0 { - return errors.New("[mempool] size can't be negative") - } - if cfg.Mempool.CacheSize < 0 { - return errors.New("[mempool] cache_size can't be negative") - } - - // ConsensusConfig - if cfg.Consensus.TimeoutPropose < 0 { - return errors.New("[consensus] timeout_propose can't be negative") - } - if cfg.Consensus.TimeoutProposeDelta < 0 { - return errors.New("[consensus] timeout_propose_delta can't be negative") - } - if cfg.Consensus.TimeoutPrevote < 0 { - return errors.New("[consensus] timeout_prevote can't be negative") - } - if cfg.Consensus.TimeoutPrevoteDelta < 0 { - return 
errors.New("[consensus] timeout_prevote_delta can't be negative") - } - if cfg.Consensus.TimeoutPrecommit < 0 { - return errors.New("[consensus] timeout_precommit can't be negative") - } - if cfg.Consensus.TimeoutPrecommitDelta < 0 { - return errors.New("[consensus] timeout_precommit_delta can't be negative") - } - if cfg.Consensus.TimeoutCommit < 0 { - return errors.New("[consensus] timeout_commit can't be negative") - } - if cfg.Consensus.CreateEmptyBlocksInterval < 0 { - return errors.New("[consensus] create_empty_blocks_interval can't be negative") - } - if cfg.Consensus.PeerGossipSleepDuration < 0 { - return errors.New("[consensus] peer_gossip_sleep_duration can't be negative") - } - if cfg.Consensus.PeerQueryMaj23SleepDuration < 0 { - return errors.New("[consensus] peer_query_maj23_sleep_duration can't be negative") - } - if cfg.Consensus.BlockTimeIota < 0 { - return errors.New("[consensus] blocktime_iota can't be negative") - } - - // InstrumentationConfig - if cfg.Instrumentation.MaxOpenConnections < 0 { - return errors.New("[instrumentation] max_open_connections can't be negative") - } - - return nil + return errors.Wrap( + cfg.Instrumentation.ValidateBasic(), + "Error in [instrumentation] section", + ) } //----------------------------------------------------------------------------- @@ -348,6 +288,18 @@ func TestRPCConfig() *RPCConfig { return cfg } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *RPCConfig) ValidateBasic() error { + if cfg.GRPCMaxOpenConnections < 0 { + return errors.New("grpc_max_open_connections can't be negative") + } + if cfg.MaxOpenConnections < 0 { + return errors.New("max_open_connections can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // P2PConfig @@ -463,6 +415,30 @@ func (cfg *P2PConfig) AddrBookFile() string { return rootify(cfg.AddrBook, cfg.RootDir) } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *P2PConfig) ValidateBasic() error { + if cfg.MaxNumInboundPeers < 0 { + return errors.New("max_num_inbound_peers can't be negative") + } + if cfg.MaxNumOutboundPeers < 0 { + return errors.New("max_num_outbound_peers can't be negative") + } + if cfg.FlushThrottleTimeout < 0 { + return errors.New("flush_throttle_timeout can't be negative") + } + if cfg.MaxPacketMsgPayloadSize < 0 { + return errors.New("max_packet_msg_payload_size can't be negative") + } + if cfg.SendRate < 0 { + return errors.New("send_rate can't be negative") + } + if cfg.RecvRate < 0 { + return errors.New("recv_rate can't be negative") + } + return nil +} + // FuzzConnConfig is a FuzzedConnection configuration. type FuzzConnConfig struct { Mode int @@ -521,6 +497,18 @@ func (cfg *MempoolConfig) WalDir() string { return rootify(cfg.WalPath, cfg.RootDir) } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. 
+func (cfg *MempoolConfig) ValidateBasic() error { + if cfg.Size < 0 { + return errors.New("size can't be negative") + } + if cfg.CacheSize < 0 { + return errors.New("cache_size can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // ConsensusConfig @@ -641,6 +629,45 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) { cfg.walFile = walFile } +// ValidateBasic performs basic validation (checking param bounds, etc.) and +// returns an error if any check fails. +func (cfg *ConsensusConfig) ValidateBasic() error { + if cfg.TimeoutPropose < 0 { + return errors.New("timeout_propose can't be negative") + } + if cfg.TimeoutProposeDelta < 0 { + return errors.New("timeout_propose_delta can't be negative") + } + if cfg.TimeoutPrevote < 0 { + return errors.New("timeout_prevote can't be negative") + } + if cfg.TimeoutPrevoteDelta < 0 { + return errors.New("timeout_prevote_delta can't be negative") + } + if cfg.TimeoutPrecommit < 0 { + return errors.New("timeout_precommit can't be negative") + } + if cfg.TimeoutPrecommitDelta < 0 { + return errors.New("timeout_precommit_delta can't be negative") + } + if cfg.TimeoutCommit < 0 { + return errors.New("timeout_commit can't be negative") + } + if cfg.CreateEmptyBlocksInterval < 0 { + return errors.New("create_empty_blocks_interval can't be negative") + } + if cfg.PeerGossipSleepDuration < 0 { + return errors.New("peer_gossip_sleep_duration can't be negative") + } + if cfg.PeerQueryMaj23SleepDuration < 0 { + return errors.New("peer_query_maj23_sleep_duration can't be negative") + } + if cfg.BlockTimeIota < 0 { + return errors.New("blocktime_iota can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // TxIndexConfig @@ -726,6 +753,15 @@ func TestInstrumentationConfig() *InstrumentationConfig { return DefaultInstrumentationConfig() } +// ValidateBasic performs basic validation (checking param 
bounds, etc.) and +// returns an error if any check fails. +func (cfg *InstrumentationConfig) ValidateBasic() error { + if cfg.MaxOpenConnections < 0 { + return errors.New("max_open_connections can't be negative") + } + return nil +} + //----------------------------------------------------------------------------- // Utils From fd1b8598bcfed552f208e33fadebf368b80b1daf Mon Sep 17 00:00:00 2001 From: Dev Ojha Date: Tue, 2 Oct 2018 00:47:20 -0700 Subject: [PATCH 13/18] Make block_test.go more table driven (#2526) --- types/block_test.go | 126 ++++++++++++++++++++------------------------ 1 file changed, 56 insertions(+), 70 deletions(-) diff --git a/types/block_test.go b/types/block_test.go index ffd73eae..c99fb6b0 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -1,14 +1,13 @@ package types import ( + "crypto/rand" "math" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - - "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) @@ -45,51 +44,37 @@ func TestBlockValidateBasic(t *testing.T) { ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address) evList := []Evidence{ev} - block := MakeBlock(h, txs, commit, evList) - require.NotNil(t, block) - block.ProposerAddress = valSet.GetProposer().Address - - // proper block must pass - err = block.ValidateBasic() - require.NoError(t, err) - - // tamper with NumTxs - block = MakeBlock(h, txs, commit, evList) - block.NumTxs++ - err = block.ValidateBasic() - require.Error(t, err) - - // remove 1/2 the commits - block = MakeBlock(h, txs, commit, evList) - block.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] - block.LastCommit.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with LastCommitHash - block = MakeBlock(h, txs, commit, evList) - block.LastCommitHash = []byte("something else") - err = 
block.ValidateBasic() - require.Error(t, err) - - // tamper with data - block = MakeBlock(h, txs, commit, evList) - block.Data.Txs[0] = Tx("something else") - block.Data.hash = nil // clear hash or change wont be noticed - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with DataHash - block = MakeBlock(h, txs, commit, evList) - block.DataHash = cmn.RandBytes(len(block.DataHash)) - err = block.ValidateBasic() - require.Error(t, err) - - // tamper with evidence - block = MakeBlock(h, txs, commit, evList) - block.EvidenceHash = []byte("something else") - err = block.ValidateBasic() - require.Error(t, err) + testCases := []struct { + testName string + malleateBlock func(*Block) + expErr bool + }{ + {"Make Block", func(blk *Block) {}, false}, + {"Make Block w/ proposer Addr", func(blk *Block) { blk.ProposerAddress = valSet.GetProposer().Address }, false}, + {"Increase NumTxs", func(blk *Block) { blk.NumTxs++ }, true}, + {"Remove 1/2 the commits", func(blk *Block) { + blk.LastCommit.Precommits = commit.Precommits[:commit.Size()/2] + blk.LastCommit.hash = nil // clear hash or change wont be noticed + }, true}, + {"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true}, + {"Tampered Data", func(blk *Block) { + blk.Data.Txs[0] = Tx("something else") + blk.Data.hash = nil // clear hash or change wont be noticed + }, true}, + {"Tampered DataHash", func(blk *Block) { + blk.DataHash = cmn.RandBytes(len(blk.DataHash)) + }, true}, + {"Tampered EvidenceHash", func(blk *Block) { + blk.EvidenceHash = []byte("something else") + }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + block := MakeBlock(h, txs, commit, evList) + tc.malleateBlock(block) + assert.Equal(t, tc.expErr, block.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } } func TestBlockHash(t *testing.T) { @@ -161,7 +146,11 @@ func TestBlockString(t *testing.T) { } func makeBlockIDRandom() BlockID { 
- blockHash, blockPartsHeader := crypto.CRandBytes(tmhash.Size), PartSetHeader{123, crypto.CRandBytes(tmhash.Size)} + blockHash := make([]byte, tmhash.Size) + partSetHash := make([]byte, tmhash.Size) + rand.Read(blockHash) + rand.Read(partSetHash) + blockPartsHeader := PartSetHeader{123, partSetHash} return BlockID{blockHash, blockPartsHeader} } @@ -211,28 +200,25 @@ func TestCommit(t *testing.T) { } func TestCommitValidateBasic(t *testing.T) { - commit := randCommit() - assert.NoError(t, commit.ValidateBasic()) - - // nil precommit is OK - commit = randCommit() - commit.Precommits[0] = nil - assert.NoError(t, commit.ValidateBasic()) - - // tamper with types - commit = randCommit() - commit.Precommits[0].Type = VoteTypePrevote - assert.Error(t, commit.ValidateBasic()) - - // tamper with height - commit = randCommit() - commit.Precommits[0].Height = int64(100) - assert.Error(t, commit.ValidateBasic()) - - // tamper with round - commit = randCommit() - commit.Precommits[0].Round = 100 - assert.Error(t, commit.ValidateBasic()) + testCases := []struct { + testName string + malleateCommit func(*Commit) + expectErr bool + }{ + {"Random Commit", func(com *Commit) {}, false}, + {"Nil precommit", func(com *Commit) { com.Precommits[0] = nil }, false}, + {"Incorrect signature", func(com *Commit) { com.Precommits[0].Signature = []byte{0} }, false}, + {"Incorrect type", func(com *Commit) { com.Precommits[0].Type = VoteTypePrevote }, true}, + {"Incorrect height", func(com *Commit) { com.Precommits[0].Height = int64(100) }, true}, + {"Incorrect round", func(com *Commit) { com.Precommits[0].Round = 100 }, true}, + } + for _, tc := range testCases { + t.Run(tc.testName, func(t *testing.T) { + com := randCommit() + tc.malleateCommit(com) + assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result") + }) + } } func TestMaxHeaderBytes(t *testing.T) { From 5c6999cf8f0fc09bec6da16d7038a0f1b6ca4d71 Mon Sep 17 00:00:00 2001 From: goolAdapter 
<267310165@qq.com> Date: Tue, 2 Oct 2018 15:52:56 +0800 Subject: [PATCH 14/18] fix evidence db iter leak (#2516) Also make reversing a slice more efficient --- CHANGELOG_PENDING.md | 1 + evidence/store.go | 9 +++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 0c867730..6d981335 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -39,3 +39,4 @@ IMPROVEMENTS: BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time +- [evidence] \#2515 fix db iter leak (@goolAdapter) diff --git a/evidence/store.go b/evidence/store.go index 9d0010a8..ccfd2d48 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -79,11 +79,11 @@ func NewEvidenceStore(db dbm.DB) *EvidenceStore { func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) { // reverse the order so highest priority is first l := store.listEvidence(baseKeyOutqueue, -1) - l2 := make([]types.Evidence, len(l)) - for i := range l { - l2[i] = l[len(l)-1-i] + for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 { + l[i], l[j] = l[j], l[i] } - return l2 + + return l } // PendingEvidence returns known uncommitted evidence up to maxBytes. 
@@ -98,6 +98,7 @@ func (store *EvidenceStore) PendingEvidence(maxBytes int64) (evidence []types.Ev func (store *EvidenceStore) listEvidence(prefixKey string, maxBytes int64) (evidence []types.Evidence) { var bytes int64 iter := dbm.IteratePrefix(store.db, []byte(prefixKey)) + defer iter.Close() for ; iter.Valid(); iter.Next() { val := iter.Value() From f3d08f969dbd5a219eca472fe4eb9f91e460573f Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 3 Oct 2018 04:31:04 +0400 Subject: [PATCH 15/18] [rpc] fix /abci_query: trusted was renamed to prove (#2531) --- rpc/core/routes.go | 2 +- types/block_test.go | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/rpc/core/routes.go b/rpc/core/routes.go index 639a2d08..736ded60 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -36,7 +36,7 @@ var Routes = map[string]*rpc.RPCFunc{ "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), // abci API - "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,trusted"), + "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"), "abci_info": rpc.NewRPCFunc(ABCIInfo, ""), } diff --git a/types/block_test.go b/types/block_test.go index c99fb6b0..43366a63 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" cmn "github.com/tendermint/tendermint/libs/common" ) From c94133ed1b0dc203041d00b31194f2c7567e952a Mon Sep 17 00:00:00 2001 From: JamesRay <66258875@qq.com> Date: Wed, 3 Oct 2018 14:28:46 +0800 Subject: [PATCH 16/18] Fix a bug in bit_array's sub function (#2506) --- libs/common/bit_array.go | 2 +- libs/common/bit_array_test.go | 28 ++++++++++++++++++++++++++++ 2 files changed, 29 insertions(+), 1 deletion(-) diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go index abf6110d..aa470bbd 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -189,7 +189,7 @@ 
func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA.Bits > o.Bits { c := bA.copy() for i := 0; i < len(o.Elems)-1; i++ { - c.Elems[i] &= ^c.Elems[i] + c.Elems[i] &= ^o.Elems[i] } i := len(o.Elems) - 1 if i >= 0 { diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index b1efd3f6..3e2f17ce 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -131,6 +131,34 @@ func TestSub2(t *testing.T) { } } +func TestSub3(t *testing.T) { + + bA1, _ := randBitArray(231) + bA2, _ := randBitArray(81) + bA3 := bA1.Sub(bA2) + + bNil := (*BitArray)(nil) + require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) + require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) + require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) + + if bA3.Bits != bA1.Bits { + t.Error("Expected bA1 bits") + } + if len(bA3.Elems) != len(bA1.Elems) { + t.Error("Expected bA1 elems length") + } + for i := 0; i < bA3.Bits; i++ { + expected := bA1.GetIndex(i) + if i < bA2.Bits && bA2.GetIndex(i){ + expected = false + } + if bA3.GetIndex(i) != expected { + t.Error("Wrong bit from bA3") + } + } +} + func TestPickRandom(t *testing.T) { for idx := 0; idx < 123; idx++ { bA1 := NewBitArray(123) From 0755a5203da8fd5aab74373f2d2d537c3f17bf8a Mon Sep 17 00:00:00 2001 From: ValarDragon Date: Tue, 2 Oct 2018 16:03:59 -0700 Subject: [PATCH 17/18] bit_array: Simplify subtraction also, fix potential bug in Or function --- CHANGELOG_PENDING.md | 3 +- libs/common/bit_array.go | 44 +++++++-------- libs/common/bit_array_test.go | 103 +++++++++------------------------- 3 files changed, 47 insertions(+), 103 deletions(-) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 6d981335..81380e7c 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -11,7 +11,7 @@ BREAKING CHANGES: * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default behaviour to `prove=false` * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and 
Response, where the Response may contain a error message (returned by the remote signer) - + * Apps * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just arbitrary bytes @@ -40,3 +40,4 @@ BUG FIXES: - [autofile] \#2428 Group.RotateFile need call Flush() before rename (@goolAdapter) - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [evidence] \#2515 fix db iter leak (@goolAdapter) +- [common/bit_array] Fixed a bug in the `Or` function diff --git a/libs/common/bit_array.go b/libs/common/bit_array.go index aa470bbd..161f21fc 100644 --- a/libs/common/bit_array.go +++ b/libs/common/bit_array.go @@ -119,14 +119,13 @@ func (bA *BitArray) Or(o *BitArray) *BitArray { } bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() c := bA.copyBits(MaxInt(bA.Bits, o.Bits)) - for i := 0; i < len(c.Elems); i++ { + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { c.Elems[i] |= o.Elems[i] } + bA.mtx.Unlock() + o.mtx.Unlock() return c } @@ -173,8 +172,9 @@ func (bA *BitArray) not() *BitArray { } // Sub subtracts the two bit-arrays bitwise, without carrying the bits. -// This is essentially bA.And(o.Not()). -// If bA is longer than o, o is right padded with zeroes. +// Note that carryless subtraction of a - b is (a and not b). +// The output is the same as bA, regardless of o's size. +// If bA is longer than o, o is right padded with zeroes func (bA *BitArray) Sub(o *BitArray) *BitArray { if bA == nil || o == nil { // TODO: Decide if we should do 1's complement here? 
@@ -182,24 +182,20 @@ func (bA *BitArray) Sub(o *BitArray) *BitArray { } bA.mtx.Lock() o.mtx.Lock() - defer func() { - bA.mtx.Unlock() - o.mtx.Unlock() - }() - if bA.Bits > o.Bits { - c := bA.copy() - for i := 0; i < len(o.Elems)-1; i++ { - c.Elems[i] &= ^o.Elems[i] - } - i := len(o.Elems) - 1 - if i >= 0 { - for idx := i * 64; idx < o.Bits; idx++ { - c.setIndex(idx, c.getIndex(idx) && !o.getIndex(idx)) - } - } - return c + // output is the same size as bA + c := bA.copyBits(bA.Bits) + // Only iterate to the minimum size between the two. + // If o is longer, those bits are ignored. + // If bA is longer, then skipping those iterations is equivalent + // to right padding with 0's + smaller := MinInt(len(bA.Elems), len(o.Elems)) + for i := 0; i < smaller; i++ { + // &^ is and not in golang + c.Elems[i] &^= o.Elems[i] } - return bA.and(o.not()) // Note degenerate case where o == nil + bA.mtx.Unlock() + o.mtx.Unlock() + return c } // IsEmpty returns true iff all bits in the bit array are 0 diff --git a/libs/common/bit_array_test.go b/libs/common/bit_array_test.go index 3e2f17ce..bc117b2a 100644 --- a/libs/common/bit_array_test.go +++ b/libs/common/bit_array_test.go @@ -75,87 +75,34 @@ func TestOr(t *testing.T) { } } -func TestSub1(t *testing.T) { - - bA1, _ := randBitArray(31) - bA2, _ := randBitArray(51) - bA3 := bA1.Sub(bA2) - - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") +func TestSub(t *testing.T) { + testCases := []struct { + initBA string + subtractingBA string + expectedBA string + }{ + {`null`, `null`, `null`}, + {`"x"`, `null`, `null`}, + {`null`, `"x"`, `null`}, + {`"x"`, `"x"`, `"_"`}, + {`"xxxxxx"`, `"x_x_x_"`, `"_x_x_x"`}, + {`"x_x_x_"`, `"xxxxxx"`, `"______"`}, + {`"xxxxxx"`, `"x_x_x_xxxx"`, `"_x_x_x"`}, + {`"x_x_x_xxxx"`, `"xxxxxx"`, `"______xxxx"`}, + 
{`"xxxxxxxxxx"`, `"x_x_x_"`, `"_x_x_xxxxx"`}, + {`"x_x_x_"`, `"xxxxxxxxxx"`, `"______"`}, } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3", i, bA1.GetIndex(i), bA2.GetIndex(i), bA3.GetIndex(i)) - } - } -} + for _, tc := range testCases { + var bA *BitArray + err := json.Unmarshal([]byte(tc.initBA), &bA) + require.Nil(t, err) -func TestSub2(t *testing.T) { + var o *BitArray + err = json.Unmarshal([]byte(tc.subtractingBA), &o) + require.Nil(t, err) - bA1, _ := randBitArray(51) - bA2, _ := randBitArray(31) - bA3 := bA1.Sub(bA2) - - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if i < bA2.Bits && bA2.GetIndex(i) { - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3") - } - } -} - -func TestSub3(t *testing.T) { - - bA1, _ := randBitArray(231) - bA2, _ := randBitArray(81) - bA3 := bA1.Sub(bA2) - - bNil := (*BitArray)(nil) - require.Equal(t, bNil.Sub(bA1), (*BitArray)(nil)) - require.Equal(t, bA1.Sub(nil), (*BitArray)(nil)) - require.Equal(t, bNil.Sub(nil), (*BitArray)(nil)) - - if bA3.Bits != bA1.Bits { - t.Error("Expected bA1 bits") - } - if len(bA3.Elems) != len(bA1.Elems) { - t.Error("Expected bA1 elems length") - } - for i := 0; i < bA3.Bits; i++ { - expected := bA1.GetIndex(i) - if i < bA2.Bits && bA2.GetIndex(i){ - expected = false - } - if bA3.GetIndex(i) != expected { - t.Error("Wrong bit from bA3") - } + got, _ := json.Marshal(bA.Sub(o)) + require.Equal(t, tc.expectedBA, 
string(got), "%s minus %s doesn't equal %s", tc.initBA, tc.subtractingBA, tc.expectedBA) } } From cb2e58411f670549f7e1f66173f78c14116c77a0 Mon Sep 17 00:00:00 2001 From: Anton Kaliaev Date: Wed, 3 Oct 2018 10:53:29 +0400 Subject: [PATCH 18/18] add a missing changelog entry --- CHANGELOG_PENDING.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index 81380e7c..a9538dd1 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,6 +1,7 @@ # Pending Special thanks to external contributors on this release: +@goolAdapter, @bradyjoestar BREAKING CHANGES: @@ -41,3 +42,4 @@ BUG FIXES: - [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time - [evidence] \#2515 fix db iter leak (@goolAdapter) - [common/bit_array] Fixed a bug in the `Or` function +- [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar)