Merge branch 'develop' into 2491-delete-old-results

Anton Kaliaev 2018-10-03 11:36:48 +04:00 committed by GitHub
commit 45c6f2fec7
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
106 changed files with 3549 additions and 1430 deletions


@@ -1,24 +1,47 @@
# Pending

Special thanks to external contributors on this release:
@goolAdapter, @bradyjoestar

BREAKING CHANGES:

* CLI/RPC/Config
  * [rpc] `/block_results` now uses indexer to fetch block results for heights < `latestBlockHeight`
  * [config] \#2232 timeouts as time.Duration, not ints
  * [config] \#2505 Remove Mempool.RecheckEmpty (it was effectively useless anyway)
  * [config] `mempool.wal` is disabled by default
  * [rpc] \#2298 `/abci_query` takes `prove` argument instead of `trusted` and switches the default
    behaviour to `prove=false`
  * [privval] \#2459 Split `SocketPVMsg`s implementations into Request and Response, where the Response may contain an error message (returned by the remote signer)
* Apps
  * [abci] \#2298 ResponseQuery.Proof is now a structured merkle.Proof, not just
    arbitrary bytes
* Go API
  * [node] Remove node.RunForever
  * [config] \#2232 timeouts as time.Duration, not ints
  * [rpc/client] \#2298 `ABCIQueryOptions.Trusted` -> `ABCIQueryOptions.Prove`
  * [types] \#2298 Remove `Index` and `Total` fields from `TxProof`.
  * [crypto/merkle & lite] \#2298 Various changes to accommodate General Merkle trees
* Blockchain Protocol
  * [types] \#2459 `Vote`/`Proposal`/`Heartbeat` use amino encoding instead of JSON in `SignBytes`.
* P2P Protocol

FEATURES:
- [crypto/merkle] \#2298 General Merkle Proof scheme for chaining various types of Merkle trees together

IMPROVEMENTS:
- [consensus] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics
- [p2p] [\#2169](https://github.com/cosmos/cosmos-sdk/issues/2169) add additional metrics
- [config] \#2232 added ValidateBasic method, which performs basic checks

BUG FIXES:
- [autofile] \#2428 Group.RotateFile needs to call Flush() before rename (@goolAdapter)
- [node] \#2434 Make node respond to signal interrupts while sleeping for genesis time
- [state] \#2491 Store ABCIResponses only for the last block
- [evidence] \#2515 fix db iter leak (@goolAdapter)
- [common/bit_array] Fixed a bug in the `Or` function
- [common/bit_array] Fixed a bug in the `Sub` function (@bradyjoestar)


@@ -35,7 +35,7 @@ install:
 ########################################
 ### Protobuf

-protoc_all: protoc_libs protoc_abci protoc_grpc
+protoc_all: protoc_libs protoc_merkle protoc_abci protoc_grpc

 %.pb.go: %.proto
 ## If you get the following error,
@@ -137,6 +137,8 @@ grpc_dbserver:

 protoc_grpc: rpc/grpc/types.pb.go

+protoc_merkle: crypto/merkle/merkle.pb.go
+
 ########################################
 ### Testing


@@ -118,11 +118,12 @@ CHANGELOG even if they don't lead to MINOR version bumps:
- rpc/client
- config
- node
- libs
  - bech32
  - common
  - db
  - errors
  - log

Exported objects in these packages that are not covered by the versioning scheme
are explicitly marked by `// UNSTABLE` in their go doc comment and may change at any


@@ -22,6 +22,7 @@ import (
 	servertest "github.com/tendermint/tendermint/abci/tests/server"
 	"github.com/tendermint/tendermint/abci/types"
 	"github.com/tendermint/tendermint/abci/version"
+	"github.com/tendermint/tendermint/crypto/merkle"
 )

 // client is a global variable so it can be reused by the console
@@ -100,7 +101,7 @@ type queryResponse struct {
 	Key    []byte
 	Value  []byte
 	Height int64
-	Proof  []byte
+	Proof  *merkle.Proof
 }

 func Execute() error {
@@ -748,7 +749,7 @@ func printResponse(cmd *cobra.Command, args []string, rsp response) {
 			fmt.Printf("-> value.hex: %X\n", rsp.Query.Value)
 		}
 		if rsp.Query.Proof != nil {
-			fmt.Printf("-> proof: %X\n", rsp.Query.Proof)
+			fmt.Printf("-> proof: %#v\n", rsp.Query.Proof)
 		}
 	}
 }


@@ -6,4 +6,5 @@ const (
 	CodeTypeEncodingError uint32 = 1
 	CodeTypeBadNonce      uint32 = 2
 	CodeTypeUnauthorized  uint32 = 3
+	CodeTypeUnknownError  uint32 = 4
 )
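For orientation, a hedged sketch of how an application might map failures onto the new catch-all code; the deliverResult helper below is hypothetical, not part of this commit:

func deliverResult(err error) types.ResponseDeliverTx {
	if err != nil {
		// Unclassified failures fall back to the new catch-all code.
		return types.ResponseDeliverTx{Code: code.CodeTypeUnknownError, Log: err.Error()}
	}
	return types.ResponseDeliverTx{Code: code.CodeTypeOK}
}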


@@ -81,7 +81,7 @@ func (app *KVStoreApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
 	app.state.Size += 1

 	tags := []cmn.KVPair{
-		{Key: []byte("app.creator"), Value: []byte("jae")},
+		{Key: []byte("app.creator"), Value: []byte("Cosmoshi Netowoko")},
 		{Key: []byte("app.key"), Value: key},
 	}
 	return types.ResponseDeliverTx{Code: code.CodeTypeOK, Tags: tags}
@@ -114,6 +114,7 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
 		}
 		return
 	} else {
+		resQuery.Key = reqQuery.Data
 		value := app.state.db.Get(prefixKey(reqQuery.Data))
 		resQuery.Value = value
 		if value != nil {


@@ -28,6 +28,8 @@
 -> code: OK
 -> log: exists
 -> height: 0
+-> key: abc
+-> key.hex: 616263
 -> value: abc
 -> value.hex: 616263
@@ -42,6 +44,8 @@
 -> code: OK
 -> log: exists
 -> height: 0
+-> key: def
+-> key.hex: 646566
 -> value: xyz
 -> value.hex: 78797A

(File diff suppressed because it is too large.)


@@ -6,6 +6,7 @@ package types;
 import "github.com/gogo/protobuf/gogoproto/gogo.proto";
 import "google/protobuf/timestamp.proto";
 import "github.com/tendermint/tendermint/libs/common/types.proto";
+import "github.com/tendermint/tendermint/crypto/merkle/merkle.proto";

 // This file is copied from http://github.com/tendermint/abci
 // NOTE: When using custom types, mind the warnings.
@@ -154,7 +155,7 @@ message ResponseQuery {
   int64 index = 5;
   bytes key = 6;
   bytes value = 7;
-  bytes proof = 8;
+  merkle.Proof proof = 8;
   int64 height = 9;
 }


@@ -14,6 +14,7 @@ import fmt "fmt"
 import math "math"
 import _ "github.com/gogo/protobuf/gogoproto"
 import _ "github.com/golang/protobuf/ptypes/timestamp"
+import _ "github.com/tendermint/tendermint/crypto/merkle"
 import _ "github.com/tendermint/tendermint/libs/common"

 // Reference imports to suppress errors if they are not otherwise used.


@@ -30,6 +30,7 @@ var (
 	nodeAddr  string
 	chainID   string
 	home      string
+	cacheSize int
 )

 func init() {
@@ -37,6 +38,7 @@ func init() {
 	LiteCmd.Flags().StringVar(&nodeAddr, "node", "tcp://localhost:26657", "Connect to a Tendermint node at this address")
 	LiteCmd.Flags().StringVar(&chainID, "chain-id", "tendermint", "Specify the Tendermint chain ID")
 	LiteCmd.Flags().StringVar(&home, "home-dir", ".tendermint-lite", "Specify the home directory")
+	LiteCmd.Flags().IntVar(&cacheSize, "cache-size", 10, "Specify the memory trust store cache size")
 }

 func ensureAddrHasSchemeOrDefaultToTCP(addr string) (string, error) {
@@ -69,7 +71,7 @@ func runProxy(cmd *cobra.Command, args []string) error {
 	node := rpcclient.NewHTTP(nodeAddr, "/websocket")

 	logger.Info("Constructing Verifier...")
-	cert, err := proxy.NewVerifier(chainID, home, node, logger)
+	cert, err := proxy.NewVerifier(chainID, home, node, logger, cacheSize)
 	if err != nil {
 		return cmn.ErrorWrap(err, "constructing Verifier")
 	}


@@ -1,6 +1,7 @@
 package commands

 import (
+	"fmt"
 	"os"

 	"github.com/spf13/cobra"
@@ -35,6 +36,9 @@ func ParseConfig() (*cfg.Config, error) {
 	}
 	conf.SetRoot(conf.RootDir)
 	cfg.EnsureRoot(conf.RootDir)
+	if err = conf.ValidateBasic(); err != nil {
+		return nil, fmt.Errorf("Error in config file: %v", err)
+	}
 	return conf, err
 }


@@ -5,6 +5,8 @@ import (
 	"os"
 	"path/filepath"
 	"time"
+
+	"github.com/pkg/errors"
 )

 const (
@@ -19,7 +21,7 @@ const (
 // generate the config.toml. Please reflect any changes
 // made here in the defaultConfigTemplate constant in
 // config/toml.go
-// NOTE: tmlibs/cli must know to look in the config dir!
+// NOTE: libs/cli must know to look in the config dir!
 var (
 	DefaultTendermintDir = ".tendermint"
 	defaultConfigDir     = "config"
@@ -89,6 +91,27 @@ func (cfg *Config) SetRoot(root string) *Config {
 	return cfg
 }

+// ValidateBasic performs basic validation (checking param bounds, etc.) and
+// returns an error if any check fails.
+func (cfg *Config) ValidateBasic() error {
+	if err := cfg.RPC.ValidateBasic(); err != nil {
+		return errors.Wrap(err, "Error in [rpc] section")
+	}
+	if err := cfg.P2P.ValidateBasic(); err != nil {
+		return errors.Wrap(err, "Error in [p2p] section")
+	}
+	if err := cfg.Mempool.ValidateBasic(); err != nil {
+		return errors.Wrap(err, "Error in [mempool] section")
+	}
+	if err := cfg.Consensus.ValidateBasic(); err != nil {
+		return errors.Wrap(err, "Error in [consensus] section")
+	}
+	return errors.Wrap(
+		cfg.Instrumentation.ValidateBasic(),
+		"Error in [instrumentation] section",
+	)
+}
+
 //-----------------------------------------------------------------------------
 // BaseConfig

@@ -265,6 +288,18 @@ func TestRPCConfig() *RPCConfig {
 	return cfg
 }

+// ValidateBasic performs basic validation (checking param bounds, etc.) and
+// returns an error if any check fails.
+func (cfg *RPCConfig) ValidateBasic() error {
+	if cfg.GRPCMaxOpenConnections < 0 {
+		return errors.New("grpc_max_open_connections can't be negative")
+	}
+	if cfg.MaxOpenConnections < 0 {
+		return errors.New("max_open_connections can't be negative")
+	}
+	return nil
+}
+
 //-----------------------------------------------------------------------------
 // P2PConfig

@@ -301,8 +336,8 @@ type P2PConfig struct {
 	// Maximum number of outbound peers to connect to, excluding persistent peers
 	MaxNumOutboundPeers int `mapstructure:"max_num_outbound_peers"`

-	// Time to wait before flushing messages out on the connection, in ms
-	FlushThrottleTimeout int `mapstructure:"flush_throttle_timeout"`
+	// Time to wait before flushing messages out on the connection
+	FlushThrottleTimeout time.Duration `mapstructure:"flush_throttle_timeout"`

 	// Maximum size of a message packet payload, in bytes
 	MaxPacketMsgPayloadSize int `mapstructure:"max_packet_msg_payload_size"`
@@ -351,7 +386,7 @@ func DefaultP2PConfig() *P2PConfig {
 		AddrBookStrict:          true,
 		MaxNumInboundPeers:      40,
 		MaxNumOutboundPeers:     10,
-		FlushThrottleTimeout:    100,
+		FlushThrottleTimeout:    100 * time.Millisecond,
 		MaxPacketMsgPayloadSize: 1024,    // 1 kB
 		SendRate:                5120000, // 5 mB/s
 		RecvRate:                5120000, // 5 mB/s
@@ -380,6 +415,30 @@ func (cfg *P2PConfig) AddrBookFile() string {
 	return rootify(cfg.AddrBook, cfg.RootDir)
 }

+// ValidateBasic performs basic validation (checking param bounds, etc.) and
+// returns an error if any check fails.
+func (cfg *P2PConfig) ValidateBasic() error {
+	if cfg.MaxNumInboundPeers < 0 {
+		return errors.New("max_num_inbound_peers can't be negative")
+	}
+	if cfg.MaxNumOutboundPeers < 0 {
+		return errors.New("max_num_outbound_peers can't be negative")
+	}
+	if cfg.FlushThrottleTimeout < 0 {
+		return errors.New("flush_throttle_timeout can't be negative")
+	}
+	if cfg.MaxPacketMsgPayloadSize < 0 {
+		return errors.New("max_packet_msg_payload_size can't be negative")
+	}
+	if cfg.SendRate < 0 {
+		return errors.New("send_rate can't be negative")
+	}
+	if cfg.RecvRate < 0 {
+		return errors.New("recv_rate can't be negative")
+	}
+	return nil
+}
+
 // FuzzConnConfig is a FuzzedConnection configuration.
 type FuzzConnConfig struct {
 	Mode int
@@ -407,7 +466,6 @@ func DefaultFuzzConnConfig() *FuzzConnConfig {
 type MempoolConfig struct {
 	RootDir   string `mapstructure:"home"`
 	Recheck   bool   `mapstructure:"recheck"`
-	RecheckEmpty bool `mapstructure:"recheck_empty"`
 	Broadcast bool   `mapstructure:"broadcast"`
 	WalPath   string `mapstructure:"wal_dir"`
 	Size      int    `mapstructure:"size"`
@@ -418,9 +476,8 @@ type MempoolConfig struct {
 func DefaultMempoolConfig() *MempoolConfig {
 	return &MempoolConfig{
 		Recheck:   true,
-		RecheckEmpty: true,
 		Broadcast: true,
-		WalPath:   filepath.Join(defaultDataDir, "mempool.wal"),
+		WalPath:   "",
 		// Each signature verification takes .5ms, size reduced until we implement
 		// ABCI Recheck
 		Size:      5000,
@@ -440,6 +497,18 @@ func (cfg *MempoolConfig) WalDir() string {
 	return rootify(cfg.WalPath, cfg.RootDir)
 }

+// ValidateBasic performs basic validation (checking param bounds, etc.) and
+// returns an error if any check fails.
+func (cfg *MempoolConfig) ValidateBasic() error {
+	if cfg.Size < 0 {
+		return errors.New("size can't be negative")
+	}
+	if cfg.CacheSize < 0 {
+		return errors.New("cache_size can't be negative")
+	}
+	return nil
+}
+
 //-----------------------------------------------------------------------------
 // ConsensusConfig

@@ -450,72 +519,70 @@ type ConsensusConfig struct {
 	WalPath string `mapstructure:"wal_file"`
 	walFile string // overrides WalPath if set

-	// All timeouts are in milliseconds
-	TimeoutPropose        int `mapstructure:"timeout_propose"`
-	TimeoutProposeDelta   int `mapstructure:"timeout_propose_delta"`
-	TimeoutPrevote        int `mapstructure:"timeout_prevote"`
-	TimeoutPrevoteDelta   int `mapstructure:"timeout_prevote_delta"`
-	TimeoutPrecommit      int `mapstructure:"timeout_precommit"`
-	TimeoutPrecommitDelta int `mapstructure:"timeout_precommit_delta"`
-	TimeoutCommit         int `mapstructure:"timeout_commit"`
+	TimeoutPropose        time.Duration `mapstructure:"timeout_propose"`
+	TimeoutProposeDelta   time.Duration `mapstructure:"timeout_propose_delta"`
+	TimeoutPrevote        time.Duration `mapstructure:"timeout_prevote"`
+	TimeoutPrevoteDelta   time.Duration `mapstructure:"timeout_prevote_delta"`
+	TimeoutPrecommit      time.Duration `mapstructure:"timeout_precommit"`
+	TimeoutPrecommitDelta time.Duration `mapstructure:"timeout_precommit_delta"`
+	TimeoutCommit         time.Duration `mapstructure:"timeout_commit"`

 	// Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 	SkipTimeoutCommit bool `mapstructure:"skip_timeout_commit"`

-	// EmptyBlocks mode and possible interval between empty blocks in seconds
+	// EmptyBlocks mode and possible interval between empty blocks
 	CreateEmptyBlocks         bool          `mapstructure:"create_empty_blocks"`
-	CreateEmptyBlocksInterval int           `mapstructure:"create_empty_blocks_interval"`
+	CreateEmptyBlocksInterval time.Duration `mapstructure:"create_empty_blocks_interval"`

-	// Reactor sleep duration parameters are in milliseconds
-	PeerGossipSleepDuration     int `mapstructure:"peer_gossip_sleep_duration"`
-	PeerQueryMaj23SleepDuration int `mapstructure:"peer_query_maj23_sleep_duration"`
+	// Reactor sleep duration parameters
+	PeerGossipSleepDuration     time.Duration `mapstructure:"peer_gossip_sleep_duration"`
+	PeerQueryMaj23SleepDuration time.Duration `mapstructure:"peer_query_maj23_sleep_duration"`

-	// Block time parameters in milliseconds. Corresponds to the minimum time increment between consecutive blocks.
-	BlockTimeIota int `mapstructure:"blocktime_iota"`
+	// Block time parameters. Corresponds to the minimum time increment between consecutive blocks.
+	BlockTimeIota time.Duration `mapstructure:"blocktime_iota"`
 }

 // DefaultConsensusConfig returns a default configuration for the consensus service
 func DefaultConsensusConfig() *ConsensusConfig {
 	return &ConsensusConfig{
 		WalPath:                     filepath.Join(defaultDataDir, "cs.wal", "wal"),
-		TimeoutPropose:              3000,
-		TimeoutProposeDelta:         500,
-		TimeoutPrevote:              1000,
-		TimeoutPrevoteDelta:         500,
-		TimeoutPrecommit:            1000,
-		TimeoutPrecommitDelta:       500,
-		TimeoutCommit:               1000,
+		TimeoutPropose:              3000 * time.Millisecond,
+		TimeoutProposeDelta:         500 * time.Millisecond,
+		TimeoutPrevote:              1000 * time.Millisecond,
+		TimeoutPrevoteDelta:         500 * time.Millisecond,
+		TimeoutPrecommit:            1000 * time.Millisecond,
+		TimeoutPrecommitDelta:       500 * time.Millisecond,
+		TimeoutCommit:               1000 * time.Millisecond,
 		SkipTimeoutCommit:           false,
 		CreateEmptyBlocks:           true,
-		CreateEmptyBlocksInterval:   0,
-		PeerGossipSleepDuration:     100,
-		PeerQueryMaj23SleepDuration: 2000,
-		BlockTimeIota:               1000,
+		CreateEmptyBlocksInterval:   0 * time.Second,
+		PeerGossipSleepDuration:     100 * time.Millisecond,
+		PeerQueryMaj23SleepDuration: 2000 * time.Millisecond,
+		BlockTimeIota:               1000 * time.Millisecond,
 	}
 }

 // TestConsensusConfig returns a configuration for testing the consensus service
 func TestConsensusConfig() *ConsensusConfig {
 	cfg := DefaultConsensusConfig()
-	cfg.TimeoutPropose = 100
-	cfg.TimeoutProposeDelta = 1
-	cfg.TimeoutPrevote = 10
-	cfg.TimeoutPrevoteDelta = 1
-	cfg.TimeoutPrecommit = 10
-	cfg.TimeoutPrecommitDelta = 1
-	cfg.TimeoutCommit = 10
+	cfg.TimeoutPropose = 100 * time.Millisecond
+	cfg.TimeoutProposeDelta = 1 * time.Millisecond
+	cfg.TimeoutPrevote = 10 * time.Millisecond
+	cfg.TimeoutPrevoteDelta = 1 * time.Millisecond
+	cfg.TimeoutPrecommit = 10 * time.Millisecond
+	cfg.TimeoutPrecommitDelta = 1 * time.Millisecond
+	cfg.TimeoutCommit = 10 * time.Millisecond
 	cfg.SkipTimeoutCommit = true
-	cfg.PeerGossipSleepDuration = 5
-	cfg.PeerQueryMaj23SleepDuration = 250
-	cfg.BlockTimeIota = 10
+	cfg.PeerGossipSleepDuration = 5 * time.Millisecond
+	cfg.PeerQueryMaj23SleepDuration = 250 * time.Millisecond
+	cfg.BlockTimeIota = 10 * time.Millisecond
 	return cfg
 }

 // MinValidVoteTime returns the minimum acceptable block time.
 // See the [BFT time spec](https://godoc.org/github.com/tendermint/tendermint/docs/spec/consensus/bft-time.md).
 func (cfg *ConsensusConfig) MinValidVoteTime(lastBlockTime time.Time) time.Time {
-	return lastBlockTime.
-		Add(time.Duration(cfg.BlockTimeIota) * time.Millisecond)
+	return lastBlockTime.Add(cfg.BlockTimeIota)
 }

 // WaitForTxs returns true if the consensus should wait for transactions before entering the propose step
@@ -523,39 +590,30 @@ func (cfg *ConsensusConfig) WaitForTxs() bool {
 	return !cfg.CreateEmptyBlocks || cfg.CreateEmptyBlocksInterval > 0
 }

-// EmptyBlocks returns the amount of time to wait before proposing an empty block or starting the propose timer if there are no txs available
-func (cfg *ConsensusConfig) EmptyBlocksInterval() time.Duration {
-	return time.Duration(cfg.CreateEmptyBlocksInterval) * time.Second
-}
-
 // Propose returns the amount of time to wait for a proposal
 func (cfg *ConsensusConfig) Propose(round int) time.Duration {
-	return time.Duration(cfg.TimeoutPropose+cfg.TimeoutProposeDelta*round) * time.Millisecond
+	return time.Duration(
+		cfg.TimeoutPropose.Nanoseconds()+cfg.TimeoutProposeDelta.Nanoseconds()*int64(round),
+	) * time.Nanosecond
 }

 // Prevote returns the amount of time to wait for straggler votes after receiving any +2/3 prevotes
 func (cfg *ConsensusConfig) Prevote(round int) time.Duration {
-	return time.Duration(cfg.TimeoutPrevote+cfg.TimeoutPrevoteDelta*round) * time.Millisecond
+	return time.Duration(
+		cfg.TimeoutPrevote.Nanoseconds()+cfg.TimeoutPrevoteDelta.Nanoseconds()*int64(round),
+	) * time.Nanosecond
 }

 // Precommit returns the amount of time to wait for straggler votes after receiving any +2/3 precommits
 func (cfg *ConsensusConfig) Precommit(round int) time.Duration {
-	return time.Duration(cfg.TimeoutPrecommit+cfg.TimeoutPrecommitDelta*round) * time.Millisecond
+	return time.Duration(
+		cfg.TimeoutPrecommit.Nanoseconds()+cfg.TimeoutPrecommitDelta.Nanoseconds()*int64(round),
+	) * time.Nanosecond
 }

 // Commit returns the amount of time to wait for straggler votes after receiving +2/3 precommits for a single block (ie. a commit).
 func (cfg *ConsensusConfig) Commit(t time.Time) time.Time {
-	return t.Add(time.Duration(cfg.TimeoutCommit) * time.Millisecond)
-}
-
-// PeerGossipSleep returns the amount of time to sleep if there is nothing to send from the ConsensusReactor
-func (cfg *ConsensusConfig) PeerGossipSleep() time.Duration {
-	return time.Duration(cfg.PeerGossipSleepDuration) * time.Millisecond
-}
-
-// PeerQueryMaj23Sleep returns the amount of time to sleep after each VoteSetMaj23Message is sent in the ConsensusReactor
-func (cfg *ConsensusConfig) PeerQueryMaj23Sleep() time.Duration {
-	return time.Duration(cfg.PeerQueryMaj23SleepDuration) * time.Millisecond
+	return t.Add(cfg.TimeoutCommit)
 }

 // WalFile returns the full path to the write-ahead log file
@@ -571,6 +629,45 @@ func (cfg *ConsensusConfig) SetWalFile(walFile string) {
 	cfg.walFile = walFile
 }

+// ValidateBasic performs basic validation (checking param bounds, etc.) and
+// returns an error if any check fails.
+func (cfg *ConsensusConfig) ValidateBasic() error {
+	if cfg.TimeoutPropose < 0 {
+		return errors.New("timeout_propose can't be negative")
+	}
+	if cfg.TimeoutProposeDelta < 0 {
+		return errors.New("timeout_propose_delta can't be negative")
+	}
+	if cfg.TimeoutPrevote < 0 {
+		return errors.New("timeout_prevote can't be negative")
+	}
+	if cfg.TimeoutPrevoteDelta < 0 {
+		return errors.New("timeout_prevote_delta can't be negative")
+	}
+	if cfg.TimeoutPrecommit < 0 {
+		return errors.New("timeout_precommit can't be negative")
+	}
+	if cfg.TimeoutPrecommitDelta < 0 {
+		return errors.New("timeout_precommit_delta can't be negative")
+	}
+	if cfg.TimeoutCommit < 0 {
+		return errors.New("timeout_commit can't be negative")
+	}
+	if cfg.CreateEmptyBlocksInterval < 0 {
+		return errors.New("create_empty_blocks_interval can't be negative")
+	}
+	if cfg.PeerGossipSleepDuration < 0 {
+		return errors.New("peer_gossip_sleep_duration can't be negative")
+	}
+	if cfg.PeerQueryMaj23SleepDuration < 0 {
+		return errors.New("peer_query_maj23_sleep_duration can't be negative")
+	}
+	if cfg.BlockTimeIota < 0 {
+		return errors.New("blocktime_iota can't be negative")
+	}
+	return nil
+}
+
 //-----------------------------------------------------------------------------
 // TxIndexConfig

@@ -656,6 +753,15 @@ func TestInstrumentationConfig() *InstrumentationConfig {
 	return DefaultInstrumentationConfig()
 }

+// ValidateBasic performs basic validation (checking param bounds, etc.) and
+// returns an error if any check fails.
+func (cfg *InstrumentationConfig) ValidateBasic() error {
+	if cfg.MaxOpenConnections < 0 {
+		return errors.New("max_open_connections can't be negative")
+	}
+	return nil
+}
+
 //-----------------------------------------------------------------------------
 // Utils
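Taken together, this file now stores timeouts as time.Duration and validates each config section. A minimal sketch exercising both (only this repo's config package plus the standard library; the tampered value is deliberate):

package main

import (
	"fmt"
	"log"
	"time"

	cfg "github.com/tendermint/tendermint/config"
)

func main() {
	c := cfg.DefaultConfig()

	// Units are now explicit in the type instead of implied milliseconds.
	c.Consensus.TimeoutPropose = 3 * time.Second
	c.Consensus.TimeoutProposeDelta = 500 * time.Millisecond

	// Propose(round) escalates linearly with the round number.
	for round := 0; round < 3; round++ {
		fmt.Println(round, c.Consensus.Propose(round)) // 3s, 3.5s, 4s
	}

	// ValidateBasic wraps failures with the offending [section].
	c.P2P.SendRate = -1
	if err := c.ValidateBasic(); err != nil {
		log.Fatal(err) // e.g. "Error in [p2p] section: send_rate can't be negative"
	}
}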


@@ -2,6 +2,7 @@ package config

 import (
 	"testing"
+	"time"

 	"github.com/stretchr/testify/assert"
 )
@@ -26,3 +27,12 @@ func TestDefaultConfig(t *testing.T) {
 	assert.Equal("/foo/wal/mem", cfg.Mempool.WalDir())
 }
+
+func TestConfigValidateBasic(t *testing.T) {
+	cfg := DefaultConfig()
+	assert.NoError(t, cfg.ValidateBasic())
+
+	// tamper with timeout_propose
+	cfg.Consensus.TimeoutPropose = -10 * time.Second
+	assert.Error(t, cfg.ValidateBasic())
+}


@@ -99,7 +99,7 @@ priv_validator_file = "{{ js .BaseConfig.PrivValidator }}"
 priv_validator_laddr = "{{ .BaseConfig.PrivValidatorListenAddr }}"

 # Path to the JSON file containing the private key to use for node authentication in the p2p protocol
-node_key_file = "{{ js .BaseConfig.NodeKey}}"
+node_key_file = "{{ js .BaseConfig.NodeKey }}"

 # Mechanism to connect to the ABCI application: socket | grpc
 abci = "{{ .BaseConfig.ABCI }}"
@@ -172,15 +172,15 @@ addr_book_file = "{{ js .P2P.AddrBook }}"
 # Set false for private or local networks
 addr_book_strict = {{ .P2P.AddrBookStrict }}

-# Time to wait before flushing messages out on the connection, in ms
-flush_throttle_timeout = {{ .P2P.FlushThrottleTimeout }}
-
 # Maximum number of inbound peers
 max_num_inbound_peers = {{ .P2P.MaxNumInboundPeers }}

 # Maximum number of outbound peers to connect to, excluding persistent peers
 max_num_outbound_peers = {{ .P2P.MaxNumOutboundPeers }}

+# Time to wait before flushing messages out on the connection
+flush_throttle_timeout = "{{ .P2P.FlushThrottleTimeout }}"
+
 # Maximum size of a message packet payload, in bytes
 max_packet_msg_payload_size = {{ .P2P.MaxPacketMsgPayloadSize }}
@@ -202,11 +202,17 @@ seed_mode = {{ .P2P.SeedMode }}
 # Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
 private_peer_ids = "{{ .P2P.PrivatePeerIDs }}"

+# Toggle to disable guard against peers connecting from the same ip.
+allow_duplicate_ip = {{ .P2P.AllowDuplicateIP }}
+
+# Peer connection configuration.
+handshake_timeout = "{{ .P2P.HandshakeTimeout }}"
+dial_timeout = "{{ .P2P.DialTimeout }}"
+
 ##### mempool configuration options #####
 [mempool]

 recheck = {{ .Mempool.Recheck }}
-recheck_empty = {{ .Mempool.RecheckEmpty }}
 broadcast = {{ .Mempool.Broadcast }}
 wal_dir = "{{ js .Mempool.WalPath }}"
@@ -221,25 +227,24 @@ cache_size = {{ .Mempool.CacheSize }}

 wal_file = "{{ js .Consensus.WalPath }}"

-# All timeouts are in milliseconds
-timeout_propose = {{ .Consensus.TimeoutPropose }}
-timeout_propose_delta = {{ .Consensus.TimeoutProposeDelta }}
-timeout_prevote = {{ .Consensus.TimeoutPrevote }}
-timeout_prevote_delta = {{ .Consensus.TimeoutPrevoteDelta }}
-timeout_precommit = {{ .Consensus.TimeoutPrecommit }}
-timeout_precommit_delta = {{ .Consensus.TimeoutPrecommitDelta }}
-timeout_commit = {{ .Consensus.TimeoutCommit }}
+timeout_propose = "{{ .Consensus.TimeoutPropose }}"
+timeout_propose_delta = "{{ .Consensus.TimeoutProposeDelta }}"
+timeout_prevote = "{{ .Consensus.TimeoutPrevote }}"
+timeout_prevote_delta = "{{ .Consensus.TimeoutPrevoteDelta }}"
+timeout_precommit = "{{ .Consensus.TimeoutPrecommit }}"
+timeout_precommit_delta = "{{ .Consensus.TimeoutPrecommitDelta }}"
+timeout_commit = "{{ .Consensus.TimeoutCommit }}"

 # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
 skip_timeout_commit = {{ .Consensus.SkipTimeoutCommit }}

-# EmptyBlocks mode and possible interval between empty blocks in seconds
+# EmptyBlocks mode and possible interval between empty blocks
 create_empty_blocks = {{ .Consensus.CreateEmptyBlocks }}
-create_empty_blocks_interval = {{ .Consensus.CreateEmptyBlocksInterval }}
+create_empty_blocks_interval = "{{ .Consensus.CreateEmptyBlocksInterval }}"

-# Reactor sleep duration parameters are in milliseconds
-peer_gossip_sleep_duration = {{ .Consensus.PeerGossipSleepDuration }}
-peer_query_maj23_sleep_duration = {{ .Consensus.PeerQueryMaj23SleepDuration }}
+# Reactor sleep duration parameters
+peer_gossip_sleep_duration = "{{ .Consensus.PeerGossipSleepDuration }}"
+peer_query_maj23_sleep_duration = "{{ .Consensus.PeerQueryMaj23SleepDuration }}"

 ##### transactions indexer configuration options #####
 [tx_index]
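With the template changes above, durations render as quoted Go duration strings. For the defaults in DefaultConsensusConfig, the generated consensus section of config.toml would look roughly like this sketch:

timeout_propose = "3s"
timeout_propose_delta = "500ms"
timeout_prevote = "1s"
timeout_prevote_delta = "500ms"
timeout_precommit = "1s"
timeout_precommit_delta = "500ms"
timeout_commit = "1s"
create_empty_blocks_interval = "0s"
peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = "2s"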


@@ -38,7 +38,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) {
 func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) {
 	config := ResetConfig("consensus_mempool_txs_available_test")
-	config.Consensus.CreateEmptyBlocksInterval = int(ensureTimeout.Seconds())
+	config.Consensus.CreateEmptyBlocksInterval = ensureTimeout
 	state, privVals := randGenesisState(1, false, 10)
 	cs := newConsensusStateWithConfig(config, state, privVals[0], NewCounterApplication())
 	cs.mempool.EnableTxsAvailable()


@@ -508,7 +508,7 @@ OUTER_LOOP:
 		// If height and round don't match, sleep.
 		if (rs.Height != prs.Height) || (rs.Round != prs.Round) {
 			//logger.Info("Peer Height|Round mismatch, sleeping", "peerHeight", prs.Height, "peerRound", prs.Round, "peer", peer)
-			time.Sleep(conR.conS.config.PeerGossipSleep())
+			time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 			continue OUTER_LOOP
 		}
@@ -544,7 +544,7 @@ OUTER_LOOP:
 		}

 		// Nothing to do. Sleep.
-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		continue OUTER_LOOP
 	}
 }
@@ -558,12 +558,12 @@ func (conR *ConsensusReactor) gossipDataForCatchup(logger log.Logger, rs *cstype
 	if blockMeta == nil {
 		logger.Error("Failed to load block meta",
 			"ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height())
-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		return
 	} else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) {
 		logger.Info("Peer ProposalBlockPartsHeader mismatch, sleeping",
 			"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		return
 	}
 	// Load the part
@@ -571,7 +571,7 @@
 	if part == nil {
 		logger.Error("Could not load part", "index", index,
 			"blockPartsHeader", blockMeta.BlockID.PartsHeader, "peerBlockPartsHeader", prs.ProposalBlockPartsHeader)
-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		return
 	}
 	// Send the part
@@ -589,7 +589,7 @@
 		return
 	}
 	//logger.Info("No parts to send in catch-up, sleeping")
-	time.Sleep(conR.conS.config.PeerGossipSleep())
+	time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 }

 func (conR *ConsensusReactor) gossipVotesRoutine(peer p2p.Peer, ps *PeerState) {
@@ -658,7 +658,7 @@ OUTER_LOOP:
 			sleeping = 1
 		}

-		time.Sleep(conR.conS.config.PeerGossipSleep())
+		time.Sleep(conR.conS.config.PeerGossipSleepDuration)
 		continue OUTER_LOOP
 	}
 }
@@ -742,7 +742,7 @@ OUTER_LOOP:
 					Type:    types.VoteTypePrevote,
 					BlockID: maj23,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}
 	}
@@ -759,7 +759,7 @@ OUTER_LOOP:
 					Type:    types.VoteTypePrecommit,
 					BlockID: maj23,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}
 	}
@@ -776,7 +776,7 @@ OUTER_LOOP:
 					Type:    types.VoteTypePrevote,
 					BlockID: maj23,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}
 	}
@@ -795,11 +795,11 @@ OUTER_LOOP:
 					Type:    types.VoteTypePrecommit,
 					BlockID: commit.BlockID,
 				}))
-				time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+				time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 			}
 		}

-		time.Sleep(conR.conS.config.PeerQueryMaj23Sleep())
+		time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration)
 		continue OUTER_LOOP
 	}


@@ -782,7 +782,7 @@ func (cs *ConsensusState) enterNewRound(height int64, round int) {
 	waitForTxs := cs.config.WaitForTxs() && round == 0 && !cs.needProofBlock(height)
 	if waitForTxs {
 		if cs.config.CreateEmptyBlocksInterval > 0 {
-			cs.scheduleTimeout(cs.config.EmptyBlocksInterval(), height, round, cstypes.RoundStepNewRound)
+			cs.scheduleTimeout(cs.config.CreateEmptyBlocksInterval, height, round, cstypes.RoundStepNewRound)
 		}
 		go cs.proposalHeartbeat(height, round)
 	} else {


@@ -21,8 +21,8 @@ func init() {
 	config = ResetConfig("consensus_state_test")
 }

-func ensureProposeTimeout(timeoutPropose int) time.Duration {
-	return time.Duration(timeoutPropose*2) * time.Millisecond
+func ensureProposeTimeout(timeoutPropose time.Duration) time.Duration {
+	return time.Duration(timeoutPropose.Nanoseconds()*2) * time.Nanosecond
 }

 /*


@@ -107,8 +107,8 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple {
 // RoundStateEvent returns the H/R/S of the RoundState as an event.
 func (rs *RoundState) RoundStateEvent() types.EventDataRoundState {
-	// XXX: copy the RoundState
-	// if we want to avoid this, we may need synchronous events after all
+	// copy the RoundState.
+	// TODO: if we want to avoid this, we may need synchronous events after all
 	rsCopy := *rs
 	edrs := types.EventDataRoundState{
 		Height: rs.Height,

crypto/merkle/compile.sh (new file)

@@ -0,0 +1,6 @@
#! /bin/bash
protoc --gogo_out=. -I $GOPATH/src/ -I . -I $GOPATH/src/github.com/gogo/protobuf/protobuf merkle.proto
echo "--> adding nolint declarations to protobuf generated files"
awk '/package merkle/ { print "//nolint: gas"; print; next }1' merkle.pb.go > merkle.pb.go.new
mv merkle.pb.go.new merkle.pb.go

crypto/merkle/merkle.pb.go (new file)

@@ -0,0 +1,792 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: crypto/merkle/merkle.proto
package merkle
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import _ "github.com/gogo/protobuf/gogoproto"
import bytes "bytes"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// ProofOp defines an operation used for calculating Merkle root
// The data could be in arbitrary format, providing necessary data,
// for example a neighbouring node hash
type ProofOp struct {
Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"`
Key []byte `protobuf:"bytes,2,opt,name=key,proto3" json:"key,omitempty"`
Data []byte `protobuf:"bytes,3,opt,name=data,proto3" json:"data,omitempty"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *ProofOp) Reset() { *m = ProofOp{} }
func (m *ProofOp) String() string { return proto.CompactTextString(m) }
func (*ProofOp) ProtoMessage() {}
func (*ProofOp) Descriptor() ([]byte, []int) {
return fileDescriptor_merkle_5d3f6051907285da, []int{0}
}
func (m *ProofOp) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *ProofOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_ProofOp.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *ProofOp) XXX_Merge(src proto.Message) {
xxx_messageInfo_ProofOp.Merge(dst, src)
}
func (m *ProofOp) XXX_Size() int {
return m.Size()
}
func (m *ProofOp) XXX_DiscardUnknown() {
xxx_messageInfo_ProofOp.DiscardUnknown(m)
}
var xxx_messageInfo_ProofOp proto.InternalMessageInfo
func (m *ProofOp) GetType() string {
if m != nil {
return m.Type
}
return ""
}
func (m *ProofOp) GetKey() []byte {
if m != nil {
return m.Key
}
return nil
}
func (m *ProofOp) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
// Proof is Merkle proof defined by the list of ProofOps
type Proof struct {
Ops []ProofOp `protobuf:"bytes,1,rep,name=ops" json:"ops"`
XXX_NoUnkeyedLiteral struct{} `json:"-"`
XXX_unrecognized []byte `json:"-"`
XXX_sizecache int32 `json:"-"`
}
func (m *Proof) Reset() { *m = Proof{} }
func (m *Proof) String() string { return proto.CompactTextString(m) }
func (*Proof) ProtoMessage() {}
func (*Proof) Descriptor() ([]byte, []int) {
return fileDescriptor_merkle_5d3f6051907285da, []int{1}
}
func (m *Proof) XXX_Unmarshal(b []byte) error {
return m.Unmarshal(b)
}
func (m *Proof) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) {
if deterministic {
return xxx_messageInfo_Proof.Marshal(b, m, deterministic)
} else {
b = b[:cap(b)]
n, err := m.MarshalTo(b)
if err != nil {
return nil, err
}
return b[:n], nil
}
}
func (dst *Proof) XXX_Merge(src proto.Message) {
xxx_messageInfo_Proof.Merge(dst, src)
}
func (m *Proof) XXX_Size() int {
return m.Size()
}
func (m *Proof) XXX_DiscardUnknown() {
xxx_messageInfo_Proof.DiscardUnknown(m)
}
var xxx_messageInfo_Proof proto.InternalMessageInfo
func (m *Proof) GetOps() []ProofOp {
if m != nil {
return m.Ops
}
return nil
}
func init() {
proto.RegisterType((*ProofOp)(nil), "merkle.ProofOp")
proto.RegisterType((*Proof)(nil), "merkle.Proof")
}
func (this *ProofOp) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*ProofOp)
if !ok {
that2, ok := that.(ProofOp)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.Type != that1.Type {
return false
}
if !bytes.Equal(this.Key, that1.Key) {
return false
}
if !bytes.Equal(this.Data, that1.Data) {
return false
}
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
return false
}
return true
}
func (this *Proof) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*Proof)
if !ok {
that2, ok := that.(Proof)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if len(this.Ops) != len(that1.Ops) {
return false
}
for i := range this.Ops {
if !this.Ops[i].Equal(&that1.Ops[i]) {
return false
}
}
if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) {
return false
}
return true
}
func (m *ProofOp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ProofOp) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Type) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Type)))
i += copy(dAtA[i:], m.Type)
}
if len(m.Key) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Key)))
i += copy(dAtA[i:], m.Key)
}
if len(m.Data) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintMerkle(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func (m *Proof) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *Proof) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Ops) > 0 {
for _, msg := range m.Ops {
dAtA[i] = 0xa
i++
i = encodeVarintMerkle(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
if m.XXX_unrecognized != nil {
i += copy(dAtA[i:], m.XXX_unrecognized)
}
return i, nil
}
func encodeVarintMerkle(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func NewPopulatedProofOp(r randyMerkle, easy bool) *ProofOp {
this := &ProofOp{}
this.Type = string(randStringMerkle(r))
v1 := r.Intn(100)
this.Key = make([]byte, v1)
for i := 0; i < v1; i++ {
this.Key[i] = byte(r.Intn(256))
}
v2 := r.Intn(100)
this.Data = make([]byte, v2)
for i := 0; i < v2; i++ {
this.Data[i] = byte(r.Intn(256))
}
if !easy && r.Intn(10) != 0 {
this.XXX_unrecognized = randUnrecognizedMerkle(r, 4)
}
return this
}
func NewPopulatedProof(r randyMerkle, easy bool) *Proof {
this := &Proof{}
if r.Intn(10) != 0 {
v3 := r.Intn(5)
this.Ops = make([]ProofOp, v3)
for i := 0; i < v3; i++ {
v4 := NewPopulatedProofOp(r, easy)
this.Ops[i] = *v4
}
}
if !easy && r.Intn(10) != 0 {
this.XXX_unrecognized = randUnrecognizedMerkle(r, 2)
}
return this
}
type randyMerkle interface {
Float32() float32
Float64() float64
Int63() int64
Int31() int32
Uint32() uint32
Intn(n int) int
}
func randUTF8RuneMerkle(r randyMerkle) rune {
ru := r.Intn(62)
if ru < 10 {
return rune(ru + 48)
} else if ru < 36 {
return rune(ru + 55)
}
return rune(ru + 61)
}
func randStringMerkle(r randyMerkle) string {
v5 := r.Intn(100)
tmps := make([]rune, v5)
for i := 0; i < v5; i++ {
tmps[i] = randUTF8RuneMerkle(r)
}
return string(tmps)
}
func randUnrecognizedMerkle(r randyMerkle, maxFieldNumber int) (dAtA []byte) {
l := r.Intn(5)
for i := 0; i < l; i++ {
wire := r.Intn(4)
if wire == 3 {
wire = 5
}
fieldNumber := maxFieldNumber + r.Intn(100)
dAtA = randFieldMerkle(dAtA, r, fieldNumber, wire)
}
return dAtA
}
func randFieldMerkle(dAtA []byte, r randyMerkle, fieldNumber int, wire int) []byte {
key := uint32(fieldNumber)<<3 | uint32(wire)
switch wire {
case 0:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
v6 := r.Int63()
if r.Intn(2) == 0 {
v6 *= -1
}
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(v6))
case 1:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
case 2:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
ll := r.Intn(100)
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(ll))
for j := 0; j < ll; j++ {
dAtA = append(dAtA, byte(r.Intn(256)))
}
default:
dAtA = encodeVarintPopulateMerkle(dAtA, uint64(key))
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))
}
return dAtA
}
func encodeVarintPopulateMerkle(dAtA []byte, v uint64) []byte {
for v >= 1<<7 {
dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))
v >>= 7
}
dAtA = append(dAtA, uint8(v))
return dAtA
}
func (m *ProofOp) Size() (n int) {
var l int
_ = l
l = len(m.Type)
if l > 0 {
n += 1 + l + sovMerkle(uint64(l))
}
l = len(m.Key)
if l > 0 {
n += 1 + l + sovMerkle(uint64(l))
}
l = len(m.Data)
if l > 0 {
n += 1 + l + sovMerkle(uint64(l))
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func (m *Proof) Size() (n int) {
var l int
_ = l
if len(m.Ops) > 0 {
for _, e := range m.Ops {
l = e.Size()
n += 1 + l + sovMerkle(uint64(l))
}
}
if m.XXX_unrecognized != nil {
n += len(m.XXX_unrecognized)
}
return n
}
func sovMerkle(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozMerkle(x uint64) (n int) {
return sovMerkle(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *ProofOp) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ProofOp: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ProofOp: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Type = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...)
if m.Key == nil {
m.Key = []byte{}
}
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMerkle(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMerkle
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *Proof) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Proof: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Proof: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ops", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowMerkle
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthMerkle
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ops = append(m.Ops, ProofOp{})
if err := m.Ops[len(m.Ops)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipMerkle(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthMerkle
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipMerkle(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthMerkle
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowMerkle
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipMerkle(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthMerkle = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowMerkle = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("crypto/merkle/merkle.proto", fileDescriptor_merkle_5d3f6051907285da) }
var fileDescriptor_merkle_5d3f6051907285da = []byte{
// 200 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4a, 0x2e, 0xaa, 0x2c,
0x28, 0xc9, 0xd7, 0xcf, 0x4d, 0x2d, 0xca, 0xce, 0x49, 0x85, 0x52, 0x7a, 0x05, 0x45, 0xf9, 0x25,
0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e,
0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0,
0x2c, 0x88, 0x36, 0x25, 0x67, 0x2e, 0xf6, 0x80, 0xa2, 0xfc, 0xfc, 0x34, 0xff, 0x02, 0x21, 0x21,
0x2e, 0x96, 0x92, 0xca, 0x82, 0x54, 0x09, 0x46, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x30, 0x5b, 0x48,
0x80, 0x8b, 0x39, 0x3b, 0xb5, 0x52, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc4, 0x04, 0xa9,
0x4a, 0x49, 0x2c, 0x49, 0x94, 0x60, 0x06, 0x0b, 0x81, 0xd9, 0x4a, 0x06, 0x5c, 0xac, 0x60, 0x43,
0x84, 0xd4, 0xb9, 0x98, 0xf3, 0x0b, 0x8a, 0x25, 0x18, 0x15, 0x98, 0x35, 0xb8, 0x8d, 0xf8, 0xf5,
0xa0, 0x0e, 0x84, 0x5a, 0xe0, 0xc4, 0x72, 0xe2, 0x9e, 0x3c, 0x43, 0x10, 0x48, 0x85, 0x93, 0xc8,
0x8f, 0x87, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c,
0xe3, 0x83, 0x47, 0x72, 0x8c, 0x49, 0x6c, 0x60, 0x37, 0x19, 0x03, 0x02, 0x00, 0x00, 0xff, 0xff,
0xb9, 0x2b, 0x0f, 0xd1, 0xe8, 0x00, 0x00, 0x00,
}

View File

@ -0,0 +1,30 @@
syntax = "proto3";
package merkle;
// For more information on gogo.proto, see:
// https://github.com/gogo/protobuf/blob/master/extensions.md
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.marshaler_all) = true;
option (gogoproto.unmarshaler_all) = true;
option (gogoproto.sizer_all) = true;
option (gogoproto.populate_all) = true;
option (gogoproto.equal_all) = true;
//----------------------------------------
// Message types
// ProofOp defines an operation used for calculating the Merkle root.
// The data could be in arbitrary format, providing the necessary data,
// for example a neighbouring node hash.
message ProofOp {
string type = 1;
bytes key = 2;
bytes data = 3;
}
// Proof is a Merkle proof defined by the list of ProofOps
message Proof {
repeated ProofOp ops = 1 [(gogoproto.nullable)=false];
}
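For illustration, a chained proof spanning two trees might be assembled like this in Go; the `"iavl:v"` type string and the encoded proof bytes are placeholders (only `simple:v` is registered by default in this release):

```go
// A sketch: a two-layer proof, innermost op first. The "iavl:v" type
// string and the *ProofBytes values are placeholders here.
proof := &merkle.Proof{
	Ops: []merkle.ProofOp{
		{Type: "iavl:v", Key: []byte{0x01, 0x02, 0x03}, Data: iavlProofBytes},
		{Type: "simple:v", Key: []byte("App"), Data: simpleProofBytes},
	},
}
```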

134
crypto/merkle/proof.go Normal file
View File

@ -0,0 +1,134 @@
package merkle
import (
"bytes"
cmn "github.com/tendermint/tendermint/libs/common"
)
//----------------------------------------
// ProofOp gets converted to an instance of ProofOperator:
// ProofOperator is a layer for calculating intermediate Merkle roots
// when a series of Merkle trees are chained together.
// Run() takes leaf values from a tree and returns the Merkle
// root for the corresponding tree. It takes and returns a list of bytes
// to allow multiple leaves to be part of a single proof, for instance in a range proof.
// ProofOp() encodes the ProofOperator in a generic way so it can later be
// decoded with OpDecoder.
type ProofOperator interface {
Run([][]byte) ([][]byte, error)
GetKey() []byte
ProofOp() ProofOp
}
//----------------------------------------
// Operations on a list of ProofOperators
// ProofOperators is a slice of ProofOperator(s).
// Each operator will be applied to the input value sequentially
// and the last Merkle root will be verified against already-known data
type ProofOperators []ProofOperator
func (poz ProofOperators) VerifyValue(root []byte, keypath string, value []byte) (err error) {
return poz.Verify(root, keypath, [][]byte{value})
}
func (poz ProofOperators) Verify(root []byte, keypath string, args [][]byte) (err error) {
keys, err := KeyPathToKeys(keypath)
if err != nil {
return
}
for i, op := range poz {
key := op.GetKey()
if len(key) != 0 {
if !bytes.Equal(keys[0], key) {
return cmn.NewError("Key mismatch on operation #%d: expected %+v but %+v", i, []byte(keys[0]), []byte(key))
}
keys = keys[1:]
}
args, err = op.Run(args)
if err != nil {
return
}
}
if !bytes.Equal(root, args[0]) {
return cmn.NewError("Calculated root hash is invalid: expected %+v but %+v", root, args[0])
}
if len(keys) != 0 {
return cmn.NewError("Keypath not consumed all")
}
return nil
}
//----------------------------------------
// ProofRuntime - main entrypoint
type OpDecoder func(ProofOp) (ProofOperator, error)
type ProofRuntime struct {
decoders map[string]OpDecoder
}
func NewProofRuntime() *ProofRuntime {
return &ProofRuntime{
decoders: make(map[string]OpDecoder),
}
}
func (prt *ProofRuntime) RegisterOpDecoder(typ string, dec OpDecoder) {
_, ok := prt.decoders[typ]
if ok {
panic("already registered for type " + typ)
}
prt.decoders[typ] = dec
}
func (prt *ProofRuntime) Decode(pop ProofOp) (ProofOperator, error) {
decoder := prt.decoders[pop.Type]
if decoder == nil {
return nil, cmn.NewError("unrecognized proof type %v", pop.Type)
}
return decoder(pop)
}
func (prt *ProofRuntime) DecodeProof(proof *Proof) (ProofOperators, error) {
var poz ProofOperators
for _, pop := range proof.Ops {
operator, err := prt.Decode(pop)
if err != nil {
return nil, cmn.ErrorWrap(err, "decoding a proof operator")
}
poz = append(poz, operator)
}
return poz, nil
}
func (prt *ProofRuntime) VerifyValue(proof *Proof, root []byte, keypath string, value []byte) (err error) {
return prt.Verify(proof, root, keypath, [][]byte{value})
}
// TODO In the long run we'll need a method of classification of ops,
// whether existence or absence or perhaps a third?
func (prt *ProofRuntime) VerifyAbsence(proof *Proof, root []byte, keypath string) (err error) {
return prt.Verify(proof, root, keypath, nil)
}
func (prt *ProofRuntime) Verify(proof *Proof, root []byte, keypath string, args [][]byte) (err error) {
poz, err := prt.DecodeProof(proof)
if err != nil {
return cmn.ErrorWrap(err, "decoding proof")
}
return poz.Verify(root, keypath, args)
}
// DefaultProofRuntime only knows about Simple value
// proofs.
// To use e.g. IAVL proofs, register op-decoders as
// defined in the IAVL package.
func DefaultProofRuntime() (prt *ProofRuntime) {
prt = NewProofRuntime()
prt.RegisterOpDecoder(ProofOpSimpleValue, SimpleValueOpDecoder)
return
}
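As a rough usage sketch (the `proof`, `root`, and `value` variables are placeholders for data an application would pull from an ABCI query response):

```go
// A minimal sketch, assuming proof (*merkle.Proof), root (e.g. an
// AppHash), and value were obtained from an ABCI query elsewhere.
prt := merkle.DefaultProofRuntime()

// Build a single-layer keypath for the key being proven.
kp := merkle.KeyPath{}.AppendKey([]byte("mykey"), merkle.KeyEncodingURL)

// VerifyValue hashes the value and runs each ProofOp in sequence,
// checking the final Merkle root against the expected one.
if err := prt.VerifyValue(proof, root, kp.String(), value); err != nil {
	// the proof did not verify
}
```

Applications using other tree types would call `RegisterOpDecoder` with their own type string before verifying.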

View File

@ -0,0 +1,111 @@
package merkle
import (
"encoding/hex"
"fmt"
"net/url"
"strings"
cmn "github.com/tendermint/tendermint/libs/common"
)
/*
For generalized Merkle proofs, each layer of the proof may require an
optional key. The key may be encoded either by URL-encoding or
(upper-case) hex-encoding.
TODO: In the future, more encodings may be supported, like base32 (e.g.
/32:)
For example, for a Cosmos-SDK application where the first two proof layers
are SimpleValueOps, and the third proof layer is an IAVLValueOp, the keys
might look like:
0: []byte("App")
1: []byte("IBC")
2: []byte{0x01, 0x02, 0x03}
Assuming that we know that the first two layers are always ASCII texts, we
probably want to use URLEncoding for those, whereas the third layer will
require HEX encoding for efficient representation.
kp := new(KeyPath)
kp.AppendKey([]byte("App"), KeyEncodingURL)
kp.AppendKey([]byte("IBC"), KeyEncodingURL)
kp.AppendKey([]byte{0x01, 0x02, 0x03}, KeyEncodingHex)
kp.String() // Should return "/App/IBC/x:010203"
NOTE: Key paths must begin with a `/`.
NOTE: All encodings *MUST* work compatibly, such that you can choose to use
whatever encoding, and the decoded keys will always be the same. In other
words, it's just as good to encode all three keys using URL encoding or HEX
encoding... it just wouldn't be optimal in terms of readability or space
efficiency.
NOTE: Punycode will never be supported here, because not all values can be
decoded. For example, no string decodes to the string "xn--blah" in
Punycode.
*/
type keyEncoding int
const (
KeyEncodingURL keyEncoding = iota
KeyEncodingHex
KeyEncodingMax // Number of known encodings. Used for testing
)
type Key struct {
name []byte
enc keyEncoding
}
type KeyPath []Key
func (pth KeyPath) AppendKey(key []byte, enc keyEncoding) KeyPath {
return append(pth, Key{key, enc})
}
func (pth KeyPath) String() string {
res := ""
for _, key := range pth {
switch key.enc {
case KeyEncodingURL:
res += "/" + url.PathEscape(string(key.name))
case KeyEncodingHex:
res += "/x:" + fmt.Sprintf("%X", key.name)
default:
panic("unexpected key encoding type")
}
}
return res
}
// Decode a path to a list of keys. Path must begin with `/`.
// Each key must use a known encoding.
func KeyPathToKeys(path string) (keys [][]byte, err error) {
if path == "" || path[0] != '/' {
return nil, cmn.NewError("key path string must start with a forward slash '/'")
}
parts := strings.Split(path[1:], "/")
keys = make([][]byte, len(parts))
for i, part := range parts {
if strings.HasPrefix(part, "x:") {
hexPart := part[2:]
key, err := hex.DecodeString(hexPart)
if err != nil {
return nil, cmn.ErrorWrap(err, "decoding hex-encoded part #%d: /%s", i, part)
}
keys[i] = key
} else {
key, err := url.PathUnescape(part)
if err != nil {
return nil, cmn.ErrorWrap(err, "decoding url-encoded part #%d: /%s", i, part)
}
keys[i] = []byte(key) // TODO Test this with random bytes, I'm not sure that it works for arbitrary bytes...
}
}
return keys, nil
}
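As a small sketch of these rules from outside the package (the byte values are arbitrary), encoding and decoding round-trip like so:

```go
kp := merkle.KeyPath{}.
	AppendKey([]byte("App"), merkle.KeyEncodingURL).
	AppendKey([]byte{0x01, 0x02, 0x03}, merkle.KeyEncodingHex)

path := kp.String() // "/App/x:010203"

keys, err := merkle.KeyPathToKeys(path)
// err == nil; keys[0] == []byte("App"), keys[1] == []byte{0x01, 0x02, 0x03}
```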

View File

@ -0,0 +1,41 @@
package merkle
import (
"math/rand"
"testing"
"github.com/stretchr/testify/require"
)
func TestKeyPath(t *testing.T) {
var path KeyPath
keys := make([][]byte, 10)
alphanum := "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
for d := 0; d < 1e4; d++ {
path = nil
for i := range keys {
enc := keyEncoding(rand.Intn(int(KeyEncodingMax)))
keys[i] = make([]byte, rand.Uint32()%20)
switch enc {
case KeyEncodingURL:
for j := range keys[i] {
keys[i][j] = alphanum[rand.Intn(len(alphanum))]
}
case KeyEncodingHex:
rand.Read(keys[i])
default:
panic("Unexpected encoding")
}
path = path.AppendKey(keys[i], enc)
}
res, err := KeyPathToKeys(path.String())
require.Nil(t, err)
for i, key := range keys {
require.Equal(t, key, res[i])
}
}
}

View File

@ -0,0 +1,91 @@
package merkle
import (
"bytes"
"fmt"
"github.com/tendermint/tendermint/crypto/tmhash"
cmn "github.com/tendermint/tendermint/libs/common"
)
const ProofOpSimpleValue = "simple:v"
// SimpleValueOp takes a key and a single value as arguments and
// produces the root hash. The corresponding tree structure is
// the SimpleMap tree. SimpleMap takes a Hasher, and currently
// Tendermint uses aminoHasher. SimpleValueOp should support
// the hash function as used in aminoHasher. TODO support
// additional hash functions here as options/args to this
// operator.
//
// If the produced root hash matches the expected hash, the
// proof is good.
type SimpleValueOp struct {
// Encoded in ProofOp.Key.
key []byte
// To encode in ProofOp.Data
Proof *SimpleProof `json:"simple_proof"`
}
var _ ProofOperator = SimpleValueOp{}
func NewSimpleValueOp(key []byte, proof *SimpleProof) SimpleValueOp {
return SimpleValueOp{
key: key,
Proof: proof,
}
}
func SimpleValueOpDecoder(pop ProofOp) (ProofOperator, error) {
if pop.Type != ProofOpSimpleValue {
return nil, cmn.NewError("unexpected ProofOp.Type; got %v, want %v", pop.Type, ProofOpSimpleValue)
}
var op SimpleValueOp // a bit strange as we'll discard this, but it works.
err := cdc.UnmarshalBinary(pop.Data, &op)
if err != nil {
return nil, cmn.ErrorWrap(err, "decoding ProofOp.Data into SimpleValueOp")
}
return NewSimpleValueOp(pop.Key, op.Proof), nil
}
func (op SimpleValueOp) ProofOp() ProofOp {
bz := cdc.MustMarshalBinary(op)
return ProofOp{
Type: ProofOpSimpleValue,
Key: op.key,
Data: bz,
}
}
func (op SimpleValueOp) String() string {
return fmt.Sprintf("SimpleValueOp{%v}", op.GetKey())
}
func (op SimpleValueOp) Run(args [][]byte) ([][]byte, error) {
if len(args) != 1 {
return nil, cmn.NewError("expected 1 arg, got %v", len(args))
}
value := args[0]
hasher := tmhash.New()
hasher.Write(value) // does not error
vhash := hasher.Sum(nil)
// Wrap <op.Key, vhash> to hash the KVPair.
hasher = tmhash.New()
encodeByteSlice(hasher, []byte(op.key)) // does not error
encodeByteSlice(hasher, []byte(vhash)) // does not error
kvhash := hasher.Sum(nil)
if !bytes.Equal(kvhash, op.Proof.LeafHash) {
return nil, cmn.NewError("leaf hash mismatch: want %X got %X", op.Proof.LeafHash, kvhash)
}
return [][]byte{
op.Proof.ComputeRootHash(),
}, nil
}
func (op SimpleValueOp) GetKey() []byte {
return op.key
}
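A short sketch of the encode/decode round trip for this operator (`proof` is a placeholder `*SimpleProof` produced elsewhere, e.g. by `SimpleProofsFromHashers`):

```go
op := merkle.NewSimpleValueOp([]byte("mykey"), proof)

// Encode into the generic ProofOp carried inside a merkle.Proof.
pop := op.ProofOp() // pop.Type == merkle.ProofOpSimpleValue

// Decode it back, as a ProofRuntime would via the registered decoder.
decoded, err := merkle.SimpleValueOpDecoder(pop)
if err != nil {
	panic(err)
}
_ = decoded.GetKey() // []byte("mykey")
```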

View File

@ -2,11 +2,23 @@ package merkle

 import (
 	"bytes"
+	"errors"
 	"fmt"
+
+	cmn "github.com/tendermint/tendermint/libs/common"
 )

-// SimpleProof represents a simple merkle proof.
+// SimpleProof represents a simple Merkle proof.
+// NOTE: The convention for proofs is to include leaf hashes but to
+// exclude the root hash.
+// This convention is implemented across IAVL range proofs as well.
+// Keep this consistent unless there's a very good reason to change
+// everything. This also affects the generalized proof system as
+// well.
 type SimpleProof struct {
+	Total    int      `json:"total"`     // Total number of items.
+	Index    int      `json:"index"`     // Index of item to prove.
+	LeafHash []byte   `json:"leaf_hash"` // Hash of item value.
 	Aunts    [][]byte `json:"aunts"`     // Hashes from leaf's sibling to a root's child.
 }

@ -18,6 +30,9 @@ func SimpleProofsFromHashers(items []Hasher) (rootHash []byte, proofs []*SimpleP
 	proofs = make([]*SimpleProof, len(items))
 	for i, trail := range trails {
 		proofs[i] = &SimpleProof{
+			Total:    len(items),
+			Index:    i,
+			LeafHash: trail.Hash,
 			Aunts:    trail.FlattenAunts(),
 		}
 	}

@ -49,11 +64,33 @@ func SimpleProofsFromMap(m map[string]Hasher) (rootHash []byte, proofs map[strin
 	return
 }

-// Verify that leafHash is a leaf hash of the simple-merkle-tree
-// which hashes to rootHash.
-func (sp *SimpleProof) Verify(index int, total int, leafHash []byte, rootHash []byte) bool {
-	computedHash := computeHashFromAunts(index, total, leafHash, sp.Aunts)
-	return computedHash != nil && bytes.Equal(computedHash, rootHash)
+// Verify that the SimpleProof proves the root hash.
+// Check sp.Index/sp.Total manually if needed
+func (sp *SimpleProof) Verify(rootHash []byte, leafHash []byte) error {
+	if sp.Total < 0 {
+		return errors.New("Proof total must be positive")
+	}
+	if sp.Index < 0 {
+		return errors.New("Proof index cannot be negative")
+	}
+	if !bytes.Equal(sp.LeafHash, leafHash) {
+		return cmn.NewError("invalid leaf hash: wanted %X got %X", leafHash, sp.LeafHash)
+	}
+	computedHash := sp.ComputeRootHash()
+	if !bytes.Equal(computedHash, rootHash) {
+		return cmn.NewError("invalid root hash: wanted %X got %X", rootHash, computedHash)
+	}
+	return nil
+}
+
+// Compute the root hash given a leaf hash. Does not verify the result.
+func (sp *SimpleProof) ComputeRootHash() []byte {
+	return computeHashFromAunts(
+		sp.Index,
+		sp.Total,
+		sp.LeafHash,
+		sp.Aunts,
+	)
 }

 // String implements the stringer interface for SimpleProof.

View File

@ -1,13 +1,13 @@
 package merkle

 import (
-	"bytes"
+	"testing"
+
+	"github.com/stretchr/testify/require"

 	cmn "github.com/tendermint/tendermint/libs/common"
 	. "github.com/tendermint/tendermint/libs/test"
-	"testing"

 	"github.com/tendermint/tendermint/crypto/tmhash"
 )

@ -30,60 +30,43 @@ func TestSimpleProof(t *testing.T) {
 	rootHash2, proofs := SimpleProofsFromHashers(items)

-	if !bytes.Equal(rootHash, rootHash2) {
-		t.Errorf("Unmatched root hashes: %X vs %X", rootHash, rootHash2)
-	}
+	require.Equal(t, rootHash, rootHash2, "Unmatched root hashes: %X vs %X", rootHash, rootHash2)

 	// For each item, check the trail.
 	for i, item := range items {
 		itemHash := item.Hash()
 		proof := proofs[i]

-		// Verify success
-		ok := proof.Verify(i, total, itemHash, rootHash)
-		if !ok {
-			t.Errorf("Verification failed for index %v.", i)
-		}
+		// Check total/index
+		require.Equal(t, proof.Index, i, "Unmatched indices: %d vs %d", proof.Index, i)
+		require.Equal(t, proof.Total, total, "Unmatched totals: %d vs %d", proof.Total, total)

-		// Wrong item index should make it fail
-		{
-			ok = proof.Verify((i+1)%total, total, itemHash, rootHash)
-			if ok {
-				t.Errorf("Expected verification to fail for wrong index %v.", i)
-			}
-		}
+		// Verify success
+		err := proof.Verify(rootHash, itemHash)
+		require.NoError(t, err, "Verification failed: %v.", err)

 		// Trail too long should make it fail
 		origAunts := proof.Aunts
 		proof.Aunts = append(proof.Aunts, cmn.RandBytes(32))
-		{
-			ok = proof.Verify(i, total, itemHash, rootHash)
-			if ok {
-				t.Errorf("Expected verification to fail for wrong trail length.")
-			}
-		}
+		err = proof.Verify(rootHash, itemHash)
+		require.Error(t, err, "Expected verification to fail for wrong trail length")
 		proof.Aunts = origAunts

 		// Trail too short should make it fail
 		proof.Aunts = proof.Aunts[0 : len(proof.Aunts)-1]
-		{
-			ok = proof.Verify(i, total, itemHash, rootHash)
-			if ok {
-				t.Errorf("Expected verification to fail for wrong trail length.")
-			}
-		}
+		err = proof.Verify(rootHash, itemHash)
+		require.Error(t, err, "Expected verification to fail for wrong trail length")
 		proof.Aunts = origAunts

 		// Mutating the itemHash should make it fail.
-		ok = proof.Verify(i, total, MutateByteSlice(itemHash), rootHash)
-		if ok {
-			t.Errorf("Expected verification to fail for mutated leaf hash")
-		}
+		err = proof.Verify(rootHash, MutateByteSlice(itemHash))
+		require.Error(t, err, "Expected verification to fail for mutated leaf hash")

 		// Mutating the rootHash should make it fail.
-		ok = proof.Verify(i, total, itemHash, MutateByteSlice(rootHash))
-		if ok {
-			t.Errorf("Expected verification to fail for mutated root hash")
-		}
+		err = proof.Verify(MutateByteSlice(rootHash), itemHash)
+		require.Error(t, err, "Expected verification to fail for mutated root hash")
 	}
 }

12
crypto/merkle/wire.go Normal file
View File

@ -0,0 +1,12 @@
package merkle
import (
"github.com/tendermint/go-amino"
)
var cdc *amino.Codec
func init() {
cdc = amino.NewCodec()
cdc.Seal()
}

View File

@ -11,12 +11,12 @@ module.exports = {
     nav: [{ text: "Back to Tendermint", link: "https://tendermint.com" }],
     sidebar: [
       {
-        title: "Getting Started",
+        title: "Introduction",
         collapsable: false,
         children: [
           "/introduction/quick-start",
           "/introduction/install",
-          "/introduction/introduction"
+          "/introduction/what-is-tendermint"
         ]
       },
       {
@ -48,7 +48,7 @@ module.exports = {
         title: "Networks",
         collapsable: false,
         children: [
-          "/networks/deploy-testnets",
+          "/networks/docker-compose",
           "/networks/terraform-and-ansible",
         ]
       },

View File

@ -20,7 +20,8 @@ a private website repository has make targets consumed by a standard Jenkins tas

 ## README

 The [README.md](./README.md) is also the landing page for the documentation
-on the website.
+on the website. During the Jenkins build, the current commit is added to the bottom
+of the README.

 ## Config.js

@ -34,6 +35,8 @@ of the sidebar.

 **NOTE:** Strongly consider the existing links - both within this directory
 and to the website docs - when moving or deleting files.

+Links to directories *MUST* end in a `/`.
+
 Relative links should be used nearly everywhere, having discovered and weighed the following:

 ### Relative

View File

@ -1,41 +1,29 @@
 # Tendermint

-Welcome to the Tendermint Core documentation! Below you'll find an
-overview of the documentation.
-
-Tendermint Core is Byzantine Fault Tolerant (BFT) middleware that takes a state
-transition machine - written in any programming language - and securely
-replicates it on many machines. In other words, a blockchain.
-
-Tendermint requires an application running over the Application Blockchain
-Interface (ABCI) - and comes packaged with an example application to do so.
-
-## Getting Started
-
-Here you'll find quick start guides and links to more advanced "get up and running"
-documentation.
-
-## Core
-
-Details about the core functionality and configuration of Tendermint.
-
-## Tools
-
-Benchmarking and monitoring tools.
-
-## Networks
-
-Setting up testnets manually or automated, local or in the cloud.
-
-## Apps
-
-Building appplications with the ABCI.
-
-## Specification
-
-Dive deep into the spec. There's one for each Tendermint and the ABCI
-
-## Edit the Documentation
-
-See [this file](./DOCS_README.md) for details of the build process and
+Welcome to the Tendermint Core documentation!
+
+Tendermint Core is a blockchain application platform; it provides the equivalent
+of a web-server, database, and supporting libraries for blockchain applications
+written in any programming language. Like a web-server serving web applications,
+Tendermint serves blockchain applications.
+
+More formally, Tendermint Core performs Byzantine Fault Tolerant (BFT)
+State Machine Replication (SMR) for arbitrary deterministic, finite state machines.
+For more background, see [What is
+Tendermint?](introduction/what-is-tendermint.md).
+
+To get started quickly with an example application, see the [quick start guide](introduction/quick-start.md).
+
+To learn about application development on Tendermint, see the [Application Blockchain Interface](spec/abci/).
+
+For more details on using Tendermint, see the respective documentation for
+[Tendermint Core](tendermint-core/), [benchmarking and monitoring](tools/), and [network deployments](networks/).
+
+## Contribute
+
+To contribute to the documentation, see [this file](./DOCS_README.md) for details of the build process and
 considerations when making changes.
+
+## Version
+
+This documentation is built from the following commit:

View File

@ -431,9 +431,9 @@ Note: these query formats are subject to change!

 In go:

 ```
 func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery types.ResponseQuery) {
 	if reqQuery.Prove {
-		value, proof, exists := app.state.Proof(reqQuery.Data)
+		value, proof, exists := app.state.GetWithProof(reqQuery.Data)
 		resQuery.Index = -1 // TODO make Proof return index
 		resQuery.Key = reqQuery.Data
 		resQuery.Value = value
@ -455,28 +455,44 @@ func (app *KVStoreApplication) Query(reqQuery types.RequestQuery) (resQuery type
 		}
 		return
 	}
+	}
+	return
+} else {
+	index, value, exists := app.state.Get(reqQuery.Data)
+	resQuery.Index = int64(index)
+	resQuery.Value = value
+	if exists {
+		resQuery.Log = "exists"
+	} else {
+		resQuery.Log = "does not exist"
+	}
+	return
+}
 }
 ```

 In Java:

 ```
 ResponseQuery requestQuery(RequestQuery req) {
 	final boolean isProveQuery = req.getProve();
 	final ResponseQuery.Builder responseBuilder = ResponseQuery.newBuilder();
-	if (isProveQuery) {
-		com.app.example.ProofResult proofResult = generateProof(req.getData().toByteArray());
-		final byte[] proofAsByteArray = proofResult.getAsByteArray();
-		responseBuilder.setProof(ByteString.copyFrom(proofAsByteArray));
-		responseBuilder.setKey(req.getData());
-		responseBuilder.setValue(ByteString.copyFrom(proofResult.getData()));
-		responseBuilder.setLog(result.getLogValue());
-	} else {
 	byte[] queryData = req.getData().toByteArray();
-	final com.app.example.QueryResult result = generateQueryResult(queryData);
+
+	if (isProveQuery) {
+		com.app.example.QueryResultWithProof result = generateQueryResultWithProof(queryData);
+		responseBuilder.setIndex(result.getLeftIndex());
+		responseBuilder.setKey(req.getData());
+		responseBuilder.setValue(result.getValueOrNull(0));
+		responseBuilder.setHeight(result.getHeight());
+		responseBuilder.setProof(result.getProof());
+		responseBuilder.setLog(result.getLogValue());
+	} else {
+		com.app.example.QueryResult result = generateQueryResult(queryData);
+		responseBuilder.setIndex(result.getIndex());
+		responseBuilder.setValue(result.getValue());
+		responseBuilder.setLog(result.getLogValue());
+	}
 	responseBuilder.setIndex(result.getIndex());
 	responseBuilder.setValue(ByteString.copyFrom(result.getValue()));

View File

@ -0,0 +1,15 @@
# Introduction
## Quick Start
Get Tendermint up-and-running quickly with the [quick-start guide](quick-start.md)!
## Install
Detailed [installation instructions](install.md).
## What is Tendermint?
Dive into [what Tendermint is and why](what-is-tendermint.md)!

View File

@ -1,5 +1,7 @@
 # What is Tendermint?

+DEPRECATED! See [What is Tendermint?](what-is-tendermint.md).
+
 Tendermint is software for securely and consistently replicating an
 application on many machines. By securely, we mean that Tendermint works
 even if up to 1/3 of machines fail in arbitrary ways. By consistently,

View File

@ -1,4 +1,4 @@
-# Tendermint
+# Quick Start

 ## Overview

@ -9,45 +9,21 @@ works and want to get started right away, continue.

 ### Quick Install

-On a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/fFfOR), like so:
+To quickly get Tendermint installed on a fresh
+Ubuntu 16.04 machine, use [this script](https://git.io/fFfOR).
+
+WARNING: do not run this on your local machine.

 ```
 curl -L https://git.io/fFfOR | bash
 source ~/.profile
 ```

-WARNING: do not run the above on your local machine.
-
 The script is also used to facilitate cluster deployment below.

 ### Manual Install

-Requires:
-
-- `go` minimum version 1.10
-- `$GOPATH` environment variable must be set
-- `$GOPATH/bin` must be on your `$PATH` (see [here](https://github.com/tendermint/tendermint/wiki/Setting-GOPATH))
-
-To install Tendermint, run:
-
-```
-go get github.com/tendermint/tendermint
-cd $GOPATH/src/github.com/tendermint/tendermint
-make get_tools && make get_vendor_deps
-make install
-```
-
-Note that `go get` may return an error but it can be ignored.
-
-Confirm installation:
-
-```
-$ tendermint version
-0.23.0
-```
-
-Note: see the [releases page](https://github.com/tendermint/tendermint/releases) and the latest version
-should match what you see above.
+For manual installation, see the [install instructions](install.md)

 ## Initialization

View File

@ -0,0 +1,332 @@
# What is Tendermint?
Tendermint is software for securely and consistently replicating an
application on many machines. By securely, we mean that Tendermint works
even if up to 1/3 of machines fail in arbitrary ways. By consistently,
we mean that every non-faulty machine sees the same transaction log and
computes the same state. Secure and consistent replication is a
fundamental problem in distributed systems; it plays a critical role in
the fault tolerance of a broad range of applications, from currencies,
to elections, to infrastructure orchestration, and beyond.
The ability to tolerate machines failing in arbitrary ways, including
becoming malicious, is known as Byzantine fault tolerance (BFT). The
theory of BFT is decades old, but software implementations have only
became popular recently, due largely to the success of "blockchain
technology" like Bitcoin and Ethereum. Blockchain technology is just a
reformalization of BFT in a more modern setting, with emphasis on
peer-to-peer networking and cryptographic authentication. The name
derives from the way transactions are batched in blocks, where each
block contains a cryptographic hash of the previous one, forming a
chain. In practice, the blockchain data structure actually optimizes BFT
design.
Tendermint consists of two chief technical components: a blockchain
consensus engine and a generic application interface. The consensus
engine, called Tendermint Core, ensures that the same transactions are
recorded on every machine in the same order. The application interface,
called the Application BlockChain Interface (ABCI), enables the
transactions to be processed in any programming language. Unlike other
blockchain and consensus solutions, which come pre-packaged with built-in
state machines (like a fancy key-value store, or a quirky scripting
language), developers can use Tendermint for BFT state machine
replication of applications written in whatever programming language and
development environment is right for them.
Tendermint is designed to be easy-to-use, simple-to-understand, highly
performant, and useful for a wide variety of distributed applications.
## Tendermint vs. X
Tendermint is broadly similar to two classes of software. The first
class consists of distributed key-value stores, like Zookeeper, etcd,
and consul, which use non-BFT consensus. The second class is known as
"blockchain technology", and consists of both cryptocurrencies like
Bitcoin and Ethereum, and alternative distributed ledger designs like
Hyperledger's Burrow.
### Zookeeper, etcd, consul
Zookeeper, etcd, and consul are all implementations of a key-value store
atop a classical, non-BFT consensus algorithm. Zookeeper uses a version
of Paxos called Zookeeper Atomic Broadcast, while etcd and consul use
the Raft consensus algorithm, which is much younger and simpler. A
typical cluster contains 3-5 machines, and can tolerate crash failures
in up to 1/2 of the machines, but even a single Byzantine fault can
destroy the system.
Each offering provides a slightly different implementation of a
featureful key-value store, but all are generally focused around
providing basic services to distributed systems, such as dynamic
configuration, service discovery, locking, leader-election, and so on.
Tendermint is in essence similar software, but with two key differences:

- It is Byzantine Fault Tolerant, meaning it can only tolerate up to 1/3
  of failures, but those failures can include arbitrary behaviour -
  including hacking and malicious attacks.
- It does not specify a particular application, like a fancy key-value
  store. Instead, it focuses on arbitrary state machine replication, so
  developers can build the application logic that's right for them, from
  key-value store to cryptocurrency to e-voting platform and beyond.
The layout of this Tendermint website content is also ripped directly
and without shame from [consul.io](https://www.consul.io/) and the other
[Hashicorp sites](https://www.hashicorp.com/#tools).
### Bitcoin, Ethereum, etc.
Tendermint emerged in the tradition of cryptocurrencies like Bitcoin,
Ethereum, etc. with the goal of providing a more efficient and secure
consensus algorithm than Bitcoin's Proof of Work. In the early days,
Tendermint had a simple currency built in, and to participate in
consensus, users had to "bond" units of the currency into a security
deposit which could be revoked if they misbehaved - this is what made
Tendermint a Proof-of-Stake algorithm.
Since then, Tendermint has evolved to be a general purpose blockchain
consensus engine that can host arbitrary application states. That means
it can be used as a plug-and-play replacement for the consensus engines
of other blockchain software. So one can take the current Ethereum code
base, whether in Rust, or Go, or Haskell, and run it as an ABCI
application using Tendermint consensus. Indeed, [we did that with
Ethereum](https://github.com/cosmos/ethermint). And we plan to do
the same for Bitcoin, ZCash, and various other deterministic
applications as well.
Another example of a cryptocurrency application built on Tendermint is
[the Cosmos network](http://cosmos.network).
### Other Blockchain Projects
[Fabric](https://github.com/hyperledger/fabric) takes a similar approach
to Tendermint, but is more opinionated about how the state is managed,
and requires that all application behaviour runs in potentially many
docker containers, modules it calls "chaincode". It uses an
implementation of [PBFT](http://pmg.csail.mit.edu/papers/osdi99.pdf)
from a team at IBM that is [augmented to handle potentially
non-deterministic
chaincode](https://www.zurich.ibm.com/~cca/papers/sieve.pdf). It is
possible to implement this docker-based behaviour as an ABCI app in
Tendermint, though extending Tendermint to handle non-determinism
remains for future work.
[Burrow](https://github.com/hyperledger/burrow) is an implementation of
the Ethereum Virtual Machine and Ethereum transaction mechanics, with
additional features for a name-registry, permissions, and native
contracts, and an alternative blockchain API. It uses Tendermint as its
consensus engine, and provides a particular application state.
## ABCI Overview
The [Application BlockChain Interface
(ABCI)](https://github.com/tendermint/tendermint/tree/develop/abci)
allows for Byzantine Fault Tolerant replication of applications
written in any programming language.
### Motivation
Thus far, all blockchain "stacks" (such as
[Bitcoin](https://github.com/bitcoin/bitcoin)) have had a monolithic
design. That is, each blockchain stack is a single program that handles
all the concerns of a decentralized ledger; this includes P2P
connectivity, the "mempool" broadcasting of transactions, consensus on
the most recent block, account balances, Turing-complete contracts,
user-level permissions, etc.
Using a monolithic architecture is typically bad practice in computer
science. It makes it difficult to reuse components of the code, and
attempts to do so result in complex maintenance procedures for forks of
the codebase. This is especially true when the codebase is not modular
in design and suffers from "spaghetti code".
Another problem with monolithic design is that it limits you to the
language of the blockchain stack (or vice versa). In the case of
Ethereum which supports a Turing-complete bytecode virtual-machine, it
limits you to languages that compile down to that bytecode; today, those
are Serpent and Solidity.
In contrast, our approach is to decouple the consensus engine and P2P
layers from the details of the application state of the particular
blockchain application. We do this by abstracting away the details of
the application to an interface, which is implemented as a socket
protocol.
Thus we have an interface, the Application BlockChain Interface (ABCI),
and its primary implementation, the Tendermint Socket Protocol (TSP, or
Teaspoon).
### Intro to ABCI
[Tendermint Core](https://github.com/tendermint/tendermint) (the
"consensus engine") communicates with the application via a socket
protocol that satisfies the ABCI.
To draw an analogy, let's talk about a well-known cryptocurrency,
Bitcoin. Bitcoin is a cryptocurrency blockchain where each node
maintains a fully audited Unspent Transaction Output (UTXO) database. If
one wanted to create a Bitcoin-like system on top of ABCI, Tendermint
Core would be responsible for
- Sharing blocks and transactions between nodes
- Establishing a canonical/immutable order of transactions
(the blockchain)
The application will be responsible for
- Maintaining the UTXO database
- Validating cryptographic signatures of transactions
- Preventing transactions from spending non-existent transactions
- Allowing clients to query the UTXO database.
Tendermint is able to decompose the blockchain design by offering a very
simple API (ie. the ABCI) between the application process and consensus
process.
The ABCI consists of 3 primary message types that get delivered from the
core to the application. The application replies with corresponding
response messages.
The messages are specified here: [ABCI Message
Types](https://github.com/tendermint/tendermint/blob/develop/abci/README.md#message-types).
The **DeliverTx** message is the workhorse of the application. Each
transaction in the blockchain is delivered with this message. The
application needs to validate each transaction received with the
**DeliverTx** message against the current state, application protocol,
and the cryptographic credentials of the transaction. A validated
transaction then needs to update the application state — by binding a
value into a key-value store, or by updating the UTXO database, for
instance.
The **CheckTx** message is similar to **DeliverTx**, but it's only for
validating transactions. Tendermint Core's mempool first checks the
validity of a transaction with **CheckTx**, and only relays valid
transactions to its peers. For instance, an application may check an
incrementing sequence number in the transaction and return an error upon
**CheckTx** if the sequence number is old. Alternatively, they might use
a capabilities based system that requires capabilities to be renewed
with every transaction.
The **Commit** message is used to compute a cryptographic commitment to
the current application state, to be placed into the next block header.
This has some handy properties. Inconsistencies in updating that state
will now appear as blockchain forks which catches a whole class of
programming errors. This also simplifies the development of secure
lightweight clients, as Merkle-hash proofs can be verified by checking
against the block hash, and that the block hash is signed by a quorum.
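To make these three message types concrete, here is a minimal sketch of a counter-style application in Go. It assumes the `abci/types` and `abci/server` packages from this repository, and is a deliberately simplified illustration rather than the counter app that ships with Tendermint:

```go
package main

import (
	"encoding/binary"

	"github.com/tendermint/tendermint/abci/server"
	"github.com/tendermint/tendermint/abci/types"
	cmn "github.com/tendermint/tendermint/libs/common"
)

// CounterApplication only accepts 8-byte, strictly increasing integers.
type CounterApplication struct {
	types.BaseApplication
	lastTx uint64
}

// CheckTx validates mempool transactions without mutating state.
func (app *CounterApplication) CheckTx(tx []byte) types.ResponseCheckTx {
	if len(tx) != 8 || binary.BigEndian.Uint64(tx) <= app.lastTx {
		return types.ResponseCheckTx{Code: 1, Log: "tx must be an 8-byte increasing integer"}
	}
	return types.ResponseCheckTx{Code: 0}
}

// DeliverTx applies a committed transaction to the application state.
func (app *CounterApplication) DeliverTx(tx []byte) types.ResponseDeliverTx {
	if len(tx) != 8 || binary.BigEndian.Uint64(tx) <= app.lastTx {
		return types.ResponseDeliverTx{Code: 1, Log: "invalid tx"}
	}
	app.lastTx = binary.BigEndian.Uint64(tx)
	return types.ResponseDeliverTx{Code: 0}
}

// Commit returns a deterministic commitment to the current state,
// which ends up in the next block header.
func (app *CounterApplication) Commit() types.ResponseCommit {
	state := make([]byte, 8)
	binary.BigEndian.PutUint64(state, app.lastTx)
	return types.ResponseCommit{Data: state}
}

func main() {
	srv, err := server.NewServer("tcp://0.0.0.0:26658", "socket", &CounterApplication{})
	if err != nil {
		panic(err)
	}
	if err := srv.Start(); err != nil {
		panic(err)
	}
	cmn.TrapSignal(func() { srv.Stop() })
}
```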
There can be multiple ABCI socket connections to an application.
Tendermint Core creates three ABCI connections to the application; one
for the validation of transactions when broadcasting in the mempool, one
for the consensus engine to run block proposals, and one more for
querying the application state.
It's probably evident that application designers need to design their
message handlers very carefully to create a blockchain that does anything
useful, but this architecture provides a place to start. The diagram
below illustrates the flow of messages via ABCI.
![](../imgs/abci.png)
## A Note on Determinism
The logic for blockchain transaction processing must be deterministic.
If the application logic weren't deterministic, consensus would not be
reached among the Tendermint Core replica nodes.
Solidity on Ethereum is a great language of choice for blockchain
applications because, among other reasons, it is a completely
deterministic programming language. However, it's also possible to
create deterministic applications using existing popular languages like
Java, C++, Python, or Go. Game programmers and blockchain developers are
already familiar with creating deterministic programs by avoiding
sources of non-determinism such as:
- random number generators (without deterministic seeding)
- race conditions on threads (or avoiding threads altogether)
- the system clock
- uninitialized memory (in unsafe programming languages like C
or C++)
- [floating point
arithmetic](http://gafferongames.com/networking-for-game-programmers/floating-point-determinism/)
- language features that are random (e.g. map iteration in Go)
While programmers can avoid non-determinism by being careful, it is also
possible to create a special linter or static analyzer for each language
to check for determinism. In the future we may work with partners to
create such tools.
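To illustrate the map-iteration point with a sketch in Go (the names here are illustrative): hashing state by ranging over a map is non-deterministic, while sorting the keys first restores determinism.

```go
package main

import (
	"crypto/sha256"
	"encoding/binary"
	"sort"
)

// Non-deterministic: Go randomizes map iteration order, so two replicas
// hashing the same balances this way may compute different digests.
func stateHashBad(balances map[string]uint64) []byte {
	h := sha256.New()
	for addr, bal := range balances {
		h.Write([]byte(addr))
		binary.Write(h, binary.BigEndian, bal)
	}
	return h.Sum(nil)
}

// Deterministic: iterate in sorted key order so every replica computes
// the same digest for the same state.
func stateHashGood(balances map[string]uint64) []byte {
	addrs := make([]string, 0, len(balances))
	for addr := range balances {
		addrs = append(addrs, addr)
	}
	sort.Strings(addrs)
	h := sha256.New()
	for _, addr := range addrs {
		h.Write([]byte(addr))
		binary.Write(h, binary.BigEndian, balances[addr])
	}
	return h.Sum(nil)
}
```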
## Consensus Overview
Tendermint is an easy-to-understand, mostly asynchronous, BFT consensus
protocol. The protocol follows a simple state machine that looks like
this:
![](../imgs/consensus_logic.png)
Participants in the protocol are called **validators**; they take turns
proposing blocks of transactions and voting on them. Blocks are
committed in a chain, with one block at each **height**. A block may
fail to be committed, in which case the protocol moves to the next
**round**, and a new validator gets to propose a block for that height.
Two stages of voting are required to successfully commit a block; we
call them **pre-vote** and **pre-commit**. A block is committed when
more than 2/3 of validators pre-commit for the same block in the same
round.
There is a picture of a couple doing the polka because validators are
doing something like a polka dance. When more than two-thirds of the
validators pre-vote for the same block, we call that a **polka**. Every
pre-commit must be justified by a polka in the same round.
Validators may fail to commit a block for a number of reasons; the
current proposer may be offline, or the network may be slow. Tendermint
allows them to establish that a validator should be skipped. Validators
wait a small amount of time to receive a complete proposal block from
the proposer before voting to move to the next round. This reliance on a
timeout is what makes Tendermint a weakly synchronous protocol, rather
than an asynchronous one. However, the rest of the protocol is
asynchronous, and validators only make progress after hearing from more
than two-thirds of the validator set. A simplifying element of
Tendermint is that it uses the same mechanism to commit a block as it
does to skip to the next round.
Assuming less than one-third of the validators are Byzantine, Tendermint
guarantees that safety will never be violated - that is, validators will
never commit conflicting blocks at the same height. To do this it
introduces a few **locking** rules which modulate which paths can be
followed in the flow diagram. Once a validator precommits a block, it is
locked on that block. Then,
1. it must prevote for the block it is locked on
2. it can only unlock, and precommit for a new block, if there is a
polka for that block in a later round
## Stake
In many systems, not all validators will have the same "weight" in the
consensus protocol. Thus, we are not so much interested in one-third or
two-thirds of the validators, but in those proportions of the total
voting power, which may not be uniformly distributed across individual
validators.
Since Tendermint can replicate arbitrary applications, it is possible to
define a currency, and denominate the voting power in that currency.
When voting power is denominated in a native currency, the system is
often referred to as Proof-of-Stake. Validators can be forced, by logic
in the application, to "bond" their currency holdings in a security
deposit that can be destroyed if they're found to misbehave in the
consensus protocol. This adds an economic element to the security of the
protocol, allowing one to quantify the cost of violating the assumption
that less than one-third of voting power is Byzantine.
The [Cosmos Network](https://cosmos.network) is designed to use this
Proof-of-Stake mechanism across an array of cryptocurrencies implemented
as ABCI applications.
The following diagram is Tendermint in a (technical) nutshell. [See here
for high resolution
version](https://github.com/mobfoundry/hackatom/blob/master/tminfo.pdf).
![](../imgs/tm-transaction-flow.png)

9
docs/networks/README.md Normal file
View File

@ -0,0 +1,9 @@
# Networks
Use [Docker Compose](docker-compose.md) to spin up Tendermint testnets on your
local machine.
Use [Terraform and Ansible](terraform-and-ansible.md) to deploy Tendermint
testnets to the cloud.
See the `tendermint testnet --help` command for more help initializing testnets.

View File

@ -1,8 +1,8 @@
 # Deploy a Testnet

-Now that we've seen how ABCI works, and even played with a few
-applications on a single validator node, it's time to deploy a test
-network to four validator nodes.
+DEPRECATED DOCS!
+
+See [Networks](../networks).

 ## Manual Deployments

@ -21,17 +21,16 @@ Here are the steps to setting up a testnet manually:
 3. Generate a private key and a node key for each validator using
    `tendermint init`
 4. Compile a list of public keys for each validator into a
-   `genesis.json` file and replace the existing file with it.
-5. Run
-   `tendermint node --proxy_app=kvstore --p2p.persistent_peers=< peer addresses >` on each node, where `< peer addresses >` is a comma separated
-   list of the ID@IP:PORT combination for each node. The default port for
-   Tendermint is `26656`. The ID of a node can be obtained by running
-   `tendermint show_node_id` command. Thus, if the IP addresses of your nodes
-   were `192.168.0.1, 192.168.0.2, 192.168.0.3, 192.168.0.4`, the command
-   would look like:
+   new `genesis.json` file and replace the existing file with it.
+5. Get the node IDs of any peers you want other peers to connect to by
+   running `tendermint show_node_id` on the relevant machine
+6. Set the `p2p.persistent_peers` in the config for all nodes to the comma
+   separated list of `ID@IP:PORT` for all nodes. Default port is 26656.
+
+Then start the node

 ```
-tendermint node --proxy_app=kvstore --p2p.persistent_peers=96663a3dd0d7b9d17d4c8211b191af259621c693@192.168.0.1:26656, 429fcf25974313b95673f58d77eacdd434402665@192.168.0.2:26656, 0491d373a8e0fcf1023aaf18c51d6a1d0d4f31bd@192.168.0.3:26656, f9baeaa15fedf5e1ef7448dd60f46c01f1a9e9c4@192.168.0.4:26656
+tendermint node --proxy_app=kvstore
 ```

 After a few seconds, all the nodes should connect to each other and

View File

@ -0,0 +1,85 @@
# Docker Compose
With Docker Compose, we can spin up local testnets in a single command:
```
make localnet-start
```
## Requirements
- [Install tendermint](/docs/install.md)
- [Install docker](https://docs.docker.com/engine/installation/)
- [Install docker-compose](https://docs.docker.com/compose/install/)
## Build
Build the `tendermint` binary and the `tendermint/localnode` docker image.
Note the binary will be mounted into the container so it can be updated without
rebuilding the image.
```
cd $GOPATH/src/github.com/tendermint/tendermint
# Build the linux binary in ./build
make build-linux
# Build tendermint/localnode image
make build-docker-localnode
```
## Run a testnet
To start a 4 node testnet run:
```
make localnet-start
```
The nodes bind their RPC servers to ports 26657, 26660, 26662, and 26664 on the host.
This file creates a 4-node network using the localnode image.
The nodes of the network expose their P2P and RPC endpoints to the host machine on ports 26656-26657, 26659-26660, 26661-26662, and 26663-26664 respectively.
To update the binary, just rebuild it and restart the nodes:
```
make build-linux
make localnet-stop
make localnet-start
```
## Configuration
The `make localnet-start` command creates files for a 4-node testnet in `./build` by calling the `tendermint testnet` command.
The `./build` directory is mounted to the `/tendermint` mount point to attach the binary and config files to the container.
For instance, to create a single node testnet:
```
cd $GOPATH/src/github.com/tendermint/tendermint
# Clear the build folder
rm -rf ./build
# Build binary
make build-linux
# Create configuration
docker run -e LOG="stdout" -v `pwd`/build:/tendermint tendermint/localnode testnet --o . --v 1
# Run the node
docker run -v `pwd`/build:/tendermint tendermint/localnode
```
## Logging
The log is saved under the attached volume, in the `tendermint.log` file. If the `LOG` environment variable is set to `stdout` at start, the log is not saved, but printed on the screen.
## Special binaries
If you have multiple binaries with different names, you can specify which one to run with the `BINARY` environment variable. The path of the binary is relative to the attached volume.
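For example (the binary name here is hypothetical):

```
docker run -e BINARY=my-tendermint-fork -v `pwd`/build:/tendermint tendermint/localnode
```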

View File

@ -29,7 +29,7 @@ export SSH_KEY_FILE="$HOME/.ssh/id_rsa.pub"

 These will be used by both `terraform` and `ansible`.

-### Terraform
+## Terraform

 This step will create four Digital Ocean droplets. First, go to the
 correct directory:

@ -49,7 +49,7 @@ and you will get a list of IP addresses that belong to your droplets.

 With the droplets created and running, let's setup Ansible.

-### Ansible
+## Ansible

 The playbooks in [the ansible
 directory](https://github.com/tendermint/tendermint/tree/master/networks/remote/ansible)

@ -144,7 +144,7 @@ Peek at the logs with the status role:

 ansible-playbook -i inventory/digital_ocean.py -l sentrynet status.yml
 ```

-### Logging
+## Logging

 The crudest way is the status role described above. You can also ship
 logs to Logz.io, an Elastic stack (Elastic search, Logstash and Kibana)

@ -160,7 +160,7 @@ go get github.com/mheese/journalbeat

 ansible-playbook -i inventory/digital_ocean.py -l sentrynet logzio.yml -e LOGZIO_TOKEN=ABCDEFGHIJKLMNOPQRSTUVWXYZ012345
 ```

-### Cleanup
+## Cleanup

 To remove your droplets, run:

View File

@ -1,7 +1,7 @@
 # ABCI

 ABCI is the interface between Tendermint (a state-machine replication engine)
-and an application (the actual state machine). It consists of a set of
+and your application (the actual state machine). It consists of a set of
 *methods*, where each method has a corresponding `Request` and `Response`
 message type. Tendermint calls the ABCI methods on the ABCI application by sending the `Request*`
 messages and receiving the `Response*` messages in return.

View File

@ -7,9 +7,9 @@ file](https://github.com/tendermint/tendermint/blob/develop/abci/types/types.pro

 ABCI methods are split across 3 separate ABCI *connections*:

-- `Consensus Connection: InitChain, BeginBlock, DeliverTx, EndBlock, Commit`
-- `Mempool Connection: CheckTx`
-- `Info Connection: Info, SetOption, Query`
+- `Consensus Connection`: `InitChain, BeginBlock, DeliverTx, EndBlock, Commit`
+- `Mempool Connection`: `CheckTx`
+- `Info Connection`: `Info, SetOption, Query`

 The `Consensus Connection` is driven by a consensus protocol and is responsible
 for block execution.

@ -209,7 +209,7 @@ Commit are included in the header of the next block.
   - `Index (int64)`: The index of the key in the tree.
   - `Key ([]byte)`: The key of the matching data.
   - `Value ([]byte)`: The value of the matching data.
-  - `Proof ([]byte)`: Serialized proof for the data, if requested, to be
+  - `Proof (Proof)`: Serialized proof for the value data, if requested, to be
     verified against the `AppHash` for the given Height.
   - `Height (int64)`: The block height from which data was derived.
     Note that this is the height of the block containing the

@ -218,6 +218,8 @@ Commit are included in the header of the next block.
 - **Usage**:
   - Query for data from the application at current or past height.
   - Optionally return Merkle proof.
+  - Merkle proof includes self-describing `type` field to support many types
+    of Merkle trees and encoding formats.

 ### BeginBlock

@ -413,3 +415,44 @@ Commit are included in the header of the next block.
   - `Round (int32)`: Commit round.
   - `Votes ([]VoteInfo)`: List of validators addresses in the last validator set
     with their voting power and whether or not they signed a vote.
### ConsensusParams
- **Fields**:
- `BlockSize (BlockSize)`: Parameters limiting the size of a block.
- `EvidenceParams (EvidenceParams)`: Parameters limiting the validity of
evidence of byzantine behaviour.
### BlockSize
- **Fields**:
- `MaxBytes (int64)`: Max size of a block, in bytes.
- `MaxGas (int64)`: Max sum of `GasWanted` in a proposed block.
- NOTE: blocks that violate this may be committed if there are Byzantine proposers.
It's the application's responsibility to handle this when processing a
block!
### EvidenceParams
- **Fields**:
- `MaxAge (int64)`: Max age of evidence, in blocks. Evidence older than this
is considered stale and ignored.
- This should correspond with an app's "unbonding period" or other
similar mechanism for handling Nothing-At-Stake attacks.
- NOTE: this should change to time (instead of blocks)!
### Proof
- **Fields**:
- `Ops ([]ProofOp)`: List of chained Merkle proofs, of possibly different types
- The Merkle root of one op is the value being proven in the next op.
- The Merkle root of the final op should equal the ultimate root hash being
verified against.
### ProofOp
- **Fields**:
- `Type (string)`: Type of Merkle proof and how it's encoded.
- `Key ([]byte)`: Key in the Merkle tree that this proof is for.
- `Data ([]byte)`: Encoded Merkle proof for the key.

View File

@ -247,8 +247,12 @@ Must have `0 < MaxAge`.

 ### Updates

-The application may set the consensus params during InitChain, and update them during
-EndBlock.
+The application may set the ConsensusParams during InitChain, and update them during
+EndBlock. If the ConsensusParams is empty, it will be ignored. Each field
+that is not empty will be applied in full. For instance, if updating the
+BlockSize.MaxBytes, applications must also set the other BlockSize fields (like
+BlockSize.MaxGas), even if they are unchanged, as they will otherwise cause the
+value to be updated to 0.
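A sketch of such an update in an application's EndBlock; `MyApp` and the numeric values are placeholders, and the field names are assumed to match the Go `abci/types` definitions corresponding to the spec above:

```go
func (app *MyApp) EndBlock(req types.RequestEndBlock) types.ResponseEndBlock {
	return types.ResponseEndBlock{
		ConsensusParamUpdates: &types.ConsensusParams{
			BlockSize: &types.BlockSize{
				MaxBytes: 2 * 1024 * 1024, // the field we actually want to change
				MaxGas:   100000,          // restated even though unchanged, else it resets to 0
			},
		},
	}
}
```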
#### InitChain #### InitChain
@ -312,6 +316,30 @@ their state as follows:
For instance, this allows an application's lite-client to verify proofs of
absence in the application state, something which is much less efficient to do using the block hash.
Some applications (eg. Ethereum, Cosmos-SDK) have multiple "levels" of Merkle trees,
where the leaves of one tree are the root hashes of others. To support this, and
the general variability in Merkle proofs, the `ResponseQuery.Proof` has some minimal structure:
```
message Proof {
repeated ProofOp ops
}
message ProofOp {
string type = 1;
bytes key = 2;
bytes data = 3;
}
```
Each `ProofOp` contains a proof for a single key in a single Merkle tree, of the specified `type`.
This allows ABCI to support many different kinds of Merkle trees, encoding
formats, and proofs (eg. of presence and absence) just by varying the `type`.
The `data` contains the actual encoded proof, encoded according to the `type`.
When verifying the full proof, the root hash for one ProofOp is the value being
verified for the next ProofOp in the list. The root hash of the final ProofOp in
the list should match the `AppHash` being verified against.
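The verification order described above can be written as a short loop; a sketch, assuming the `ProofOperator` interface from the `crypto/merkle` package introduced in this release:

```go
import (
	"bytes"
	"fmt"

	"github.com/tendermint/tendermint/crypto/merkle"
)

// verifyChain folds the ops from leaf to root: the root computed by each
// op becomes the value checked by the next; the last must equal AppHash.
func verifyChain(ops []merkle.ProofOperator, appHash, leafValue []byte) error {
	args := [][]byte{leafValue}
	var err error
	for _, op := range ops {
		if args, err = op.Run(args); err != nil {
			return err
		}
	}
	if !bytes.Equal(args[0], appHash) {
		return fmt.Errorf("computed root %X does not match AppHash %X", args[0], appHash)
	}
	return nil
}
```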
### Peer Filtering

When Tendermint connects to a peer, it sends two queries to the ABCI application

View File

@ -401,14 +401,22 @@ must be greater than 2/3 of the total voting power of the complete validator set
A vote is a signed message broadcast in the consensus for a particular block at a particular height and round.
When stored in the blockchain or propagated over the network, votes are encoded in Amino.
For signing, votes are represented via `CanonicalVote` and also encoded using amino (protobuf compatible) via
`Vote.SignBytes`, which includes the `ChainID`.

We define a method `Verify` that returns `nil` if the signature verifies against the pubkey for the `SignBytes`
using the given ChainID:

```go
func (vote *Vote) Verify(chainID string, pubKey crypto.PubKey) error {
	if !bytes.Equal(pubKey.Address(), vote.ValidatorAddress) {
		return ErrVoteInvalidValidatorAddress
	}

	if !pubKey.VerifyBytes(vote.SignBytes(chainID), vote.Signature) {
		return ErrVoteInvalidSignature
	}
	return nil
}
```
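A sketch of a caller-side check built on the method above (the surrounding variables are hypothetical):

```go
if err := vote.Verify(chainID, pubKey); err != nil {
	// either the validator address or the signature did not match
	return err
}
```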


@ -298,14 +298,22 @@ Where the `"value"` is the base64 encoding of the raw pubkey bytes, and the
### Signed Messages

Signed messages (eg. votes, proposals) in the consensus are encoded using Amino.

When signing, the elements of a message are sorted alphabetically by key and prepended with
a `chain_id` and `type` field.
We call this encoding the SignBytes. For instance, SignBytes for a vote is the Amino encoding of the following struct:

```go
type CanonicalVote struct {
	ChainID   string
	Type      string
	BlockID   CanonicalBlockID
	Height    int64
	Round     int
	Timestamp time.Time
	VoteType  byte
}
```

NOTE: see [#1622](https://github.com/tendermint/tendermint/issues/1622) for how field ordering will change


@ -6,23 +6,21 @@ as command-line flags, but they can also be passed in as
environmental variables or in the config.toml file. The
following are all equivalent:

Flag: `--mempool.recheck=false`

Environment: `TM_MEMPOOL_RECHECK=false`

Config:

```
[mempool]
recheck = false
```

## Recheck

`--mempool.recheck=false` (default: true)

Recheck determines if the mempool rechecks all pending
transactions after a block was committed. Once a block
is committed, the mempool removes all valid transactions
@ -31,9 +29,6 @@ that were successfully included in the block.
If `recheck` is true, then it will rerun CheckTx on
all remaining transactions with the new block state.

## Broadcast

`--mempool.broadcast=false` (default: true)


@ -0,0 +1,4 @@
# Tendermint Core
See the side-bar for details on the various features of Tendermint Core.


@ -115,15 +115,15 @@ addr_book_file = "addrbook.json"
# Set false for private or local networks
addr_book_strict = true

# Maximum number of inbound peers
max_num_inbound_peers = 40

# Maximum number of outbound peers to connect to, excluding persistent peers
max_num_outbound_peers = 10

# Time to wait before flushing messages out on the connection
flush_throttle_timeout = "100ms"

# Maximum size of a message packet payload, in bytes
max_packet_msg_payload_size = 1024
@ -145,11 +145,17 @@ seed_mode = false
# Comma separated list of peer IDs to keep private (will not be gossiped to other peers)
private_peer_ids = ""

# Toggle to disable guard against peers connecting from the same ip.
allow_duplicate_ip = true

# Peer connection configuration.
handshake_timeout = "20s"
dial_timeout = "3s"

##### mempool configuration options #####
[mempool]

recheck = true
broadcast = true
wal_dir = "data/mempool.wal"
@ -164,25 +170,24 @@ cache_size = 100000
wal_file = "data/cs.wal/wal" wal_file = "data/cs.wal/wal"
# All timeouts are in milliseconds timeout_propose = "3000ms"
timeout_propose = 3000 timeout_propose_delta = "500ms"
timeout_propose_delta = 500 timeout_prevote = "1000ms"
timeout_prevote = 1000 timeout_prevote_delta = "500ms"
timeout_prevote_delta = 500 timeout_precommit = "1000ms"
timeout_precommit = 1000 timeout_precommit_delta = "500ms"
timeout_precommit_delta = 500 timeout_commit = "1000ms"
timeout_commit = 1000
# Make progress as soon as we have all the precommits (as if TimeoutCommit = 0) # Make progress as soon as we have all the precommits (as if TimeoutCommit = 0)
skip_timeout_commit = false skip_timeout_commit = false
# EmptyBlocks mode and possible interval between empty blocks in seconds # EmptyBlocks mode and possible interval between empty blocks
create_empty_blocks = true create_empty_blocks = true
create_empty_blocks_interval = 0 create_empty_blocks_interval = "0s"
# Reactor sleep duration parameters are in milliseconds # Reactor sleep duration parameters
peer_gossip_sleep_duration = 100 peer_gossip_sleep_duration = "100ms"
peer_query_maj23_sleep_duration = 2000 peer_query_maj23_sleep_duration = "2000ms"
##### transactions indexer configuration options ##### ##### transactions indexer configuration options #####
[tx_index] [tx_index]


@ -74,6 +74,10 @@ propose it. Clients must monitor their txs by subscribing over websockets,
polling for them, or using `/broadcast_tx_commit`. In the worst case, txs can be
resent from the mempool WAL manually.
For the above reasons, the `mempool.wal` is disabled by default. To enable, set
`mempool.wal_dir` to where you want the WAL to be located (e.g.
`data/mempool.wal`).
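For instance, a sketch of the corresponding config.toml section, using the example path above:

```
[mempool]
wal_dir = "data/mempool.wal"
```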
## DOS Exposure and Mitigation

Validators are supposed to setup [Sentry Node

docs/tools/README.md (new file)

@ -0,0 +1,4 @@
# Tools
Tendermint comes with some tools for [benchmarking](benchmarking.md)
and [monitoring](monitoring.md).


@ -20,7 +20,7 @@ Blocks/sec 0.818 0.386 1 9
## Quick Start

[Install Tendermint](../introduction/install.md)

This is currently set up to work on tendermint's develop branch. Please ensure
you are on that. (If not, update `tendermint` and `tmlibs` in gopkg.toml to use
the master branch.)


@ -33,21 +33,21 @@ docker run -it --rm -p "26670:26670" --link=tm tendermint/monitor tm:26657
### Using Binaries

[Install Tendermint](../introduction/install.md).

Start a Tendermint node:

```
tendermint init
tendermint node --proxy_app=kvstore
```

In another window, run the monitor:

```
tm-monitor localhost:26657
```

## Usage

```


@ -79,11 +79,11 @@ func NewEvidenceStore(db dbm.DB) *EvidenceStore {
func (store *EvidenceStore) PriorityEvidence() (evidence []types.Evidence) {
	// reverse the order so highest priority is first
	l := store.listEvidence(baseKeyOutqueue, -1)
	for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
		l[i], l[j] = l[j], l[i]
	}

	return l
}

// PendingEvidence returns known uncommitted evidence up to maxBytes.

@ -98,6 +98,7 @@ func (store *EvidenceStore) PendingEvidence(maxBytes int64) (evidence []types.Ev
func (store *EvidenceStore) listEvidence(prefixKey string, maxBytes int64) (evidence []types.Evidence) {
	var bytes int64
	iter := dbm.IteratePrefix(store.db, []byte(prefixKey))
	defer iter.Close()
	for ; iter.Valid(); iter.Next() {
		val := iter.Value()

@ -113,9 +113,9 @@ func (e *CElement) NextWaitChan() <-chan struct{} {
// Nonblocking, may return nil if at the end.
func (e *CElement) Next() *CElement {
	e.mtx.RLock()
	val := e.next
	e.mtx.RUnlock()
	return val
}

// Nonblocking, may return nil if at the end.


@ -119,14 +119,13 @@ func (bA *BitArray) Or(o *BitArray) *BitArray {
	}
	bA.mtx.Lock()
	o.mtx.Lock()
	c := bA.copyBits(MaxInt(bA.Bits, o.Bits))
	smaller := MinInt(len(bA.Elems), len(o.Elems))
	for i := 0; i < smaller; i++ {
		c.Elems[i] |= o.Elems[i]
	}
	bA.mtx.Unlock()
	o.mtx.Unlock()
	return c
}
@ -173,8 +172,9 @@ func (bA *BitArray) not() *BitArray {
}

// Sub subtracts the two bit-arrays bitwise, without carrying the bits.
// Note that carryless subtraction of a - b is (a and not b).
// The output is the same as bA, regardless of o's size.
// If bA is longer than o, o is right padded with zeroes
func (bA *BitArray) Sub(o *BitArray) *BitArray {
	if bA == nil || o == nil {
		// TODO: Decide if we should do 1's complement here?
		return nil
	}
	bA.mtx.Lock()
	o.mtx.Lock()
	// output is the same size as bA
	c := bA.copyBits(bA.Bits)
	// Only iterate to the minimum size between the two.
	// If o is longer, those bits are ignored.
	// If bA is longer, then skipping those iterations is equivalent
	// to right padding with 0's
	smaller := MinInt(len(bA.Elems), len(o.Elems))
	for i := 0; i < smaller; i++ {
		// &^ is and not in golang
		c.Elems[i] &^= o.Elems[i]
	}
	bA.mtx.Unlock()
	o.mtx.Unlock()
	return c
}
// IsEmpty returns true iff all bits in the bit array are 0 // IsEmpty returns true iff all bits in the bit array are 0
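For a quick sense of the semantics, a small sketch against the `BitArray` API (hypothetical driver code; `x` marks a set bit and `_` a cleared one, following the notation of the tests below):

```go
a := NewBitArray(4)
b := NewBitArray(2)
a.SetIndex(0, true)
a.SetIndex(1, true)
a.SetIndex(2, true) // a = xxx_
b.SetIndex(0, true) // b = x_ (shorter than a: treated as right padded with zeroes)
c := a.Sub(b)       // c = _xx_ : a AND NOT b, with c.Bits == a.Bits
```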


@ -75,59 +75,34 @@ func TestOr(t *testing.T) {
	}
}

func TestSub(t *testing.T) {
	testCases := []struct {
		initBA        string
		subtractingBA string
		expectedBA    string
	}{
		{`null`, `null`, `null`},
		{`"x"`, `null`, `null`},
		{`null`, `"x"`, `null`},
		{`"x"`, `"x"`, `"_"`},
		{`"xxxxxx"`, `"x_x_x_"`, `"_x_x_x"`},
		{`"x_x_x_"`, `"xxxxxx"`, `"______"`},
		{`"xxxxxx"`, `"x_x_x_xxxx"`, `"_x_x_x"`},
		{`"x_x_x_xxxx"`, `"xxxxxx"`, `"______xxxx"`},
		{`"xxxxxxxxxx"`, `"x_x_x_"`, `"_x_x_xxxxx"`},
		{`"x_x_x_"`, `"xxxxxxxxxx"`, `"______"`},
	}
	for _, tc := range testCases {
		var bA *BitArray
		err := json.Unmarshal([]byte(tc.initBA), &bA)
		require.Nil(t, err)

		var o *BitArray
		err = json.Unmarshal([]byte(tc.subtractingBA), &o)
		require.Nil(t, err)

		got, _ := json.Marshal(bA.Sub(o))
		require.Equal(t, tc.expectedBA, string(got),
			"%s minus %s doesn't equal %s", tc.initBA, tc.subtractingBA, tc.expectedBA)
	}
}


@ -26,7 +26,7 @@ var _ = math.Inf
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package

// Define these here for compatibility but use libs/common.KVPair.
type KVPair struct {
	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"`

@ -82,7 +82,7 @@ func (m *KVPair) GetValue() []byte {
	return nil
}

// Define these here for compatibility but use libs/common.KI64Pair.
type KI64Pair struct {
	Key   []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"`
	Value int64  `protobuf:"varint,2,opt,name=value,proto3" json:"value,omitempty"`


@ -12,7 +12,7 @@ var _ Verifier = (*BaseVerifier)(nil)
// BaseVerifier lets us check the validity of SignedHeaders at height or
// later, requiring sufficient votes (> 2/3) from the given valset.
// To verify blocks produced by a blockchain with mutable validator sets,
// use the DynamicVerifier.
// TODO: Handle unbonding time.
type BaseVerifier struct {

@ -40,15 +40,15 @@ func (bc *BaseVerifier) ChainID() string {
}

// Implements Verifier.
func (bc *BaseVerifier) Verify(signedHeader types.SignedHeader) error {

	// We can't verify commits older than bc.height.
	if signedHeader.Height < bc.height {
		return cmn.NewError("BaseVerifier height is %v, cannot verify height %v",
			bc.height, signedHeader.Height)
	}

	// We can't verify with the wrong validator set.
	if !bytes.Equal(signedHeader.ValidatorsHash,
		bc.valset.Hash()) {
		return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bc.valset.Hash())

@ -57,7 +57,7 @@ func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error {
	// Do basic sanity checks.
	err := signedHeader.ValidateBasic(bc.chainID)
	if err != nil {
		return cmn.ErrorWrap(err, "in verify")
	}

	// Check commit signatures.

@ -65,7 +65,7 @@ func (bc *BaseVerifier) Certify(signedHeader types.SignedHeader) error {
		bc.chainID, signedHeader.Commit.BlockID,
		signedHeader.Height, signedHeader.Commit)
	if err != nil {
		return cmn.ErrorWrap(err, "in verify")
	}

	return nil


@ -43,7 +43,7 @@ func TestBaseCert(t *testing.T) {
	for _, tc := range cases {
		sh := tc.keys.GenSignedHeader(chainID, tc.height, nil, tc.vals, tc.vals,
			[]byte("foo"), []byte("params"), []byte("results"), tc.first, tc.last)
		err := cert.Verify(sh)
		if tc.proper {
			assert.Nil(err, "%+v", err)
		} else {


@ -54,11 +54,11 @@ validator set, and that the height of the commit is at least height (or
greater).

SignedHeader.Commit may be signed by a different validator set, it can get
verified with a BaseVerifier as long as sufficient signatures from the
previous validator set are present in the commit.

DynamicVerifier - this Verifier implements an auto-update and persistence
strategy to verify any SignedHeader of the blockchain.

## Provider and PersistentProvider

@ -88,7 +88,7 @@ type PersistentProvider interface {
}
```

* DBProvider - persistence provider for use with any libs/DB.
* MultiProvider - combine multiple providers.

The suggested use for local light clients is client.NewHTTPProvider(...) for


@ -2,12 +2,16 @@ package lite
import (
	"bytes"
	"fmt"
	"sync"

	log "github.com/tendermint/tendermint/libs/log"
	lerr "github.com/tendermint/tendermint/lite/errors"
	"github.com/tendermint/tendermint/types"
)

const sizeOfPendingMap = 1024

var _ Verifier = (*DynamicVerifier)(nil)

// DynamicVerifier implements an auto-updating Verifier. It uses a

@ -21,6 +25,10 @@ type DynamicVerifier struct {
	trusted PersistentProvider
	// This is a source of new info, like a node rpc, or other import method.
	source Provider

	// pending map to synchronize concurrent verification requests
	mtx                  sync.Mutex
	pendingVerifications map[int64]chan struct{}
}

// NewDynamicVerifier returns a new DynamicVerifier. It uses the

@ -35,6 +43,7 @@ func NewDynamicVerifier(chainID string, trusted PersistentProvider, source Provi
		chainID:              chainID,
		trusted:              trusted,
		source:               source,
		pendingVerifications: make(map[int64]chan struct{}, sizeOfPendingMap),
	}
}

@ -56,7 +65,40 @@ func (ic *DynamicVerifier) ChainID() string {
// ic.trusted and ic.source to prove the new validators. On success, it will
// try to store the SignedHeader in ic.trusted if the next
// validator can be sourced.
func (ic *DynamicVerifier) Verify(shdr types.SignedHeader) error {

	// Performs synchronization for multi-threaded verification at the same height.
	ic.mtx.Lock()
	if pending := ic.pendingVerifications[shdr.Height]; pending != nil {
		ic.mtx.Unlock()
		<-pending // pending is chan struct{}
	} else {
		pending := make(chan struct{})
		ic.pendingVerifications[shdr.Height] = pending
		defer func() {
			close(pending)
			ic.mtx.Lock()
			delete(ic.pendingVerifications, shdr.Height)
			ic.mtx.Unlock()
		}()
		ic.mtx.Unlock()
	}

	// Get the exact trusted commit for h, and if it is
	// equal to shdr, then don't even verify it,
	// and just return nil.
	trustedFCSameHeight, err := ic.trusted.LatestFullCommit(ic.chainID, shdr.Height, shdr.Height)
	if err == nil {
		// If the trusted commit loads successfully and equals shdr, there is
		// no need to verify it; just return nil.
		if bytes.Equal(trustedFCSameHeight.SignedHeader.Hash(), shdr.Hash()) {
			ic.logger.Info(fmt.Sprintf("Loaded full commit at height %d from cache, no need to verify.", shdr.Height))
			return nil
		}
	} else if !lerr.IsErrCommitNotFound(err) {
		// Return the error if it is not a CommitNotFound error.
		ic.logger.Info(fmt.Sprintf("Encountered unknown error while loading full commit at height %d.", shdr.Height))
		return err
	}

	// Get the latest known full commit <= h-1 from our trusted providers.
	// The full commit at h-1 contains the valset to sign for h.

@ -94,9 +136,9 @@ func (ic *DynamicVerifier) Certify(shdr types.SignedHeader) error {
	}
	}

	// Verify the signed header using the matching valset.
	cert := NewBaseVerifier(ic.chainID, trustedFC.Height()+1, trustedFC.NextValidators)
	err = cert.Verify(shdr)
	if err != nil {
		return err
	}
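The pending-map logic above is a hand-rolled single-flight: the first caller for a given height performs the verification while later callers wait on a channel and then re-check the trusted store. A stripped-down sketch of the same pattern, with generic names rather than the lite package API:

```go
import "sync"

type singleFlight struct {
	mtx     sync.Mutex
	pending map[int64]chan struct{}
}

func (s *singleFlight) do(height int64, work func() error) error {
	s.mtx.Lock()
	if ch, ok := s.pending[height]; ok {
		// Someone is already working on this height: wait for them,
		// then let the caller re-check its local store.
		s.mtx.Unlock()
		<-ch
		return nil
	}
	ch := make(chan struct{})
	s.pending[height] = ch
	s.mtx.Unlock()

	defer func() {
		close(ch)
		s.mtx.Lock()
		delete(s.pending, height)
		s.mtx.Unlock()
	}()
	return work()
}
```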


@ -2,8 +2,8 @@ package lite
import (
	"fmt"
	"sync"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@ -49,7 +49,7 @@ func TestInquirerValidPath(t *testing.T) {

	// This should fail validation:
	sh := fcz[count-1].SignedHeader
	err = cert.Verify(sh)
	require.NotNil(err)

	// Adding a few commits in the middle should be insufficient.

@ -57,7 +57,7 @@ func TestInquirerValidPath(t *testing.T) {
		err := source.SaveFullCommit(fcz[i])
		require.Nil(err)
	}
	err = cert.Verify(sh)
	assert.NotNil(err)

	// With more info, we succeed.

@ -65,7 +65,7 @@ func TestInquirerValidPath(t *testing.T) {
		err := source.SaveFullCommit(fcz[i])
		require.Nil(err)
	}
	err = cert.Verify(sh)
	assert.Nil(err, "%+v", err)
}

@ -115,18 +115,18 @@ func TestInquirerVerifyHistorical(t *testing.T) {
	err = source.SaveFullCommit(fcz[7])
	require.Nil(err, "%+v", err)
	sh := fcz[8].SignedHeader
	err = cert.Verify(sh)
	require.Nil(err, "%+v", err)
	assert.Equal(fcz[7].Height(), cert.LastTrustedHeight())
	fc_, err := trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())
	require.NotNil(err, "%+v", err)
	assert.Equal(fc_, (FullCommit{}))

	// With fcz[9] Verify will update last trusted height.
	err = source.SaveFullCommit(fcz[9])
	require.Nil(err, "%+v", err)
	sh = fcz[8].SignedHeader
	err = cert.Verify(sh)
	require.Nil(err, "%+v", err)
	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())
	fc_, err = trust.LatestFullCommit(chainID, fcz[8].Height(), fcz[8].Height())

@ -141,13 +141,70 @@ func TestInquirerVerifyHistorical(t *testing.T) {

	// Try to check an unknown seed in the past.
	sh = fcz[3].SignedHeader
	err = cert.Verify(sh)
	require.Nil(err, "%+v", err)
	assert.Equal(fcz[8].Height(), cert.LastTrustedHeight())

	// Jump all the way forward again.
	sh = fcz[count-1].SignedHeader
	err = cert.Verify(sh)
	require.Nil(err, "%+v", err)
	assert.Equal(fcz[9].Height(), cert.LastTrustedHeight())
}

func TestConcurrencyInquirerVerify(t *testing.T) {
	_, require := assert.New(t), require.New(t)
	trust := NewDBProvider("trust", dbm.NewMemDB()).SetLimit(10)
	source := NewDBProvider("source", dbm.NewMemDB())

	// Set up the validators to generate test blocks.
	var vote int64 = 10
	keys := genPrivKeys(5)
	nkeys := keys.Extend(1)

	// Construct a bunch of commits, each with one more height than the last.
	chainID := "inquiry-test"
	count := 10
	consHash := []byte("special-params")
	fcz := make([]FullCommit, count)
	for i := 0; i < count; i++ {
		vals := keys.ToValidators(vote, 0)
		nextVals := nkeys.ToValidators(vote, 0)
		h := int64(1 + i)
		appHash := []byte(fmt.Sprintf("h=%d", h))
		resHash := []byte(fmt.Sprintf("res=%d", h))
		fcz[i] = keys.GenFullCommit(
			chainID, h, nil,
			vals, nextVals,
			appHash, consHash, resHash, 0, len(keys))
		// Extend the keys by 1 each time.
		keys = nkeys
		nkeys = nkeys.Extend(1)
	}

	// Initialize a Verifier with the initial state.
	err := trust.SaveFullCommit(fcz[0])
	require.Nil(err)
	cert := NewDynamicVerifier(chainID, trust, source)
	cert.SetLogger(log.TestingLogger())

	err = source.SaveFullCommit(fcz[7])
	err = source.SaveFullCommit(fcz[8])
	require.Nil(err, "%+v", err)
	sh := fcz[8].SignedHeader

	var wg sync.WaitGroup
	count = 100
	errList := make([]error, count)
	for i := 0; i < count; i++ {
		wg.Add(1)
		go func(index int) {
			errList[index] = cert.Verify(sh)
			defer wg.Done()
		}(i)
	}
	wg.Wait()
	for _, err := range errList {
		require.Nil(err)
	}
}


@ -41,6 +41,12 @@ func (e errUnknownValidators) Error() string {
		e.chainID, e.height)
}

type errEmptyTree struct{}

func (e errEmptyTree) Error() string {
	return "Tree is empty"
}

//----------------------------------------
// Methods for above error types

@ -110,3 +116,18 @@ func IsErrUnknownValidators(err error) bool {
	}
	return false
}

//-----------------
// ErrEmptyTree

func ErrEmptyTree() error {
	return cmn.ErrorWrap(errEmptyTree{}, "")
}

func IsErrEmptyTree(err error) bool {
	if err_, ok := err.(cmn.Error); ok {
		_, ok := err_.Data().(errEmptyTree)
		return ok
	}
	return false
}

lite/proxy/proof.go (new file)

@ -0,0 +1,14 @@
package proxy

import (
	"github.com/tendermint/tendermint/crypto/merkle"
)

func defaultProofRuntime() *merkle.ProofRuntime {
	prt := merkle.NewProofRuntime()
	prt.RegisterOpDecoder(
		merkle.ProofOpSimpleValue,
		merkle.SimpleValueOpDecoder,
	)
	return prt
}
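A sketch of how this runtime is used downstream, mirroring the `VerifyValue`/`VerifyAbsence` calls in query.go below (the helper name is hypothetical):

```go
func verifyWithRuntime(proof *merkle.Proof, root, key, value []byte) error {
	prt := defaultProofRuntime()
	if value != nil {
		// proof of presence: key maps to value under the trusted root (AppHash)
		return prt.VerifyValue(proof, root, string(key), value)
	}
	// proof of absence: key has no value under the trusted root
	return prt.VerifyAbsence(proof, root, string(key))
}
```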


@ -3,127 +3,95 @@ package proxy
import (
	"fmt"

	cmn "github.com/tendermint/tendermint/libs/common"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/lite"
	lerr "github.com/tendermint/tendermint/lite/errors"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"
	"github.com/tendermint/tendermint/types"
)

// GetWithProof will query the key on the given node, and verify it has
// a valid proof, as defined by the Verifier.
//
// If there is any error in checking, returns an error.
func GetWithProof(prt *merkle.ProofRuntime, key []byte, reqHeight int64, node rpcclient.Client,
	cert lite.Verifier) (
	val cmn.HexBytes, height int64, proof *merkle.Proof, err error) {

	if reqHeight < 0 {
		err = cmn.NewError("Height cannot be negative")
		return
	}

	res, err := GetWithProofOptions(prt, "/key", key,
		rpcclient.ABCIQueryOptions{Height: int64(reqHeight), Prove: true},
		node, cert)
	if err != nil {
		return
	}
	resp := res.Response
	val, height, proof = resp.Value, resp.Height, resp.Proof
	return val, height, proof, err
}

// GetWithProofOptions is useful if you want full access to the ABCIQueryOptions.
// XXX Usage of path? It's not used, and sometimes it's /, sometimes /key, sometimes /store.
func GetWithProofOptions(prt *merkle.ProofRuntime, path string, key []byte, opts rpcclient.ABCIQueryOptions,
	node rpcclient.Client, cert lite.Verifier) (
	*ctypes.ResultABCIQuery, error) {

	if !opts.Prove {
		return nil, cmn.NewError("require ABCIQueryOptions.Prove to be true")
	}

	res, err := node.ABCIQueryWithOptions(path, key, opts)
	if err != nil {
		return nil, err
	}
	resp := res.Response

	// Validate the response, e.g. height.
	if resp.IsErr() {
		err = cmn.NewError("Query error for key %X: %d", key, resp.Code)
		return nil, err
	}

	if len(resp.Key) == 0 || resp.Proof == nil {
		return nil, lerr.ErrEmptyTree()
	}
	if resp.Height == 0 {
		return nil, cmn.NewError("Height returned is zero")
	}

	// AppHash for height H is in header H+1
	signedHeader, err := GetCertifiedCommit(resp.Height+1, node, cert)
	if err != nil {
		return nil, err
	}

	// Validate the proof against the certified header to ensure data integrity.
	if resp.Value != nil {
		// Value exists
		// XXX How do we encode the key into a string...
		err = prt.VerifyValue(resp.Proof, signedHeader.AppHash, string(resp.Key), resp.Value)
		if err != nil {
			return nil, cmn.ErrorWrap(err, "Couldn't verify value proof")
		}
		return &ctypes.ResultABCIQuery{Response: resp}, nil
	} else {
		// Value absent

		// Validate the proof against the certified header to ensure data integrity.
		// XXX How do we encode the key into a string...
		err = prt.VerifyAbsence(resp.Proof, signedHeader.AppHash, string(resp.Key))
		if err != nil {
			return nil, cmn.ErrorWrap(err, "Couldn't verify absence proof")
		}
		return &ctypes.ResultABCIQuery{Response: resp}, nil
	}
}

// GetCertifiedCommit gets the signed header for a given height and certifies

@ -146,7 +114,7 @@ func GetCertifiedCommit(h int64, client rpcclient.Client, cert lite.Verifier) (t
			h, sh.Height)
	}

	if err = cert.Verify(sh); err != nil {
		return types.SignedHeader{}, err
	}


@ -4,12 +4,12 @@ import (
"fmt" "fmt"
"os" "os"
"testing" "testing"
"time"
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/abci/example/kvstore" "github.com/tendermint/tendermint/abci/example/kvstore"
"github.com/tendermint/tendermint/lite" "github.com/tendermint/tendermint/lite"
certclient "github.com/tendermint/tendermint/lite/client" certclient "github.com/tendermint/tendermint/lite/client"
nm "github.com/tendermint/tendermint/node" nm "github.com/tendermint/tendermint/node"
@ -20,6 +20,7 @@ import (
var node *nm.Node var node *nm.Node
var chainID = "tendermint_test" // TODO use from config. var chainID = "tendermint_test" // TODO use from config.
var waitForEventTimeout = 5 * time.Second
// TODO fix tests!! // TODO fix tests!!
@ -38,70 +39,87 @@ func kvstoreTx(k, v []byte) []byte {
return []byte(fmt.Sprintf("%s=%s", k, v)) return []byte(fmt.Sprintf("%s=%s", k, v))
} }
// TODO: enable it after general proof format has been adapted
// in abci/examples/kvstore.go
func _TestAppProofs(t *testing.T) { func _TestAppProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t) assert, require := assert.New(t), require.New(t)
prt := defaultProofRuntime()
cl := client.NewLocal(node) cl := client.NewLocal(node)
client.WaitForHeight(cl, 1, nil) client.WaitForHeight(cl, 1, nil)
// This sets up our trust on the node based on some past point.
source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, 1, 1)
require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// Wait for tx confirmation.
done := make(chan int64)
go func() {
evtTyp := types.EventTx
_, err = client.WaitForOneEvent(cl, evtTyp, waitForEventTimeout)
require.Nil(err, "%#v", err)
close(done)
}()
// Submit a transaction.
k := []byte("my-key") k := []byte("my-key")
v := []byte("my-value") v := []byte("my-value")
tx := kvstoreTx(k, v) tx := kvstoreTx(k, v)
br, err := cl.BroadcastTxCommit(tx) br, err := cl.BroadcastTxCommit(tx)
require.NoError(err, "%+v", err) require.NoError(err, "%#v", err)
require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
require.EqualValues(0, br.DeliverTx.Code) require.EqualValues(0, br.DeliverTx.Code)
brh := br.Height brh := br.Height
// This sets up our trust on the node based on some past point. // Fetch latest after tx commit.
source := certclient.NewProvider(chainID, cl) <-done
seed, err := source.LatestFullCommit(chainID, brh-2, brh-2)
require.NoError(err, "%+v", err)
cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators)
client.WaitForHeight(cl, 3, nil)
latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1) latest, err := source.LatestFullCommit(chainID, 1, 1<<63-1)
require.NoError(err, "%+v", err) require.NoError(err, "%#v", err)
rootHash := latest.SignedHeader.AppHash rootHash := latest.SignedHeader.AppHash
if rootHash == nil {
// Fetch one block later, AppHash hasn't been committed yet.
// TODO find a way to avoid doing this.
client.WaitForHeight(cl, latest.SignedHeader.Height+1, nil)
latest, err = source.LatestFullCommit(chainID, latest.SignedHeader.Height+1, 1<<63-1)
require.NoError(err, "%#v", err)
rootHash = latest.SignedHeader.AppHash
}
require.NotNil(rootHash)
// verify a query before the tx block has no data (and valid non-exist proof) // verify a query before the tx block has no data (and valid non-exist proof)
bs, height, proof, err := GetWithProof(k, brh-1, cl, cert) bs, height, proof, err := GetWithProof(prt, k, brh-1, cl, cert)
fmt.Println(bs, height, proof, err) require.NoError(err, "%#v", err)
require.NotNil(err) // require.NotNil(proof)
require.True(IsErrNoData(err), err.Error()) // TODO: Ensure that *some* keys will be there, ensuring that proof is nil,
// (currently there's a race condition)
// and ensure that proof proves absence of k.
require.Nil(bs) require.Nil(bs)
// but given that block it is good // but given that block it is good
bs, height, proof, err = GetWithProof(k, brh, cl, cert) bs, height, proof, err = GetWithProof(prt, k, brh, cl, cert)
require.NoError(err, "%+v", err) require.NoError(err, "%#v", err)
require.NotNil(proof) require.NotNil(proof)
require.True(height >= int64(latest.Height())) require.Equal(height, brh)
// Alexis there is a bug here, somehow the above code gives us rootHash = nil
// and proof.Verify doesn't care, while proofNotExists.Verify fails.
// I am hacking this in to make it pass, but please investigate further.
rootHash = proof.Root()
//err = wire.ReadBinaryBytes(bs, &data)
//require.NoError(err, "%+v", err)
assert.EqualValues(v, bs) assert.EqualValues(v, bs)
err = proof.Verify(k, bs, rootHash) err = prt.VerifyValue(proof, rootHash, string(k), bs) // XXX key encoding
assert.NoError(err, "%+v", err) assert.NoError(err, "%#v", err)
// Test non-existing key. // Test non-existing key.
missing := []byte("my-missing-key") missing := []byte("my-missing-key")
bs, _, proof, err = GetWithProof(missing, 0, cl, cert) bs, _, proof, err = GetWithProof(prt, missing, 0, cl, cert)
require.True(IsErrNoData(err)) require.NoError(err)
require.Nil(bs) require.Nil(bs)
require.NotNil(proof) require.NotNil(proof)
err = proof.Verify(missing, nil, rootHash) err = prt.VerifyAbsence(proof, rootHash, string(missing)) // XXX VerifyAbsence(), keyencoding
assert.NoError(err, "%+v", err) assert.NoError(err, "%#v", err)
err = proof.Verify(k, nil, rootHash) err = prt.VerifyAbsence(proof, rootHash, string(k)) // XXX VerifyAbsence(), keyencoding
assert.Error(err) assert.Error(err, "%#v", err)
} }
func _TestTxProofs(t *testing.T) { func TestTxProofs(t *testing.T) {
assert, require := assert.New(t), require.New(t) assert, require := assert.New(t), require.New(t)
cl := client.NewLocal(node) cl := client.NewLocal(node)
@ -109,15 +127,15 @@ func _TestTxProofs(t *testing.T) {
tx := kvstoreTx([]byte("key-a"), []byte("value-a")) tx := kvstoreTx([]byte("key-a"), []byte("value-a"))
br, err := cl.BroadcastTxCommit(tx) br, err := cl.BroadcastTxCommit(tx)
require.NoError(err, "%+v", err) require.NoError(err, "%#v", err)
require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx) require.EqualValues(0, br.CheckTx.Code, "%#v", br.CheckTx)
require.EqualValues(0, br.DeliverTx.Code) require.EqualValues(0, br.DeliverTx.Code)
brh := br.Height brh := br.Height
source := certclient.NewProvider(chainID, cl) source := certclient.NewProvider(chainID, cl)
seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2)
require.NoError(err, "%+v", err) require.NoError(err, "%#v", err)
cert := lite.NewBaseVerifier("my-chain", seed.Height(), seed.Validators) cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators)
// First let's make sure a bogus transaction hash returns a valid non-existence proof. // First let's make sure a bogus transaction hash returns a valid non-existence proof.
key := types.Tx([]byte("bogus")).Hash() key := types.Tx([]byte("bogus")).Hash()
@ -128,12 +146,12 @@ func _TestTxProofs(t *testing.T) {
// Now let's check with the real tx hash. // Now let's check with the real tx hash.
key = types.Tx(tx).Hash() key = types.Tx(tx).Hash()
res, err = cl.Tx(key, true) res, err = cl.Tx(key, true)
require.NoError(err, "%+v", err) require.NoError(err, "%#v", err)
require.NotNil(res) require.NotNil(res)
err = res.Proof.Validate(key) err = res.Proof.Validate(key)
assert.NoError(err, "%+v", err) assert.NoError(err, "%#v", err)
commit, err := GetCertifiedCommit(br.Height, cl, cert) commit, err := GetCertifiedCommit(br.Height, cl, cert)
require.Nil(err, "%+v", err) require.Nil(err, "%#v", err)
require.Equal(res.Proof.RootHash, commit.Header.DataHash) require.Equal(res.Proof.RootHash, commit.Header.DataHash)
} }


@ -8,12 +8,12 @@ import (
	lclient "github.com/tendermint/tendermint/lite/client"
)

func NewVerifier(chainID, rootDir string, client lclient.SignStatusClient, logger log.Logger, cacheSize int) (*lite.DynamicVerifier, error) {

	logger = logger.With("module", "lite/proxy")
	logger.Info("lite/proxy/NewVerifier()...", "chainID", chainID, "rootDir", rootDir, "client", client)

	memProvider := lite.NewDBProvider("trusted.mem", dbm.NewMemDB()).SetLimit(cacheSize)
	lvlProvider := lite.NewDBProvider("trusted.lvl", dbm.NewDB("trust-base", dbm.LevelDBBackend, rootDir))
	trust := lite.NewMultiProvider(
		memProvider,


@ -3,6 +3,7 @@ package proxy
import (
	cmn "github.com/tendermint/tendermint/libs/common"

	"github.com/tendermint/tendermint/crypto/merkle"
	"github.com/tendermint/tendermint/lite"
	rpcclient "github.com/tendermint/tendermint/rpc/client"
	ctypes "github.com/tendermint/tendermint/rpc/core/types"

@ -15,6 +16,7 @@ var _ rpcclient.Client = Wrapper{}
type Wrapper struct {
	rpcclient.Client
	cert *lite.DynamicVerifier
	prt  *merkle.ProofRuntime
}

// SecureClient uses a given Verifier to wrap a connection to an untrusted

@ -22,7 +24,8 @@ type Wrapper struct {
//
// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface
func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper {
	prt := defaultProofRuntime()
	wrap := Wrapper{c, cert, prt}
	// TODO: no longer possible as no more such interface exposed....
	// if we wrap http client, then we can swap out the event switch to filter
	// if hc, ok := c.(*rpcclient.HTTP); ok {

@ -36,7 +39,7 @@ func SecureClient(c rpcclient.Client, cert *lite.DynamicVerifier) Wrapper {
func (w Wrapper) ABCIQueryWithOptions(path string, data cmn.HexBytes,
	opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
	res, err := GetWithProofOptions(w.prt, path, data, opts, w.Client, w.cert)
	return res, err
}

@ -134,10 +137,10 @@ func (w Wrapper) Commit(height *int64) (*ctypes.ResultCommit, error) {
	}
	rpcclient.WaitForHeight(w.Client, *height, nil)
	res, err := w.Client.Commit(height)
	// if we got it, then verify it
	if err == nil {
		sh := res.SignedHeader
		err = w.cert.Verify(sh)
	}
	return res, err
}


@ -8,6 +8,6 @@ import (
// Verifier must know the current or recent set of validators by some other
// means.
type Verifier interface {
	Verify(sheader types.SignedHeader) error
	ChainID() string
}

mempool/bench_test.go (new file)

@ -0,0 +1,55 @@
package mempool

import (
	"encoding/binary"
	"testing"

	"github.com/tendermint/tendermint/abci/example/kvstore"
	"github.com/tendermint/tendermint/proxy"
)

func BenchmarkReap(b *testing.B) {
	app := kvstore.NewKVStoreApplication()
	cc := proxy.NewLocalClientCreator(app)
	mempool := newMempoolWithApp(cc)

	size := 10000
	for i := 0; i < size; i++ {
		tx := make([]byte, 8)
		binary.BigEndian.PutUint64(tx, uint64(i))
		mempool.CheckTx(tx, nil)
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		mempool.ReapMaxBytesMaxGas(100000000, 10000000)
	}
}

func BenchmarkCacheInsertTime(b *testing.B) {
	cache := newMapTxCache(b.N)
	txs := make([][]byte, b.N)
	for i := 0; i < b.N; i++ {
		txs[i] = make([]byte, 8)
		binary.BigEndian.PutUint64(txs[i], uint64(i))
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cache.Push(txs[i])
	}
}

// This benchmark is probably skewed, since we actually will be removing
// txs in parallel, which may cause some overhead due to mutex locking.
func BenchmarkCacheRemoveTime(b *testing.B) {
	cache := newMapTxCache(b.N)
	txs := make([][]byte, b.N)
	for i := 0; i < b.N; i++ {
		txs[i] = make([]byte, 8)
		binary.BigEndian.PutUint64(txs[i], uint64(i))
		cache.Push(txs[i])
	}
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		cache.Remove(txs[i])
	}
}
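These run with the standard Go tooling, e.g.:

```
go test -bench=. -benchmem ./mempool
```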


@ -513,9 +513,7 @@ func (mem *Mempool) Update(
	// Remove transactions that are already in txs.
	goodTxs := mem.filterTxs(txsMap)
	// Recheck mempool txs if any txs were committed in the block
	if mem.config.Recheck && len(goodTxs) > 0 {
		mem.logger.Info("Recheck txs", "numtxs", len(goodTxs), "height", height)
		mem.recheckTxs(goodTxs)
		// At this point, mem.txs are being rechecked.


@ -399,35 +399,6 @@ func TestMempoolCloseWAL(t *testing.T) {
	require.Equal(t, 1, len(m3), "expecting the wal match in")
}

func checksumIt(data []byte) string {
	h := md5.New()
	h.Write(data)


@ -1,5 +1,9 @@
# Local Cluster with Docker Compose # Local Cluster with Docker Compose
DEPRECATED!
See the [docs](https://tendermint.com/docs/networks/docker-compose.html).
## Requirements

- [Install tendermint](/docs/install.md)


@ -1,3 +1,3 @@
# Remote Cluster with Terraform and Ansible

See the [docs](https://tendermint.com/docs/networks/terraform-and-ansible.html).


@ -359,7 +359,6 @@ func NewNode(config *cfg.Config,
	// Filter peers by addr or pubkey with an ABCI query.
	// If the query return code is OK, add peer.
	if config.FilterPeers {
		connFilters = append(
			connFilters,


@ -38,14 +38,16 @@ func voteToStep(vote *types.Vote) int8 {
// FilePV implements PrivValidator using data persisted to disk // FilePV implements PrivValidator using data persisted to disk
// to prevent double signing. // to prevent double signing.
// NOTE: the directory containing the pv.filePath must already exist. // NOTE: the directory containing the pv.filePath must already exist.
// It includes the LastSignature and LastSignBytes so we don't lose the signature
// if the process crashes after signing but before the resulting consensus message is processed.
type FilePV struct { type FilePV struct {
Address types.Address `json:"address"` Address types.Address `json:"address"`
PubKey crypto.PubKey `json:"pub_key"` PubKey crypto.PubKey `json:"pub_key"`
LastHeight int64 `json:"last_height"` LastHeight int64 `json:"last_height"`
LastRound int `json:"last_round"` LastRound int `json:"last_round"`
LastStep int8 `json:"last_step"` LastStep int8 `json:"last_step"`
LastSignature []byte `json:"last_signature,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? LastSignature []byte `json:"last_signature,omitempty"`
LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"` // so we dont lose signatures XXX Why would we lose signatures? LastSignBytes cmn.HexBytes `json:"last_signbytes,omitempty"`
PrivKey crypto.PrivKey `json:"priv_key"` PrivKey crypto.PrivKey `json:"priv_key"`
// For persistence. // For persistence.
@ -311,21 +313,18 @@ func (pv *FilePV) String() string {
// returns the timestamp from the lastSignBytes. // returns the timestamp from the lastSignBytes.
// returns true if the only difference in the votes is their timestamp. // returns true if the only difference in the votes is their timestamp.
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
var lastVote, newVote types.CanonicalJSONVote var lastVote, newVote types.CanonicalVote
if err := cdc.UnmarshalJSON(lastSignBytes, &lastVote); err != nil { if err := cdc.UnmarshalBinary(lastSignBytes, &lastVote); err != nil {
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err)) panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
} }
if err := cdc.UnmarshalJSON(newSignBytes, &newVote); err != nil { if err := cdc.UnmarshalBinary(newSignBytes, &newVote); err != nil {
panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err)) panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
} }
lastTime, err := time.Parse(types.TimeFormat, lastVote.Timestamp) lastTime := lastVote.Timestamp
if err != nil {
panic(err)
}
// set the times to the same value and check equality // set the times to the same value and check equality
now := types.CanonicalTime(tmtime.Now()) now := tmtime.Now()
lastVote.Timestamp = now lastVote.Timestamp = now
newVote.Timestamp = now newVote.Timestamp = now
lastVoteBytes, _ := cdc.MarshalJSON(lastVote) lastVoteBytes, _ := cdc.MarshalJSON(lastVote)
@ -337,25 +336,21 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.T
// returns the timestamp from the lastSignBytes. // returns the timestamp from the lastSignBytes.
// returns true if the only difference in the proposals is their timestamp // returns true if the only difference in the proposals is their timestamp
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) { func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
var lastProposal, newProposal types.CanonicalJSONProposal var lastProposal, newProposal types.CanonicalProposal
if err := cdc.UnmarshalJSON(lastSignBytes, &lastProposal); err != nil { if err := cdc.UnmarshalBinary(lastSignBytes, &lastProposal); err != nil {
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err)) panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
} }
if err := cdc.UnmarshalJSON(newSignBytes, &newProposal); err != nil { if err := cdc.UnmarshalBinary(newSignBytes, &newProposal); err != nil {
panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err)) panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
} }
lastTime, err := time.Parse(types.TimeFormat, lastProposal.Timestamp) lastTime := lastProposal.Timestamp
if err != nil {
panic(err)
}
// set the times to the same value and check equality // set the times to the same value and check equality
now := types.CanonicalTime(tmtime.Now()) now := tmtime.Now()
lastProposal.Timestamp = now lastProposal.Timestamp = now
newProposal.Timestamp = now newProposal.Timestamp = now
lastProposalBytes, _ := cdc.MarshalJSON(lastProposal) lastProposalBytes, _ := cdc.MarshalBinary(lastProposal)
newProposalBytes, _ := cdc.MarshalJSON(newProposal) newProposalBytes, _ := cdc.MarshalBinary(newProposal)
return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes) return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
} }
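Both helpers now unmarshal amino-encoded sign bytes instead of JSON, but the underlying trick is unchanged: force both timestamps to one common value, re-encode, and compare bytes, so a match means the payloads differed only in their timestamps and re-signing is safe. Condensed to its core for votes (the real helpers above panic if the stored sign bytes fail to unmarshal):

```go
func onlyTimestampsDiffer(lastVote, newVote *types.CanonicalVote) bool {
	// Overwrite both timestamps with the same value so they cannot
	// influence the comparison...
	now := tmtime.Now()
	lastVote.Timestamp = now
	newVote.Timestamp = now
	// ...then compare the re-encoded bytes.
	lastBz, _ := cdc.MarshalJSON(lastVote)
	newBz, _ := cdc.MarshalJSON(newVote)
	return bytes.Equal(lastBz, newBz)
}
```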

View File

@ -7,7 +7,7 @@ import (
"net" "net"
"time" "time"
amino "github.com/tendermint/go-amino" "github.com/tendermint/go-amino"
"github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/ed25519"
@ -30,6 +30,7 @@ var (
ErrDialRetryMax = errors.New("dialed maximum retries") ErrDialRetryMax = errors.New("dialed maximum retries")
ErrConnWaitTimeout = errors.New("waited for remote signer for too long") ErrConnWaitTimeout = errors.New("waited for remote signer for too long")
ErrConnTimeout = errors.New("remote signer timed out") ErrConnTimeout = errors.New("remote signer timed out")
ErrUnexpectedResponse = errors.New("received unexpected response")
) )
var ( var (
@ -150,7 +151,7 @@ func (sc *SocketPV) getPubKey() (crypto.PubKey, error) {
// SignVote implements PrivValidator. // SignVote implements PrivValidator.
func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error { func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error {
err := writeMsg(sc.conn, &SignVoteMsg{Vote: vote}) err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote})
if err != nil { if err != nil {
return err return err
} }
@ -160,7 +161,16 @@ func (sc *SocketPV) SignVote(chainID string, vote *types.Vote) error {
return err return err
} }
*vote = *res.(*SignVoteMsg).Vote resp, ok := res.(*SignedVoteResponse)
if !ok {
return ErrUnexpectedResponse
}
if resp.Error != nil {
return fmt.Errorf("remote error occurred: code: %v, description: %s",
resp.Error.Code,
resp.Error.Description)
}
*vote = *resp.Vote
return nil return nil
} }
@ -170,7 +180,7 @@ func (sc *SocketPV) SignProposal(
chainID string, chainID string,
proposal *types.Proposal, proposal *types.Proposal,
) error { ) error {
err := writeMsg(sc.conn, &SignProposalMsg{Proposal: proposal}) err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal})
if err != nil { if err != nil {
return err return err
} }
@ -179,8 +189,16 @@ func (sc *SocketPV) SignProposal(
if err != nil { if err != nil {
return err return err
} }
*proposal = *res.(*SignProposalMsg).Proposal resp, ok := res.(*SignedProposalResponse)
if !ok {
return ErrUnexpectedResponse
}
if resp.Error != nil {
return fmt.Errorf("remote error occurred: code: %v, description: %s",
resp.Error.Code,
resp.Error.Description)
}
*proposal = *resp.Proposal
return nil return nil
} }
@ -190,7 +208,7 @@ func (sc *SocketPV) SignHeartbeat(
chainID string, chainID string,
heartbeat *types.Heartbeat, heartbeat *types.Heartbeat,
) error { ) error {
err := writeMsg(sc.conn, &SignHeartbeatMsg{Heartbeat: heartbeat}) err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: heartbeat})
if err != nil { if err != nil {
return err return err
} }
@ -199,8 +217,16 @@ func (sc *SocketPV) SignHeartbeat(
if err != nil { if err != nil {
return err return err
} }
*heartbeat = *res.(*SignHeartbeatMsg).Heartbeat resp, ok := res.(*SignedHeartbeatResponse)
if !ok {
return ErrUnexpectedResponse
}
if resp.Error != nil {
return fmt.Errorf("remote error occurred: code: %v, description: %s",
resp.Error.Code,
resp.Error.Description)
}
*heartbeat = *resp.Heartbeat
return nil return nil
} }
@ -462,22 +488,34 @@ func (rs *RemoteSigner) handleConnection(conn net.Conn) {
var p crypto.PubKey var p crypto.PubKey
p = rs.privVal.GetPubKey() p = rs.privVal.GetPubKey()
res = &PubKeyMsg{p} res = &PubKeyMsg{p}
case *SignVoteMsg: case *SignVoteRequest:
err = rs.privVal.SignVote(rs.chainID, r.Vote) err = rs.privVal.SignVote(rs.chainID, r.Vote)
res = &SignVoteMsg{r.Vote} if err != nil {
case *SignProposalMsg: res = &SignedVoteResponse{nil, &RemoteSignerError{0, err.Error()}}
} else {
res = &SignedVoteResponse{r.Vote, nil}
}
case *SignProposalRequest:
err = rs.privVal.SignProposal(rs.chainID, r.Proposal) err = rs.privVal.SignProposal(rs.chainID, r.Proposal)
res = &SignProposalMsg{r.Proposal} if err != nil {
case *SignHeartbeatMsg: res = &SignedProposalResponse{nil, &RemoteSignerError{0, err.Error()}}
} else {
res = &SignedProposalResponse{r.Proposal, nil}
}
case *SignHeartbeatRequest:
err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat) err = rs.privVal.SignHeartbeat(rs.chainID, r.Heartbeat)
res = &SignHeartbeatMsg{r.Heartbeat} if err != nil {
res = &SignedHeartbeatResponse{nil, &RemoteSignerError{0, err.Error()}}
} else {
res = &SignedHeartbeatResponse{r.Heartbeat, nil}
}
default: default:
err = fmt.Errorf("unknown msg: %v", r) err = fmt.Errorf("unknown msg: %v", r)
} }
if err != nil { if err != nil {
// only log the error; we'll reply with an error in res
rs.Logger.Error("handleConnection", "err", err) rs.Logger.Error("handleConnection", "err", err)
return
} }
err = writeMsg(conn, res) err = writeMsg(conn, res)
@ -496,9 +534,12 @@ type SocketPVMsg interface{}
func RegisterSocketPVMsg(cdc *amino.Codec) { func RegisterSocketPVMsg(cdc *amino.Codec) {
cdc.RegisterInterface((*SocketPVMsg)(nil), nil) cdc.RegisterInterface((*SocketPVMsg)(nil), nil)
cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil) cdc.RegisterConcrete(&PubKeyMsg{}, "tendermint/socketpv/PubKeyMsg", nil)
cdc.RegisterConcrete(&SignVoteMsg{}, "tendermint/socketpv/SignVoteMsg", nil) cdc.RegisterConcrete(&SignVoteRequest{}, "tendermint/socketpv/SignVoteRequest", nil)
cdc.RegisterConcrete(&SignProposalMsg{}, "tendermint/socketpv/SignProposalMsg", nil) cdc.RegisterConcrete(&SignedVoteResponse{}, "tendermint/socketpv/SignedVoteResponse", nil)
cdc.RegisterConcrete(&SignHeartbeatMsg{}, "tendermint/socketpv/SignHeartbeatMsg", nil) cdc.RegisterConcrete(&SignProposalRequest{}, "tendermint/socketpv/SignProposalRequest", nil)
cdc.RegisterConcrete(&SignedProposalResponse{}, "tendermint/socketpv/SignedProposalResponse", nil)
cdc.RegisterConcrete(&SignHeartbeatRequest{}, "tendermint/socketpv/SignHeartbeatRequest", nil)
cdc.RegisterConcrete(&SignedHeartbeatResponse{}, "tendermint/socketpv/SignedHeartbeatResponse", nil)
} }
// PubKeyMsg is a PrivValidatorSocket message containing the public key. // PubKeyMsg is a PrivValidatorSocket message containing the public key.
@ -506,21 +547,44 @@ type PubKeyMsg struct {
PubKey crypto.PubKey PubKey crypto.PubKey
} }
// SignVoteMsg is a PrivValidatorSocket message containing a vote. // SignVoteRequest is a PrivValidatorSocket message containing a vote.
type SignVoteMsg struct { type SignVoteRequest struct {
Vote *types.Vote Vote *types.Vote
} }
// SignProposalMsg is a PrivValidatorSocket message containing a Proposal. // SignedVoteResponse is a PrivValidatorSocket message containing a signed vote along with a potential error message.
type SignProposalMsg struct { type SignedVoteResponse struct {
Vote *types.Vote
Error *RemoteSignerError
}
// SignProposalRequest is a PrivValidatorSocket message containing a Proposal.
type SignProposalRequest struct {
Proposal *types.Proposal Proposal *types.Proposal
} }
// SignHeartbeatMsg is a PrivValidatorSocket message containing a Heartbeat. type SignedProposalResponse struct {
type SignHeartbeatMsg struct { Proposal *types.Proposal
Error *RemoteSignerError
}
// SignHeartbeatRequest is a PrivValidatorSocket message containing a Heartbeat.
type SignHeartbeatRequest struct {
Heartbeat *types.Heartbeat Heartbeat *types.Heartbeat
} }
type SignedHeartbeatResponse struct {
Heartbeat *types.Heartbeat
Error *RemoteSignerError
}
// RemoteSignerError allows (remote) validators to include meaningful error descriptions in their reply.
type RemoteSignerError struct {
// TODO(ismail): create an enum of known errors
Code int
Description string
}
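With the message types split into Request/Response pairs, every client-side `Sign*` call follows the same shape: write the Request, read one message back, type-assert the matching Response, and surface either `ErrUnexpectedResponse` or the in-band `RemoteSignerError`. A condensed recap of the flow for votes, mirroring `SignVote` above rather than adding new API:

```go
func signVoteRoundTrip(sc *SocketPV, vote *types.Vote) error {
	if err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote}); err != nil {
		return err
	}
	res, err := readMsg(sc.conn)
	if err != nil {
		return err
	}
	resp, ok := res.(*SignedVoteResponse)
	if !ok {
		return ErrUnexpectedResponse // wrong response type for this request
	}
	if resp.Error != nil {
		// The remote signer failed, but it told us why in-band.
		return fmt.Errorf("remote error: code %d: %s",
			resp.Error.Code, resp.Error.Description)
	}
	*vote = *resp.Vote
	return nil
}
```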
func readMsg(r io.Reader) (msg SocketPVMsg, err error) { func readMsg(r io.Reader) (msg SocketPVMsg, err error) {
const maxSocketPVMsgSize = 1024 * 10 const maxSocketPVMsgSize = 1024 * 10
_, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize) _, err = cdc.UnmarshalBinaryReader(r, &msg, maxSocketPVMsgSize)

View File

@ -20,7 +20,7 @@ import (
func TestSocketPVAddress(t *testing.T) { func TestSocketPVAddress(t *testing.T) {
var ( var (
chainID = cmn.RandStr(12) chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID) sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV())
) )
defer sc.Stop() defer sc.Stop()
defer rs.Stop() defer rs.Stop()
@ -40,7 +40,7 @@ func TestSocketPVAddress(t *testing.T) {
func TestSocketPVPubKey(t *testing.T) { func TestSocketPVPubKey(t *testing.T) {
var ( var (
chainID = cmn.RandStr(12) chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID) sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV())
) )
defer sc.Stop() defer sc.Stop()
defer rs.Stop() defer rs.Stop()
@ -59,7 +59,7 @@ func TestSocketPVPubKey(t *testing.T) {
func TestSocketPVProposal(t *testing.T) { func TestSocketPVProposal(t *testing.T) {
var ( var (
chainID = cmn.RandStr(12) chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID) sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV())
ts = time.Now() ts = time.Now()
privProposal = &types.Proposal{Timestamp: ts} privProposal = &types.Proposal{Timestamp: ts}
@ -76,7 +76,7 @@ func TestSocketPVProposal(t *testing.T) {
func TestSocketPVVote(t *testing.T) { func TestSocketPVVote(t *testing.T) {
var ( var (
chainID = cmn.RandStr(12) chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID) sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV())
ts = time.Now() ts = time.Now()
vType = types.VoteTypePrecommit vType = types.VoteTypePrecommit
@ -94,7 +94,7 @@ func TestSocketPVVote(t *testing.T) {
func TestSocketPVHeartbeat(t *testing.T) { func TestSocketPVHeartbeat(t *testing.T) {
var ( var (
chainID = cmn.RandStr(12) chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID) sc, rs = testSetupSocketPair(t, chainID, types.NewMockPV())
want = &types.Heartbeat{} want = &types.Heartbeat{}
have = &types.Heartbeat{} have = &types.Heartbeat{}
@ -231,14 +231,163 @@ func TestRemoteSignerRetry(t *testing.T) {
} }
} }
func TestRemoteSignVoteErrors(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV())
ts = time.Now()
vType = types.VoteTypePrecommit
vote = &types.Vote{Timestamp: ts, Type: vType}
)
defer sc.Stop()
defer rs.Stop()
err := writeMsg(sc.conn, &SignVoteRequest{Vote: vote})
require.NoError(t, err)
res, err := readMsg(sc.conn)
require.NoError(t, err)
resp := *res.(*SignedVoteResponse)
require.NotNil(t, resp.Error)
require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error())
err = rs.privVal.SignVote(chainID, vote)
require.Error(t, err)
err = sc.SignVote(chainID, vote)
require.Error(t, err)
}
func TestRemoteSignProposalErrors(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV())
ts = time.Now()
proposal = &types.Proposal{Timestamp: ts}
)
defer sc.Stop()
defer rs.Stop()
err := writeMsg(sc.conn, &SignProposalRequest{Proposal: proposal})
require.NoError(t, err)
res, err := readMsg(sc.conn)
require.NoError(t, err)
resp := *res.(*SignedProposalResponse)
require.NotNil(t, resp.Error)
require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error())
err = rs.privVal.SignProposal(chainID, proposal)
require.Error(t, err)
err = sc.SignProposal(chainID, proposal)
require.Error(t, err)
}
func TestRemoteSignHeartbeatErrors(t *testing.T) {
var (
chainID = cmn.RandStr(12)
sc, rs = testSetupSocketPair(t, chainID, types.NewErroringMockPV())
hb = &types.Heartbeat{}
)
defer sc.Stop()
defer rs.Stop()
err := writeMsg(sc.conn, &SignHeartbeatRequest{Heartbeat: hb})
require.NoError(t, err)
res, err := readMsg(sc.conn)
require.NoError(t, err)
resp := *res.(*SignedHeartbeatResponse)
require.NotNil(t, resp.Error)
require.Equal(t, resp.Error.Description, types.ErroringMockPVErr.Error())
err = rs.privVal.SignHeartbeat(chainID, hb)
require.Error(t, err)
err = sc.SignHeartbeat(chainID, hb)
require.Error(t, err)
}
func TestErrUnexpectedResponse(t *testing.T) {
var (
addr = testFreeAddr(t)
logger = log.TestingLogger()
chainID = cmn.RandStr(12)
readyc = make(chan struct{})
errc = make(chan error, 1)
rs = NewRemoteSigner(
logger,
chainID,
addr,
types.NewMockPV(),
ed25519.GenPrivKey(),
)
sc = NewSocketPV(
logger,
addr,
ed25519.GenPrivKey(),
)
)
testStartSocketPV(t, readyc, sc)
defer sc.Stop()
RemoteSignerConnDeadline(time.Millisecond)(rs)
RemoteSignerConnRetries(1e6)(rs)
// we do not want to Start() the remote signer here and instead use the connection to
// reply with intentionally wrong replies below:
rsConn, err := rs.connect()
defer rsConn.Close()
require.NoError(t, err)
require.NotNil(t, rsConn)
<-readyc
// Heartbeat:
go func(errc chan error) {
errc <- sc.SignHeartbeat(chainID, &types.Heartbeat{})
}(errc)
// read request and write wrong response:
go testReadWriteResponse(t, &SignedVoteResponse{}, rsConn)
err = <-errc
require.Error(t, err)
require.Equal(t, err, ErrUnexpectedResponse)
// Proposal:
go func(errc chan error) {
errc <- sc.SignProposal(chainID, &types.Proposal{})
}(errc)
// read request and write wrong response:
go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn)
err = <-errc
require.Error(t, err)
require.Equal(t, err, ErrUnexpectedResponse)
// Vote:
go func(errc chan error) {
errc <- sc.SignVote(chainID, &types.Vote{})
}(errc)
// read request and write wrong response:
go testReadWriteResponse(t, &SignedHeartbeatResponse{}, rsConn)
err = <-errc
require.Error(t, err)
require.Equal(t, err, ErrUnexpectedResponse)
}
func testSetupSocketPair( func testSetupSocketPair(
t *testing.T, t *testing.T,
chainID string, chainID string,
privValidator types.PrivValidator,
) (*SocketPV, *RemoteSigner) { ) (*SocketPV, *RemoteSigner) {
var ( var (
addr = testFreeAddr(t) addr = testFreeAddr(t)
logger = log.TestingLogger() logger = log.TestingLogger()
privVal = types.NewMockPV() privVal = privValidator
readyc = make(chan struct{}) readyc = make(chan struct{})
rs = NewRemoteSigner( rs = NewRemoteSigner(
logger, logger,
@ -254,12 +403,7 @@ func testSetupSocketPair(
) )
) )
go func(sc *SocketPV) { testStartSocketPV(t, readyc, sc)
require.NoError(t, sc.Start())
assert.True(t, sc.IsRunning())
readyc <- struct{}{}
}(sc)
RemoteSignerConnDeadline(time.Millisecond)(rs) RemoteSignerConnDeadline(time.Millisecond)(rs)
RemoteSignerConnRetries(1e6)(rs) RemoteSignerConnRetries(1e6)(rs)
@ -272,6 +416,23 @@ func testSetupSocketPair(
return sc, rs return sc, rs
} }
func testReadWriteResponse(t *testing.T, resp SocketPVMsg, rsConn net.Conn) {
_, err := readMsg(rsConn)
require.NoError(t, err)
err = writeMsg(rsConn, resp)
require.NoError(t, err)
}
func testStartSocketPV(t *testing.T, readyc chan struct{}, sc *SocketPV) {
go func(sc *SocketPV) {
require.NoError(t, sc.Start())
assert.True(t, sc.IsRunning())
readyc <- struct{}{}
}(sc)
}
// testFreeAddr claims a free port so we don't block on listener being ready. // testFreeAddr claims a free port so we don't block on listener being ready.
func testFreeAddr(t *testing.T) string { func testFreeAddr(t *testing.T) string {
ln, err := net.Listen("tcp", "127.0.0.1:0") ln, err := net.Listen("tcp", "127.0.0.1:0")

View File

@ -75,7 +75,7 @@ func (c *HTTP) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQuer
func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { func (c *HTTP) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
result := new(ctypes.ResultABCIQuery) result := new(ctypes.ResultABCIQuery)
_, err := c.rpc.Call("abci_query", _, err := c.rpc.Call("abci_query",
map[string]interface{}{"path": path, "data": data, "height": opts.Height, "trusted": opts.Trusted}, map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove},
result) result)
if err != nil { if err != nil {
return nil, errors.Wrap(err, "ABCIQuery") return nil, errors.Wrap(err, "ABCIQuery")

View File

@ -61,7 +61,7 @@ func (c *Local) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue
} }
func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { func (Local) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(path, data, opts.Height, opts.Trusted) return core.ABCIQuery(path, data, opts.Height, opts.Prove)
} }
func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { func (Local) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {

View File

@ -31,10 +31,18 @@ func (a ABCIApp) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQu
} }
func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { func (a ABCIApp) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
q := a.App.Query(abci.RequestQuery{Data: data, Path: path, Height: opts.Height, Prove: opts.Trusted}) q := a.App.Query(abci.RequestQuery{
Data: data,
Path: path,
Height: opts.Height,
Prove: opts.Prove,
})
return &ctypes.ResultABCIQuery{q}, nil return &ctypes.ResultABCIQuery{q}, nil
} }
// NOTE: Caller should call a.App.Commit() separately,
// this function does not actually wait for a commit.
// TODO: Make it wait for a commit and set res.Height appropriately.
func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {
res := ctypes.ResultBroadcastTxCommit{} res := ctypes.ResultBroadcastTxCommit{}
res.CheckTx = a.App.CheckTx(tx) res.CheckTx = a.App.CheckTx(tx)
@ -42,6 +50,7 @@ func (a ABCIApp) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit
return &res, nil return &res, nil
} }
res.DeliverTx = a.App.DeliverTx(tx) res.DeliverTx = a.App.DeliverTx(tx)
res.Height = -1 // TODO
return &res, nil return &res, nil
} }
@ -86,7 +95,7 @@ func (m ABCIMock) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQ
} }
func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { func (m ABCIMock) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Trusted}) res, err := m.Query.GetResponse(QueryArgs{path, data, opts.Height, opts.Prove})
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -136,7 +145,7 @@ type QueryArgs struct {
Path string Path string
Data cmn.HexBytes Data cmn.HexBytes
Height int64 Height int64
Trusted bool Prove bool
} }
func (r *ABCIRecorder) addCall(call Call) { func (r *ABCIRecorder) addCall(call Call) {
@ -161,7 +170,7 @@ func (r *ABCIRecorder) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts
res, err := r.Client.ABCIQueryWithOptions(path, data, opts) res, err := r.Client.ABCIQueryWithOptions(path, data, opts)
r.addCall(Call{ r.addCall(Call{
Name: "abci_query", Name: "abci_query",
Args: QueryArgs{path, data, opts.Height, opts.Trusted}, Args: QueryArgs{path, data, opts.Height, opts.Prove},
Response: res, Response: res,
Error: err, Error: err,
}) })

View File

@ -51,7 +51,7 @@ func TestABCIMock(t *testing.T) {
assert.Equal("foobar", err.Error()) assert.Equal("foobar", err.Error())
// query always returns the response // query always returns the response
_query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Trusted: true}) _query, err := m.ABCIQueryWithOptions("/", nil, client.ABCIQueryOptions{Prove: false})
query := _query.Response query := _query.Response
require.Nil(err) require.Nil(err)
require.NotNil(query) require.NotNil(query)
@ -98,7 +98,7 @@ func TestABCIRecorder(t *testing.T) {
_, err := r.ABCIInfo() _, err := r.ABCIInfo()
assert.Nil(err, "expected no err on info") assert.Nil(err, "expected no err on info")
_, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Trusted: false}) _, err = r.ABCIQueryWithOptions("path", cmn.HexBytes("data"), client.ABCIQueryOptions{Prove: false})
assert.NotNil(err, "expected error on query") assert.NotNil(err, "expected error on query")
require.Equal(2, len(r.Calls)) require.Equal(2, len(r.Calls))
@ -122,7 +122,7 @@ func TestABCIRecorder(t *testing.T) {
require.True(ok) require.True(ok)
assert.Equal("path", qa.Path) assert.Equal("path", qa.Path)
assert.EqualValues("data", qa.Data) assert.EqualValues("data", qa.Data)
assert.False(qa.Trusted) assert.False(qa.Prove)
// now add some broadcasts (should all err) // now add some broadcasts (should all err)
txs := []types.Tx{{1}, {2}, {3}} txs := []types.Tx{{1}, {2}, {3}}
@ -173,9 +173,17 @@ func TestABCIApp(t *testing.T) {
require.NotNil(res.DeliverTx) require.NotNil(res.DeliverTx)
assert.True(res.DeliverTx.IsOK()) assert.True(res.DeliverTx.IsOK())
// commit
// TODO: This may not be necessary in the future
if res.Height == -1 {
m.App.Commit()
}
// check the key // check the key
_qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Trusted: true}) _qres, err := m.ABCIQueryWithOptions("/key", cmn.HexBytes(key), client.ABCIQueryOptions{Prove: true})
qres := _qres.Response qres := _qres.Response
require.Nil(err) require.Nil(err)
assert.EqualValues(value, qres.Value) assert.EqualValues(value, qres.Value)
// XXX Check proof
} }

View File

@ -1,3 +1,5 @@
package mock
/* /*
package mock returns a Client implementation that package mock returns a Client implementation that
accepts various (mock) implementations of the various methods. accepts various (mock) implementations of the various methods.
@ -11,7 +13,6 @@ For real clients, you probably want the "http" package. If you
want to directly call a tendermint node in process, you can use the want to directly call a tendermint node in process, you can use the
"local" package. "local" package.
*/ */
package mock
import ( import (
"reflect" "reflect"
@ -87,7 +88,7 @@ func (c Client) ABCIQuery(path string, data cmn.HexBytes) (*ctypes.ResultABCIQue
} }
func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { func (c Client) ABCIQueryWithOptions(path string, data cmn.HexBytes, opts client.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
return core.ABCIQuery(path, data, opts.Height, opts.Trusted) return core.ABCIQuery(path, data, opts.Height, opts.Prove)
} }
func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) { func (c Client) BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error) {

View File

@ -166,10 +166,10 @@ func TestAppCalls(t *testing.T) {
if err := client.WaitForHeight(c, apph, nil); err != nil { if err := client.WaitForHeight(c, apph, nil); err != nil {
t.Error(err) t.Error(err)
} }
_qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: true}) _qres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: false})
qres := _qres.Response qres := _qres.Response
if assert.Nil(err) && assert.True(qres.IsOK()) { if assert.Nil(err) && assert.True(qres.IsOK()) {
// assert.Equal(k, data.GetKey()) // only returned for proofs assert.Equal(k, qres.Key)
assert.EqualValues(v, qres.Value) assert.EqualValues(v, qres.Value)
} }
@ -221,10 +221,12 @@ func TestAppCalls(t *testing.T) {
assert.Equal(block.Block.LastCommit, commit2.Commit) assert.Equal(block.Block.LastCommit, commit2.Commit)
// and we got a proof that works! // and we got a proof that works!
_pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Trusted: false}) _pres, err := c.ABCIQueryWithOptions("/key", k, client.ABCIQueryOptions{Prove: true})
pres := _pres.Response pres := _pres.Response
assert.Nil(err) assert.Nil(err)
assert.True(pres.IsOK()) assert.True(pres.IsOK())
// XXX Test proof
} }
} }
@ -310,7 +312,7 @@ func TestTx(t *testing.T) {
// time to verify the proof // time to verify the proof
proof := ptx.Proof proof := ptx.Proof
if tc.prove && assert.EqualValues(t, tx, proof.Data) { if tc.prove && assert.EqualValues(t, tx, proof.Data) {
assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash))
} }
} }
} }
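The assertions above reflect the new general-Merkle `Proof.Verify` signature: it takes the root hash plus the leaf data and returns an error (nil on success), replacing the old `(index, total, leafHash, rootHash) bool` form now that `Index` and `Total` are gone from `TxProof`. A hedged usage sketch:

```go
// VerifyTxInclusion wraps the new API; the field names follow the
// test code above.
func VerifyTxInclusion(p types.TxProof, txHash []byte) error {
	if err := p.Proof.Verify(p.RootHash, txHash); err != nil {
		return fmt.Errorf("invalid tx proof: %v", err)
	}
	return nil
}
```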
@ -348,7 +350,7 @@ func TestTxSearch(t *testing.T) {
// time to verify the proof // time to verify the proof
proof := ptx.Proof proof := ptx.Proof
if assert.EqualValues(t, tx, proof.Data) { if assert.EqualValues(t, tx, proof.Data) {
assert.True(t, proof.Proof.Verify(proof.Index, proof.Total, txHash, proof.RootHash)) assert.NoError(t, proof.Proof.Verify(proof.RootHash, txHash))
} }
// query by height // query by height
@ -362,7 +364,7 @@ func TestTxSearch(t *testing.T) {
require.Len(t, result.Txs, 0) require.Len(t, result.Txs, 0)
// we query using a tag (see kvstore application) // we query using a tag (see kvstore application)
result, err = c.TxSearch("app.creator='jae'", false, 1, 30) result, err = c.TxSearch("app.creator='Cosmoshi Netowoko'", false, 1, 30)
require.Nil(t, err, "%+v", err) require.Nil(t, err, "%+v", err)
if len(result.Txs) == 0 { if len(result.Txs) == 0 {
t.Fatal("expected a lot of transactions") t.Fatal("expected a lot of transactions")

View File

@ -4,9 +4,8 @@ package client
// than the DefaultABCIQueryOptions. // than the DefaultABCIQueryOptions.
type ABCIQueryOptions struct { type ABCIQueryOptions struct {
Height int64 Height int64
Trusted bool Prove bool
} }
// DefaultABCIQueryOptions are latest height (0) and trusted equal to false // DefaultABCIQueryOptions are latest height (0) and prove false.
// (which will result in a proof being returned).
var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Trusted: false} var DefaultABCIQueryOptions = ABCIQueryOptions{Height: 0, Prove: false}
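Since proofs are now opt-in, callers that relied on the old default must ask for one explicitly. A hedged usage sketch against a local node (the endpoint values and key are assumptions):

```go
package main

import (
	"fmt"
	"log"

	cmn "github.com/tendermint/tendermint/libs/common"
	"github.com/tendermint/tendermint/rpc/client"
)

func main() {
	c := client.NewHTTP("tcp://localhost:26657", "/websocket")
	// Prove: true opts into a proof; the default is now prove=false.
	res, err := c.ABCIQueryWithOptions("/key", cmn.HexBytes("mykey"),
		client.ABCIQueryOptions{Height: 0, Prove: true})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("code: %d, value: %X\n", res.Response.Code, res.Response.Value)
}
```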

View File

@ -1,8 +1,6 @@
package core package core
import ( import (
"fmt"
abci "github.com/tendermint/tendermint/abci/types" abci "github.com/tendermint/tendermint/abci/types"
cmn "github.com/tendermint/tendermint/libs/common" cmn "github.com/tendermint/tendermint/libs/common"
ctypes "github.com/tendermint/tendermint/rpc/core/types" ctypes "github.com/tendermint/tendermint/rpc/core/types"
@ -12,7 +10,7 @@ import (
// Query the application for some information. // Query the application for some information.
// //
// ```shell // ```shell
// curl 'localhost:26657/abci_query?path=""&data="abcd"&trusted=false' // curl 'localhost:26657/abci_query?path=""&data="abcd"&prove=false'
// ``` // ```
// //
// ```go // ```go
@ -48,17 +46,13 @@ import (
// | path | string | false | false | Path to the data ("/a/b/c") | // | path | string | false | false | Path to the data ("/a/b/c") |
// | data | []byte | false | true | Data | // | data | []byte | false | true | Data |
// | height | int64 | 0 | false | Height (0 means latest) | // | height | int64 | 0 | false | Height (0 means latest) |
// | trusted | bool | false | false | Does not include a proof of the data inclusion | // | prove | bool | false | false | Includes proof if true |
func ABCIQuery(path string, data cmn.HexBytes, height int64, trusted bool) (*ctypes.ResultABCIQuery, error) { func ABCIQuery(path string, data cmn.HexBytes, height int64, prove bool) (*ctypes.ResultABCIQuery, error) {
if height < 0 {
return nil, fmt.Errorf("height must be non-negative")
}
resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{ resQuery, err := proxyAppQuery.QuerySync(abci.RequestQuery{
Path: path, Path: path,
Data: data, Data: data,
Height: height, Height: height,
Prove: !trusted, Prove: prove,
}) })
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -36,7 +36,7 @@ var Routes = map[string]*rpc.RPCFunc{
"broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"), "broadcast_tx_async": rpc.NewRPCFunc(BroadcastTxAsync, "tx"),
// abci API // abci API
"abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,trusted"), "abci_query": rpc.NewRPCFunc(ABCIQuery, "path,data,height,prove"),
"abci_info": rpc.NewRPCFunc(ABCIInfo, ""), "abci_info": rpc.NewRPCFunc(ABCIInfo, ""),
} }

View File

@ -41,7 +41,7 @@ set -e
# we should not be able to look up the value # we should not be able to look up the value
RESPONSE=`abci-cli query \"$VALUE\"` RESPONSE=`abci-cli query \"$VALUE\"`
set +e set +e
A=`echo $RESPONSE | grep $VALUE` A=`echo $RESPONSE | grep \"value: $VALUE\"`
if [[ $? == 0 ]]; then if [[ $? == 0 ]]; then
echo "Found '$VALUE' for $VALUE when we should not have. Response:" echo "Found '$VALUE' for $VALUE when we should not have. Response:"
echo "$RESPONSE" echo "$RESPONSE"

View File

@ -709,7 +709,6 @@ func (h hasher) Hash() []byte {
} }
} }
return hasher.Sum(nil) return hasher.Sum(nil)
} }
func aminoHash(item interface{}) []byte { func aminoHash(item interface{}) []byte {

View File

@ -1,6 +1,7 @@
package types package types
import ( import (
"crypto/rand"
"math" "math"
"testing" "testing"
"time" "time"
@ -8,7 +9,6 @@ import (
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require" "github.com/stretchr/testify/require"
"github.com/tendermint/tendermint/crypto"
"github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/crypto/tmhash"
cmn "github.com/tendermint/tendermint/libs/common" cmn "github.com/tendermint/tendermint/libs/common"
) )
@ -45,51 +45,37 @@ func TestBlockValidateBasic(t *testing.T) {
ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address) ev := NewMockGoodEvidence(h, 0, valSet.Validators[0].Address)
evList := []Evidence{ev} evList := []Evidence{ev}
testCases := []struct {
testName string
malleateBlock func(*Block)
expErr bool
}{
{"Make Block", func(blk *Block) {}, false},
{"Make Block w/ proposer Addr", func(blk *Block) { blk.ProposerAddress = valSet.GetProposer().Address }, false},
{"Increase NumTxs", func(blk *Block) { blk.NumTxs++ }, true},
{"Remove 1/2 the commits", func(blk *Block) {
blk.LastCommit.Precommits = commit.Precommits[:commit.Size()/2]
blk.LastCommit.hash = nil // clear hash or change wont be noticed
}, true},
{"Remove LastCommitHash", func(blk *Block) { blk.LastCommitHash = []byte("something else") }, true},
{"Tampered Data", func(blk *Block) {
blk.Data.Txs[0] = Tx("something else")
blk.Data.hash = nil // clear hash or change wont be noticed
}, true},
{"Tampered DataHash", func(blk *Block) {
blk.DataHash = cmn.RandBytes(len(blk.DataHash))
}, true},
{"Tampered EvidenceHash", func(blk *Block) {
blk.EvidenceHash = []byte("something else")
}, true},
}
for _, tc := range testCases {
t.Run(tc.testName, func(t *testing.T) {
block := MakeBlock(h, txs, commit, evList) block := MakeBlock(h, txs, commit, evList)
require.NotNil(t, block) tc.malleateBlock(block)
block.ProposerAddress = valSet.GetProposer().Address assert.Equal(t, tc.expErr, block.ValidateBasic() != nil, "Validate Basic had an unexpected result")
})
// proper block must pass }
err = block.ValidateBasic()
require.NoError(t, err)
// tamper with NumTxs
block = MakeBlock(h, txs, commit, evList)
block.NumTxs++
err = block.ValidateBasic()
require.Error(t, err)
// remove 1/2 the commits
block = MakeBlock(h, txs, commit, evList)
block.LastCommit.Precommits = commit.Precommits[:commit.Size()/2]
block.LastCommit.hash = nil // clear hash or change wont be noticed
err = block.ValidateBasic()
require.Error(t, err)
// tamper with LastCommitHash
block = MakeBlock(h, txs, commit, evList)
block.LastCommitHash = []byte("something else")
err = block.ValidateBasic()
require.Error(t, err)
// tamper with data
block = MakeBlock(h, txs, commit, evList)
block.Data.Txs[0] = Tx("something else")
block.Data.hash = nil // clear hash or change wont be noticed
err = block.ValidateBasic()
require.Error(t, err)
// tamper with DataHash
block = MakeBlock(h, txs, commit, evList)
block.DataHash = cmn.RandBytes(len(block.DataHash))
err = block.ValidateBasic()
require.Error(t, err)
// tamper with evidence
block = MakeBlock(h, txs, commit, evList)
block.EvidenceHash = []byte("something else")
err = block.ValidateBasic()
require.Error(t, err)
} }
func TestBlockHash(t *testing.T) { func TestBlockHash(t *testing.T) {
@ -161,7 +147,11 @@ func TestBlockString(t *testing.T) {
} }
func makeBlockIDRandom() BlockID { func makeBlockIDRandom() BlockID {
blockHash, blockPartsHeader := crypto.CRandBytes(tmhash.Size), PartSetHeader{123, crypto.CRandBytes(tmhash.Size)} blockHash := make([]byte, tmhash.Size)
partSetHash := make([]byte, tmhash.Size)
rand.Read(blockHash)
rand.Read(partSetHash)
blockPartsHeader := PartSetHeader{123, partSetHash}
return BlockID{blockHash, blockPartsHeader} return BlockID{blockHash, blockPartsHeader}
} }
@ -211,28 +201,25 @@ func TestCommit(t *testing.T) {
} }
func TestCommitValidateBasic(t *testing.T) { func TestCommitValidateBasic(t *testing.T) {
commit := randCommit() testCases := []struct {
assert.NoError(t, commit.ValidateBasic()) testName string
malleateCommit func(*Commit)
// nil precommit is OK expectErr bool
commit = randCommit() }{
commit.Precommits[0] = nil {"Random Commit", func(com *Commit) {}, false},
assert.NoError(t, commit.ValidateBasic()) {"Nil precommit", func(com *Commit) { com.Precommits[0] = nil }, false},
{"Incorrect signature", func(com *Commit) { com.Precommits[0].Signature = []byte{0} }, false},
// tamper with types {"Incorrect type", func(com *Commit) { com.Precommits[0].Type = VoteTypePrevote }, true},
commit = randCommit() {"Incorrect height", func(com *Commit) { com.Precommits[0].Height = int64(100) }, true},
commit.Precommits[0].Type = VoteTypePrevote {"Incorrect round", func(com *Commit) { com.Precommits[0].Round = 100 }, true},
assert.Error(t, commit.ValidateBasic()) }
for _, tc := range testCases {
// tamper with height t.Run(tc.testName, func(t *testing.T) {
commit = randCommit() com := randCommit()
commit.Precommits[0].Height = int64(100) tc.malleateCommit(com)
assert.Error(t, commit.ValidateBasic()) assert.Equal(t, tc.expectErr, com.ValidateBasic() != nil, "Validate Basic had an unexpected result")
})
// tamper with round }
commit = randCommit()
commit.Precommits[0].Round = 100
assert.Error(t, commit.ValidateBasic())
} }
func TestMaxHeaderBytes(t *testing.T) { func TestMaxHeaderBytes(t *testing.T) {

types/canonical.go (new file, 116 lines)
View File

@ -0,0 +1,116 @@
package types
import (
"time"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
// Canonical* wraps the structs in types so they can be amino-encoded for use in SignBytes / the Signable interface.
// TimeFormat is used for generating the sigs
const TimeFormat = time.RFC3339Nano
type CanonicalBlockID struct {
Hash cmn.HexBytes `json:"hash,omitempty"`
PartsHeader CanonicalPartSetHeader `json:"parts,omitempty"`
}
type CanonicalPartSetHeader struct {
Hash cmn.HexBytes `json:"hash,omitempty"`
Total int `json:"total,omitempty"`
}
type CanonicalProposal struct {
ChainID string `json:"@chain_id"`
Type string `json:"@type"`
BlockPartsHeader CanonicalPartSetHeader `json:"block_parts_header"`
Height int64 `json:"height"`
POLBlockID CanonicalBlockID `json:"pol_block_id"`
POLRound int `json:"pol_round"`
Round int `json:"round"`
Timestamp time.Time `json:"timestamp"`
}
type CanonicalVote struct {
ChainID string `json:"@chain_id"`
Type string `json:"@type"`
BlockID CanonicalBlockID `json:"block_id"`
Height int64 `json:"height"`
Round int `json:"round"`
Timestamp time.Time `json:"timestamp"`
VoteType byte `json:"type"`
}
type CanonicalHeartbeat struct {
ChainID string `json:"@chain_id"`
Type string `json:"@type"`
Height int64 `json:"height"`
Round int `json:"round"`
Sequence int `json:"sequence"`
ValidatorAddress Address `json:"validator_address"`
ValidatorIndex int `json:"validator_index"`
}
//-----------------------------------
// Canonicalize the structs
func CanonicalizeBlockID(blockID BlockID) CanonicalBlockID {
return CanonicalBlockID{
Hash: blockID.Hash,
PartsHeader: CanonicalizePartSetHeader(blockID.PartsHeader),
}
}
func CanonicalizePartSetHeader(psh PartSetHeader) CanonicalPartSetHeader {
return CanonicalPartSetHeader{
psh.Hash,
psh.Total,
}
}
func CanonicalizeProposal(chainID string, proposal *Proposal) CanonicalProposal {
return CanonicalProposal{
ChainID: chainID,
Type: "proposal",
BlockPartsHeader: CanonicalizePartSetHeader(proposal.BlockPartsHeader),
Height: proposal.Height,
Timestamp: proposal.Timestamp,
POLBlockID: CanonicalizeBlockID(proposal.POLBlockID),
POLRound: proposal.POLRound,
Round: proposal.Round,
}
}
func CanonicalizeVote(chainID string, vote *Vote) CanonicalVote {
return CanonicalVote{
ChainID: chainID,
Type: "vote",
BlockID: CanonicalizeBlockID(vote.BlockID),
Height: vote.Height,
Round: vote.Round,
Timestamp: vote.Timestamp,
VoteType: vote.Type,
}
}
func CanonicalizeHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalHeartbeat {
return CanonicalHeartbeat{
ChainID: chainID,
Type: "heartbeat",
Height: heartbeat.Height,
Round: heartbeat.Round,
Sequence: heartbeat.Sequence,
ValidatorAddress: heartbeat.ValidatorAddress,
ValidatorIndex: heartbeat.ValidatorIndex,
}
}
// CanonicalTime can be used to stringify time in a canonical way.
func CanonicalTime(t time.Time) string {
// Note that sending time over amino resets it to
// local time, so we need to force UTC here for the
// signatures to match
return tmtime.Canonical(t).Format(TimeFormat)
}
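With these helpers, `SignBytes` for votes, proposals, and heartbeats becomes canonicalize-then-amino-binary-encode; the Heartbeat and Proposal hunks below show exactly this pattern. Sketch of the vote variant, assuming the `types` package codec `cdc`:

```go
func voteSignBytes(chainID string, vote *Vote) []byte {
	// Amino-binary encode the canonical form; these are the bytes
	// validators actually sign.
	bz, err := cdc.MarshalBinary(CanonicalizeVote(chainID, vote))
	if err != nil {
		panic(err)
	}
	return bz
}
```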

View File

@ -1,115 +0,0 @@
package types
import (
"time"
cmn "github.com/tendermint/tendermint/libs/common"
tmtime "github.com/tendermint/tendermint/types/time"
)
// Canonical json is amino's json for structs with fields in alphabetical order
// TimeFormat is used for generating the sigs
const TimeFormat = time.RFC3339Nano
type CanonicalJSONBlockID struct {
Hash cmn.HexBytes `json:"hash,omitempty"`
PartsHeader CanonicalJSONPartSetHeader `json:"parts,omitempty"`
}
type CanonicalJSONPartSetHeader struct {
Hash cmn.HexBytes `json:"hash,omitempty"`
Total int `json:"total,omitempty"`
}
type CanonicalJSONProposal struct {
ChainID string `json:"@chain_id"`
Type string `json:"@type"`
BlockPartsHeader CanonicalJSONPartSetHeader `json:"block_parts_header"`
Height int64 `json:"height"`
POLBlockID CanonicalJSONBlockID `json:"pol_block_id"`
POLRound int `json:"pol_round"`
Round int `json:"round"`
Timestamp string `json:"timestamp"`
}
type CanonicalJSONVote struct {
ChainID string `json:"@chain_id"`
Type string `json:"@type"`
BlockID CanonicalJSONBlockID `json:"block_id"`
Height int64 `json:"height"`
Round int `json:"round"`
Timestamp string `json:"timestamp"`
VoteType byte `json:"type"`
}
type CanonicalJSONHeartbeat struct {
ChainID string `json:"@chain_id"`
Type string `json:"@type"`
Height int64 `json:"height"`
Round int `json:"round"`
Sequence int `json:"sequence"`
ValidatorAddress Address `json:"validator_address"`
ValidatorIndex int `json:"validator_index"`
}
//-----------------------------------
// Canonicalize the structs
func CanonicalBlockID(blockID BlockID) CanonicalJSONBlockID {
return CanonicalJSONBlockID{
Hash: blockID.Hash,
PartsHeader: CanonicalPartSetHeader(blockID.PartsHeader),
}
}
func CanonicalPartSetHeader(psh PartSetHeader) CanonicalJSONPartSetHeader {
return CanonicalJSONPartSetHeader{
psh.Hash,
psh.Total,
}
}
func CanonicalProposal(chainID string, proposal *Proposal) CanonicalJSONProposal {
return CanonicalJSONProposal{
ChainID: chainID,
Type: "proposal",
BlockPartsHeader: CanonicalPartSetHeader(proposal.BlockPartsHeader),
Height: proposal.Height,
Timestamp: CanonicalTime(proposal.Timestamp),
POLBlockID: CanonicalBlockID(proposal.POLBlockID),
POLRound: proposal.POLRound,
Round: proposal.Round,
}
}
func CanonicalVote(chainID string, vote *Vote) CanonicalJSONVote {
return CanonicalJSONVote{
ChainID: chainID,
Type: "vote",
BlockID: CanonicalBlockID(vote.BlockID),
Height: vote.Height,
Round: vote.Round,
Timestamp: CanonicalTime(vote.Timestamp),
VoteType: vote.Type,
}
}
func CanonicalHeartbeat(chainID string, heartbeat *Heartbeat) CanonicalJSONHeartbeat {
return CanonicalJSONHeartbeat{
ChainID: chainID,
Type: "heartbeat",
Height: heartbeat.Height,
Round: heartbeat.Round,
Sequence: heartbeat.Sequence,
ValidatorAddress: heartbeat.ValidatorAddress,
ValidatorIndex: heartbeat.ValidatorIndex,
}
}
func CanonicalTime(t time.Time) string {
// Note that sending time over amino resets it to
// local time, we need to force UTC here, so the
// signatures match
return tmtime.Canonical(t).Format(TimeFormat)
}

View File

@ -23,7 +23,7 @@ type Heartbeat struct {
// SignBytes returns the Heartbeat bytes for signing. // SignBytes returns the Heartbeat bytes for signing.
// It panics if the Heartbeat is nil. // It panics if the Heartbeat is nil.
func (heartbeat *Heartbeat) SignBytes(chainID string) []byte { func (heartbeat *Heartbeat) SignBytes(chainID string) []byte {
bz, err := cdc.MarshalJSON(CanonicalHeartbeat(chainID, heartbeat)) bz, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, heartbeat))
if err != nil { if err != nil {
panic(err) panic(err)
} }

View File

@ -34,19 +34,27 @@ func TestHeartbeatString(t *testing.T) {
} }
func TestHeartbeatWriteSignBytes(t *testing.T) { func TestHeartbeatWriteSignBytes(t *testing.T) {
chainID := "test_chain_id"
hb := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1} {
bz := hb.SignBytes("0xdeadbeef") testHeartbeat := &Heartbeat{ValidatorIndex: 1, Height: 10, Round: 1}
// XXX HMMMMMMM signBytes := testHeartbeat.SignBytes(chainID)
require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"10","round":"1","sequence":"0","validator_address":"","validator_index":"1"}`) expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat))
require.NoError(t, err)
require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat")
}
plainHb := &Heartbeat{} {
bz = plainHb.SignBytes("0xdeadbeef") testHeartbeat := &Heartbeat{}
require.Equal(t, string(bz), `{"@chain_id":"0xdeadbeef","@type":"heartbeat","height":"0","round":"0","sequence":"0","validator_address":"","validator_index":"0"}`) signBytes := testHeartbeat.SignBytes(chainID)
expected, err := cdc.MarshalBinary(CanonicalizeHeartbeat(chainID, testHeartbeat))
require.NoError(t, err)
require.Equal(t, expected, signBytes, "Got unexpected sign bytes for Heartbeat")
}
require.Panics(t, func() { require.Panics(t, func() {
var nilHb *Heartbeat var nilHb *Heartbeat
bz := nilHb.SignBytes("0xdeadbeef") signBytes := nilHb.SignBytes(chainID)
require.Equal(t, string(bz), "null") require.Equal(t, string(signBytes), "null")
}) })
} }

View File

@ -99,8 +99,6 @@ func (params ConsensusParams) Update(params2 *abci.ConsensusParams) ConsensusPar
} }
// we must defensively consider any structs may be nil // we must defensively consider any structs may be nil
// XXX: it's cast city over here. It's ok because we only do int32->int
// but still, watch it champ.
if params2.BlockSize != nil { if params2.BlockSize != nil {
res.BlockSize.MaxBytes = params2.BlockSize.MaxBytes res.BlockSize.MaxBytes = params2.BlockSize.MaxBytes
res.BlockSize.MaxGas = params2.BlockSize.MaxGas res.BlockSize.MaxGas = params2.BlockSize.MaxGas

View File

@ -190,7 +190,7 @@ func (ps *PartSet) AddPart(part *Part) (bool, error) {
} }
// Check hash proof // Check hash proof
if !part.Proof.Verify(part.Index, ps.total, part.Hash(), ps.Hash()) { if part.Proof.Verify(ps.Hash(), part.Hash()) != nil {
return false, ErrPartSetInvalidProof return false, ErrPartSetInvalidProof
} }

View File

@ -2,6 +2,7 @@ package types
import ( import (
"bytes" "bytes"
"errors"
"fmt" "fmt"
"github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto"
@ -103,3 +104,29 @@ func (pv *MockPV) DisableChecks() {
// Currently this does nothing, // Currently this does nothing,
// as MockPV has no safety checks at all. // as MockPV has no safety checks at all.
} }
type erroringMockPV struct {
*MockPV
}
var ErroringMockPVErr = errors.New("erroringMockPV always returns an error")
// Implements PrivValidator.
func (pv *erroringMockPV) SignVote(chainID string, vote *Vote) error {
return ErroringMockPVErr
}
// Implements PrivValidator.
func (pv *erroringMockPV) SignProposal(chainID string, proposal *Proposal) error {
return ErroringMockPVErr
}
// Implements PrivValidator.
func (pv *erroringMockPV) SignHeartbeat(chainID string, heartbeat *Heartbeat) error {
return ErroringMockPVErr
}
// NewErroringMockPV returns a MockPV that fails on each signing request. Again, for testing only.
func NewErroringMockPV() *erroringMockPV {
return &erroringMockPV{&MockPV{ed25519.GenPrivKey()}}
}
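The privval tests shown earlier exercise this mock end to end; injecting the always-failing signer is a one-line swap in the test setup:

```go
// From TestRemoteSignVoteErrors above: every signing request fails, so
// the RemoteSigner replies with a populated RemoteSignerError.
sc, rs := testSetupSocketPair(t, chainID, types.NewErroringMockPV())
defer sc.Stop()
defer rs.Stop()
```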

View File

@ -52,7 +52,7 @@ func (p *Proposal) String() string {
// SignBytes returns the Proposal bytes for signing // SignBytes returns the Proposal bytes for signing
func (p *Proposal) SignBytes(chainID string) []byte { func (p *Proposal) SignBytes(chainID string) []byte {
bz, err := cdc.MarshalJSON(CanonicalProposal(chainID, p)) bz, err := cdc.MarshalBinary(CanonicalizeProposal(chainID, p))
if err != nil { if err != nil {
panic(err) panic(err)
} }

Some files were not shown because too many files have changed in this diff.