Merge branch 'develop' into config

Zach 2018-01-10 14:21:24 +00:00 committed by GitHub
commit 39acf1c5e8
41 changed files with 1110 additions and 404 deletions


@ -1,34 +1,13 @@
GOTOOLS := \
GOTOOLS = \
github.com/mitchellh/gox \
github.com/Masterminds/glide \
github.com/tcnksm/ghr \
gopkg.in/alecthomas/gometalinter.v2
GO_MIN_VERSION := 1.9.2
PACKAGES := $(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS ?= tendermint
TMHOME ?= $(HOME)/.tendermint
GOPATH ?= $(shell go env GOPATH)
GOROOT ?= $(shell go env GOROOT)
GOGCCFLAGS ?= $(shell go env GOGCCFLAGS)
#LDFLAGS_EXTRA ?= -w -s
XC_ARCH ?= 386 amd64 arm
XC_OS ?= solaris darwin freebsd linux windows
XC_OSARCH ?= !darwin/arm !solaris/amd64 !freebsd/amd64
BUILD_OUTPUT ?= ./build/{{.OS}}_{{.Arch}}/tendermint
GOX_FLAGS = -os="$(XC_OS)" -arch="$(XC_ARCH)" -osarch="$(XC_OSARCH)" -output="$(BUILD_OUTPUT)"
ifeq ($(BUILD_FLAGS_RACE),YES)
RACEFLAG=-race
else
RACEFLAG=
endif
BUILD_FLAGS = -asmflags "-trimpath $(GOPATH)" -gcflags "-trimpath $(GOPATH)" -tags "$(BUILD_TAGS)" -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=$(shell git rev-parse --short=8 HEAD) $(LDFLAGS_EXTRA)" $(RACEFLAG)
GO_VERSION:=$(shell go version | grep -o '[[:digit:]]\+.[[:digit:]]\+.[[:digit:]]\+')
#Check that the minor version of Go meets the minimum required
GO_MINOR_VERSION := $(shell grep -o \.[[:digit:]][[:digit:]]*\. <<< $(GO_VERSION) | grep -o [[:digit:]]* )
GO_MIN_MINOR_VERSION := $(shell grep -o \.[[:digit:]][[:digit:]]*\. <<< $(GO_MIN_VERSION) | grep -o [[:digit:]]* )
GO_MINOR_VERSION_CHECK := $(shell test $(GO_MINOR_VERSION) -ge $(GO_MIN_MINOR_VERSION) && echo YES)
GOTOOLS_CHECK = gox glide ghr gometalinter.v2
PACKAGES=$(shell go list ./... | grep -v '/vendor/')
BUILD_TAGS?=tendermint
TMHOME = $${TMHOME:-$$HOME/.tendermint}
BUILD_FLAGS = -ldflags "-X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short HEAD`"
all: check build test install metalinter
@ -38,61 +17,27 @@ check: check_tools get_vendor_deps
########################################
### Build
build_xc: check_tools
$(shell which gox) $(BUILD_FLAGS) $(GOX_FLAGS) ./cmd/tendermint/
build:
ifeq ($(OS),Windows_NT)
make build_xc XC_ARCH=amd64 XC_OS=windows BUILD_OUTPUT=$(GOPATH)/bin/tendermint
else
make build_xc XC_ARCH=amd64 XC_OS="$(shell uname -s)" BUILD_OUTPUT=$(GOPATH)/bin/tendermint
endif
go build $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint/
build_race:
#TODO: Wait for this to be merged: https://github.com/mitchellh/gox/pull/105 Then switch over to make build and remove the go build line.
# make build BUILD_FLAGS_RACE=YES
$(shell which go) build $(BUILD_FLAGS) -race -o "$(BUILD_OUTPUT)" ./cmd/tendermint/
go build -race $(BUILD_FLAGS) -o build/tendermint ./cmd/tendermint
# dist builds binaries for all platforms and packages them for distribution
dist:
@BUILD_TAGS='$(BUILD_TAGS)' sh -c "'$(CURDIR)/scripts/dist.sh'"
install:
make build
go install $(BUILD_FLAGS) ./cmd/tendermint
########################################
### Tools & dependencies
check_tools:
ifeq ($(GO_VERSION),)
$(error go not found)
endif
#Check minimum required go version
ifneq ($(GO_VERSION),$(GO_MIN_VERSION))
$(warning WARNING: build will not be deterministic. go version should be $(GO_MIN_VERSION))
ifneq ($(GO_MINOR_VERSION_CHECK),YES)
$(error ERROR: The minor version of Go ($(GO_VERSION)) is lower than the minimum required ($(GO_MIN_VERSION)))
endif
endif
#-fdebug-prefix-map switches the temporary, randomized workdir name in the binary to a static text
ifneq ($(findstring -fdebug-prefix-map,$(GOGCCFLAGS)),-fdebug-prefix-map)
$(warning WARNING: build will not be deterministic. The compiler does not support the '-fdebug-prefix-map' flag.)
endif
#GOROOT string is copied into the binary. For deterministic builds, we agree to keep it at /usr/local/go. (Default for golang:1.9.2 docker image, linux and osx.)
ifneq ($(GOROOT),/usr/local/go)
$(warning WARNING: build will not be deterministic. GOROOT should be set to /usr/local/go)
endif
#GOPATH string is copied into the binary. Although the -trimpath flag tries to eliminate it, it doesn't do it everywhere in Go 1.9.2. For deterministic builds we agree to keep it at /go. (Default for golang:1.9.2 docker image.)
ifneq ($(GOPATH),/go)
$(warning WARNING: build will not be deterministic. GOPATH should be set to /go)
endif
#External dependencies defined in GOTOOLS are built with get_tools. If they are already available on the system (for example via a package manager), then get_tools might not be necessary.
ifneq ($(findstring $(GOPATH)/bin,$(PATH)),$(GOPATH)/bin)
$(warning WARNING: PATH does not contain GOPATH/bin. Some external dependencies might be unavailable.)
endif
# https://stackoverflow.com/a/25668869
@echo "Found tools: $(foreach tool,$(notdir $(GOTOOLS)),$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH. Add GOPATH/bin to PATH and run 'make get_tools'")))"
@# https://stackoverflow.com/a/25668869
@echo "Found tools: $(foreach tool,$(GOTOOLS_CHECK),\
$(if $(shell which $(tool)),$(tool),$(error "No $(tool) in PATH")))"
get_tools:
@echo "--> Installing tools"


@ -0,0 +1,139 @@
# Tendermint
## Overview
This is a quick start guide. If you have a vague idea about how Tendermint works
and want to get started right away, continue. Otherwise, [review the documentation](http://tendermint.readthedocs.io/en/master/)
## Install
### Quick Install
Installation on a fresh Ubuntu 16.04 machine can be done with [this script](https://git.io/vNLfY), like so:
```
curl -L https://git.io/vNLfY | bash
source ~/.profile
```
WARNING: do not run the above on your local machine.
The script is also used to facilitate cluster deployment below.
### Manual Install
Requires:
- `go` minimum version 1.9.2
- `$GOPATH` set and `$GOPATH/bin` on your `$PATH` (see https://github.com/tendermint/tendermint/wiki/Setting-GOPATH)
To install Tendermint, run:
```
go get github.com/tendermint/tendermint
cd $GOPATH/src/github.com/tendermint/tendermint
make get_vendor_deps
make install
```
Confirm installation:
```
$ tendermint version
0.15.0-381fe19
```
## Initialization
Running:
```
tendermint init
```
will create the required files for a single, local node.
These files are found in `$HOME/.tendermint`:
```
$ ls $HOME/.tendermint
config.toml data genesis.json priv_validator.json
```
For a single, local node, no further configuration is required.
Configuring a cluster is covered further below.
## Local Node
Start tendermint with a simple in-process application:
```
tendermint node --proxy_app=dummy
```
and blocks will start to stream in:
```
I[01-06|01:45:15.592] Executed block module=state height=1 validTxs=0 invalidTxs=0
I[01-06|01:45:15.624] Committed state module=state height=1 txs=0 appHash=
```
Check the status with:
```
curl -s localhost:46657/status
```
### Sending Transactions
With the dummy app running, we can send transactions:
```
curl -s 'localhost:46657/broadcast_tx_commit?tx="abcd"'
```
and check that it worked with:
```
curl -s 'localhost:46657/abci_query?data="abcd"'
```
Since the dummy app is a key:value store, we can also send transactions with a key and value:
```
curl -s 'localhost:46657/broadcast_tx_commit?tx="name=satoshi"'
```
and query the key:
```
curl -s 'localhost:46657/abci_query?data="name"'
```
where the value is returned in hex.
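For instance, if the value stored under `name` is `satoshi`, the query returns it hex-encoded. A minimal Go sketch of decoding such a value (the literal below is a hypothetical response, not output captured from a node):

```go
package main

import (
	"encoding/hex"
	"fmt"
)

func main() {
	// Hypothetical hex value as returned by /abci_query for the key "name".
	raw := "7361746f736869"
	decoded, err := hex.DecodeString(raw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(decoded)) // satoshi
}
```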
## Cluster of Nodes
First create four Ubuntu cloud machines. The following was tested on Digital Ocean Ubuntu 16.04 x64 (3GB/1CPU, 20GB SSD). We'll refer to their respective IP addresses below as IP1, IP2, IP3, IP4.
Then, `ssh` into each machine and use `curl` to download and execute [this script](https://git.io/vNLfY):
```
curl -L https://git.io/vNLfY | bash
source ~/.profile
```
This will install `go` and other dependencies, get the Tendermint source code, then compile the `tendermint` binary.
Next, `cd` into `docs/examples`. Run each of the commands below on its respective node, in sequence:
```
tendermint node --home ./node1 --proxy_app=dummy
tendermint node --home ./node2 --proxy_app=dummy --p2p.seeds IP1:46656
tendermint node --home ./node3 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656
tendermint node --home ./node4 --proxy_app=dummy --p2p.seeds IP1:46656,IP2:46656,IP3:46656
```
Note that after the third node is started, blocks will start to stream in because >2/3 of validators (defined in the `genesis.json`) have come online. Seeds can also be specified in the `config.toml`. See [this PR](https://github.com/tendermint/tendermint/pull/792) for more information about configuration options.
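For example, using the placeholder addresses above, the corresponding `config.toml` entry would look like:
```
[p2p]
seeds = "IP1:46656,IP2:46656,IP3:46656"
```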
Transactions can then be sent as covered in the single, local node example above.


@ -0,0 +1,32 @@
#!/usr/bin/env bash
# XXX: this script is meant to be used only on a fresh Ubuntu 16.04 instance
# and has only been tested on Digital Ocean
# get and unpack golang
curl -O https://storage.googleapis.com/golang/go1.9.2.linux-amd64.tar.gz
tar -xvf go1.9.2.linux-amd64.tar.gz
apt install make
## move go and add binary to path
mv go /usr/local
echo "export PATH=\$PATH:/usr/local/go/bin" >> ~/.profile
## create the GOPATH directory, set GOPATH and put on PATH
mkdir goApps
echo "export GOPATH=/root/goApps" >> ~/.profile
echo "export PATH=\$PATH:\$GOPATH/bin" >> ~/.profile
source ~/.profile
## get the code and move into it
REPO=github.com/tendermint/tendermint
go get $REPO
cd $GOPATH/src/$REPO
## build
git checkout v0.15.0
make get_tools
make get_vendor_deps
make install


@ -0,0 +1,15 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""


@ -0,0 +1,42 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}


@ -0,0 +1,15 @@
{
"address":"4DC2756029CE0D8F8C6C3E4C3CE6EE8C30AF352F",
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"last_height":0,
"last_round":0,
"last_step":0,
"last_signature":null,
"priv_key":{
"type":"ed25519",
"data":"4D3648E1D93C8703E436BFF814728B6BD270CFDFD686DF5385E8ACBEB7BE2D7DF08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
}
}


@ -0,0 +1,15 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""


@ -0,0 +1,42 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}


@ -0,0 +1,15 @@
{
"address": "DD6C63A762608A9DDD4A845657743777F63121D6",
"pub_key": {
"type": "ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"last_signature": null,
"priv_key": {
"type": "ed25519",
"data": "7B0DE666FF5E9B437D284BCE767F612381890C018B93B0A105D2E829A568DA6FA8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
}
}


@ -0,0 +1,15 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""


@ -0,0 +1,42 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}


@ -0,0 +1,15 @@
{
"address": "6D6A1E313B407B5474106CA8759C976B777AB659",
"pub_key": {
"type": "ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"last_signature": null,
"priv_key": {
"type": "ed25519",
"data": "622432A370111A5C25CFE121E163FE709C9D5C95F551EDBD7A2C69A8545C9B76E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
}
}


@ -0,0 +1,15 @@
# This is a TOML config file.
# For more information, see https://github.com/toml-lang/toml
proxy_app = "tcp://127.0.0.1:46658"
moniker = "penguin"
fast_sync = true
db_backend = "leveldb"
log_level = "state:info,*:error"
[rpc]
laddr = "tcp://0.0.0.0:46657"
[p2p]
laddr = "tcp://0.0.0.0:46656"
seeds = ""


@ -0,0 +1,42 @@
{
"genesis_time":"0001-01-01T00:00:00Z",
"chain_id":"test-chain-wt7apy",
"validators":[
{
"pub_key":{
"type":"ed25519",
"data":"F08446C80A33E10D620E21450821B58D053778528F2B583D423B3E46EC647D30"
},
"power":10,
"name":"node1"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "A8423F70A9E512643B4B00F7C3701ECAD1F31B0A1FAA45852C41046353B9A07F"
},
"power":10,
"name":"node2"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "E52EFFAEDFE1D618ECDA71DE3B23592B3612CAABA0C10826E4C3120B2198C29A"
},
"power":10,
"name":"node3"
}
,
{
"pub_key":{
"type":"ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"power":10,
"name":"node4"
}
],
"app_hash":""
}


@ -0,0 +1,15 @@
{
"address": "829A9663611D3DD88A3D84EA0249679D650A0755",
"pub_key": {
"type": "ed25519",
"data": "2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
},
"last_height": 0,
"last_round": 0,
"last_step": 0,
"last_signature": null,
"priv_key": {
"type": "ed25519",
"data": "0A604D1C9AE94A50150BF39E603239092F9392E4773F4D8F4AC1D86E6438E89E2B8FC09C07955A02998DFE5AF1AAD1C44115ECA7635FF51A867CF4265D347C07"
}
}


@ -6,19 +6,6 @@ From Binary
To download pre-built binaries, see the `Download page <https://tendermint.com/download>`__.
From Source using Docker
------------------------
If you have docker running, all you need is the ``golang`` image to build tendermint.
If you don't, you can get help setting it up `here <https://docs.docker.com/engine/installation/>`__.
::
mkdir $HOME/tendermintbin
docker run --rm -it -v $HOME/tendermintbin:/go/bin:Z golang:1.9.2 /bin/bash -c "go-wrapper download github.com/tendermint/tendermint/cmd/tendermint ; make -C /go/src/github.com/tendermint/tendermint get_tools get_vendor_deps build_cc"
You will find the ``tendermint`` binaries for different architectures and operating systems in your ``$HOME/tendermintbin`` folder.
From Source
-----------


@ -6,14 +6,14 @@ Here we describe configuration options around the Peer Exchange.
`--p2p.seed_mode`
The node operates in seed mode. It will kick incoming peers after sharing some peers.
It will continually crawl the network for peers.
The node operates in seed mode. In seed mode, a node continuously crawls the network for peers,
and upon incoming connection shares some peers and disconnects.
## Seeds
`--p2p.seeds "1.2.3.4:46656,2.3.4.5:4444"`
Dials these seeds when we need more peers. They will return a list of peers and then disconnect.
Dials these seeds when we need more peers. They should return a list of peers and then disconnect.
If we already have enough peers in the address book, we may never need to dial them.
## Persistent Peers
@ -27,7 +27,7 @@ anchor us in the p2p network.
Note that the auto-redial uses exponential backoff and will give up
after a day of trying to connect.
NOTE: If `dial_seeds` and `persistent_peers` intersect,
NOTE: If `seeds` and `persistent_peers` intersect,
the user will be WARNED that seeds may auto-close connections
and the node may not be able to keep the connection persistent.


@ -1,12 +1,14 @@
## P2P Multiplex Connection
...
## MConnection
`MConnection` is a multiplex connection:
__multiplex__ *noun* a system or signal involving simultaneous transmission of
several messages along a single channel of communication.
Each `MConnection` handles message transmission on multiple abstract communication
`Channel`s. Each channel has a globally unique byte id.
`MConnection` is a multiplex connection that supports multiple independent streams
with distinct quality of service guarantees atop a single TCP connection.
Each stream is known as a `Channel` and each `Channel` has a globally unique byte id.
Each `Channel` also has a relative priority that determines the quality of service
of the `Channel` in comparison to the others.
The byte id and the relative priorities of each `Channel` are configured upon
initialization of the connection.
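A minimal sketch of what such a per-channel configuration might look like; the struct and field names below are illustrative, not the actual p2p package API:

```go
package main

import "fmt"

// Illustrative only: each channel is identified by a byte id and carries a
// relative priority used when multiplexing sends over the single connection.
type ChannelConfig struct {
	ID       byte // globally unique channel id
	Priority int  // relative quality of service vs. the other channels
}

func main() {
	// Hypothetical set of channels configured when the connection is created.
	channels := []ChannelConfig{
		{ID: 0x20, Priority: 5}, // e.g. a higher-priority consensus-style channel
		{ID: 0x30, Priority: 1}, // e.g. a lower-priority mempool-style channel
	}
	for _, ch := range channels {
		fmt.Printf("channel 0x%02X priority %d\n", ch.ID, ch.Priority)
	}
}
```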
@ -14,12 +16,13 @@ The `MConnection` supports three packet types: Ping, Pong, and Msg.
### Ping and Pong
The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively
The ping and pong messages consist of writing a single byte to the connection; 0x1 and 0x2, respectively.
When we haven't received any messages on an `MConnection` in a time `pingTimeout`, we send a ping message.
When a ping is received on the `MConnection`, a pong is sent in response.
When a ping is received on the `MConnection`, a pong is sent in response only if there are no other messages
to send and the peer has not sent us too many pings.
If a pong is not received in sufficient time, the peer's score should be decremented (TODO).
If a pong or message is not received in sufficient time after a ping, disconnect from the peer.
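A simplified sketch of this keep-alive logic; the timeout values, channel plumbing and callbacks are assumptions for illustration, not the actual MConnection implementation:

```go
package main

import (
	"fmt"
	"time"
)

// keepAlive sends a ping after the connection has been idle for pingTimeout,
// then expects a pong (or any other message) before giving up on the peer.
func keepAlive(received <-chan struct{}, sendPing, disconnect func()) {
	const pingTimeout = 40 * time.Second // illustrative value
	const pongTimeout = 10 * time.Second // illustrative value
	for {
		select {
		case <-received:
			// any incoming message (including a pong, 0x2) resets the idle timer
		case <-time.After(pingTimeout):
			sendPing() // write the single ping byte, 0x1
			select {
			case <-received:
				// a pong or another message arrived in time; keep the connection
			case <-time.After(pongTimeout):
				disconnect() // nothing heard back in time
				return
			}
		}
	}
}

func main() {
	received := make(chan struct{})
	go keepAlive(received,
		func() { fmt.Println("-> ping") },
		func() { fmt.Println("disconnecting peer") })
	received <- struct{}{} // simulate one incoming message
}
```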
### Msg
@ -57,8 +60,8 @@ func (m MConnection) TrySend(chID byte, msg interface{}) bool {}
for the channel with the given id byte `chID`. The message `msg` is serialized
using the `tendermint/wire` submodule's `WriteBinary()` reflection routine.
`TrySend(chID, msg)` is a nonblocking call that returns false if the channel's
queue is full.
`TrySend(chID, msg)` is a nonblocking call that queues the message msg in the channel
with the given id byte chID if the queue is not full; otherwise it returns false immediately.
`Send()` and `TrySend()` are also exposed for each `Peer`.
@ -103,14 +106,3 @@ for _, peer := range switch.Peers().List() {
}
}
```
### PexReactor/AddrBook
A `PEXReactor` reactor implementation is provided to automate peer discovery.
```go
book := p2p.NewAddrBook(addrBookFilePath)
pexReactor := p2p.NewPEXReactor(book)
...
switch := NewSwitch([]Reactor{pexReactor, myReactor, ...})
```


@ -39,8 +39,10 @@ A node checks its address book on startup and attempts to connect to peers from
If it can't connect to any peers after some time, it falls back to the seeds to find more.
Restarted full nodes can run the `blockchain` or `consensus` reactor protocols to sync up
to the latest state of the blockchain, assuming they aren't too far behind.
If they are too far behind, they may need to validate a recent `H` and `HASH` out-of-band again.
to the latest state of the blockchain from wherever they were last.
In a Proof-of-Stake context, if they are sufficiently far behind (greater than the length
of the unbonding period), they will need to validate a recent `H` and `HASH` out-of-band again
so they know they have synced the correct chain.
## Validator Node
@ -54,6 +56,7 @@ Validators that know and trust each other can accept incoming connections from o
## Sentry Node
Sentry nodes are guardians of a validator node and provide it access to the rest of the network.
They should be well connected to other full nodes on the network.
Sentry nodes may be dynamic, but should maintain persistent connections to some evolving random subset of each other.
They should always expect to have direct incoming connections from the validator node and its backup/s.
They do not report the validator node's address in the PEX.


@ -5,15 +5,11 @@ and how other peers are found.
## Peer Identity
Tendermint peers are expected to maintain long-term persistent identities in the form of a private key.
Each peer has an ID defined as `peer.ID == peer.PrivKey.Address()`, where `Address` uses the scheme defined in go-crypto.
Peer ID's must come with some Proof-of-Work; that is,
they must satisfy `peer.PrivKey.Address() < target` for some difficulty target.
This ensures they are not too easy to generate. To begin, let `target == 2^240`.
Tendermint peers are expected to maintain long-term persistent identities in the form of a public key.
Each peer has an ID defined as `peer.ID == peer.PubKey.Address()`, where `Address` uses the scheme defined in go-crypto.
A single peer ID can have multiple IP addresses associated with it.
For simplicity, we only keep track of the latest one.
TODO: define how to deal with this.
When attempting to connect to a peer, we use the PeerURL: `<ID>@<IP>:<PORT>`.
We will attempt to connect to the peer at IP:PORT, and verify,
@ -22,7 +18,7 @@ corresponding to `<ID>`. This prevents man-in-the-middle attacks on the peer lay
Peers can also be connected to without specifying an ID, ie. just `<IP>:<PORT>`.
In this case, the peer must be authenticated out-of-band of Tendermint,
for instance via VPN
for instance via VPN.
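A small sketch of splitting such a PeerURL into its ID and network-address parts; `splitPeerURL` is a hypothetical helper, not the actual p2p code:

```go
package main

import (
	"fmt"
	"strings"
)

// splitPeerURL separates an optional "<ID>@" prefix from the "<IP>:<PORT>"
// part of a peer URL.
func splitPeerURL(u string) (id, addr string) {
	if i := strings.Index(u, "@"); i >= 0 {
		return u[:i], u[i+1:]
	}
	return "", u // no ID given: the peer must be authenticated out-of-band
}

func main() {
	id, addr := splitPeerURL("deadbeef@1.2.3.4:46656")
	fmt.Println(id, addr) // deadbeef 1.2.3.4:46656
}
```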
## Connections
@ -49,8 +45,8 @@ It goes as follows:
- if we had the smaller ephemeral pubkey, use nonce1 for receiving, nonce2 for sending;
else the opposite
- all communications from now on are encrypted using the shared secret and the nonces, where each nonce
- we now have an encrypted channel, but still need to authenticate
increments by 2 every time it is used
- we now have an encrypted channel, but still need to authenticate
- generate a common challenge to sign:
- SHA256 of the sorted (lowest first) and concatenated ephemeral pub keys
- sign the common challenge with our persistent private key
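A sketch of deriving that common challenge, assuming both ephemeral public keys are available as raw byte slices (their exact encoding is an assumption here):

```go
package main

import (
	"bytes"
	"crypto/sha256"
	"fmt"
)

// commonChallenge hashes the two ephemeral public keys, concatenated with the
// lexicographically lowest key first, as described in the steps above.
func commonChallenge(ourEphPub, theirEphPub []byte) [32]byte {
	lo, hi := ourEphPub, theirEphPub
	if bytes.Compare(lo, hi) > 0 {
		lo, hi = hi, lo
	}
	return sha256.Sum256(append(append([]byte{}, lo...), hi...))
}

func main() {
	// Hypothetical 32-byte ephemeral public keys.
	a := bytes.Repeat([]byte{0x01}, 32)
	b := bytes.Repeat([]byte{0x02}, 32)
	fmt.Printf("%x\n", commonChallenge(a, b))
}
```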
@ -76,7 +72,7 @@ an existing peer. If so, we disconnect.
We also check the peer's address and public key against
an optional whitelist which can be managed through the ABCI app -
if the whitelist is enabled and the peer does not qualigy, the connection is
if the whitelist is enabled and the peer does not qualify, the connection is
terminated.
@ -86,14 +82,14 @@ The Tendermint Version Handshake allows the peers to exchange their NodeInfo:
```
type NodeInfo struct {
PubKey crypto.PubKey `json:"pub_key"`
Moniker string `json:"moniker"`
Network string `json:"network"`
RemoteAddr string `json:"remote_addr"`
ListenAddr string `json:"listen_addr"` // accepting in
Version string `json:"version"` // major.minor.revision
Channels []int8 `json:"channels"` // active reactor channels
Other []string `json:"other"` // other application specific data
PubKey crypto.PubKey
Moniker string
Network string
RemoteAddr string
ListenAddr string
Version string
Channels []int8
Other []string
}
```
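A hedged sketch of the kind of compatibility check this handshake enables, comparing the `Network` and the major.minor part of `Version`; the exact rule shown is an assumption for illustration, not Tendermint's actual check:

```go
package main

import (
	"fmt"
	"strings"
)

type NodeInfo struct {
	Network string
	Version string // major.minor.revision
}

// compatible: peers must be on the same network and share the same
// major.minor version; the revision may differ (illustrative rule).
func compatible(a, b NodeInfo) bool {
	if a.Network != b.Network {
		return false
	}
	av := strings.SplitN(a.Version, ".", 3)
	bv := strings.SplitN(b.Version, ".", 3)
	if len(av) < 2 || len(bv) < 2 {
		return false
	}
	return av[0] == bv[0] && av[1] == bv[1]
}

func main() {
	us := NodeInfo{Network: "test-chain-wt7apy", Version: "0.15.0"}
	them := NodeInfo{Network: "test-chain-wt7apy", Version: "0.15.1"}
	fmt.Println(compatible(us, them)) // true
}
```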


@ -37,7 +37,7 @@ func TestProvider(t *testing.T) {
// let's check this is valid somehow
assert.Nil(seed.ValidateBasic(chainID))
cert := lite.NewStatic(chainID, seed.Validators)
cert := lite.NewStaticCertifier(chainID, seed.Validators)
// historical queries now work :)
lower := sh - 5


@ -6,9 +6,9 @@ import (
liteErr "github.com/tendermint/tendermint/lite/errors"
)
var _ Certifier = &Dynamic{}
var _ Certifier = (*DynamicCertifier)(nil)
// Dynamic uses a Static for Certify, but adds an
// DynamicCertifier uses a StaticCertifier for Certify, but adds an
// Update method to allow for a change of validators.
//
// You can pass in a FullCommit with another validator set,
@ -17,46 +17,48 @@ var _ Certifier = &Dynamic{}
// validator set for the next Certify call.
// For security, it will only follow validator set changes
// going forward.
type Dynamic struct {
cert *Static
type DynamicCertifier struct {
cert *StaticCertifier
lastHeight int64
}
// NewDynamic returns a new dynamic certifier.
func NewDynamic(chainID string, vals *types.ValidatorSet, height int64) *Dynamic {
return &Dynamic{
cert: NewStatic(chainID, vals),
func NewDynamicCertifier(chainID string, vals *types.ValidatorSet, height int64) *DynamicCertifier {
return &DynamicCertifier{
cert: NewStaticCertifier(chainID, vals),
lastHeight: height,
}
}
// ChainID returns the chain id of this certifier.
func (c *Dynamic) ChainID() string {
return c.cert.ChainID()
// Implements Certifier.
func (dc *DynamicCertifier) ChainID() string {
return dc.cert.ChainID()
}
// Validators returns the validators of this certifier.
func (c *Dynamic) Validators() *types.ValidatorSet {
return c.cert.vSet
func (dc *DynamicCertifier) Validators() *types.ValidatorSet {
return dc.cert.vSet
}
// Hash returns the hash of this certifier.
func (c *Dynamic) Hash() []byte {
return c.cert.Hash()
func (dc *DynamicCertifier) Hash() []byte {
return dc.cert.Hash()
}
// LastHeight returns the last height of this certifier.
func (c *Dynamic) LastHeight() int64 {
return c.lastHeight
func (dc *DynamicCertifier) LastHeight() int64 {
return dc.lastHeight
}
// Certify will verify whether the commit is valid and will update the height if it is or return an
// error if it is not.
func (c *Dynamic) Certify(check Commit) error {
err := c.cert.Certify(check)
// Implements Certifier.
func (dc *DynamicCertifier) Certify(check Commit) error {
err := dc.cert.Certify(check)
if err == nil {
// update last seen height if input is valid
c.lastHeight = check.Height()
dc.lastHeight = check.Height()
}
return err
}
@ -65,15 +67,15 @@ func (c *Dynamic) Certify(check Commit) error {
// the certifying validator set if safe to do so.
//
// Returns an error if update is impossible (invalid proof or IsTooMuchChangeErr)
func (c *Dynamic) Update(fc FullCommit) error {
func (dc *DynamicCertifier) Update(fc FullCommit) error {
// ignore all checkpoints in the past -> only to the future
h := fc.Height()
if h <= c.lastHeight {
if h <= dc.lastHeight {
return liteErr.ErrPastTime()
}
// first, verify if the input is self-consistent....
err := fc.ValidateBasic(c.ChainID())
err := fc.ValidateBasic(dc.ChainID())
if err != nil {
return err
}
@ -82,14 +84,13 @@ func (c *Dynamic) Update(fc FullCommit) error {
// would be approved by the currently known validator set
// as well as the new set
commit := fc.Commit.Commit
err = c.Validators().VerifyCommitAny(fc.Validators, c.ChainID(),
commit.BlockID, h, commit)
err = dc.Validators().VerifyCommitAny(fc.Validators, dc.ChainID(), commit.BlockID, h, commit)
if err != nil {
return liteErr.ErrTooMuchChange()
}
// looks good, we can update
c.cert = NewStatic(c.ChainID(), fc.Validators)
c.lastHeight = h
dc.cert = NewStaticCertifier(dc.ChainID(), fc.Validators)
dc.lastHeight = h
return nil
}


@ -23,7 +23,7 @@ func TestDynamicCert(t *testing.T) {
vals := keys.ToValidators(20, 10)
// and a certifier based on our known set
chainID := "test-dyno"
cert := lite.NewDynamic(chainID, vals, 0)
cert := lite.NewDynamicCertifier(chainID, vals, 0)
cases := []struct {
keys lite.ValKeys
@ -67,7 +67,7 @@ func TestDynamicUpdate(t *testing.T) {
chainID := "test-dyno-up"
keys := lite.GenValKeys(5)
vals := keys.ToValidators(20, 0)
cert := lite.NewDynamic(chainID, vals, 40)
cert := lite.NewDynamicCertifier(chainID, vals, 40)
// one valid block to give us a sense of time
h := int64(100)


@ -1,155 +0,0 @@
package lite
import (
"github.com/tendermint/tendermint/types"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
// Inquiring wraps a dynamic certifier and implements an auto-update strategy. If a call to Certify
// fails due to a change in validator set, Inquiring will try and find a previous FullCommit which
// it can use to safely update the validator set. It uses a source provider to obtain the needed
// FullCommits. It stores properly validated data on the local system.
type Inquiring struct {
cert *Dynamic
// These are only properly validated data, from local system
trusted Provider
// This is a source of new info, like a node rpc, or other import method
Source Provider
}
// NewInquiring returns a new Inquiring object. It uses the trusted provider to store validated
// data and the source provider to obtain missing FullCommits.
//
// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The source
// provider should be a client.HTTPProvider.
func NewInquiring(chainID string, fc FullCommit, trusted Provider, source Provider) *Inquiring {
// store the data in trusted
// TODO: StoredCommit() can return an error and we need to handle this.
trusted.StoreCommit(fc)
return &Inquiring{
cert: NewDynamic(chainID, fc.Validators, fc.Height()),
trusted: trusted,
Source: source,
}
}
// ChainID returns the chain id.
func (c *Inquiring) ChainID() string {
return c.cert.ChainID()
}
// Validators returns the validator set.
func (c *Inquiring) Validators() *types.ValidatorSet {
return c.cert.cert.vSet
}
// LastHeight returns the last height.
func (c *Inquiring) LastHeight() int64 {
return c.cert.lastHeight
}
// Certify makes sure this checkpoint is valid.
//
// If the validators have changed since the last known time, it looks
// for a path to prove the new validators.
//
// On success, it will store the checkpoint in the store for later viewing
func (c *Inquiring) Certify(commit Commit) error {
err := c.useClosestTrust(commit.Height())
if err != nil {
return err
}
err = c.cert.Certify(commit)
if !liteErr.IsValidatorsChangedErr(err) {
return err
}
err = c.updateToHash(commit.Header.ValidatorsHash)
if err != nil {
return err
}
err = c.cert.Certify(commit)
if err != nil {
return err
}
// store the new checkpoint
return c.trusted.StoreCommit(NewFullCommit(commit, c.Validators()))
}
// Update will verify if this is a valid change and update
// the certifying validator set if safe to do so.
func (c *Inquiring) Update(fc FullCommit) error {
err := c.useClosestTrust(fc.Height())
if err != nil {
return err
}
err = c.cert.Update(fc)
if err == nil {
err = c.trusted.StoreCommit(fc)
}
return err
}
func (c *Inquiring) useClosestTrust(h int64) error {
closest, err := c.trusted.GetByHeight(h)
if err != nil {
return err
}
// if the best seed is not the one we currently use,
// let's just reset the dynamic validator
if closest.Height() != c.LastHeight() {
c.cert = NewDynamic(c.ChainID(), closest.Validators, closest.Height())
}
return nil
}
// updateToHash gets the validator hash we want to update to
// if IsTooMuchChangeErr, we try to find a path by binary search over height
func (c *Inquiring) updateToHash(vhash []byte) error {
// try to get the match, and update
fc, err := c.Source.GetByHash(vhash)
if err != nil {
return err
}
err = c.cert.Update(fc)
// handle IsTooMuchChangeErr by using divide and conquer
if liteErr.IsTooMuchChangeErr(err) {
err = c.updateToHeight(fc.Height())
}
return err
}
// updateToHeight will use divide-and-conquer to find a path to h
func (c *Inquiring) updateToHeight(h int64) error {
// try to update to this height (with checks)
fc, err := c.Source.GetByHeight(h)
if err != nil {
return err
}
start, end := c.LastHeight(), fc.Height()
if end <= start {
return liteErr.ErrNoPathFound()
}
err = c.Update(fc)
// we can handle IsTooMuchChangeErr specially
if !liteErr.IsTooMuchChangeErr(err) {
return err
}
// try to update to mid
mid := (start + end) / 2
err = c.updateToHeight(mid)
if err != nil {
return err
}
// if we made it to mid, we recurse
return c.updateToHeight(h)
}

lite/inquiring_certifier.go (new file)

@ -0,0 +1,163 @@
package lite
import (
"github.com/tendermint/tendermint/types"
liteErr "github.com/tendermint/tendermint/lite/errors"
)
var _ Certifier = (*InquiringCertifier)(nil)
// InquiringCertifier wraps a dynamic certifier and implements an auto-update strategy. If a call
// to Certify fails due to a change in validator set, InquiringCertifier will try and find a
// previous FullCommit which it can use to safely update the validator set. It uses a source
// provider to obtain the needed FullCommits. It stores properly validated data on the local system.
type InquiringCertifier struct {
cert *DynamicCertifier
// These are only properly validated data, from local system
trusted Provider
// This is a source of new info, like a node rpc, or other import method
Source Provider
}
// NewInquiringCertifier returns a new InquiringCertifier. It uses the trusted provider to store
// validated data and the source provider to obtain missing FullCommits.
//
// Example: The trusted provider should be a CacheProvider, MemProvider or files.Provider. The source
// provider should be a client.HTTPProvider.
func NewInquiringCertifier(chainID string, fc FullCommit, trusted Provider,
source Provider) (*InquiringCertifier, error) {
// store the data in trusted
err := trusted.StoreCommit(fc)
if err != nil {
return nil, err
}
return &InquiringCertifier{
cert: NewDynamicCertifier(chainID, fc.Validators, fc.Height()),
trusted: trusted,
Source: source,
}, nil
}
// ChainID returns the chain id.
// Implements Certifier.
func (ic *InquiringCertifier) ChainID() string {
return ic.cert.ChainID()
}
// Validators returns the validator set.
func (ic *InquiringCertifier) Validators() *types.ValidatorSet {
return ic.cert.cert.vSet
}
// LastHeight returns the last height.
func (ic *InquiringCertifier) LastHeight() int64 {
return ic.cert.lastHeight
}
// Certify makes sure this checkpoint is valid.
//
// If the validators have changed since the last known time, it looks
// for a path to prove the new validators.
//
// On success, it will store the checkpoint in the store for later viewing
// Implements Certifier.
func (ic *InquiringCertifier) Certify(commit Commit) error {
err := ic.useClosestTrust(commit.Height())
if err != nil {
return err
}
err = ic.cert.Certify(commit)
if !liteErr.IsValidatorsChangedErr(err) {
return err
}
err = ic.updateToHash(commit.Header.ValidatorsHash)
if err != nil {
return err
}
err = ic.cert.Certify(commit)
if err != nil {
return err
}
// store the new checkpoint
return ic.trusted.StoreCommit(NewFullCommit(commit, ic.Validators()))
}
// Update will verify if this is a valid change and update
// the certifying validator set if safe to do so.
func (ic *InquiringCertifier) Update(fc FullCommit) error {
err := ic.useClosestTrust(fc.Height())
if err != nil {
return err
}
err = ic.cert.Update(fc)
if err == nil {
err = ic.trusted.StoreCommit(fc)
}
return err
}
func (ic *InquiringCertifier) useClosestTrust(h int64) error {
closest, err := ic.trusted.GetByHeight(h)
if err != nil {
return err
}
// if the best seed is not the one we currently use,
// let's just reset the dynamic validator
if closest.Height() != ic.LastHeight() {
ic.cert = NewDynamicCertifier(ic.ChainID(), closest.Validators, closest.Height())
}
return nil
}
// updateToHash gets the validator hash we want to update to
// if IsTooMuchChangeErr, we try to find a path by binary search over height
func (ic *InquiringCertifier) updateToHash(vhash []byte) error {
// try to get the match, and update
fc, err := ic.Source.GetByHash(vhash)
if err != nil {
return err
}
err = ic.cert.Update(fc)
// handle IsTooMuchChangeErr by using divide and conquer
if liteErr.IsTooMuchChangeErr(err) {
err = ic.updateToHeight(fc.Height())
}
return err
}
// updateToHeight will use divide-and-conquer to find a path to h
func (ic *InquiringCertifier) updateToHeight(h int64) error {
// try to update to this height (with checks)
fc, err := ic.Source.GetByHeight(h)
if err != nil {
return err
}
start, end := ic.LastHeight(), fc.Height()
if end <= start {
return liteErr.ErrNoPathFound()
}
err = ic.Update(fc)
// we can handle IsTooMuchChangeErr specially
if !liteErr.IsTooMuchChangeErr(err) {
return err
}
// try to update to mid
mid := (start + end) / 2
err = ic.updateToHeight(mid)
if err != nil {
return err
}
// if we made it to mid, we recurse
return ic.updateToHeight(h)
}


@ -32,18 +32,20 @@ func TestInquirerValidPath(t *testing.T) {
vals := keys.ToValidators(vote, 0)
h := int64(20 + 10*i)
appHash := []byte(fmt.Sprintf("h=%d", h))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, len(keys))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
len(keys))
}
// initialize a certifier with the initial state
cert := lite.NewInquiring(chainID, commits[0], trust, source)
cert, err := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
require.Nil(err)
// this should fail validation....
commit := commits[count-1].Commit
err := cert.Certify(commit)
err = cert.Certify(commit)
require.NotNil(err)
// add a few seed in the middle should be insufficient
// adding a few commits in the middle should be insufficient
for i := 10; i < 13; i++ {
err := source.StoreCommit(commits[i])
require.Nil(err)
@ -81,11 +83,12 @@ func TestInquirerMinimalPath(t *testing.T) {
h := int64(5 + 10*i)
appHash := []byte(fmt.Sprintf("h=%d", h))
resHash := []byte(fmt.Sprintf("res=%d", h))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, len(keys))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
len(keys))
}
// initialize a certifier with the initial state
cert := lite.NewInquiring(chainID, commits[0], trust, source)
cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
// this should fail validation....
commit := commits[count-1].Commit
@ -130,11 +133,12 @@ func TestInquirerVerifyHistorical(t *testing.T) {
h := int64(20 + 10*i)
appHash := []byte(fmt.Sprintf("h=%d", h))
resHash := []byte(fmt.Sprintf("res=%d", h))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0, len(keys))
commits[i] = keys.GenFullCommit(chainID, h, nil, vals, appHash, consHash, resHash, 0,
len(keys))
}
// initialize a certifier with the initial state
cert := lite.NewInquiring(chainID, commits[0], trust, source)
cert, _ := lite.NewInquiringCertifier(chainID, commits[0], trust, source)
// store a few commits as trust
for _, i := range []int{2, 5} {


@ -105,7 +105,7 @@ func BenchmarkCertifyCommitSec100(b *testing.B) {
func benchmarkCertifyCommit(b *testing.B, keys lite.ValKeys) {
chainID := "bench-certify"
vals := keys.ToValidators(20, 10)
cert := lite.NewStatic(chainID, vals)
cert := lite.NewStaticCertifier(chainID, vals)
check := keys.GenCommit(chainID, 123, nil, vals, []byte("foo"), []byte("params"), []byte("res"), 0, len(keys))
for i := 0; i < b.N; i++ {
err := cert.Certify(check)


@ -6,7 +6,7 @@ import (
"github.com/tendermint/tendermint/lite/files"
)
func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.Inquiring, error) {
func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.InquiringCertifier, error) {
trust := lite.NewCacheProvider(
lite.NewMemStoreProvider(),
files.NewProvider(rootDir),
@ -25,6 +25,11 @@ func GetCertifier(chainID, rootDir, nodeAddr string) (*lite.Inquiring, error) {
if err != nil {
return nil, err
}
cert := lite.NewInquiring(chainID, fc, trust, source)
cert, err := lite.NewInquiringCertifier(chainID, fc, trust, source)
if err != nil {
return nil, err
}
return cert, nil
}


@ -18,7 +18,11 @@ const (
// set up the rpc routes to proxy via the given client,
// and start up an http/rpc server on the location given by bind (eg. :1234)
func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error {
c.Start()
err := c.Start()
if err != nil {
return err
}
r := RPCRoutes(c)
// build the handler...
@ -30,7 +34,7 @@ func StartProxy(c rpcclient.Client, listenAddr string, logger log.Logger) error
core.SetLogger(logger)
mux.HandleFunc(wsEndpoint, wm.WebsocketHandler)
_, err := rpc.StartHTTPServer(listenAddr, mux, logger)
_, err = rpc.StartHTTPServer(listenAddr, mux, logger)
return err
}


@ -51,7 +51,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption
// make sure the proof is the proper height
if resp.IsErr() {
err = errors.Errorf("Query error %d: %d", resp.Code)
err = errors.Errorf("Query error for key %d: %d", key, resp.Code)
return nil, nil, err
}
if len(resp.Key) == 0 || len(resp.Proof) == 0 {
@ -79,7 +79,7 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption
if err != nil {
return nil, nil, errors.Wrap(err, "Couldn't verify proof")
}
return &ctypes.ResultABCIQuery{resp}, eproof, nil
return &ctypes.ResultABCIQuery{Response: resp}, eproof, nil
}
// The key wasn't found, construct a proof of non-existence.
@ -93,13 +93,12 @@ func GetWithProofOptions(path string, key []byte, opts rpcclient.ABCIQueryOption
if err != nil {
return nil, nil, errors.Wrap(err, "Couldn't verify proof")
}
return &ctypes.ResultABCIQuery{resp}, aproof, ErrNoData()
return &ctypes.ResultABCIQuery{Response: resp}, aproof, ErrNoData()
}
// GetCertifiedCommit gets the signed header for a given height
// and certifies it. Returns error if unable to get a proven header.
func GetCertifiedCommit(h int64, node rpcclient.Client,
cert lite.Certifier) (empty lite.Commit, err error) {
func GetCertifiedCommit(h int64, node rpcclient.Client, cert lite.Certifier) (lite.Commit, error) {
// FIXME: cannot use cert.GetByHeight for now, as it also requires
// Validators and will fail on querying tendermint for non-current height.
@ -107,14 +106,18 @@ func GetCertifiedCommit(h int64, node rpcclient.Client,
rpcclient.WaitForHeight(node, h, nil)
cresp, err := node.Commit(&h)
if err != nil {
return
return lite.Commit{}, err
}
commit := client.CommitFromResult(cresp)
commit := client.CommitFromResult(cresp)
// validate downloaded checkpoint with our request and trust store.
if commit.Height() != h {
return empty, certerr.ErrHeightMismatch(h, commit.Height())
return lite.Commit{}, certerr.ErrHeightMismatch(h, commit.Height())
}
err = cert.Certify(commit)
if err = cert.Certify(commit); err != nil {
return lite.Commit{}, err
}
return commit, nil
}


@ -58,7 +58,7 @@ func _TestAppProofs(t *testing.T) {
source := certclient.NewProvider(cl)
seed, err := source.GetByHeight(brh - 2)
require.NoError(err, "%+v", err)
cert := lite.NewStatic("my-chain", seed.Validators)
cert := lite.NewStaticCertifier("my-chain", seed.Validators)
client.WaitForHeight(cl, 3, nil)
latest, err := source.LatestCommit()
@ -117,7 +117,7 @@ func _TestTxProofs(t *testing.T) {
source := certclient.NewProvider(cl)
seed, err := source.GetByHeight(brh - 2)
require.NoError(err, "%+v", err)
cert := lite.NewStatic("my-chain", seed.Validators)
cert := lite.NewStaticCertifier("my-chain", seed.Validators)
// First let's make sure a bogus transaction hash returns a valid non-existence proof.
key := types.Tx([]byte("bogus")).Hash()
@ -136,5 +136,4 @@ func _TestTxProofs(t *testing.T) {
commit, err := GetCertifiedCommit(br.Height, cl, cert)
require.Nil(err, "%+v", err)
require.Equal(res.Proof.RootHash, commit.Header.DataHash)
}


@ -15,14 +15,14 @@ var _ rpcclient.Client = Wrapper{}
// provable before passing it along. Allows you to make any rpcclient fully secure.
type Wrapper struct {
rpcclient.Client
cert *lite.Inquiring
cert *lite.InquiringCertifier
}
// SecureClient uses a given certifier to wrap a connection to an untrusted
// host and return a cryptographically secure rpc client.
//
// If it is wrapping an HTTP rpcclient, it will also wrap the websocket interface
func SecureClient(c rpcclient.Client, cert *lite.Inquiring) Wrapper {
func SecureClient(c rpcclient.Client, cert *lite.InquiringCertifier) Wrapper {
wrap := Wrapper{c, cert}
// TODO: no longer possible as no more such interface exposed....
// if we wrap http client, then we can swap out the event switch to filter
@ -34,7 +34,9 @@ func SecureClient(c rpcclient.Client, cert *lite.Inquiring) Wrapper {
}
// ABCIQueryWithOptions exposes all options for the ABCI query and verifies the returned proof
func (w Wrapper) ABCIQueryWithOptions(path string, data data.Bytes, opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
func (w Wrapper) ABCIQueryWithOptions(path string, data data.Bytes,
opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) {
res, _, err := GetWithProofOptions(path, data, opts, w.Client, w.cert)
return res, err
}


@ -10,62 +10,64 @@ import (
liteErr "github.com/tendermint/tendermint/lite/errors"
)
var _ Certifier = &Static{}
var _ Certifier = (*StaticCertifier)(nil)
// Static assumes a static set of validators, set on
// StaticCertifier assumes a static set of validators, set on
// initialization and checks against them.
// The signatures on every header are checked for > 2/3 votes
// against the known validator set upon Certify
//
// Good for testing or really simple chains. Building block
// to support real-world functionality.
type Static struct {
type StaticCertifier struct {
chainID string
vSet *types.ValidatorSet
vhash []byte
}
// NewStatic returns a new certifier with a static validator set.
func NewStatic(chainID string, vals *types.ValidatorSet) *Static {
return &Static{
// NewStaticCertifier returns a new certifier with a static validator set.
func NewStaticCertifier(chainID string, vals *types.ValidatorSet) *StaticCertifier {
return &StaticCertifier{
chainID: chainID,
vSet: vals,
}
}
// ChainID returns the chain id.
func (c *Static) ChainID() string {
return c.chainID
// Implements Certifier.
func (sc *StaticCertifier) ChainID() string {
return sc.chainID
}
// Validators returns the validator set.
func (c *Static) Validators() *types.ValidatorSet {
return c.vSet
func (sc *StaticCertifier) Validators() *types.ValidatorSet {
return sc.vSet
}
// Hash returns the hash of the validator set.
func (c *Static) Hash() []byte {
if len(c.vhash) == 0 {
c.vhash = c.vSet.Hash()
func (sc *StaticCertifier) Hash() []byte {
if len(sc.vhash) == 0 {
sc.vhash = sc.vSet.Hash()
}
return c.vhash
return sc.vhash
}
// Certify makes sure that the commit is valid.
func (c *Static) Certify(commit Commit) error {
// Implements Certifier.
func (sc *StaticCertifier) Certify(commit Commit) error {
// do basic sanity checks
err := commit.ValidateBasic(c.chainID)
err := commit.ValidateBasic(sc.chainID)
if err != nil {
return err
}
// make sure it has the same validator set we have (static means static)
if !bytes.Equal(c.Hash(), commit.Header.ValidatorsHash) {
if !bytes.Equal(sc.Hash(), commit.Header.ValidatorsHash) {
return liteErr.ErrValidatorsChanged()
}
// then make sure we have the proper signatures for this
err = c.vSet.VerifyCommit(c.chainID, commit.Commit.BlockID,
err = sc.vSet.VerifyCommit(sc.chainID, commit.Commit.BlockID,
commit.Header.Height, commit.Commit)
return errors.WithStack(err)
}


@ -21,7 +21,7 @@ func TestStaticCert(t *testing.T) {
vals := keys.ToValidators(20, 10)
// and a certifier based on our known set
chainID := "test-static"
cert := lite.NewStatic(chainID, vals)
cert := lite.NewStaticCertifier(chainID, vals)
cases := []struct {
keys lite.ValKeys


@ -185,9 +185,9 @@ func NewNode(config *cfg.Config,
// Log whether this node is a validator or an observer
if state.Validators.HasAddress(privValidator.GetAddress()) {
consensusLogger.Info("This node is a validator")
consensusLogger.Info("This node is a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
} else {
consensusLogger.Info("This node is not a validator")
consensusLogger.Info("This node is not a validator", "addr", privValidator.GetAddress(), "pubKey", privValidator.GetPubKey())
}
// Make MempoolReactor


@ -33,7 +33,8 @@ type ABCIClient interface {
// reading from abci app
ABCIInfo() (*ctypes.ResultABCIInfo, error)
ABCIQuery(path string, data data.Bytes) (*ctypes.ResultABCIQuery, error)
ABCIQueryWithOptions(path string, data data.Bytes, opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
ABCIQueryWithOptions(path string, data data.Bytes,
opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error)
// writing to abci app
BroadcastTxCommit(tx types.Tx) (*ctypes.ResultBroadcastTxCommit, error)


@ -9,8 +9,33 @@ DIR="$( cd -P "$( dirname "$SOURCE" )/.." && pwd )"
# Change into that dir because we expect that.
cd "$DIR"
# Make sure build tools are available, get VENDORED dependencies and build
make get_tools get_vendor_deps build_xc
# Get the git commit
GIT_COMMIT="$(git rev-parse --short HEAD)"
GIT_IMPORT="github.com/tendermint/tendermint/version"
# Determine the arch/os combos we're building for
XC_ARCH=${XC_ARCH:-"386 amd64 arm"}
XC_OS=${XC_OS:-"solaris darwin freebsd linux windows"}
# Make sure build tools are available.
# TODO: Tools should be "vendored" too.
make get_tools
# Get VENDORED dependencies
make get_vendor_deps
# Build!
# ldflags: -s Omit the symbol table and debug information.
# -w Omit the DWARF symbol table.
echo "==> Building..."
"$(which gox)" \
-os="${XC_OS}" \
-arch="${XC_ARCH}" \
-osarch="!darwin/arm !solaris/amd64 !freebsd/amd64" \
-ldflags "-s -w -X ${GIT_IMPORT}.GitCommit=${GIT_COMMIT}" \
-output "build/pkg/{{.OS}}_{{.Arch}}/tendermint" \
-tags="${BUILD_TAGS}" \
github.com/tendermint/tendermint/cmd/tendermint
# Zip all the files.
echo "==> Packaging..."


@ -17,10 +17,10 @@ import (
// TODO: type ?
const (
stepNone = 0 // Used to distinguish the initial state
stepPropose = 1
stepPrevote = 2
stepPrecommit = 3
stepNone int8 = 0 // Used to distinguish the initial state
stepPropose int8 = 1
stepPrevote int8 = 2
stepPrecommit int8 = 3
)
func voteToStep(vote *Vote) int8 {
@ -199,12 +199,9 @@ func (privVal *PrivValidatorFS) Reset() {
func (privVal *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
signature, err := privVal.signBytesHRS(vote.Height, vote.Round, voteToStep(vote),
SignBytes(chainID, vote), checkVotesOnlyDifferByTimestamp)
if err != nil {
if err := privVal.signVote(chainID, vote); err != nil {
return errors.New(cmn.Fmt("Error signing vote: %v", err))
}
vote.Signature = signature
return nil
}
@ -213,12 +210,9 @@ func (privVal *PrivValidatorFS) SignVote(chainID string, vote *Vote) error {
func (privVal *PrivValidatorFS) SignProposal(chainID string, proposal *Proposal) error {
privVal.mtx.Lock()
defer privVal.mtx.Unlock()
signature, err := privVal.signBytesHRS(proposal.Height, proposal.Round, stepPropose,
SignBytes(chainID, proposal), checkProposalsOnlyDifferByTimestamp)
if err != nil {
if err := privVal.signProposal(chainID, proposal); err != nil {
return fmt.Errorf("Error signing proposal: %v", err)
}
proposal.Signature = signature
return nil
}
@ -250,36 +244,82 @@ func (privVal *PrivValidatorFS) checkHRS(height int64, round int, step int8) (bo
return false, nil
}
// signBytesHRS signs the given signBytes if the height/round/step (HRS) are
// greater than the latest state. If the HRS are equal and the only thing changed is the timestamp,
// it returns the privValidator.LastSignature. Else it returns an error.
func (privVal *PrivValidatorFS) signBytesHRS(height int64, round int, step int8,
signBytes []byte, checkFn checkOnlyDifferByTimestamp) (crypto.Signature, error) {
sig := crypto.Signature{}
// signVote checks if the vote is good to sign and sets the vote signature.
// It may need to set the timestamp as well if the vote is otherwise the same as
// a previously signed vote (ie. we crashed after signing but before the vote hit the WAL).
func (privVal *PrivValidatorFS) signVote(chainID string, vote *Vote) error {
height, round, step := vote.Height, vote.Round, voteToStep(vote)
signBytes := SignBytes(chainID, vote)
sameHRS, err := privVal.checkHRS(height, round, step)
if err != nil {
return sig, err
return err
}
// We might crash before writing to the wal,
// causing us to try to re-sign for the same HRS
// causing us to try to re-sign for the same HRS.
// If signbytes are the same, use the last signature.
// If they only differ by timestamp, use last timestamp and signature
// Otherwise, return error
if sameHRS {
// if they're the same or only differ by timestamp,
// return the LastSignature. Otherwise, error
if bytes.Equal(signBytes, privVal.LastSignBytes) ||
checkFn(privVal.LastSignBytes, signBytes) {
return privVal.LastSignature, nil
if bytes.Equal(signBytes, privVal.LastSignBytes) {
vote.Signature = privVal.LastSignature
} else if timestamp, ok := checkVotesOnlyDifferByTimestamp(privVal.LastSignBytes, signBytes); ok {
vote.Timestamp = timestamp
vote.Signature = privVal.LastSignature
} else {
err = fmt.Errorf("Conflicting data")
}
return sig, fmt.Errorf("Conflicting data")
return err
}
sig, err = privVal.Sign(signBytes)
// It passed the checks. Sign the vote
sig, err := privVal.Sign(signBytes)
if err != nil {
return sig, err
return err
}
privVal.saveSigned(height, round, step, signBytes, sig)
return sig, nil
vote.Signature = sig
return nil
}
// signProposal checks if the proposal is good to sign and sets the proposal signature.
// It may need to set the timestamp as well if the proposal is otherwise the same as
// a previously signed proposal (ie. we crashed after signing but before the proposal hit the WAL).
func (privVal *PrivValidatorFS) signProposal(chainID string, proposal *Proposal) error {
height, round, step := proposal.Height, proposal.Round, stepPropose
signBytes := SignBytes(chainID, proposal)
sameHRS, err := privVal.checkHRS(height, round, step)
if err != nil {
return err
}
// We might crash before writing to the wal,
// causing us to try to re-sign for the same HRS.
// If signbytes are the same, use the last signature.
// If they only differ by timestamp, use last timestamp and signature
// Otherwise, return error
if sameHRS {
if bytes.Equal(signBytes, privVal.LastSignBytes) {
proposal.Signature = privVal.LastSignature
} else if timestamp, ok := checkProposalsOnlyDifferByTimestamp(privVal.LastSignBytes, signBytes); ok {
proposal.Timestamp = timestamp
proposal.Signature = privVal.LastSignature
} else {
err = fmt.Errorf("Conflicting data")
}
return err
}
// It passed the checks. Sign the proposal
sig, err := privVal.Sign(signBytes)
if err != nil {
return err
}
privVal.saveSigned(height, round, step, signBytes, sig)
proposal.Signature = sig
return nil
}
// Persist height/round/step and signature
@ -329,10 +369,9 @@ func (pvs PrivValidatorsByAddress) Swap(i, j int) {
//-------------------------------------
type checkOnlyDifferByTimestamp func([]byte, []byte) bool
// returns true if the only difference in the votes is their timestamp
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
// returns the timestamp from the lastSignBytes.
// returns true if the only difference in the votes is their timestamp.
func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
var lastVote, newVote CanonicalJSONOnceVote
if err := json.Unmarshal(lastSignBytes, &lastVote); err != nil {
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into vote: %v", err))
@ -341,6 +380,11 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
panic(fmt.Sprintf("signBytes cannot be unmarshalled into vote: %v", err))
}
lastTime, err := time.Parse(timeFormat, lastVote.Vote.Timestamp)
if err != nil {
panic(err)
}
// set the times to the same value and check equality
now := CanonicalTime(time.Now())
lastVote.Vote.Timestamp = now
@ -348,11 +392,12 @@ func checkVotesOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
lastVoteBytes, _ := json.Marshal(lastVote)
newVoteBytes, _ := json.Marshal(newVote)
return bytes.Equal(newVoteBytes, lastVoteBytes)
return lastTime, bytes.Equal(newVoteBytes, lastVoteBytes)
}
// returns the timestamp from the lastSignBytes.
// returns true if the only difference in the proposals is their timestamp
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) (time.Time, bool) {
var lastProposal, newProposal CanonicalJSONOnceProposal
if err := json.Unmarshal(lastSignBytes, &lastProposal); err != nil {
panic(fmt.Sprintf("LastSignBytes cannot be unmarshalled into proposal: %v", err))
@ -361,6 +406,11 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
panic(fmt.Sprintf("signBytes cannot be unmarshalled into proposal: %v", err))
}
lastTime, err := time.Parse(timeFormat, lastProposal.Proposal.Timestamp)
if err != nil {
panic(err)
}
// set the times to the same value and check equality
now := CanonicalTime(time.Now())
lastProposal.Proposal.Timestamp = now
@ -368,5 +418,5 @@ func checkProposalsOnlyDifferByTimestamp(lastSignBytes, newSignBytes []byte) bool {
lastProposalBytes, _ := json.Marshal(lastProposal)
newProposalBytes, _ := json.Marshal(newProposal)
return bytes.Equal(newProposalBytes, lastProposalBytes)
return lastTime, bytes.Equal(newProposalBytes, lastProposalBytes)
}
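The two helpers above rely on a simple trick: overwrite both timestamps with one common value and compare the canonical encodings byte for byte; if nothing else differs, only the timestamp changed. A standalone toy illustration of the same idea on a plain JSON struct (not the CanonicalJSONOnce* types used here):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"time"
)

type msg struct {
	Height    int64  `json:"height"`
	Round     int    `json:"round"`
	Timestamp string `json:"timestamp"`
}

// onlyDifferByTimestamp reports whether a and b are identical once their
// timestamps are overwritten with a common value.
func onlyDifferByTimestamp(a, b msg) bool {
	now := time.Now().UTC().Format(time.RFC3339)
	a.Timestamp, b.Timestamp = now, now
	ab, _ := json.Marshal(a)
	bb, _ := json.Marshal(b)
	return bytes.Equal(ab, bb)
}

func main() {
	m1 := msg{Height: 10, Round: 1, Timestamp: "2018-01-10T14:21:24Z"}
	m2 := m1
	m2.Timestamp = "2018-01-10T14:21:25Z"
	fmt.Println(onlyDifferByTimestamp(m1, m2)) // true: only the timestamp differs

	m2.Round = 2
	fmt.Println(onlyDifferByTimestamp(m1, m2)) // false: the round differs too
}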

View File

@ -173,6 +173,58 @@ func TestSignProposal(t *testing.T) {
assert.Equal(sig, proposal.Signature)
}
func TestDifferByTimestamp(t *testing.T) {
_, tempFilePath := cmn.Tempfile("priv_validator_")
privVal := GenPrivValidatorFS(tempFilePath)
block1 := PartSetHeader{5, []byte{1, 2, 3}}
height, round := int64(10), 1
chainID := "mychainid"
// test proposal
{
proposal := newProposal(height, round, block1)
err := privVal.SignProposal(chainID, proposal)
assert.NoError(t, err, "expected no error signing proposal")
signBytes := SignBytes(chainID, proposal)
sig := proposal.Signature
timeStamp := clipToMS(proposal.Timestamp)
// manipulate the timestamp; it should get changed back
proposal.Timestamp = proposal.Timestamp.Add(time.Millisecond)
proposal.Signature = crypto.Signature{}
err = privVal.SignProposal("mychainid", proposal)
assert.NoError(t, err, "expected no error on signing same proposal")
assert.Equal(t, timeStamp, proposal.Timestamp)
assert.Equal(t, signBytes, SignBytes(chainID, proposal))
assert.Equal(t, sig, proposal.Signature)
}
// test vote
{
voteType := VoteTypePrevote
blockID := BlockID{[]byte{1, 2, 3}, PartSetHeader{}}
vote := newVote(privVal.Address, 0, height, round, voteType, blockID)
err := privVal.SignVote("mychainid", vote)
assert.NoError(t, err, "expected no error signing vote")
signBytes := SignBytes(chainID, vote)
sig := vote.Signature
timeStamp := clipToMS(vote.Timestamp)
// manipulate the timestamp; it should get changed back
vote.Timestamp = vote.Timestamp.Add(time.Millisecond)
vote.Signature = crypto.Signature{}
err = privVal.SignVote("mychainid", vote)
assert.NoError(t, err, "expected no error on signing same vote")
assert.Equal(t, timeStamp, vote.Timestamp)
assert.Equal(t, signBytes, SignBytes(chainID, vote))
assert.Equal(t, sig, vote.Signature)
}
}
func newVote(addr data.Bytes, idx int, height int64, round int, typ byte, blockID BlockID) *Vote {
return &Vote{
ValidatorAddress: addr,
@ -190,5 +242,13 @@ func newProposal(height int64, round int, partsHeader PartSetHeader) *Proposal {
Height: height,
Round: round,
BlockPartsHeader: partsHeader,
Timestamp: time.Now().UTC(),
}
}
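// clipToMS truncates t to millisecond precision, matching the precision kept by the
// canonical timestamp encoding, so it can be compared against the timestamp restored
// from the last sign bytes.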
func clipToMS(t time.Time) time.Time {
nano := t.UnixNano()
million := int64(1000000)
nano = (nano / million) * million
return time.Unix(0, nano).UTC()
}

View File

@ -3,6 +3,7 @@ package types
import (
"bytes"
"fmt"
"math"
"sort"
"strings"
@ -48,12 +49,12 @@ func NewValidatorSet(vals []*Validator) *ValidatorSet {
}
// incrementAccum and update the proposer
// TODO: mind the overflow when times and the votingPower shares are too large.
func (valSet *ValidatorSet) IncrementAccum(times int) {
// Add VotingPower * times to each validator and order into heap.
validatorsHeap := cmn.NewHeap()
for _, val := range valSet.Validators {
val.Accum += val.VotingPower * int64(times) // TODO: mind overflow
// check for overflow of both the multiplication and the sum
val.Accum = safeAddClip(val.Accum, safeMulClip(val.VotingPower, int64(times)))
validatorsHeap.Push(val, accumComparable{val})
}
@ -63,7 +64,9 @@ func (valSet *ValidatorSet) IncrementAccum(times int) {
if i == times-1 {
valSet.Proposer = mostest
}
mostest.Accum -= int64(valSet.TotalVotingPower())
// mind underflow
mostest.Accum = safeSubClip(mostest.Accum, valSet.TotalVotingPower())
validatorsHeap.Update(mostest, accumComparable{mostest})
}
}
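The reason for clipping rather than plain arithmetic: Go's int64 silently wraps on overflow, so an accumulator near math.MaxInt64 would jump to a large negative value and could upset proposer selection. A standalone illustration of wrap-around versus saturation (not part of this diff):

package main

import (
	"fmt"
	"math"
)

func main() {
	accum := int64(math.MaxInt64)

	// Unchecked addition wraps around to a large negative number.
	wrapped := accum + 10
	fmt.Println(wrapped) // -9223372036854775799

	// Saturating ("clipped") addition pins the result at the extreme instead.
	clipped := accum
	if clipped > math.MaxInt64-10 {
		clipped = math.MaxInt64
	} else {
		clipped += 10
	}
	fmt.Println(clipped) // 9223372036854775807
}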
@ -117,7 +120,8 @@ func (valSet *ValidatorSet) Size() int {
func (valSet *ValidatorSet) TotalVotingPower() int64 {
if valSet.totalVotingPower == 0 {
for _, val := range valSet.Validators {
valSet.totalVotingPower += val.VotingPower
// mind overflow
valSet.totalVotingPower = safeAddClip(valSet.totalVotingPower, val.VotingPower)
}
}
return valSet.totalVotingPower
@ -425,3 +429,77 @@ func RandValidatorSet(numValidators int, votingPower int64) (*ValidatorSet, []*P
sort.Sort(PrivValidatorsByAddress(privValidators))
return valSet, privValidators
}
///////////////////////////////////////////////////////////////////////////////
// Safe multiplication and addition/subtraction
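// safeMul returns a*b and a bool that is true if the multiplication overflowed int64
// (the first value is not meaningful in that case).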
func safeMul(a, b int64) (int64, bool) {
if a == 0 || b == 0 {
return 0, false
}
if a == 1 {
return b, false
}
if b == 1 {
return a, false
}
if a == math.MinInt64 || b == math.MinInt64 {
return -1, true
}
c := a * b
return c, c/b != a
}
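// safeAdd returns a+b and a bool that is true if the addition overflowed int64.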
func safeAdd(a, b int64) (int64, bool) {
if b > 0 && a > math.MaxInt64-b {
return -1, true
} else if b < 0 && a < math.MinInt64-b {
return -1, true
}
return a + b, false
}
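// safeSub returns a-b and a bool that is true if the subtraction overflowed int64.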
func safeSub(a, b int64) (int64, bool) {
if b > 0 && a < math.MinInt64+b {
return -1, true
} else if b < 0 && a > math.MaxInt64+b {
return -1, true
}
return a - b, false
}
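// safeMulClip multiplies a and b, saturating at math.MinInt64 or math.MaxInt64 on overflow.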
func safeMulClip(a, b int64) int64 {
c, overflow := safeMul(a, b)
if overflow {
if (a < 0 || b < 0) && !(a < 0 && b < 0) {
return math.MinInt64
} else {
return math.MaxInt64
}
}
return c
}
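// safeAddClip adds a and b, saturating at math.MinInt64 or math.MaxInt64 on overflow.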
func safeAddClip(a, b int64) int64 {
c, overflow := safeAdd(a, b)
if overflow {
if b < 0 {
return math.MinInt64
} else {
return math.MaxInt64
}
}
return c
}
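// safeSubClip subtracts b from a, saturating at math.MinInt64 or math.MaxInt64 on overflow.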
func safeSubClip(a, b int64) int64 {
c, overflow := safeSub(a, b)
if overflow {
if b > 0 {
return math.MinInt64
} else {
return math.MaxInt64
}
}
return c
}
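For a quick sense of the clipping direction: safeAddClip(math.MaxInt64, 1) saturates to math.MaxInt64 rather than wrapping negative, safeSubClip(math.MinInt64, 1) saturates to math.MinInt64, and safeMulClip clips to math.MinInt64 when exactly one operand is negative and to math.MaxInt64 otherwise (e.g. safeMulClip(math.MinInt64, math.MinInt64) == math.MaxInt64). The tests in the next file exercise exactly these cases.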

View File

@ -2,11 +2,14 @@ package types
import (
"bytes"
"math"
"strings"
"testing"
"testing/quick"
"github.com/tendermint/go-crypto"
"github.com/tendermint/go-wire"
"github.com/stretchr/testify/assert"
crypto "github.com/tendermint/go-crypto"
wire "github.com/tendermint/go-wire"
cmn "github.com/tendermint/tmlibs/common"
)
@ -190,6 +193,85 @@ func TestProposerSelection3(t *testing.T) {
}
}
func TestValidatorSetTotalVotingPowerOverflows(t *testing.T) {
vset := NewValidatorSet([]*Validator{
{Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0},
{Address: []byte("b"), VotingPower: math.MaxInt64, Accum: 0},
{Address: []byte("c"), VotingPower: math.MaxInt64, Accum: 0},
})
assert.EqualValues(t, math.MaxInt64, vset.TotalVotingPower())
}
func TestValidatorSetIncrementAccumOverflows(t *testing.T) {
// NewValidatorSet calls IncrementAccum(1)
vset := NewValidatorSet([]*Validator{
// too much voting power
0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: 0},
// too big accum
1: {Address: []byte("b"), VotingPower: 10, Accum: math.MaxInt64},
// almost too big accum
2: {Address: []byte("c"), VotingPower: 10, Accum: math.MaxInt64 - 5},
})
assert.Equal(t, int64(0), vset.Validators[0].Accum, "0") // because we decrement the val with the most voting power
assert.EqualValues(t, math.MaxInt64, vset.Validators[1].Accum, "1")
assert.EqualValues(t, math.MaxInt64, vset.Validators[2].Accum, "2")
}
func TestValidatorSetIncrementAccumUnderflows(t *testing.T) {
// NewValidatorSet calls IncrementAccum(1)
vset := NewValidatorSet([]*Validator{
0: {Address: []byte("a"), VotingPower: math.MaxInt64, Accum: math.MinInt64},
1: {Address: []byte("b"), VotingPower: 1, Accum: math.MinInt64},
})
vset.IncrementAccum(5)
assert.EqualValues(t, math.MinInt64, vset.Validators[0].Accum, "0")
assert.EqualValues(t, math.MinInt64, vset.Validators[1].Accum, "1")
}
func TestSafeMul(t *testing.T) {
f := func(a, b int64) bool {
c, overflow := safeMul(a, b)
return overflow || (!overflow && c == a*b)
}
if err := quick.Check(f, nil); err != nil {
t.Error(err)
}
}
func TestSafeAdd(t *testing.T) {
f := func(a, b int64) bool {
c, overflow := safeAdd(a, b)
return overflow || (!overflow && c == a+b)
}
if err := quick.Check(f, nil); err != nil {
t.Error(err)
}
}
func TestSafeMulClip(t *testing.T) {
assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MinInt64, math.MinInt64))
assert.EqualValues(t, math.MinInt64, safeMulClip(math.MaxInt64, math.MinInt64))
assert.EqualValues(t, math.MinInt64, safeMulClip(math.MinInt64, math.MaxInt64))
assert.EqualValues(t, math.MaxInt64, safeMulClip(math.MaxInt64, 2))
}
func TestSafeAddClip(t *testing.T) {
assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, 10))
assert.EqualValues(t, math.MaxInt64, safeAddClip(math.MaxInt64, math.MaxInt64))
assert.EqualValues(t, math.MinInt64, safeAddClip(math.MinInt64, -10))
}
func TestSafeSubClip(t *testing.T) {
assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, 10))
assert.EqualValues(t, 0, safeSubClip(math.MinInt64, math.MinInt64))
assert.EqualValues(t, math.MinInt64, safeSubClip(math.MinInt64, math.MaxInt64))
assert.EqualValues(t, math.MaxInt64, safeSubClip(math.MaxInt64, -10))
}
func BenchmarkValidatorSetCopy(b *testing.B) {
b.StopTimer()
vset := NewValidatorSet([]*Validator{})